diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt index 4f6a82cef1d1..cbe35b3de9e9 100644 --- a/Documentation/devicetree/bindings/arm/omap/omap.txt +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt @@ -23,6 +23,7 @@ Optional properties: during suspend. - ti,no-reset-on-init: When present, the module should not be reset at init - ti,no-idle-on-init: When present, the module should not be idled at init +- ti,no-idle: When present, the module is never allowed to idle. Example: diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt index c59bd9bc41ef..4176ab076f1c 100644 --- a/Documentation/virtual/kvm/mmu.txt +++ b/Documentation/virtual/kvm/mmu.txt @@ -352,7 +352,8 @@ In the first case there are two additional complications: - if CR4.SMEP is enabled: since we've turned the page into a kernel page, the kernel may now execute it. We handle this by also setting spte.nx. If we get a user fetch or read fault, we'll change spte.u=1 and - spte.nx=gpte.nx back. + spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when + shadow paging is in use. - if CR4.SMAP is disabled: since the page has been changed to a kernel page, it can not be reused when CR4.SMAP is enabled. We set CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note, diff --git a/Makefile b/Makefile index 39be1bbd373a..79fab0d55218 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 1 -SUBLEVEL = 20 +SUBLEVEL = 21 EXTRAVERSION = NAME = Series 4800 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index dfcc0dd637e5..bc04b754fe36 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1411,6 +1411,16 @@ 0x48485200 0x2E00>; #address-cells = <1>; #size-cells = <1>; + + /* + * Do not allow gating of the cpsw clock as a workaround + * for errata i877. Keeping the internal clock disabled + * causes the device switching characteristics + * to degrade over time and eventually fail to meet + * the data manual delay time/skew specs. + */ + ti,no-idle; + /* * rx_thresh_pend * rx_pend diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 5286e7773ed4..9185bb958503 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -876,6 +876,36 @@ static int _init_opt_clks(struct omap_hwmod *oh) return ret; } +static void _enable_optional_clocks(struct omap_hwmod *oh) +{ + struct omap_hwmod_opt_clk *oc; + int i; + + pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); + + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) + if (oc->_clk) { + pr_debug("omap_hwmod: enable %s:%s\n", oc->role, + __clk_get_name(oc->_clk)); + clk_enable(oc->_clk); + } +} + +static void _disable_optional_clocks(struct omap_hwmod *oh) +{ + struct omap_hwmod_opt_clk *oc; + int i; + + pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); + + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) + if (oc->_clk) { + pr_debug("omap_hwmod: disable %s:%s\n", oc->role, + __clk_get_name(oc->_clk)); + clk_disable(oc->_clk); + } +} + /** * _enable_clocks - enable hwmod main clock and interface clocks * @oh: struct omap_hwmod * @@ -903,6 +933,9 @@ static int _enable_clocks(struct omap_hwmod *oh) clk_enable(os->_clk); } + if (oh->flags & HWMOD_OPT_CLKS_NEEDED) + _enable_optional_clocks(oh); + /* The opt clocks are controlled by the device driver.
*/ return 0; @@ -934,41 +967,14 @@ static int _disable_clocks(struct omap_hwmod *oh) clk_disable(os->_clk); } + if (oh->flags & HWMOD_OPT_CLKS_NEEDED) + _disable_optional_clocks(oh); + /* The opt clocks are controlled by the device driver. */ return 0; } -static void _enable_optional_clocks(struct omap_hwmod *oh) -{ - struct omap_hwmod_opt_clk *oc; - int i; - - pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name); - - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) - if (oc->_clk) { - pr_debug("omap_hwmod: enable %s:%s\n", oc->role, - __clk_get_name(oc->_clk)); - clk_enable(oc->_clk); - } -} - -static void _disable_optional_clocks(struct omap_hwmod *oh) -{ - struct omap_hwmod_opt_clk *oc; - int i; - - pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name); - - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) - if (oc->_clk) { - pr_debug("omap_hwmod: disable %s:%s\n", oc->role, - __clk_get_name(oc->_clk)); - clk_disable(oc->_clk); - } -} - /** * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 * @oh: struct omap_hwmod * @@ -2180,6 +2186,11 @@ static int _enable(struct omap_hwmod *oh) */ static int _idle(struct omap_hwmod *oh) { + if (oh->flags & HWMOD_NO_IDLE) { + oh->_int_flags |= _HWMOD_SKIP_ENABLE; + return 0; + } + pr_debug("omap_hwmod: %s: idling\n", oh->name); if (oh->_state != _HWMOD_STATE_ENABLED) { @@ -2484,6 +2495,8 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->flags |= HWMOD_INIT_NO_RESET; if (of_find_property(np, "ti,no-idle-on-init", NULL)) oh->flags |= HWMOD_INIT_NO_IDLE; + if (of_find_property(np, "ti,no-idle", NULL)) + oh->flags |= HWMOD_NO_IDLE; } oh->_state = _HWMOD_STATE_INITIALIZED; @@ -2610,7 +2623,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh) * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data - * it should be set by the core code as a runtime flag during startup */ - if ((oh->flags & HWMOD_INIT_NO_IDLE) && + if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) && (postsetup_state == _HWMOD_STATE_IDLE)) { oh->_int_flags |= _HWMOD_SKIP_ENABLE; postsetup_state = _HWMOD_STATE_ENABLED; diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9611c91d9b82..ec289c5f099a 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -517,6 +517,10 @@ struct omap_hwmod_omap4_prcm { * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up * events by calling _reconfigure_io_chain() when a device is enabled * or idled. + * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to + * operate and they need to be handled at the same time as the main_clk. + * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain + * IPs like CPSW on DRA7, where clocks to this module cannot be disabled. 
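+ *     Unlike HWMOD_INIT_NO_IDLE, which only skips the one-time idling at + *     init, a module tagged HWMOD_NO_IDLE is never idled at runtime: + *     _idle() bails out early for it and marks it with _HWMOD_SKIP_ENABLE + *     (see the omap_hwmod.c hunk above).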
*/ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -532,6 +536,8 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_FORCE_MSTANDBY (1 << 11) #define HWMOD_SWSUP_SIDLE_ACT (1 << 12) #define HWMOD_RECONFIG_IO_CHAIN (1 << 13) +#define HWMOD_OPT_CLKS_NEEDED (1 << 14) +#define HWMOD_NO_IDLE (1 << 15) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index b056369fd47d..0d1d675f2cce 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -184,20 +184,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs) /* EL1 Single Step Handler hooks */ static LIST_HEAD(step_hook); -static DEFINE_RWLOCK(step_hook_lock); +static DEFINE_SPINLOCK(step_hook_lock); void register_step_hook(struct step_hook *hook) { - write_lock(&step_hook_lock); - list_add(&hook->node, &step_hook); - write_unlock(&step_hook_lock); + spin_lock(&step_hook_lock); + list_add_rcu(&hook->node, &step_hook); + spin_unlock(&step_hook_lock); } void unregister_step_hook(struct step_hook *hook) { - write_lock(&step_hook_lock); - list_del(&hook->node); - write_unlock(&step_hook_lock); + spin_lock(&step_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&step_hook_lock); + synchronize_rcu(); } /* @@ -211,15 +212,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr) struct step_hook *hook; int retval = DBG_HOOK_ERROR; - read_lock(&step_hook_lock); + rcu_read_lock(); - list_for_each_entry(hook, &step_hook, node) { + list_for_each_entry_rcu(hook, &step_hook, node) { retval = hook->fn(regs, esr); if (retval == DBG_HOOK_HANDLED) break; } - read_unlock(&step_hook_lock); + rcu_read_unlock(); return retval; } @@ -271,20 +272,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, * Use reader/writer locks instead of plain spinlock. */ static LIST_HEAD(break_hook); -static DEFINE_RWLOCK(break_hook_lock); +static DEFINE_SPINLOCK(break_hook_lock); void register_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_add(&hook->node, &break_hook); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_add_rcu(&hook->node, &break_hook); + spin_unlock(&break_hook_lock); } void unregister_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_del(&hook->node); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&break_hook_lock); + synchronize_rcu(); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) @@ -292,11 +294,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) struct break_hook *hook; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; - read_lock(&break_hook_lock); - list_for_each_entry(hook, &break_hook, node) + rcu_read_lock(); + list_for_each_entry_rcu(hook, &break_hook, node) if ((esr & hook->esr_mask) == hook->esr_val) fn = hook->fn; - read_unlock(&break_hook_lock); + rcu_read_unlock(); return fn ? 
fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a3b1ffe50aa0..c99e8a32bea4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2103,11 +2103,11 @@ config CPU_R4K_CACHE_TLB config MIPS_MT_SMP bool "MIPS MT SMP support (1 TC on each available VPE)" - depends on SYS_SUPPORTS_MULTITHREADING + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select SYNC_R4K - select MIPS_GIC_IPI + select MIPS_GIC_IPI if MIPS_GIC select MIPS_MT select SMP select SMP_UP @@ -2204,8 +2204,8 @@ config MIPS_VPE_APSP_API_MT config MIPS_CMP bool "MIPS CMP framework support (DEPRECATED)" - depends on SYS_SUPPORTS_MIPS_CMP - select MIPS_GIC_IPI + depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6 + select MIPS_GIC_IPI if MIPS_GIC select SMP select SYNC_R4K select SYS_SUPPORTS_SMP @@ -2221,11 +2221,11 @@ config MIPS_CMP config MIPS_CPS bool "MIPS Coherent Processing System support" - depends on SYS_SUPPORTS_MIPS_CPS && !64BIT + depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6 select MIPS_CM select MIPS_CPC select MIPS_CPS_PM if HOTPLUG_CPU - select MIPS_GIC_IPI + select MIPS_GIC_IPI if MIPS_GIC select SMP select SYNC_R4K if (CEVT_R4K || CSRC_R4K) select SYS_SUPPORTS_HOTPLUG_CPU @@ -2244,6 +2244,7 @@ config MIPS_CPS_PM bool config MIPS_GIC_IPI + depends on MIPS_GIC bool config MIPS_CM diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index f8338e6d3dd7..a34e43eec658 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1273,6 +1273,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) std r6, VCPU_ACOP(r9) stw r7, VCPU_GUEST_PID(r9) std r8, VCPU_WORT(r9) + /* + * Restore various registers to 0, where non-zero values + * set by the guest could disrupt the host. 
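+	 * (For instance, CIABR and DAWRX hold hardware breakpoint and + 	 * watchpoint state that must not stay armed with guest values + 	 * once the host is running again.)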
+ */ + li r0, 0 + mtspr SPRN_IAMR, r0 + mtspr SPRN_CIABR, r0 + mtspr SPRN_DAWRX, r0 + mtspr SPRN_TCSCR, r0 + mtspr SPRN_WORT, r0 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ + li r0, 1 + sldi r0, r0, 31 + mtspr SPRN_MMCRS, r0 8: /* Save and reset AMR and UAMOR before turning on the MMU */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index fb1b93ea3e3f..e485817f7b1a 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -15,17 +15,25 @@ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + spin_lock_init(&mm->context.list_lock); + INIT_LIST_HEAD(&mm->context.pgtable_list); + INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.attach_count, 0); mm->context.flush_mm = 0; - mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; - mm->context.asce_bits |= _ASCE_TYPE_REGION3; #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste; mm->context.has_pgste = 0; mm->context.use_skey = 0; #endif - mm->context.asce_limit = STACK_TOP_MAX; + if (mm->context.asce_limit == 0) { + /* context created by exec, set asce limit to 4TB */ + mm->context.asce_bits = _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | _ASCE_TYPE_REGION3; + mm->context.asce_limit = STACK_TOP_MAX; + } else if (mm->context.asce_limit == (1UL << 31)) { + mm_inc_nr_pmds(mm); + } crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; } @@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev, static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { - if (oldmm->context.asce_limit < mm->context.asce_limit) - crst_table_downgrade(mm, oldmm->context.asce_limit); } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 7b7858f158b4..d7cc79fb6191 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - spin_lock_init(&mm->context.list_lock); - INIT_LIST_HEAD(&mm->context.pgtable_list); - INIT_LIST_HEAD(&mm->context.gmap_list); - return (pgd_t *) crst_table_alloc(mm); + unsigned long *table = crst_table_alloc(mm); + + if (!table) + return NULL; + if (mm->context.asce_limit == (1UL << 31)) { + /* Forking a compat process with 2 page table levels */ + if (!pgtable_pmd_page_ctor(virt_to_page(table))) { + crst_table_free(mm, table); + return NULL; + } + } + return (pgd_t *) table; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + if (mm->context.asce_limit == (1UL << 31)) + pgtable_pmd_page_dtor(virt_to_page(pgd)); + crst_table_free(mm, (unsigned long *) pgd); } -#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 945f9e13f1aa..917148620f49 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1674,6 +1674,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, return; } break; + case MSR_IA32_PEBS_ENABLE: + /* PEBS needs a quiescent period after being disabled (to write + * a record). Disabling PEBS through VMX MSR swapping doesn't + * provide that period, so a CPU could write host's record into + * guest's memory. 
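+	 * Writing the MSR directly here gives the CPU that quiescent period + 	 * before the atomic switch loads the guest value on VM entry.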
+ */ + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } for (i = 0; i < m->nr; ++i) @@ -1711,26 +1718,31 @@ static void reload_tss(void) static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { - u64 guest_efer; - u64 ignore_bits; + u64 guest_efer = vmx->vcpu.arch.efer; + u64 ignore_bits = 0; - guest_efer = vmx->vcpu.arch.efer; + if (!enable_ept) { + /* + * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing + * host CPUID is more efficient than testing guest CPUID + * or CR4. Host SMEP is anyway a requirement for guest SMEP. + */ + if (boot_cpu_has(X86_FEATURE_SMEP)) + guest_efer |= EFER_NX; + else if (!(guest_efer & EFER_NX)) + ignore_bits |= EFER_NX; + } /* - * NX is emulated; LMA and LME handled by hardware; SCE meaningless - * outside long mode + * LMA and LME handled by hardware; SCE meaningless outside long mode. */ - ignore_bits = EFER_NX | EFER_SCE; + ignore_bits |= EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif - guest_efer &= ~ignore_bits; - guest_efer |= host_efer & ignore_bits; - vmx->guest_msrs[efer_offset].data = guest_efer; - vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); @@ -1741,16 +1753,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { - guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; - } + } else { + guest_efer &= ~ignore_bits; + guest_efer |= host_efer & ignore_bits; - return true; + vmx->guest_msrs[efer_offset].data = guest_efer; + vmx->guest_msrs[efer_offset].mask = ~ignore_bits; + + return true; + } } static unsigned long segment_base(u16 selector) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8d8c35623f2a..ffa809f30b19 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -176,6 +176,7 @@ #define AT_XDMAC_MAX_CHAN 0x20 #define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ #define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ +#define AT_XDMAC_RESIDUE_MAX_RETRIES 5 #define AT_XDMAC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ @@ -925,8 +926,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct at_xdmac_desc *desc, *_desc; struct list_head *descs_list; enum dma_status ret; - int residue; - u32 cur_nda, mask, value; + int residue, retry; + u32 cur_nda, check_nda, cur_ubc, mask, value; u8 dwidth = 0; unsigned long flags; @@ -963,7 +964,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, cpu_relax(); } + /* + * When processing the residue, we need to read two registers but we + * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where + * we stand in the descriptor list and AT_XDMAC_CUBC is used + * to know how much data remains for the current descriptor. + * Since the dma channel is not paused so as not to lose data, the + * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC + * reads. + * For that reason, after reading AT_XDMAC_CUBC, we check if we are + * still using the same descriptor by reading AT_XDMAC_CNDA a second + * time. If AT_XDMAC_CNDA has changed, we have to read AT_XDMAC_CUBC + * again. + * Memory barriers are used to ensure the read order of the registers.
+ * A maximum number of retries is set because, although unlikely, the + * loop could otherwise never end if we are transferring a lot of data + * with small buffers. + */ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { + rmb(); + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + + if (likely(cur_nda == check_nda)) + break; + + cur_nda = check_nda; + rmb(); + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + } + + if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { + ret = DMA_ERROR; + goto spin_unlock; + } + /* * Remove size of all microblocks already transferred and the current * one. Then add the remaining size to transfer of the current @@ -976,7 +1012,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) break; } - residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; + residue += cur_ubc << dwidth; dma_set_residue(txstate, residue); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 447dbfa6c793..7ac42d063574 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -254,7 +254,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 -static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], +static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count, u8 train_set[4]) { @@ -301,77 +301,43 @@ static int convert_bpc_to_bpp(int bpc) return bpc * 3; } -/* get the max pix clock supported by the link rate and lane num */ -static int dp_get_max_dp_pix_clock(int link_rate, - int lane_num, - int bpp) -{ - return (link_rate * lane_num * 8) / bpp; -} - /***** radeon specific DP functions *****/ -int radeon_dp_get_max_link_rate(struct drm_connector *connector, - u8 dpcd[DP_DPCD_SIZE]) -{ - int max_link_rate; - - if (radeon_connector_is_dp12_capable(connector)) - max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); - else - max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); - - return max_link_rate; -} - -/* First get the min lane# when low rate is used according to pixel clock - * (prefer low rate), second check max lane# supported by DP panel, - * if the max lane# < low rate lane# then use max lane# instead.
- */ -static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, - u8 dpcd[DP_DPCD_SIZE], - int pix_clock) +int radeon_dp_get_dp_link_config(struct drm_connector *connector, + const u8 dpcd[DP_DPCD_SIZE], + unsigned pix_clock, + unsigned *dp_lanes, unsigned *dp_rate) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); - int max_lane_num = drm_dp_max_lane_count(dpcd); - int lane_num; - int max_dp_pix_clock; - - for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) { - max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp); - if (pix_clock <= max_dp_pix_clock) - break; - } - - return lane_num; -} - -static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, - u8 dpcd[DP_DPCD_SIZE], - int pix_clock) -{ - int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int lane_num, max_pix_clock; + static const unsigned link_rates[3] = { 162000, 270000, 540000 }; + unsigned max_link_rate = drm_dp_max_link_rate(dpcd); + unsigned max_lane_num = drm_dp_max_lane_count(dpcd); + unsigned lane_num, i, max_pix_clock; if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_NUTMEG) - return 270000; - - lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); - max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 162000; - max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 270000; - if (radeon_connector_is_dp12_capable(connector)) { - max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp); - if (pix_clock <= max_pix_clock) - return 540000; + ENCODER_OBJECT_ID_NUTMEG) { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + max_pix_clock = (lane_num * 270000 * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = 270000; + return 0; + } + } + } else { + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; + if (max_pix_clock >= pix_clock) { + *dp_lanes = lane_num; + *dp_rate = link_rates[i]; + return 0; + } + } + } } - return radeon_dp_get_max_link_rate(connector, dpcd); + return -EINVAL; } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, @@ -490,6 +456,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector, { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; + int ret; if (!radeon_connector->con_priv) return; @@ -497,10 +464,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector, if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { - dig_connector->dp_clock = - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); - dig_connector->dp_lane_count = - radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd, + mode->clock, + &dig_connector->dp_lane_count, + &dig_connector->dp_clock); + if (ret) { + dig_connector->dp_clock = 0; + dig_connector->dp_lane_count = 0; + } } } @@ -509,7 +480,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, { struct radeon_connector *radeon_connector = to_radeon_connector(connector); 
struct radeon_connector_atom_dig *dig_connector; - int dp_clock; + unsigned dp_clock, dp_lanes; + int ret; if ((mode->clock > 340000) && (!radeon_connector_is_dp12_capable(connector))) @@ -519,8 +491,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; - dp_clock = - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd, + mode->clock, + &dp_lanes, + &dp_clock); + if (ret) + return MODE_CLOCK_HIGH; if ((dp_clock == 540000) && (!radeon_connector_is_dp12_capable(connector))) diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index c9ff4cf4c4e7..c4b4c0233937 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -520,11 +520,17 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); { struct radeon_connector_atom_dig *dig_connector; + int ret; dig_connector = mst_enc->connector->con_priv; - dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); - dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base, - dig_connector->dpcd); + ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base, + dig_connector->dpcd, adjusted_mode->clock, + &dig_connector->dp_lane_count, + &dig_connector->dp_clock); + if (ret) { + dig_connector->dp_lane_count = 0; + dig_connector->dp_clock = 0; + } DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, dig_connector->dp_lane_count, dig_connector->dp_clock); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9af2d8398e90..43ba333949c7 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -752,8 +752,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector); -int radeon_dp_get_max_link_rate(struct drm_connector *connector, - u8 *dpcd); +extern int radeon_dp_get_dp_link_config(struct drm_connector *connector, + const u8 *dpcd, + unsigned pix_clock, + unsigned *dp_lanes, unsigned *dp_rate); extern void radeon_dp_set_rx_power_state(struct drm_connector *connector, u8 power_state); extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index a56eab7f0ab1..8319eed613b0 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1079,6 +1079,8 @@ force: /* update display watermarks based on new power state */ radeon_bandwidth_update(rdev); + /* update displays */ + radeon_dpm_display_configuration_changed(rdev); /* wait for the rings to drain */ for (i = 0; i < RADEON_NUM_RINGS; i++) { @@ -1095,9 +1097,6 @@ force: radeon_dpm_post_set_power_state(rdev); - /* update displays */ - radeon_dpm_display_configuration_changed(rdev); - rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; rdev->pm.dpm.single_display = single_display; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 8b4d3e6875eb..21924f52863f 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -826,9 +826,8 @@ 
static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface static void gs_destroy_candev(struct gs_can *dev) { unregister_candev(dev->netdev); - free_candev(dev->netdev); usb_kill_anchored_urbs(&dev->tx_submitted); - kfree(dev); + free_candev(dev->netdev); } static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) @@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * for (i = 0; i < icount; i++) { dev->canch[i] = gs_make_candev(i, intf); if (IS_ERR_OR_NULL(dev->canch[i])) { + /* save error code to return later */ + rc = PTR_ERR(dev->canch[i]); + /* on failure destroy previously created candevs */ icount = i; - for (i = 0; i < icount; i++) { + for (i = 0; i < icount; i++) gs_destroy_candev(dev->canch[i]); - dev->canch[i] = NULL; - } + + usb_kill_anchored_urbs(&dev->rx_submitted); kfree(dev); return rc; } @@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf) return; } - for (i = 0; i < GS_MAX_INTF; i++) { - struct gs_can *can = dev->canch[i]; - - if (!can) - continue; - - gs_destroy_candev(can); - } + for (i = 0; i < GS_MAX_INTF; i++) + if (dev->canch[i]) + gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); + kfree(dev); } static const struct usb_device_id gs_usb_table[] = { diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 281451c274ca..771097f2162d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -370,6 +370,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } + /* + * Increase the pending frames counter, so that later when a reply comes + * in and the counter is decreased - we don't start getting negative + * values. + * Note that we don't need to make sure it isn't agg'd, since we're + * TXing non-sta + */ + atomic_inc(&mvm->pending_frames[sta_id]); + return 0; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index ad48837ead42..eed7c5a31b15 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -181,7 +181,6 @@ void core_tmr_abort_task( if (!__target_check_io_state(se_cmd, se_sess, 0)) { spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - target_put_sess_cmd(se_cmd); goto out; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 9a83f149ac85..95dfff88de11 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -873,6 +873,15 @@ struct ext4_inode_info { * by other means, so we have i_data_sem. */ struct rw_semaphore i_data_sem; + /* + * i_mmap_sem is for serializing page faults with truncate / punch hole + * operations. We have to make sure that new page cannot be faulted in + * a section of the inode that is being punched. We cannot easily use + * i_data_sem for this since we need protection for the whole punch + * operation and i_data_sem ranks below transaction start so we have + * to occasionally drop it. 
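+	 * The lock ordering that results is i_mutex -> i_mmap_sem -> + 	 * transaction start -> i_data_sem: the fault paths + 	 * (ext4_page_mkwrite, ext4_filemap_fault) take i_mmap_sem for + 	 * reading while truncate / punch hole take it for writing.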
+ */ + struct rw_semaphore i_mmap_sem; struct inode vfs_inode; struct jbd2_inode *jinode; @@ -2287,6 +2296,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, loff_t lstart, loff_t lend); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); +extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, int used, int quota_claim); @@ -2632,6 +2642,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize) return changed; } +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, + loff_t len); + struct ext4_group_info { unsigned long bb_state; struct rb_root bb_free_root; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 87ba10d1d3bc..ea12f565be24 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4741,7 +4741,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, int partial_begin, partial_end; loff_t start, end; ext4_lblk_t lblk; - struct address_space *mapping = inode->i_mapping; unsigned int blkbits = inode->i_blkbits; trace_ext4_zero_range(inode, offset, len, mode); @@ -4757,17 +4756,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, } /* - * Write out all dirty pages to avoid race conditions - * Then release them. - */ - if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { - ret = filemap_write_and_wait_range(mapping, offset, - offset + len - 1); - if (ret) - return ret; - } - - /* * Round up offset. This is not fallocate, we need to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the @@ -4810,6 +4798,10 @@ static long ext4_zero_range(struct file *file, loff_t offset, if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; + /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); + inode_dio_wait(inode); + /* Preallocate the range including the unaligned edges */ if (partial_begin || partial_end) { ret = ext4_alloc_file_blocks(file, @@ -4818,7 +4810,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, round_down(offset, 1 << blkbits)) >> blkbits, new_size, flags, mode); if (ret) - goto out_mutex; + goto out_dio; } @@ -4827,16 +4819,23 @@ static long ext4_zero_range(struct file *file, loff_t offset, flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE); - /* Now release the pages and zero block aligned part of pages*/ + /* + * Prevent page faults from reinstantiating pages we have + * released from page cache.
+ */ + down_write(&EXT4_I(inode)->i_mmap_sem); + ret = ext4_update_disksize_before_punch(inode, offset, len); + if (ret) { + up_write(&EXT4_I(inode)->i_mmap_sem); + goto out_dio; + } + /* Now release the pages and zero block aligned part of pages */ truncate_pagecache_range(inode, start, end - 1); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); - /* Wait all existing dio workers, newcomers will block on i_mutex */ - ext4_inode_block_unlocked_dio(inode); - inode_dio_wait(inode); - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); + up_write(&EXT4_I(inode)->i_mmap_sem); if (ret) goto out_dio; } @@ -4964,8 +4963,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) goto out; } + /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); + inode_dio_wait(inode); + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); + ext4_inode_resume_unlocked_dio(inode); if (ret) goto out; @@ -5424,21 +5428,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) return ret; } - /* - * Need to round down offset to be aligned with page size boundary - * for page size > block size. - */ - ioffset = round_down(offset, PAGE_SIZE); - - /* Write out all dirty pages */ - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, - LLONG_MAX); - if (ret) - return ret; - - /* Take mutex lock */ mutex_lock(&inode->i_mutex); - /* * There is no need to overlap collapse range with EOF, in which case * it is effectively a truncate operation @@ -5454,17 +5444,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) goto out_mutex; } - truncate_pagecache(inode, ioffset); - /* Wait for existing dio to complete */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); + /* + * Prevent page faults from reinstantiating pages we have released from + * page cache. + */ + down_write(&EXT4_I(inode)->i_mmap_sem); + /* + * Need to round down offset to be aligned with page size boundary + * for page size > block size. + */ + ioffset = round_down(offset, PAGE_SIZE); + /* + * Write tail of the last page before removed range since it will get + * removed from the page cache below. + */ + ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); + if (ret) + goto out_mmap; + /* + * Write data that will be shifted to preserve them when discarding + * page cache below. We are also protected from pages becoming dirty + * by i_mmap_sem. 
+ */ + ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, + LLONG_MAX); + if (ret) + goto out_mmap; + truncate_pagecache(inode, ioffset); + credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); - goto out_dio; + goto out_mmap; } down_write(&EXT4_I(inode)->i_data_sem); @@ -5503,7 +5519,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) out_stop: ext4_journal_stop(handle); -out_dio: +out_mmap: + up_write(&EXT4_I(inode)->i_mmap_sem); ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 0613c256c344..dd65fac5ff2f 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -213,7 +213,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = { #endif static const struct vm_operations_struct ext4_file_vm_ops = { - .fault = filemap_fault, + .fault = ext4_filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = ext4_page_mkwrite, }; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2b3a53a51582..3291e1af0e24 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3524,6 +3524,35 @@ int ext4_can_truncate(struct inode *inode) } /* + * We have to make sure i_disksize gets properly updated before we truncate + * page cache due to hole punching or zero range. Otherwise i_disksize update + * can get lost as it may have been postponed to submission of writeback but + * that will never happen after we truncate page cache. + */ +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, + loff_t len) +{ + handle_t *handle; + loff_t size = i_size_read(inode); + + WARN_ON(!mutex_is_locked(&inode->i_mutex)); + if (offset > size || offset + len < size) + return 0; + + if (EXT4_I(inode)->i_disksize >= size) + return 0; + + handle = ext4_journal_start(inode, EXT4_HT_MISC, 1); + if (IS_ERR(handle)) + return PTR_ERR(handle); + ext4_update_i_disksize(inode, size); + ext4_mark_inode_dirty(handle, inode); + ext4_journal_stop(handle); + + return 0; +} + +/* * ext4_punch_hole: punches a hole in a file by releasing the blocks * associated with the given offset and length * @@ -3588,17 +3617,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) } + /* Wait all existing dio workers, newcomers will block on i_mutex */ + ext4_inode_block_unlocked_dio(inode); + inode_dio_wait(inode); + + /* + * Prevent page faults from reinstantiating pages we have released from + * page cache.
+ */ + down_write(&EXT4_I(inode)->i_mmap_sem); first_block_offset = round_up(offset, sb->s_blocksize); last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; /* Now release the pages and zero block aligned part of pages*/ - if (last_block_offset > first_block_offset) + if (last_block_offset > first_block_offset) { + ret = ext4_update_disksize_before_punch(inode, offset, length); + if (ret) + goto out_dio; truncate_pagecache_range(inode, first_block_offset, last_block_offset); - - /* Wait all existing dio workers, newcomers will block on i_mutex */ - ext4_inode_block_unlocked_dio(inode); - inode_dio_wait(inode); + } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); @@ -3645,16 +3683,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) if (IS_SYNC(inode)) ext4_handle_sync(handle); - /* Now release the pages again to reduce race window */ - if (last_block_offset > first_block_offset) - truncate_pagecache_range(inode, first_block_offset, - last_block_offset); - inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); out_stop: ext4_journal_stop(handle); out_dio: + up_write(&EXT4_I(inode)->i_mmap_sem); ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); @@ -4775,11 +4809,13 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) } else ext4_wait_for_tail_page_commit(inode); } + down_write(&EXT4_I(inode)->i_mmap_sem); /* * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. */ truncate_pagecache(inode, inode->i_size); + up_write(&EXT4_I(inode)->i_mmap_sem); } /* * We want to call ext4_truncate() even if attr->ia_size == @@ -5234,6 +5270,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) sb_start_pagefault(inode->i_sb); file_update_time(vma->vm_file); + + down_read(&EXT4_I(inode)->i_mmap_sem); /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && !ext4_should_journal_data(inode) && @@ -5303,6 +5341,19 @@ retry_alloc: out_ret: ret = block_page_mkwrite_return(ret); out: + up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(inode->i_sb); return ret; } + +int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct inode *inode = file_inode(vma->vm_file); + int err; + + down_read(&EXT4_I(inode)->i_mmap_sem); + err = filemap_fault(vma, vmf); + up_read(&EXT4_I(inode)->i_mmap_sem); + + return err; +} diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8a3b9f14d198..6f5ca3e92246 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -945,6 +945,7 @@ static void init_once(void *foo) INIT_LIST_HEAD(&ei->i_orphan); init_rwsem(&ei->xattr_sem); init_rwsem(&ei->i_data_sem); + init_rwsem(&ei->i_mmap_sem); inode_init_once(&ei->vfs_inode); } diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h index 011ba6670d99..c70d06a383e2 100644 --- a/fs/ext4/truncate.h +++ b/fs/ext4/truncate.h @@ -10,8 +10,10 @@ */ static inline void ext4_truncate_failed_write(struct inode *inode) { + down_write(&EXT4_I(inode)->i_mmap_sem); truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); + up_write(&EXT4_I(inode)->i_mmap_sem); } /* diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 1ba5c97943b8..cfbceb116356 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -845,9 +845,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, pr_notice("%s(): Link succeeded, unlink failed (err %d). 
You now have a hard link\n", __func__, ret); - /* Might as well let the VFS know */ - d_instantiate(new_dentry, d_inode(old_dentry)); - ihold(d_inode(old_dentry)); + /* + * We can't keep the target in dcache after that. + * For one thing, we can't afford dentry aliases for directories. + * For another, if there was a victim, we _can't_ set new inode + * for that sucker and we have to trigger mount eviction - the + * caller won't do it on its own since we are returning an error. + */ + d_invalidate(new_dentry); new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); return ret; } diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 80021c709af9..0c2632386f35 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, d_rehash(newdent); } else { spin_lock(&dentry->d_lock); - NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE; + NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE; spin_unlock(&dentry->d_lock); } } else { diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 692ceda3bc21..a2b1d7ce3e1a 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir) * sole user of this dentry. Too tricky... Just unhash for * now. */ - d_drop(dentry); + if (!err) + d_drop(dentry); mutex_unlock(&dir->i_mutex); return err; @@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, if (!overwrite && new_is_dir && !old_opaque && new_opaque) ovl_remove_opaque(newdentry); + /* + * Old dentry now lives in different location. Dentries in + * lowerstack are stale. We cannot drop them here because + * access to them is lockless. This could be only pure upper + * or opaque directory - numlower is zero. Or upper non-dir + * entry - its pureness is tracked by flag opaque. + */ if (old_opaque != new_opaque) { ovl_dentry_set_opaque(old, new_opaque); if (!overwrite) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index a1b069e5e363..e505b44a9184 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -66,6 +66,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) if (upperdentry) { mutex_lock(&upperdentry->d_inode->i_mutex); err = notify_change(upperdentry, attr, NULL); + if (!err) + ovl_copyattr(upperdentry->d_inode, dentry->d_inode); mutex_unlock(&upperdentry->d_inode->i_mutex); } else { err = ovl_copy_up_last(dentry, attr, false); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index bd6d5c1e667d..39266655d2bd 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) if (oe->__upperdentry) { type = __OVL_PATH_UPPER; - if (oe->numlower) { - if (S_ISDIR(dentry->d_inode->i_mode)) - type |= __OVL_PATH_MERGE; - } else if (!oe->opaque) { + /* + * Non-dir dentry can hold lower dentry from previous + * location. Its purity depends only on opaque flag. 
+ */ + if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode)) + type |= __OVL_PATH_MERGE; + else if (!oe->opaque) type |= __OVL_PATH_PURE; - } } else { if (oe->numlower > 1) type |= __OVL_PATH_MERGE; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a6e1bca88cc6..8454fb35fcbe 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -131,9 +131,6 @@ extern void syscall_unregfunc(void); void *it_func; \ void *__data; \ \ - if (!cpu_online(raw_smp_processor_id())) \ - return; \ - \ if (!(cond)) \ return; \ prercu; \ @@ -332,15 +329,19 @@ extern void syscall_unregfunc(void); * "void *__data, proto" as the callback prototype. */ #define DECLARE_TRACE_NOARGS(name) \ - __DECLARE_TRACE(name, void, , 1, void *__data, __data) + __DECLARE_TRACE(name, void, , \ + cpu_online(raw_smp_processor_id()), \ + void *__data, __data) #define DECLARE_TRACE(name, proto, args) \ - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ - PARAMS(void *__data, proto), \ - PARAMS(__data, args)) + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + cpu_online(raw_smp_processor_id()), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ PARAMS(void *__data, proto), \ PARAMS(__data, args)) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 8f81bbbc38fc..e0f4109e64c6 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); /* Send a single event to user space */ void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, const char *extra); +#ifdef CONFIG_WEXT_CORE +/* flush all previous wext events - if work is done from netdev notifiers */ +void wireless_nlevent_flush(void); +#else +static inline void wireless_nlevent_flush(void) {} +#endif /* We may need a function to send a stream of events to user space. * More on that later... */ diff --git a/kernel/events/core.c b/kernel/events/core.c index e1af58e23bee..66e6568a4736 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1562,14 +1562,14 @@ event_sched_out(struct perf_event *event, perf_pmu_disable(event->pmu); + event->tstamp_stopped = tstamp; + event->pmu->del(event, 0); + event->oncpu = -1; event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; event->state = PERF_EVENT_STATE_OFF; } - event->tstamp_stopped = tstamp; - event->pmu->del(event, 0); - event->oncpu = -1; if (!is_software_event(event)) cpuctx->active_oncpu--; @@ -7641,6 +7641,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, } } + /* symmetric to unaccount_event() in _free_event() */ + account_event(event); + return event; err_per_task: @@ -8004,8 +8007,6 @@ SYSCALL_DEFINE5(perf_event_open, } } - account_event(event); - /* * Special case software events and allow them to be part of * any hardware group. @@ -8221,7 +8222,12 @@ err_context: perf_unpin_context(ctx); put_ctx(ctx); err_alloc: - free_event(event); + /* + * If event_file is set, the fput() above will have called ->release() + * and that will take care of freeing the event. 
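+	 * Calling free_event() in that case as well would free the event + 	 * twice.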
+ */ + if (!event_file) + free_event(event); err_cpus: put_online_cpus(); err_task: @@ -8265,8 +8271,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, /* Mark owner so we could distinguish it from user events. */ event->owner = EVENT_OWNER_KERNEL; - account_event(event); - ctx = find_get_context(event->pmu, task, event); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 5c564a68fb50..d71edcbd0c58 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -289,7 +289,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, } /* prepare A-MPDU MLME for Rx aggregation */ - tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); + tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL); if (!tid_agg_rx) goto end; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c0a9187bc3a9..cdf8609a6240 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -90,7 +90,7 @@ struct ieee80211_fragment_entry { unsigned int last_frag; unsigned int extra_len; struct sk_buff_head skb_list; - int ccmp; /* Whether fragments were encrypted with CCMP */ + bool check_sequential_pn; /* needed for CCMP/GCMP */ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ }; diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3ece7d1034c8..b54f398cda5d 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) * computing cur_tp */ tmp_mrs = &mi->r[idx].stats; - tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma); + tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10; tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024; return tmp_cur_tp; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 7430a1df2ab1..1ec889dc2e46 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) if (likely(sta->ampdu_mlme.tid_tx[tid])) return; - ieee80211_start_tx_ba_session(pubsta, tid, 5000); + ieee80211_start_tx_ba_session(pubsta, tid, 0); } static void @@ -1328,7 +1328,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) prob = mi->groups[i].rates[j].prob_ewma; /* convert tp_avg from pkt per second in kbps */ - tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024; + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10; + tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024; return tp_avg; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5793f75c5ffd..d4b08d87537c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1725,7 +1725,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, entry->seq = seq; entry->rx_queue = rx_queue; entry->last_frag = frag; - entry->ccmp = 0; + entry->check_sequential_pn = false; entry->extra_len = 0; return entry; @@ -1821,15 +1821,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) rx->seqno_idx, &(rx->skb)); if (rx->key && (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && ieee80211_has_protected(fc)) { int queue = rx->security_idx; - /* Store CCMP PN so that we can verify 
that the next - * fragment has a sequential PN value. */ - entry->ccmp = 1; + + /* Store CCMP/GCMP PN so that we can verify that the + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); + BUILD_BUG_ON(offsetof(struct ieee80211_key, + u.ccmp.rx_pn) != + offsetof(struct ieee80211_key, + u.gcmp.rx_pn)); + BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); } return RX_QUEUED; } @@ -1844,15 +1856,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; } - /* Verify that MPDUs within one MSDU have sequential PN values. - * (IEEE 802.11i, 8.3.3.4.5) */ - if (entry->ccmp) { + /* "The receiver shall discard MSDUs and MMPDUs whose constituent + * MPDU PN values are not incrementing in steps of 1." + * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) + * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) + */ + if (entry->check_sequential_pn) { int i; u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; int queue; + if (!rx->key || (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && - rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256)) + rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) return RX_DROP_UNUSABLE; memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { @@ -3359,6 +3377,7 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx, return false; /* ignore action frames to TDLS-peers */ if (ieee80211_is_action(hdr->frame_control) && + !is_broadcast_ether_addr(bssid) && !ether_addr_equal(bssid, hdr->addr1)) return false; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 2a0bbd22854b..71e9b84847f3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1138,6 +1138,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, return NOTIFY_DONE; } + wireless_nlevent_flush(); + return NOTIFY_OK; } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index c8717c1d082e..b50ee5d622e1 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -342,6 +342,40 @@ static const int compat_event_type_size[] = { /* IW event code */ +void wireless_nlevent_flush(void) +{ + struct sk_buff *skb; + struct net *net; + + ASSERT_RTNL(); + + for_each_net(net) { + while ((skb = skb_dequeue(&net->wext_nlevents))) + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, + GFP_KERNEL); + } +} +EXPORT_SYMBOL_GPL(wireless_nlevent_flush); + +static int wext_netdev_notifier_call(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + /* + * When a netdev changes state in any way, flush all pending messages + * to avoid them going out in a strange order, e.g. RTM_NEWLINK after + * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() + * or similar - all of which could otherwise happen due to delays from + * schedule_work(). 
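+ * cfg80211 triggers the same flush from its own netdev notifier + * (see the net/wireless/core.c hunk above), so events are also + * flushed for devices handled there.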
+ */ + wireless_nlevent_flush(); + + return NOTIFY_OK; +} + +static struct notifier_block wext_netdev_notifier = { + .notifier_call = wext_netdev_notifier_call, +}; + static int __net_init wext_pernet_init(struct net *net) { skb_queue_head_init(&net->wext_nlevents); @@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = { static int __init wireless_nlevent_init(void) { - return register_pernet_subsys(&wext_pernet_ops); + int err = register_pernet_subsys(&wext_pernet_ops); + + if (err) + return err; + + return register_netdevice_notifier(&wext_netdev_notifier); } subsys_initcall(wireless_nlevent_init); @@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init); /* Process events generated by the wireless layer or the driver. */ static void wireless_nlevent_process(struct work_struct *work) { - struct sk_buff *skb; - struct net *net; - rtnl_lock(); - - for_each_net(net) { - while ((skb = skb_dequeue(&net->wext_nlevents))) - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, - GFP_KERNEL); - } - + wireless_nlevent_flush(); rtnl_unlock(); } diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh index 198580d245e0..1659b409ef10 100755 --- a/scripts/ld-version.sh +++ b/scripts/ld-version.sh @@ -1,7 +1,7 @@ #!/usr/bin/awk -f # extract linker version number from stdin and turn into single number { - gsub(".*)", ""); + gsub(".*\\)", ""); split($1,a, "."); print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5]; exit diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index c799cca5abeb..6b864c0fc2b6 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); struct wm8994 *control = wm8994->wm8994; - int value = ucontrol->value.integer.value[0]; + int value = ucontrol->value.enumerated.item[0]; int reg; /* Don't allow on the fly reconfiguration */ @@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); struct wm8994 *control = wm8994->wm8994; - int value = ucontrol->value.integer.value[0]; + int value = ucontrol->value.enumerated.item[0]; int reg; /* Don't allow on the fly reconfiguration */ @@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); struct wm8994 *control = wm8994->wm8994; - int value = ucontrol->value.integer.value[0]; + int value = ucontrol->value.enumerated.item[0]; int reg; /* Don't allow on the fly reconfiguration */ @@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); struct wm8994 *control = wm8994->wm8994; - int value = ucontrol->value.integer.value[0]; + int value = ucontrol->value.enumerated.item[0]; int reg; /* Don't allow on the fly reconfiguration */ diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index a1c04dab6684..a484ca8421af 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -361,7 +361,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol, struct wm8994 *control = wm8994->wm8994; 
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d01c2095452f..431d94397219 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -248,7 +248,7 @@ static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.integer.value[0] = adsp[e->shift_l].fw;
+	ucontrol->value.enumerated.item[0] = adsp[e->shift_l].fw;
 
 	return 0;
 }
@@ -260,16 +260,16 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
 
-	if (ucontrol->value.integer.value[0] == adsp[e->shift_l].fw)
+	if (ucontrol->value.enumerated.item[0] == adsp[e->shift_l].fw)
 		return 0;
 
-	if (ucontrol->value.integer.value[0] >= WM_ADSP_NUM_FW)
+	if (ucontrol->value.enumerated.item[0] >= WM_ADSP_NUM_FW)
 		return -EINVAL;
 
 	if (adsp[e->shift_l].running)
 		return -EBUSY;
 
-	adsp[e->shift_l].fw = ucontrol->value.integer.value[0];
+	adsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
 
 	return 0;
 }
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index b92ab40d2be6..5e8ccb0a7028 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
 	unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
 	unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
 	u32 mod, mask, val = 0;
+	unsigned long flags;
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	switch (clk_id) {
 	case SAMSUNG_I2S_OPCLK:
@@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
 			return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	mod = (mod & ~mask) | val;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	return 0;
 }
@@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	struct i2s_dai *i2s = to_info(dai);
 	int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
 	u32 mod, tmp = 0;
+	unsigned long flags;
 
 	lrp_shift = i2s->variant_regs->lrp_off;
 	sdf_shift = i2s->variant_regs->sdf_off;
@@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	/*
 	 * Don't change the I2S mode if any controller is active on this
@@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	 */
 	if (any_active(i2s) &&
 	    ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
-		spin_unlock(i2s->lock);
+		spin_unlock_irqrestore(i2s->lock, flags);
 		dev_err(&i2s->pdev->dev,
 				"%s:%d Other DAI busy\n", __func__, __LINE__);
 		return -EAGAIN;
@@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 	mod &= ~(sdf_mask | lrp_rlow | mod_slave);
 	mod |= tmp;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	return 0;
 }
@@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
 {
 	struct i2s_dai *i2s = to_info(dai);
 	u32 mod, mask = 0, val = 0;
+	unsigned long flags;
 
 	if (!is_secondary(i2s))
 		mask |= (MOD_DC2_EN | MOD_DC1_EN);
@@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
 		return -EINVAL;
 	}
 
-	spin_lock(i2s->lock);
+	spin_lock_irqsave(i2s->lock, flags);
 	mod = readl(i2s->addr + I2SMOD);
 	mod = (mod & ~mask) | val;
 	writel(mod, i2s->addr + I2SMOD);
-	spin_unlock(i2s->lock);
+	spin_unlock_irqrestore(i2s->lock, flags);
 
 	samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
 
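Note on the i2s.c conversions above: the driver's lock is also taken from interrupt context, so process-context holders must use the IRQ-safe lock variants; otherwise an interrupt arriving on the same CPU while the lock is held would spin on it forever and deadlock. A self-contained sketch of the pattern, with illustrative names (example_lock, example_shadow_reg and example_update are not from the driver):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(example_lock);	/* also taken from an IRQ handler */
	static u32 example_shadow_reg;

	static void example_update(u32 mask, u32 val)
	{
		unsigned long flags;

		/* Disable local IRQs and take the lock; 'flags' remembers the
		 * previous IRQ state so nested callers are restored correctly. */
		spin_lock_irqsave(&example_lock, flags);
		example_shadow_reg = (example_shadow_reg & ~mask) | val;
		spin_unlock_irqrestore(&example_lock, flags);
	}

spin_lock_irqsave() is preferred over spin_lock_irq() here so the code stays correct even when called with interrupts already disabled.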
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b6c12dccb259..28df6adf362b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3324,7 +3324,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
 {
 	struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
 
-	ucontrol->value.integer.value[0] = w->params_select;
+	ucontrol->value.enumerated.item[0] = w->params_select;
 
 	return 0;
 }
@@ -3338,13 +3338,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
 	if (w->power)
 		return -EBUSY;
 
-	if (ucontrol->value.integer.value[0] == w->params_select)
+	if (ucontrol->value.enumerated.item[0] == w->params_select)
 		return 0;
 
-	if (ucontrol->value.integer.value[0] >= w->num_params)
+	if (ucontrol->value.enumerated.item[0] >= w->num_params)
 		return -EINVAL;
 
-	w->params_select = ucontrol->value.integer.value[0];
+	w->params_select = ucontrol->value.enumerated.item[0];
 
 	return 0;
 }
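Finally, the layout issue behind all of the enumerated.item[] conversions in this release can be demonstrated with a scaled-down userspace model of the snd_ctl_elem_value union (the real union uses 128-element arrays; this demo is illustrative, not kernel code):

	#include <stdio.h>
	#include <string.h>

	/* Scaled-down model of snd_ctl_elem_value's value union: the integer
	 * and enumerated views alias the same bytes. */
	union ctl_value {
		struct { long value[2]; } integer;
		struct { unsigned int item[4]; } enumerated;
	};

	int main(void)
	{
		union ctl_value v;

		memset(&v, 0, sizeof(v));
		v.enumerated.item[0] = 3;	/* an enum control stores item index 3 */

		/* On 64-bit little-endian both lines print 3, which is why the
		 * integer.value[] misuse went unnoticed; on 64-bit big-endian,
		 * item[0] lands in the high half of value[0], so the first line
		 * prints 12884901888 (3 << 32) instead. */
		printf("integer.value[0]   = %ld\n", v.integer.value[0]);
		printf("enumerated.item[0] = %u\n", v.enumerated.item[0]);
		return 0;
	}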