diff --git a/Makefile b/Makefile
index c0c41c9fac0c..d2fe757a979d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 91ebe382147f..5f69c3bd59bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 
 	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta ~1U;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 1ade1951e620..7aa120fbdc71 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -137,7 +137,7 @@
 &gpio4 {
 	gpio-ranges = <&iomuxc  5 136 1>, <&iomuxc  6 145 1>, <&iomuxc  7 150 1>,
 		      <&iomuxc  8 146 1>, <&iomuxc  9 151 1>, <&iomuxc 10 147 1>,
-		      <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+		      <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
 		      <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16  39 7>,
 		      <&iomuxc 23  56 1>, <&iomuxc 24  61 7>, <&iomuxc 31  46 1>;
 };
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ce131ed5939d..ae738a6319f6 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
		   const void *kbuf, const void __user *ubuf)
 {
	int ret;
-	struct pt_regs newregs;
+	struct pt_regs newregs = *task_pt_regs(target);
 
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3a2e678b8d30..0122ad1a6027 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 
 void __init early_abt_enable(void)
 {
-	fsr_info[22].fn = early_abort_handler;
+	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 	local_abt_enable();
-	fsr_info[22].fn = do_bad;
+	fsr_info[FSR_FS_AEA].fn = do_bad;
 }
 
 #ifndef CONFIG_ARM_LPAE
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 67532f242271..afc1f84e763b 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -11,11 +11,15 @@
 #define FSR_FS5_0		(0x3f)
 
 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA		17
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA		22
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9e1499f98def..13f5fad21066 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -641,9 +641,10 @@
 #define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
-#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define   SRR1_WAKESYSERR	0x00300000 /* System error */
 #define   SRR1_WAKEEE		0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define   SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define   SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f0b238516e9b..e0b9e576905a 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 3493cf4e0452..71697ff70879 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	return;
+	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258ae1e1..eec0e8d0454d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 		wmask = SRR1_WAKEMASK_P8;
 
 	idle_states = pnv_get_supported_cpuidle_states();
+
 	/* We don't want to take decrementer interrupts while we are offline,
-	 * so clear LPCR:PECE1. We keep PECE2 enabled.
+	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+	 * enabled as to let IPIs in.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 		 * contains 0.
 		 */
 		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
 		    (local_paca->irq_happened & PACA_IRQ_EE)) {
-			icp_native_flush_interrupt();
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				icp_opal_flush_interrupt();
+			else
+				icp_native_flush_interrupt();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
+
+	/* Re-enable decrementer interrupts */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 	DBG("CPU%d coming online...\n", cpu);
 }
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 60c57657c772..c96c0cb95d87 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -132,6 +132,35 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 	return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+	unsigned int xirr;
+	unsigned int vec;
+
+	do {
+		xirr = icp_opal_get_xirr();
+		vec = xirr & 0x00ffffff;
+		if (vec == XICS_IRQ_SPURIOUS)
+			break;
+		if (vec == XICS_IPI) {
+			/* Clear pending IPI */
+			int cpu = smp_processor_id();
+			kvmppc_set_host_ipi(cpu, 0);
+			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+		} else {
+			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+			       "disabling\n", vec);
+			xics_mask_unknown_vec(vec);
+		}
+
+		/* EOI the interrupt */
+	} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf17f6a..83db0eae9979 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7249f1500bcb..d1e25564b3c1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1876,7 +1876,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1888,7 +1887,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ir_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1d3167269a67..2b4cf04239b6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 023c7bfa24df..4eece91ada37 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e9bbe02950ad..36171bcd91f8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -423,9 +423,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->cpu_core_id == o->cpu_core_id)
-			return topology_sane(c, o, "smt");
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+			if (c->cpu_core_id == o->cpu_core_id)
+				return topology_sane(c, o, "smt");
+
+			if ((c->cu_id != 0xff) &&
+			    (o->cu_id != 0xff) &&
+			    (c->cu_id == o->cu_id))
+				return topology_sane(c, o, "smt");
+		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9c49adaa1f..8aa6bea1cd6c 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 		} else
 			note_page(m, &st, __pgprot(0), 1);
 
+		cond_resched();
 		start++;
 	}
 
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e9c0993b131d..e8817e2f0597 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -671,9 +671,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
 	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
 		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
 		if (rsgl != &ctx->first_rsgl)
 			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-		list_del(&rsgl->list);
 	}
 	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 312c4b4dc363..6eb6733a7a5c 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
 	struct device *dev = acpi_desc->dev;
 	struct acpi_nfit_flush_work flush;
+	int rc;
 
 	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
 	device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 	INIT_WORK_ONSTACK(&flush.work, flush_probe);
 	COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
 	queue_work(nfit_wq, &flush.work);
-	return wait_for_completion_interruptible(&flush.cmp);
+
+	rc = wait_for_completion_interruptible(&flush.cmp);
+	cancel_work_sync(&flush.work);
+	return rc;
 }
 
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4737520ec823..80fa656da5ab 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -820,6 +820,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE	19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+	u64 power_ctl;
+	int ret;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+	if (ret)
+		return;
+
+	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+		pr_info("Disabling energy efficiency optimization\n");
+		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+	}
+}
+
 static int atom_get_min_pstate(void)
 {
 	u64 value;
@@ -1420,6 +1439,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1435,6 +1459,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 
 	if (hwp_active) {
+		const struct x86_cpu_id *id;
+
+		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+		if (id)
+			intel_pstate_disable_ee(cpunum);
+
 		intel_pstate_hwp_enable(cpu);
 		pid_params.sample_rate_ms = 50;
 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3ddce2..a388bf2d67f4 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -955,7 +955,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
-	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a678083..340aef14d616 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
 	struct ccp_device *ccp;
 
 	spinlock_t lock;
+	struct list_head created;
 	struct list_head pending;
 	struct list_head active;
 	struct list_head complete;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 6553912804f7..e5d9278f4019 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
 	ccp_free_desc_resources(chan->ccp, &chan->complete);
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
 	spin_lock_irqsave(&chan->lock, flags);
 
 	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
 	list_add_tail(&desc->entry, &chan->pending);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
 	spin_lock_irqsave(&chan->lock, sflags);
 
-	list_add_tail(&desc->entry, &chan->pending);
+	list_add_tail(&desc->entry, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
 	/*TODO: Purge the complete list? */
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 	chan->ccp = ccp;
 
 	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->created);
 	INIT_LIST_HEAD(&chan->pending);
 	INIT_LIST_HEAD(&chan->active);
 	INIT_LIST_HEAD(&chan->complete);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index fb5f9bbfa09c..6aece3f25b08 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -51,6 +51,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
 	struct uld_ctx *u_ctx;
+	int ret = -ENXIO;
 
 	/*
 	 * Which device to use if multiple devices are available TODO
@@ -58,15 +59,14 @@ int assign_chcr_device(struct chcr_dev **dev)
 	 * must go to the same device to maintain the ordering.
 	 */
 	mutex_lock(&dev_mutex); /* TODO ? */
-	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-	if (!u_ctx) {
-		mutex_unlock(&dev_mutex);
-		return -ENXIO;
+	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+		if (u_ctx && u_ctx->dev) {
+			*dev = u_ctx->dev;
+			ret = 0;
+			break;
 	}
-
-	*dev = u_ctx->dev;
 	mutex_unlock(&dev_mutex);
-	return 0;
+	return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -203,10 +203,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
 		pr_err("ULD register fail: No chcr crypto support in cxgb4");
-		return -1;
-	}
 
 	return 0;
 }
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index bc5cbc193aae..5b2d78a5b5aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			      &hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
-	i = 0;
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
 			 ADF_PCI_MAX_BARS * 2) {
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index e8822536530b..33f0a6251e38 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 1e480f140663..8c4fd255a601 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
 	unsigned int csr_val;
 	int times = 30;
 
-	if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
 		return 0;
 
 	csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
 				 LOCAL_TO_XFER_REG_OFFSET);
 	handle->pci_dev = pci_info->pci_dev;
-	if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
 		sram_bar =
 			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 		handle->hal_sram_addr_v = sram_bar->virt_addr;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index e6862a744210..4e19bde4bbff 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1759,16 +1759,16 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 
 	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		/*
-		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
-		 * if they weren't, this code should be called on success
-		 * for TEST_ONLY too.
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
 		 */
-
 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (!crtc_state->event)
-				continue;
-
-			drm_event_cancel_free(dev, &crtc_state->event->base);
+			struct drm_pending_vblank_event *event = crtc_state->event;
+			if (event && (event->base.fence || event->base.file_priv)) {
+				drm_event_cancel_free(dev, &event->base);
+				crtc_state->event = NULL;
+			}
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a218c2e395e7..0c400f852a76 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1215,14 +1215,14 @@ validate_exec_list(struct drm_device *dev,
 			if (exec[i].offset !=
 			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
 				return -EINVAL;
-
-			/* From drm_mm perspective address space is continuous,
-			 * so from this point we're always using non-canonical
-			 * form internally.
-			 */
-			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
 		}
 
+		/* From drm_mm perspective address space is continuous,
+		 * so from this point we're always using non-canonical
+		 * form internally.
+		 */
+		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8079e5b380cb..b9be8a6141d8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4280,10 +4280,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 	drm_crtc_vblank_put(&intel_crtc->base);
 
 	wake_up_all(&dev_priv->pending_flip_queue);
-	queue_work(dev_priv->wq, &work->unpin_work);
-
 	trace_i915_flip_complete(intel_crtc->plane,
 				 work->pending_flip_obj);
+
+	queue_work(dev_priv->wq, &work->unpin_work);
 }
 
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca50c430..cae27c55dd99 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1723,7 +1723,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
 		return NULL;
 
 	if ((encoder->type == INTEL_OUTPUT_DP ||
-	     encoder->type == INTEL_OUTPUT_EDP) &&
+	     encoder->type == INTEL_OUTPUT_EDP ||
+	     encoder->type == INTEL_OUTPUT_DP_MST) &&
 	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
 		return NULL;
 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8490fe..5fb4c6d9209b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
 * vmbus_setevent- Trigger an event notification on the specified
 * channel.
 */
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
 {
 	struct hv_monitor_page *monitorpage;
 
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
 		vmbus_set_event(channel);
 	}
 }
+EXPORT_SYMBOL_GPL(vmbus_setevent);
 
 /*
  * vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	int ret;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal, lock, channel->signal_policy);
-
-	/*
-	 * Signalling the host is conditional on many factors:
-	 * 1. The ring state changed from being empty to non-empty.
-	 *    This is tracked by the variable "signal".
-	 * 2. The variable kick_q tracks if more data will be placed
-	 *    on the ring. We will not signal if more data is
-	 *    to be placed.
-	 *
-	 * Based on the channel signal state, we will decide
-	 * which signaling policy will be applied.
-	 *
-	 * If we cannot write to the ring-buffer; signal the host
-	 * even if we may not have written anything. This is a rare
-	 * enough condition that it should not matter.
-	 * NOTE: in this case, the hvsock channel is an exception, because
-	 * it looks the host side's hvsock implementation has a throttling
-	 * mechanism which can hurt the performance otherwise.
-	 */
-
-	if (((ret == 0) && kick_q && signal) ||
-	    (ret && !is_hvsock_channel(channel)))
-		vmbus_setevent(channel);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+				   lock, kick_q);
 
-	return ret;
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 				    u32 flags,
 				    bool kick_q)
 {
-	int ret;
 	int i;
 	struct vmbus_channel_packet_page_buffer desc;
 	u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	/*
-	 * Signalling the host is conditional on many factors:
-	 * 1. The ring state changed from being empty to non-empty.
-	 *    This is tracked by the variable "signal".
-	 * 2. The variable kick_q tracks if more data will be placed
-	 *    on the ring. We will not signal if more data is
-	 *    to be placed.
-	 *
-	 * Based on the channel signal state, we will decide
-	 * which signaling policy will be applied.
-	 *
-	 * If we cannot write to the ring-buffer; signal the host
-	 * even if we may not have written anything. This is a rare
-	 * enough condition that it should not matter.
-	 */
-
-	if (((ret == 0) && kick_q && signal) || (ret))
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, kick_q);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
 {
-	int ret;
 	u32 packetlen;
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	if (ret == 0 && signal)
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, true);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
 {
-	int ret;
 	struct vmbus_channel_packet_multipage_buffer desc;
 	u32 descsize;
 	u32 packetlen;
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	if (ret == 0 && signal)
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, true);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
 
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
 {
-	int ret;
-	bool signal = false;
+	return hv_ringbuffer_read(channel, buffer, bufferlen,
+				  buffer_actual_len, requestid, raw);
 
-	ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
-				 buffer_actual_len, requestid, &signal, raw);
-
-	if (signal)
-		vmbus_setevent(channel);
-
-	return ret;
 }
 
 int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1bc1d4795243..caf341842464 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -449,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	}
 
 	dev_type = hv_get_dev_type(newchannel);
-	if (dev_type == HV_NIC)
-		set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
 
 	init_vp_index(newchannel, dev_type);
 
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442433c8..2b13f2a0a71e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
		    struct kvec *kv_list,
-		    u32 kv_count, bool *signal, bool lock,
-		    enum hv_signal_policy policy);
+		    u32 kv_count, bool lock,
+		    bool kick_q);
 
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
-		       u64 *requestid, bool *signal, bool raw);
+		       u64 *requestid, bool raw);
 
 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da1a61c..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if it receives interrupts when it is not expecting
+ * the interrupt. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non full on the guest
+ * to host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
 */
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
-			      enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+			       bool kick_q)
 {
+	struct hv_ring_buffer_info *rbi = &channel->outbound;
+
 	virt_mb();
 	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
-		return false;
-
-	/*
-	 * When the client wants to control signaling,
-	 * we only honour the host interrupt mask.
-	 */
-	if (policy == HV_SIGNAL_POLICY_EXPLICIT)
-		return true;
+		return;
 
 	/* check interrupt_mask before read_index */
 	virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
-		return true;
+		vmbus_setevent(channel);
 
-	return false;
+	return;
 }
 
 /* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 }
 
 /* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
-		    enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+		    struct kvec *kv_list, u32 kv_count, bool lock,
+		    bool kick_q)
 {
	int i = 0;
	u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
+	struct hv_ring_buffer_info *outring_info = &channel->outbound;
 
	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
-	*signal = hv_need_to_signal(old_write, outring_info, policy);
+	hv_signal_on_write(old_write, channel, kick_q);
 
	return 0;
 }
 
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
		   void *buffer, u32 buflen, u32 *buffer_actual_len,
-		   u64 *requestid, bool *signal, bool raw)
+		   u64 *requestid, bool raw)
 {
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
	u32 offset;
	u32 packetlen;
	int ret = 0;
+	struct hv_ring_buffer_info *inring_info = &channel->inbound;
 
	if (buflen <= 0)
		return -EINVAL;
@@ -377,6 +383,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		return ret;
	}
 
+	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
@@ -416,7 +423,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);
 
-	*signal = hv_need_to_signal_on_read(inring_info);
+	hv_signal_on_read(channel);
 
	return ret;
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1869152f1d23..9b732c5f89e1 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 
	case RXE_MEM_TYPE_MR:
	case RXE_MEM_TYPE_FMR:
-		return ((iova < mem->iova) ||
-			((iova + length) > (mem->iova + mem->length))) ?
-			-EFAULT : 0;
+		if (iova < mem->iova ||
+		    length > mem->length ||
+		    iova > mem->iova + mem->length - length)
+			return -EFAULT;
+		return 0;
 
	default:
		return -EFAULT;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dd3d88adc003..ccf624763565 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -472,7 +472,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
			goto err2;
		}
 
-		resid = mtu;
+		qp->resp.resid = mtu;
	} else {
		if (pktlen != resid) {
			state = RESPST_ERR_LENGTH;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 92595b98e7ed..022be0e22eba 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
		return -EINVAL;
	}
 
-	if (test_bit(ABS_MT_SLOT, dev->absbit)) {
-		nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
-		error = input_mt_init_slots(dev, nslot, 0);
-		if (error)
+	if (test_bit(EV_ABS, dev->evbit)) {
+		input_alloc_absinfo(dev);
+		if (!dev->absinfo) {
+			error = -EINVAL;
			goto fail1;
-	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
-		input_set_events_per_packet(dev, 60);
+		}
+
+		if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+			nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+			error = input_mt_init_slots(dev, nslot, 0);
+			if (error)
+				goto fail1;
+		} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+			input_set_events_per_packet(dev, 60);
+		}
	}
 
	if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 31a89c8832c0..2c965424d383 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -804,6 +804,10 @@ static void dm_old_request_fn(struct request_queue *q)
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
+		if (unlikely(!map)) {
+			dm_put_live_table(md, srcu_idx);
+			return;
+		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 878950a42e6c..2cf8b1d82d6a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1007,9 +1007,7 @@
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	writel(value, reg_addr + reg);
+	writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1017,9 +1015,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	return readl(reg_addr + reg);
+	return readl(base + reg);
 }
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27ff401cec20..51c6a57ca873 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -991,6 +991,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
	struct mlx5e_priv *priv = netdev_priv(dev);
	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
	void *in;
 
	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1012,14 +1013,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
		memcpy(priv->params.toeplitz_hash_key, key,
		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
	mutex_unlock(&priv->state_lock);
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720b5fa9e625..c2ac39a940f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1288,6 +1288,9 @@ void netvsc_channel_cb(void *context)
	ndev = hv_get_drvdata(device);
	buffer = get_per_channel_state(channel);
 
+	/* commit_rd_index() -> hv_signal_on_read() needs this. */
+	init_cached_read_index(channel);
+
	do {
		desc = get_next_pkt_raw(channel);
		if (desc != NULL) {
@@ -1340,6 +1343,9 @@ void netvsc_channel_cb(void *context)
 
			bufferlen = bytes_recvd;
		}
+
+		init_cached_read_index(channel);
+
	} while (1);
 
	if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 8b6e37ce3f66..20bfb373dcd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,7 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+	char *fw_name;
 
	rtl8192ce_bt_reg_init(hw);
 
@@ -168,8 +168,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
	}
 
	/* request fw */
-	if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+	if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+	    !IS_92C_SERIAL(rtlhal->version))
+		fw_name = "rtlwifi/rtl8192cfwU.bin";
+	else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
		fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+	else
+		fw_name = "rtlwifi/rtl8192cfw.bin";
 
	rtlpriv->max_fw_size = 0x4000;
	pr_info("Using firmware %s\n", fw_name);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bf2744e1e3db..0cdcb2169083 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1397,6 +1397,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];
 
+		del_timer_sync(&queue->rx_refill_timer);
+
		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1751,7 +1753,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
-		del_timer_sync(&queue->rx_refill_timer);
		netif_napi_del(&queue->napi);
	}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1480734c2d6e..aefca644219b 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
+	int rc, i, id = -1;
	u8 *uuid = NULL;
-	int rc, i;
 
	if (dev->driver || ndns->claim)
		return -EBUSY;
@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
		uuid = nspm->uuid;
+		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 
		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
+		id = nsblk->id;
	}
 
	/*
@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
	/*
	 * Try to delete the namespace if we deleted all of its
-	 * allocation, this is not the seed device for the region, and
-	 * it is not actively claimed by a btt instance.
+	 * allocation, this is not the seed or 0th device for the
+	 * region, and it is not actively claimed by a btt, pfn, or dax
+	 * instance.
	 */
-	if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);
 
	return rc;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index a2ac9e641aa9..6c033c9a2f06 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
	size = resource_size(&nsio->res);
	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM) {
-		unsigned long memmap_size;
-
		/*
		 * vmemmap_populate_hugepages() allocates the memmap array in
		 * HPAGE_SIZE chunks.
		 */
-		memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
-		offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
-				nd_pfn->align) - start;
+		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+				max(nd_pfn->align, HPAGE_SIZE)) - start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K + dax_label_reserve,
				nd_pfn->align) - start;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 75f820ca17b7..27ff38f839fc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1583,7 +1583,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req = NULL;
+	struct zfcp_fsf_req *req;
	int retval = -EIO;
 
	spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
		zfcp_fsf_req_free(req);
 out:
	spin_unlock_irq(&qdio->req_q_lock);
-	if (req && !IS_ERR(req))
+	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
	return retval;
 }
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req = NULL;
+	struct zfcp_fsf_req *req;
	int retval = -EIO;
 
	spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
		zfcp_fsf_req_free(req);
 out:
	spin_unlock_irq(&qdio->req_q_lock);
-	if (req && !IS_ERR(req))
+	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
	return retval;
 }
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea327ae79..792d3e7e35e2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -50,9 +50,13 @@ struct aac_common aac_config = {
 
 static inline int aac_is_msix_mode(struct aac_dev *dev)
 {
-	u32 status;
+	u32 status = 0;
 
-	status = src_readl(dev, MUnit.OMR);
+	if (dev->pdev->device == PMC_DEVICE_S6 ||
+	    dev->pdev->device == PMC_DEVICE_S7 ||
+	    dev->pdev->device == PMC_DEVICE_S8) {
+		status = src_readl(dev, MUnit.OMR);
+	}
	return (status & AAC_INT_MODE_MSIX);
 }
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index e3b911c895b4..91dfd58b175d 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3929,6 +3929,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
 static const struct target_core_fabric_ops ibmvscsis_ops = {
	.module				= THIS_MODULE,
	.name				= "ibmvscsis",
+	.max_data_sg_nents		= MAX_TXU / PAGE_SIZE,
	.get_fabric_name		= ibmvscsis_get_fabric_name,
	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
	.tpg_get_tag			= ibmvscsis_get_tag,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f84a6087cebd..8a7941b8189f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,6 +51,7 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
@@ -8706,6 +8707,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
	switch (hba_mpi_version) {
	case MPI2_VERSION:
+		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
		shost = scsi_host_alloc(&mpt2sas_driver_template,
		  sizeof(struct MPT3SAS_ADAPTER));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 078d797cb492..bea819e5336d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1459,7 +1459,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
				/* Don't abort commands in adapter during EEH
				 * recovery as it's not accessible/responding.
				 */
-				if (!ha->flags.eeh_busy) {
+				if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
					/* Get a reference to the sp and drop the lock.
					 * The reference ensures this sp->done() call
					 * - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6b423485c5d6..ea9617c7b403 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -351,7 +351,15 @@ int core_enable_device_list_for_node(
			kfree(new);
			return -EINVAL;
		}
-		BUG_ON(orig->se_lun_acl != NULL);
+		if (orig->se_lun_acl != NULL) {
+			pr_warn_ratelimited("Detected existing explicit"
+				" se_lun_acl->se_lun_group reference for %s"
+				" mapped_lun: %llu, failing\n",
+				 nacl->initiatorname, mapped_lun);
+			mutex_unlock(&nacl->lun_entry_mutex);
+			kfree(new);
+			return -EINVAL;
+		}
 
		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 04f616b3ba0a..aabd6602da6c 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -450,6 +450,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
 {
	struct se_device *dev = cmd->se_dev;
+	sense_reason_t ret = TCM_NO_SENSE;
 
	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -457,9 +458,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+	if (cmd->transport_state & CMD_T_SENT) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;
+
+		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);
 
@@ -469,7 +473,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
	 */
	up(&dev->caw_sem);
 
-	return TCM_NO_SENSE;
+	return ret;
 }
 
 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd66df93..767d1eb6e035 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
 {
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
+	struct se_portal_group *se_tpg = nacl->se_tpg;
 
-	complete(&nacl->acl_free_comp);
+	if (!nacl->dynamic_stop) {
+		complete(&nacl->acl_free_comp);
+		return;
+	}
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_del(&nacl->acl_list);
+	mutex_unlock(&se_tpg->acl_node_mutex);
+
+	core_tpg_wait_for_nacl_pr_ref(nacl);
+	core_free_device_list_for_node(nacl, se_tpg);
+	kfree(nacl);
 }
 
 void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
 void transport_free_session(struct se_session *se_sess)
 {
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
+		struct se_portal_group *se_tpg = se_nacl->se_tpg;
+		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+		unsigned long flags;
+
		se_sess->se_node_acl = NULL;
+
+		/*
+		 * Also determine if we need to drop the extra ->cmd_kref if
+		 * it had been previously dynamically generated, and
+		 * the endpoint is not caching dynamic ACLs.
+		 */
+		mutex_lock(&se_tpg->acl_node_mutex);
+		if (se_nacl->dynamic_node_acl &&
+		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+			if (list_empty(&se_nacl->acl_sess_list))
+				se_nacl->dynamic_stop = true;
+			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+			if (se_nacl->dynamic_stop)
+				list_del(&se_nacl->acl_list);
+		}
+		mutex_unlock(&se_tpg->acl_node_mutex);
+
+		if (se_nacl->dynamic_stop)
+			target_put_nacl(se_nacl);
+
		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
	struct se_portal_group *se_tpg = se_sess->se_tpg;
-	const struct target_core_fabric_ops *se_tfo;
-	struct se_node_acl *se_nacl;
	unsigned long flags;
-	bool drop_nacl = false;
 
	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
-	se_tfo = se_tpg->se_tpg_tfo;
 
	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
-	/*
-	 * Determine if we need to do extra work for this initiator node's
-	 * struct se_node_acl if it had been previously dynamically generated.
-	 */
-	se_nacl = se_sess->se_node_acl;
-
-	mutex_lock(&se_tpg->acl_node_mutex);
-	if (se_nacl && se_nacl->dynamic_node_acl) {
-		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
-			list_del(&se_nacl->acl_list);
-			drop_nacl = true;
-		}
-	}
-	mutex_unlock(&se_tpg->acl_node_mutex);
-
-	if (drop_nacl) {
-		core_tpg_wait_for_nacl_pr_ref(se_nacl);
-		core_free_device_list_for_node(se_nacl, se_tpg);
-		se_sess->se_node_acl = NULL;
-		kfree(se_nacl);
-	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
+	 *
+	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */
 
	transport_free_session(se_sess);
@@ -3086,7 +3103,6 @@ static void target_tmr_work(struct work_struct *work)
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		goto check_stop;
	}
-	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
	cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3099,11 +3115,25 @@ int transport_generic_handle_tmr(
	struct se_cmd *cmd)
 {
	unsigned long flags;
+	bool aborted = false;
 
	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state |= CMD_T_ACTIVE;
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		aborted = true;
+	} else {
+		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+		cmd->transport_state |= CMD_T_ACTIVE;
+	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+	if (aborted) {
+		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+			"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+			cmd->se_tmr_req->ref_task_tag, cmd->tag);
+		transport_cmd_check_stop_to_fabric(cmd);
+		return 0;
+	}
+
	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 094a1440eacb..18848ba8d2ba 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -836,7 +836,7 @@ static void target_xcopy_do_work(struct work_struct *work)
			" CHECK_CONDITION -> sending response\n", rc);
		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	}
-	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
 }
 
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7acbd2cf6192..1782804f6c26 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5648,6 +5648,10 @@ long btrfs_ioctl(struct file *file, unsigned int
 #ifdef CONFIG_COMPAT
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+	/*
+	 * These all access 32-bit values anyway so no further
+	 * handling is necessary.
+ */ switch (cmd) { case FS_IOC32_GETFLAGS: cmd = FS_IOC_GETFLAGS; @@ -5658,8 +5662,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; - default: - return -ENOIOCTLCMD; } return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index da7fbf1cdd56..fa3b155ce7e1 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp, static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) { - return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** @@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) { return bitmap_parselist_user(buf, len, cpumask_bits(dstp), - nr_cpu_ids); + nr_cpumask_bits); } /** @@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) char *nl = strchr(buf, '\n'); unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** @@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { - return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); } /** diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index cd184bdca58f..c92a083bcf16 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -128,6 +128,7 @@ struct hv_ring_buffer_info { u32 ring_data_startoffset; u32 priv_write_index; u32 priv_read_index; + u32 cached_read_index; }; /* @@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) return write; } +static inline u32 hv_get_cached_bytes_to_write( + const struct hv_ring_buffer_info *rbi) +{ + u32 read_loc, write_loc, dsize, write; + + dsize = rbi->ring_datasize; + read_loc = rbi->cached_read_index; + write_loc = rbi->ring_buffer->write_index; + + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : + read_loc - write_loc; + return write; +} /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. @@ -1447,6 +1461,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel); void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); +void vmbus_setevent(struct vmbus_channel *channel); /* * Negotiated version with the Host. */ @@ -1479,10 +1494,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) * there is room for the producer to send the pending packet. */ -static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) +static inline void hv_signal_on_read(struct vmbus_channel *channel) { - u32 cur_write_sz; + u32 cur_write_sz, cached_write_sz; u32 pending_sz; + struct hv_ring_buffer_info *rbi = &channel->inbound; /* * Issue a full memory barrier before making the signaling decision. @@ -1500,14 +1516,26 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); /* If the other end is not blocked on write don't bother. 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index da7fbf1cdd56..fa3b155ce7e1 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
 static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
 {
-	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
				     struct cpumask *dstp)
 {
 	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
-				     nr_cpu_ids);
+				     nr_cpumask_bits);
 }
 
 /**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
 	char *nl = strchr(buf, '\n');
 	unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
-	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
  */
 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
 {
-	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..c92a083bcf16 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
 	u32 ring_data_startoffset;
 	u32 priv_write_index;
 	u32 priv_read_index;
+	u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
 	return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+	const struct hv_ring_buffer_info *rbi)
+{
+	u32 read_loc, write_loc, dsize, write;
+
+	dsize = rbi->ring_datasize;
+	read_loc = rbi->cached_read_index;
+	write_loc = rbi->ring_buffer->write_index;
+
+	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1447,6 +1461,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
 
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
+void vmbus_setevent(struct vmbus_channel *channel);
 /*
  * Negotiated version with the Host.
  */
@@ -1479,10 +1494,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
  * there is room for the producer to send the pending packet.
  */
 
-static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+static inline void hv_signal_on_read(struct vmbus_channel *channel)
 {
-	u32 cur_write_sz;
+	u32 cur_write_sz, cached_write_sz;
 	u32 pending_sz;
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
 
 	/*
 	 * Issue a full memory barrier before making the signaling decision.
@@ -1500,14 +1516,26 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
-		return false;
+		return;
 
 	cur_write_sz = hv_get_bytes_to_write(rbi);
 
-	if (cur_write_sz >= pending_sz)
-		return true;
+	if (cur_write_sz < pending_sz)
+		return;
+
+	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+	if (cached_write_sz < pending_sz)
+		vmbus_setevent(channel);
+
+	return;
+}
+
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
 
-	return false;
+	rbi->cached_read_index = rbi->ring_buffer->read_index;
 }
 
 /*
@@ -1571,6 +1599,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
  * This call commits the read index and potentially signals the host.
  * Here is the pattern for using the "in-place" consumption APIs:
  *
+ * init_cached_read_index();
+ *
 * while (get_next_pkt_raw() {
 *	process the packet "in-place";
 *	put_pkt_raw();
@@ -1589,8 +1619,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
 	virt_rmb();
 	ring_info->ring_buffer->read_index = ring_info->priv_read_index;
 
-	if (hv_need_to_signal_on_read(ring_info))
-		vmbus_set_event(channel);
+	hv_signal_on_read(channel);
 }
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c2119008990a..48bc1ac1da43 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -538,6 +538,7 @@ struct se_node_acl {
 	char			initiatorname[TRANSPORT_IQN_LEN];
 	/* Used to signal demo mode created ACL, disabled by default */
 	bool			dynamic_node_acl;
+	bool			dynamic_stop;
 	u32			queue_depth;
 	u32			acl_index;
 	enum target_prot_type	saved_prot_type;
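The hyperv.h rework above supports batched in-place reads of the VMBus inbound ring: init_cached_read_index() snapshots read_index before a batch, commit_rd_index() publishes the new read_index, and hv_signal_on_read() signals the host only when that batch crossed the producer's pending_send_sz threshold, that is, when free space was below the threshold at the cached index but is at or above it now. A small self-contained sketch of the free-space arithmetic and the threshold-crossing test; all concrete values are made up:

#include <stdio.h>

/* Free bytes in a circular buffer of 'dsize' data bytes, as the producer
 * sees it: the distance from write_loc forward to read_loc. */
static unsigned int bytes_to_write(unsigned int read_loc,
				   unsigned int write_loc, unsigned int dsize)
{
	return write_loc >= read_loc ? dsize - (write_loc - read_loc)
				     : read_loc - write_loc;
}

int main(void)
{
	unsigned int dsize = 4096, write_loc = 100;
	unsigned int cached_read = 200;	/* read index before the batch */
	unsigned int new_read = 3000;	/* read index after the batch */
	unsigned int pending = 1024;	/* producer waits for this much room */

	unsigned int before = bytes_to_write(cached_read, write_loc, dsize);
	unsigned int after  = bytes_to_write(new_read, write_loc, dsize);

	/* Signal only when this batch of reads crossed the threshold. */
	if (after >= pending && before < pending)
		printf("signal host (room went %u -> %u)\n", before, after);
	return 0;
}

Signalling only on the crossing avoids both missed wakeups (the producer is stuck until room appears) and a redundant hypercall per packet consumed.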
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b1cfd7416db0..4b3323151a2f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3461,14 +3461,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3598,7 +3599,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3611,21 +3612,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b6e4c16377c7..9c15a9124e83 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 	if (WARN_ON(!trace->entries))
 		return;
 
-	for (i = 0; i < trace->nr_entries; i++) {
-		printk("%*c", 1 + spaces, ' ');
-		print_ip_sym(trace->entries[i]);
-	}
+	for (i = 0; i < trace->nr_entries; i++)
+		printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
			struct stack_trace *trace, int spaces)
 {
 	int i;
-	unsigned long ip;
 	int generated;
 	int total = 0;
 
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
 		return 0;
 
 	for (i = 0; i < trace->nr_entries; i++) {
-		ip = trace->entries[i];
-		generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
-				1 + spaces, ' ', (void *) ip, (void *) ip);
+		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+				     (void *)trace->entries[i]);
 
 		total += generated;
diff --git a/mm/slub.c b/mm/slub.c
index 2b3e740609e9..7aa0e97af928 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1419,6 +1419,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
 	int err;
 	unsigned long i, count = oo_objects(s->oo);
 
+	/* Bailout if already initialised */
+	if (s->random_seq)
+		return 0;
+
 	err = cache_random_seq_create(s, count, GFP_KERNEL);
 	if (err) {
 		pr_err("SLUB: Unable to initialize free list for %s\n",
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 42120d965263..50e1b7f78bd4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
 	/* fast-forward to vendor IEs */
 	offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
 
-	if (offset) {
+	if (offset < ifmsh->ie_len) {
 		len = ifmsh->ie_len - offset;
 		data = ifmsh->ie + offset;
 		if (skb_tailroom(skb) < len)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1b3c18c2c1ec..cd7a419faa21 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5874,6 +5874,7 @@ do { \
			break;
		}
		cfg->ht_opmode = ht_opmode;
+		mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
	}
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
				  1, 65535, mask,
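Among the hunks above, the kernel/events/core.c rework of perf_event_read() is worth dwelling on: event->oncpu is now snapshotted exactly once with READ_ONCE(), the snapshot is validated, and preemption stays disabled across smp_call_function_single() so the chosen CPU remains meaningful. The single unsigned comparison does double duty, rejecting both out-of-range values and the -1 "not scheduled" marker. A compilable toy of the snapshot-and-validate step, with invented names (struct event, NR_CPU_IDS) standing in for the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPU_IDS 8	/* invented stand-in for nr_cpu_ids */

struct event {
	_Atomic int oncpu;	/* -1 when the event is not on any CPU */
};

static int read_event(struct event *ev)
{
	/* Snapshot the racy field exactly once. */
	int cpu = atomic_load_explicit(&ev->oncpu, memory_order_relaxed);

	/* One unsigned compare rejects -1 and out-of-range values alike. */
	if ((unsigned int)cpu >= NR_CPU_IDS)
		return 0;	/* event scheduled out: counts already updated */

	/* Here the kernel disables preemption and IPIs 'cpu'. */
	printf("would send IPI to cpu %d\n", cpu);
	return 0;
}

int main(void)
{
	struct event ev = { -1 };
	read_event(&ev);	/* rejected: not on any CPU */
	atomic_store(&ev.oncpu, 3);
	return read_event(&ev);	/* would IPI cpu 3 */
}

Re-reading the field later (as the old find_cpu_to_read() did) could hand smp_call_function_single() a CPU the event had already left, or the -1 marker itself.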
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 09fd6108e421..c2da45ae5b2a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5858,7 +5858,7 @@ static int selinux_setprocattr(struct task_struct *p,
 		return error;
 
 	/* Obtain a SID for the context, if one was specified. */
-	if (size && str[1] && str[1] != '\n') {
+	if (size && str[0] && str[0] != '\n') {
 		if (str[size-1] == '\n') {
 			str[size-1] = 0;
 			size--;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index c850345c43b5..dfa5156f3585 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
 	unsigned long flags;
 	struct snd_seq_event_cell *ptr;
-	int max_count = 5 * HZ;
 
 	if (snd_BUG_ON(!pool))
 		return -EINVAL;
@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
 	if (waitqueue_active(&pool->output_sleep))
 		wake_up(&pool->output_sleep);
 
-	while (atomic_read(&pool->counter) > 0) {
-		if (max_count == 0) {
-			pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
-			break;
-		}
+	while (atomic_read(&pool->counter) > 0)
 		schedule_timeout_uninterruptible(1);
-		max_count--;
-	}
 
 	/* release all resources */
 	spin_lock_irqsave(&pool->lock, flags);
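The SELinux hunk above is a one-byte fix: the test meant to ask "did the caller supply a non-empty context?" examined str[1], the second byte, so the outcome for short writes could hinge on a byte the caller never meaningfully set; testing str[0] asks the intended question (a leading NUL or newline means "clear the attribute"). A toy of the corrected parse step; parse_context() is an invented helper, not the SELinux function:

#include <stdio.h>
#include <stddef.h>

/* The *first* byte decides whether a context was supplied. */
static const char *parse_context(char *str, size_t size)
{
	if (size && str[0] && str[0] != '\n') {
		if (str[size - 1] == '\n') {	/* strip one trailing newline */
			str[size - 1] = '\0';
			size--;
		}
		return str;	/* caller would resolve this to a SID */
	}
	return NULL;		/* empty or "\n": clear the attribute */
}

int main(void)
{
	char one[] = "x";	/* one-byte value, mishandled by the str[1] test */
	char clear[] = "\n";

	printf("%s\n", parse_context(one, 1) ? one : "(clear)");
	printf("%s\n", parse_context(clear, 1) ? clear : "(clear)");
	return 0;
}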
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 0bec02e89d51..450c5187eecb 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
 	}
 }
 
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
 /* allocate a new queue -
 * return queue index value or negative value for error
 */
@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
 	if (q == NULL)
 		return -ENOMEM;
 	q->info_flags = info_flags;
+	queue_use(q, client, 1);
 	if (queue_list_add(q) < 0) {
 		queue_delete(q);
 		return -ENOMEM;
 	}
-	snd_seq_queue_use(q->queue, client, 1); /* use this queue */
 	return q->queue;
 }
 
@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
 	return result;
 }
 
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
 {
-	struct snd_seq_queue *queue;
-
-	queue = queueptr(queueid);
-	if (queue == NULL)
-		return -EINVAL;
-	mutex_lock(&queue->timer_mutex);
 	if (use) {
 		if (!test_and_set_bit(client, queue->clients_bitmap))
 			queue->clients++;
@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
 	} else {
 		snd_seq_timer_close(queue);
 	}
+}
+
+/* use or unuse this queue -
+ * if it is the first client, starts the timer.
+ * if it is not longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+	struct snd_seq_queue *queue;
+
+	queue = queueptr(queueid);
+	if (queue == NULL)
+		return -EINVAL;
+	mutex_lock(&queue->timer_mutex);
+	queue_use(queue, client, use);
 	mutex_unlock(&queue->timer_mutex);
 	queuefree(queue);
 	return 0;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 56e5204ac9c1..4bf48336b0fc 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3638,6 +3638,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 90009c0b3a92..ab3c280a23d1 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
 		goto error;
 	}
 
+	line6_get_interval(line6);
+
 	if (properties->capabilities & LINE6_CAP_CONTROL) {
-		line6_get_interval(line6);
 		ret = line6_init_cap_control(line6);
 		if (ret < 0)
 			goto error;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ff0db4e2d0c..933aeec46f4a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1199,7 +1199,7 @@ static int ui_init(void)
			BUG_ON(1);
		}
 
-		perf_hpp__register_sort_field(fmt);
+		perf_hpp__prepend_sort_field(fmt);
		return 0;
	}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 37388397b5bc..18cfcdc90356 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
 	list_add_tail(&format->sort_list, &list->sorts);
 }
 
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format)
+{
+	list_add(&format->sort_list, &list->sorts);
+}
+
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
 	list_del(&format->list);
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
 	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;
 
+		/* skip sort-only fields ("sort_compute" in perf diff) */
+		if (!fmt->entry && !fmt->color)
+			continue;
+
		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
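The tools/perf changes above give perf diff's comparison key top sorting priority: perf_hpp_list__prepend_sort_field() inserts at the head of the sort list via list_add(), where the existing register variant appends via list_add_tail(), and perf_hpp__setup_output_field() now skips sort-only formats so they never become visible columns. A toy singly-linked list showing how head- versus tail-insertion encodes priority; the names are invented:

#include <stdio.h>

struct fmt {
	const char *name;
	struct fmt *next;
};

static struct fmt *sorts;	/* head of the sort-key list */

/* append: lowest priority, mirrors list_add_tail() */
static void register_sort_field(struct fmt *f)
{
	struct fmt **p = &sorts;

	while (*p)
		p = &(*p)->next;
	f->next = NULL;
	*p = f;
}

/* prepend: highest priority, mirrors list_add() */
static void prepend_sort_field(struct fmt *f)
{
	f->next = sorts;
	sorts = f;
}

int main(void)
{
	struct fmt period = { "period", NULL };
	struct fmt delta  = { "delta",  NULL };
	struct fmt *f;

	register_sort_field(&period);
	prepend_sort_field(&delta);	/* "delta" now sorts before "period" */

	for (f = sorts; f; f = f->next)
		printf("%s\n", f->name);
	return 0;
}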
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9928fed8bc59..a440a04a29ff 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -282,6 +282,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format);
 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format);
 
 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
 {
@@ -293,6 +295,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
 	perf_hpp_list__register_sort_field(&perf_hpp_list, format);
 }
 
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+	perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
 #define perf_hpp_list__for_each_format(_list, format) \
	list_for_each_entry(format, &(_list)->fields, list)
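Finally, the util/hist.h hunk keeps to the header's existing convention: the function taking an explicit struct perf_hpp_list is the real operation, and a static inline wrapper binds the global perf_hpp_list default. A minimal compilable illustration of that wrapper convention; all names are invented:

#include <stdio.h>

struct fmt_list {
	int nfields;
};

static struct fmt_list default_list;	/* stands in for perf_hpp_list */

/* the "real" operation takes the list explicitly */
static void list_prepend_field(struct fmt_list *l, const char *name)
{
	l->nfields++;
	printf("prepended %s (now %d fields)\n", name, l->nfields);
}

/* thin wrapper binds the global default, mirroring the hist.h inlines */
static inline void prepend_field(const char *name)
{
	list_prepend_field(&default_list, name);
}

int main(void)
{
	prepend_field("delta");
	return 0;
}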