diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst index cb85af559dff..178f4104f5cf 100644 --- a/Documentation/networking/tls-offload.rst +++ b/Documentation/networking/tls-offload.rst @@ -445,24 +445,6 @@ These flags will be acted upon accordingly by the core ``ktls`` code. TLS device feature flags only control adding of new TLS connection offloads, old connections will remain active after flags are cleared. -Known bugs -========== - -skb_orphan() leaks clear text ------------------------------ - -Currently drivers depend on the :c:member:`sk` member of -:c:type:`struct sk_buff ` to identify segments requiring -encryption. Any operation which removes or does not preserve the socket -association such as :c:func:`skb_orphan` or :c:func:`skb_clone` -will cause the driver to miss the packets and lead to clear text leaks. - -Redirects leak clear text -------------------------- - -In the RX direction, if segment has already been decrypted by the device -and it gets redirected or mirrored - clear text will be transmitted out. - .. _pre_tls_data: Transmission of pre-TLS data diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst index 7cdf7282e022..65b6c1109cc8 100644 --- a/Documentation/vm/hmm.rst +++ b/Documentation/vm/hmm.rst @@ -231,7 +231,7 @@ respect in order to keep things properly synchronized. The usage pattern is:: ret = hmm_range_snapshot(&range); if (ret) { up_read(&mm->mmap_sem); - if (ret == -EAGAIN) { + if (ret == -EBUSY) { /* * No need to check hmm_range_wait_until_valid() return value * on retry we will get proper error with hmm_range_snapshot() diff --git a/Makefile b/Makefile index cfc667fe9959..35fee16d5006 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 2 -SUBLEVEL = 9 +SUBLEVEL = 10 EXTRAVERSION = NAME = Bobtail Squid diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 79155a8cfe7c..89e4c8b79349 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void) BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF | GIC_PRIO_PSR_I_SET)); BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); + /* + * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared + * and non-secure PMR accesses are not subject to the shifts that + * are applied to IRQ priorities + */ + BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON); gic_write_pmr(GIC_PRIO_IRQOFF); } diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index ae7e605085d7..9c0e0178ea29 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -13,6 +13,8 @@ #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) +#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) + /* mask/save/unmask/restore all exceptions, including interrupts. */ static inline void local_daif_mask(void) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index c9e9a6978e73..d3cb42fd51ec 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) #define alloc_screen_info(x...) &screen_info -#define free_screen_info(x...) 
+ +static inline void free_screen_info(efi_system_table_t *sys_table_arg, + struct screen_info *si) +{ +} /* redeclare as 'hidden' so the compiler will generate relative references */ extern struct screen_info screen_info __attribute__((__visibility__("hidden"))); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b7ba75809751..fb04f10a78ab 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -210,7 +210,11 @@ extern u64 vabits_user; #define __tag_reset(addr) untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) #else -#define __tag_set(addr, tag) (addr) +static inline const void *__tag_set(const void *addr, u8 tag) +{ + return addr; +} + #define __tag_reset(addr) (addr) #define __tag_get(addr) 0 #endif @@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x) #define page_to_virt(page) ({ \ unsigned long __addr = \ ((__page_to_voff(page)) | PAGE_OFFSET); \ - unsigned long __addr_tag = \ - __tag_set(__addr, page_kasan_tag(page)); \ + const void *__addr_tag = \ + __tag_set((void *)__addr, page_kasan_tag(page)); \ ((void *)__addr_tag); \ }) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index fca26759081a..b9574d850f14 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, PMD_TYPE_SECT) #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 -#define pud_sect(pud) (0) -#define pud_table(pud) (1) +static inline bool pud_sect(pud_t pud) { return false; } +static inline bool pud_table(pud_t pud) { return true; } #else #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ PUD_TYPE_SECT) diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 81693244f58d..701eaa738187 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -30,7 +30,7 @@ * in the the priority mask, it indicates that PSR.I should be set and * interrupt disabling temporarily does not rely on IRQ priorities. */ -#define GIC_PRIO_IRQON 0xc0 +#define GIC_PRIO_IRQON 0xe0 #define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) #define GIC_PRIO_PSR_I_SET (1 << 4) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 1285c7b2947f..171773257974 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - struct plt_entry trampoline; + struct plt_entry trampoline, *dst; struct module *mod; /* @@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * to check if the actual opcodes are in fact identical, * regardless of the offset in memory so use memcmp() instead. 
*/ - trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); - if (memcmp(mod->arch.ftrace_trampoline, &trampoline, - sizeof(trampoline))) { - if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { + dst = mod->arch.ftrace_trampoline; + trampoline = get_plt_entry(addr, dst); + if (memcmp(dst, &trampoline, sizeof(trampoline))) { + if (plt_entry_is_initialized(dst)) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } /* point the trampoline to our ftrace entry point */ module_disable_ro(mod); - *mod->arch.ftrace_trampoline = trampoline; + *dst = trampoline; module_enable_ro(mod, true); - /* update trampoline before patching in the branch */ - smp_wmb(); + /* + * Ensure updated trampoline is visible to instruction + * fetch before we patch in the branch. + */ + __flush_icache_range((unsigned long)&dst[0], + (unsigned long)&dst[1]); } - addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; + addr = (unsigned long)dst; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 88ce502c8e6f..624f2501f3f8 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -165,33 +166,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p) __this_cpu_write(current_kprobe, p); } -/* - * When PSTATE.D is set (masked), then software step exceptions can not be - * generated. - * SPSR's D bit shows the value of PSTATE.D immediately before the - * exception was taken. PSTATE.D is set while entering into any exception - * mode, however software clears it for any normal (none-debug-exception) - * mode in the exception entry. Therefore, when we are entering into kprobe - * breakpoint handler from any normal mode then SPSR.D bit is already - * cleared, however it is set when we are entering from any debug exception - * mode. - * Since we always need to generate single step exception after a kprobe - * breakpoint exception therefore we need to clear it unconditionally, when - * we become sure that the current breakpoint exception is for kprobe. - */ -static void __kprobes -spsr_set_debug_flag(struct pt_regs *regs, int mask) -{ - unsigned long spsr = regs->pstate; - - if (mask) - spsr |= PSR_D_BIT; - else - spsr &= ~PSR_D_BIT; - - regs->pstate = spsr; -} - /* * Interrupts need to be disabled before single-step mode is set, and not * reenabled until after single-step mode ends. @@ -203,17 +177,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask) static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - kcb->saved_irqflag = regs->pstate; + kcb->saved_irqflag = regs->pstate & DAIF_MASK; regs->pstate |= PSR_I_BIT; + /* Unmask PSTATE.D for enabling software step exceptions. */ + regs->pstate &= ~PSR_D_BIT; } static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - if (kcb->saved_irqflag & PSR_I_BIT) - regs->pstate |= PSR_I_BIT; - else - regs->pstate &= ~PSR_I_BIT; + regs->pstate &= ~DAIF_MASK; + regs->pstate |= kcb->saved_irqflag; } static void __kprobes @@ -250,8 +224,6 @@ static void __kprobes setup_singlestep(struct kprobe *p, set_ss_context(kcb, slot); /* mark pending ss */ - spsr_set_debug_flag(regs, 0); - /* IRQs and single stepping do not mix well. 
*/ kprobes_save_local_irqflag(kcb, regs); kernel_enable_single_step(regs); diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index b21cba90f82d..491184a9f081 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d) return 0; } } +NOKPROBE_SYMBOL(save_return_addr); void *return_address(unsigned int level) { @@ -52,3 +54,4 @@ void *return_address(unsigned int level) return NULL; } EXPORT_SYMBOL_GPL(return_address); +NOKPROBE_SYMBOL(return_address); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 62d395151abe..cd7dab54d17b 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -73,6 +74,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) return 0; } +NOKPROBE_SYMBOL(unwind_frame); void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data) @@ -87,6 +89,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, break; } } +NOKPROBE_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c index d66613e6ad08..8a38ccf8dc02 100644 --- a/arch/arm64/kvm/regmap.c +++ b/arch/arm64/kvm/regmap.c @@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v) switch (spsr_idx) { case KVM_SPSR_SVC: write_sysreg_el1(v, spsr); + break; case KVM_SPSR_ABT: write_sysreg(v, spsr_abt); + break; case KVM_SPSR_UND: write_sysreg(v, spsr_und); + break; case KVM_SPSR_IRQ: write_sysreg(v, spsr_irq); + break; case KVM_SPSR_FIQ: write_sysreg(v, spsr_fiq); + break; } } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2d115016feb4..414b8e0f19e0 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -800,6 +800,53 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } +/* + * In debug exception context, we explicitly disable preemption despite + * having interrupts disabled. + * This serves two purposes: it makes it much less likely that we would + * accidentally schedule in exception context and it will force a warning + * if we somehow manage to schedule by accident. + */ +static void debug_exception_enter(struct pt_regs *regs) +{ + /* + * Tell lockdep we disabled irqs in entry.S. Do nothing if they were + * already disabled to preserve the last enabled/disabled addresses. + */ + if (interrupts_enabled(regs)) + trace_hardirqs_off(); + + if (user_mode(regs)) { + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + } else { + /* + * We might have interrupted pretty much anything. In + * fact, if we're a debug exception, we can even interrupt + * NMI processing. We don't want this code makes in_nmi() + * to return true, but we need to notify RCU. + */ + rcu_nmi_enter(); + } + + preempt_disable(); + + /* This code is a bit fragile. Test it. 
*/ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work"); +} +NOKPROBE_SYMBOL(debug_exception_enter); + +static void debug_exception_exit(struct pt_regs *regs) +{ + preempt_enable_no_resched(); + + if (!user_mode(regs)) + rcu_nmi_exit(); + + if (interrupts_enabled(regs)) + trace_hardirqs_on(); +} +NOKPROBE_SYMBOL(debug_exception_exit); + #ifdef CONFIG_ARM64_ERRATUM_1463225 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -840,12 +887,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, if (cortex_a76_erratum_1463225_debug_handler(regs)) return; - /* - * Tell lockdep we disabled irqs in entry.S. Do nothing if they were - * already disabled to preserve the last enabled/disabled addresses. - */ - if (interrupts_enabled(regs)) - trace_hardirqs_off(); + debug_exception_enter(regs); if (user_mode(regs) && !is_ttbr0_addr(pc)) arm64_apply_bp_hardening(); @@ -855,7 +897,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint, inf->sig, inf->code, (void __user *)pc, esr); } - if (interrupts_enabled(regs)) - trace_hardirqs_on(); + debug_exception_exit(regs); } NOKPROBE_SYMBOL(do_debug_exception); diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h index 14b1931be69c..b65b169778e3 100644 --- a/arch/mips/vdso/vdso.h +++ b/arch/mips/vdso/vdso.h @@ -9,6 +9,7 @@ #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT) /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */ +#define BUILD_VDSO32_64 #undef CONFIG_64BIT #define CONFIG_32BIT 1 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index dad9825e4087..3c17fc7c2b93 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -199,12 +199,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = { NULL, }; +static inline int papr_scm_node(int node) +{ + int min_dist = INT_MAX, dist; + int nid, min_node; + + if ((node == NUMA_NO_NODE) || node_online(node)) + return node; + + min_node = first_online_node; + for_each_online_node(nid) { + dist = node_distance(node, nid); + if (dist < min_dist) { + min_dist = dist; + min_node = nid; + } + } + return min_node; +} + static int papr_scm_nvdimm_init(struct papr_scm_priv *p) { struct device *dev = &p->pdev->dev; struct nd_mapping_desc mapping; struct nd_region_desc ndr_desc; unsigned long dimm_flags; + int target_nid, online_nid; p->bus_desc.ndctl = papr_scm_ndctl; p->bus_desc.module = THIS_MODULE; @@ -243,8 +263,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = dev_to_node(&p->pdev->dev); - ndr_desc.target_node = ndr_desc.numa_node; + target_nid = dev_to_node(&p->pdev->dev); + online_nid = papr_scm_node(target_nid); + ndr_desc.numa_node = online_nid; + ndr_desc.target_node = target_nid; ndr_desc.res = &p->res; ndr_desc.of_node = p->dn; ndr_desc.provider_data = p; @@ -259,6 +281,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) ndr_desc.res, p->dn); goto err; } + if (target_nid != online_nid) + dev_info(dev, "Region registered with target node %d and online node %d", + target_nid, online_nid); return 0; diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 853b65ef656d..f0227bdce0f0 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -16,7 +16,13 @@ extern void 
__fstate_restore(struct task_struct *restore_from); static inline void __fstate_clean(struct pt_regs *regs) { - regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN; + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN; +} + +static inline void fstate_off(struct task_struct *task, + struct pt_regs *regs) +{ + regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF; } static inline void fstate_save(struct task_struct *task, diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index f23794bd1e90..fb3a082362eb 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { regs->sstatus = SR_SPIE; - if (has_fpu) + if (has_fpu) { regs->sstatus |= SR_FS_INITIAL; + /* + * Restore the initial value to the FP register + * before starting the user program. + */ + fstate_restore(current, regs); + } regs->sepc = pc; regs->sp = sp; set_fs(USER_DS); @@ -75,10 +81,11 @@ void flush_thread(void) { #ifdef CONFIG_FPU /* - * Reset FPU context + * Reset FPU state and context * frm: round to nearest, ties to even (IEEE default) * fflags: accrued exceptions cleared */ + fstate_off(current, task_pt_regs(current)); memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate)); #endif } diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index f1d6ffe43e42..49a5852fd07d 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE # these symbols in the kernel code rather than hand-coded addresses. SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--hash-style=both + -Wl,--build-id -Wl,--hash-style=both $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE $(call if_changed,vdsold) diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index bc96b16288c1..af6a65ac04cf 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type, switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; + break; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 176cb46bcf12..0634bfb82a0b 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -515,6 +515,7 @@ void cpu_reset(void) "add %2, %2, %7\n\t" "addi %0, %0, -1\n\t" "bnez %0, 1b\n\t" + "isync\n\t" /* Jump to identity mapping */ "jx %3\n" "2:\n\t" diff --git a/block/blk-mq.c b/block/blk-mq.c index ce0f5f4ede70..68106a41f90d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2674,8 +2674,6 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx, *next; int i; - cancel_delayed_work_sync(&q->requeue_work); - queue_for_each_hw_ctx(q, hctx, i) WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 977c659dcd18..9bfa3ea4ed63 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work) blk_free_queue_stats(q->stats); + if (queue_is_mq(q)) + cancel_delayed_work_sync(&q->requeue_work); + blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 72312ad2e142..c25cdbf817f1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ 
-338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, hpriv->phys[port] = NULL; rc = 0; break; + case -EPROBE_DEFER: + /* Do not complain yet */ + break; default: dev_err(dev, diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index 173e6f2dd9af..eefda51f97d3 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) unsigned int ret; struct rm_feature_desc *desc; struct ata_taskfile tf; - static const char cdb[] = { GPCMD_GET_CONFIGURATION, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION, 2, /* only 1 feature descriptor requested */ 0, 3, /* 3, removable medium feature */ 0, 0, 0,/* reserved */ diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index bf868260f435..4838c6a9f0f2 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -554,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip) return hwrng_register(&chip->hwrng); } +static int tpm_get_pcr_allocation(struct tpm_chip *chip) +{ + int rc; + + rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ? + tpm2_get_pcr_allocation(chip) : + tpm1_get_pcr_allocation(chip); + + if (rc > 0) + return -ENODEV; + + return rc; +} + /* * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. @@ -573,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip) if (rc) return rc; rc = tpm_auto_startup(chip); + if (rc) { + tpm_chip_stop(chip); + return rc; + } + + rc = tpm_get_pcr_allocation(chip); tpm_chip_stop(chip); if (rc) return rc; diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index e503ffc3aa39..a7fea3e0ca86 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, const char *desc, size_t min_cap_length); int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); +int tpm1_get_pcr_allocation(struct tpm_chip *chip); unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm_pm_suspend(struct device *dev); int tpm_pm_resume(struct device *dev); @@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); +ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip); int tpm2_auto_startup(struct tpm_chip *chip); void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index faacbe1ffa1a..149e953ca369 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c @@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip) goto out; } - chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) { - rc = -ENOMEM; - goto out; - } - - chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; - chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; - chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; - chip->nr_allocated_banks = 1; - return rc; out: if (rc > 0) @@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) return rc; } +/** + * tpm1_get_pcr_allocation() - initialize the allocated bank + * @chip: TPM chip to use. 
+ * + * The function initializes the SHA1 allocated bank to extend PCR + * + * Return: + * * 0 on success, + * * < 0 on error. + */ +int tpm1_get_pcr_allocation(struct tpm_chip *chip) +{ + chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), + GFP_KERNEL); + if (!chip->allocated_banks) + return -ENOMEM; + + chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; + chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; + chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; + chip->nr_allocated_banks = 1; + + return 0; +} diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index d103545e4055..ba9acae83bff 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -840,7 +840,7 @@ struct tpm2_pcr_selection { u8 pcr_select[3]; } __packed; -static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) +ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) { struct tpm2_pcr_selection pcr_selection; struct tpm_buf buf; @@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip) goto out; } - rc = tpm2_get_pcr_allocation(chip); - if (rc) - goto out; - rc = tpm2_get_cc_attrs_tbl(chip); out: diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 44db83a6d01c..44a46dcc0518 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw, continue; div = DIV_ROUND_CLOSEST(parent_rate, req->rate); + if (div > GENERATED_MAX_DIV + 1) + div = GENERATED_MAX_DIV + 1; clk_generated_best_diff(req, parent, parent_rate, div, &best_diff, &best_rate); diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 0201809bbd37..9dfa28d6fd9f 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -576,17 +576,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev, unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); /* Reset module */ - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); /* Wait for at least one cycle of the RCLK clock (@ ca. 
32 kHz) */ udelay(35); @@ -603,16 +597,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id) unsigned int reg = id / 32; unsigned int bit = id % 32; u32 bitmask = BIT(bit); - unsigned long flags; - u32 value; dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); - spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SRCR(reg)); - value |= bitmask; - writel(value, priv->base + SRCR(reg)); - spin_unlock_irqrestore(&priv->rmw_lock, flags); + writel(bitmask, priv->base + SRCR(reg)); return 0; } diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig index 91d3d721c801..3c219af25100 100644 --- a/drivers/clk/sprd/Kconfig +++ b/drivers/clk/sprd/Kconfig @@ -3,6 +3,7 @@ config SPRD_COMMON_CLK tristate "Clock support for Spreadtrum SoCs" depends on ARCH_SPRD || COMPILE_TEST default ARCH_SPRD + select REGMAP_MMIO if SPRD_COMMON_CLK diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4b192e0ce92f..ed7977d0dd01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1148,7 +1148,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12) ? + adev->asic_type != CHIP_POLARIS12 && + adev->asic_type != CHIP_VEGAM) ? VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2f6239b6be6f..fe028561dc0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1093,29 +1093,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return r; } - fence = amdgpu_ctx_get_fence(ctx, entity, - deps[i].handle); + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + amdgpu_ctx_put(ctx); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + else if (!fence) + continue; if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { - struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct drm_sched_fence *s_fence; struct dma_fence *old = fence; + s_fence = to_drm_sched_fence(fence); fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(old); } - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + dma_fence_put(fence); + if (r) return r; - } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, - true); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 8930d66f2204..91bfb24f963e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -703,7 +703,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index abeaab4bf1bc..d55519bc34e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -144,12 +144,16 @@ static ssize_t amdgpu_get_dpm_state(struct device 
*dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); - else if (adev->powerplay.pp_funcs->get_current_power_state) + if (is_support_sw_smu(adev)) { + if (adev->smu.ppt_funcs->get_current_power_state) + pm = amdgpu_smu_get_current_power_state(adev); + else + pm = adev->pm.dpm.user_state; + } else if (adev->powerplay.pp_funcs->get_current_power_state) { pm = amdgpu_dpm_get_current_power_state(adev); - else + } else { pm = adev->pm.dpm.user_state; + } return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : @@ -176,7 +180,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 2f18c64d531f..2f7f0a2e4a6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4553,7 +4553,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - WREG32(mmSQ_CMD, value); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); } static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index eec329ab6037..61a6d183c153 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -63,7 +63,8 @@ int smu_get_power_num_states(struct smu_context *smu, /* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ee777469293a..e4e22bbae2a7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require @@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024 config DRM_TOSHIBA_TC358764 tristate "TC358764 DSI/LVDS bridge" - depends on DRM && DRM_PANEL depends on OF select DRM_MIPI_DSI + select DRM_KMS_HELPER + select DRM_PANEL help Toshiba TC358764 DSI/LVDS bridge driver. 
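The amdgpu_pm.c hunks earlier in this patch (amdgpu_get_dpm_state / amdgpu_set_dpm_state) make the sysfs power-state paths tolerate a missing get_current_power_state callback in smu.ppt_funcs and fall back to the cached adev->pm.dpm.user_state instead of dereferencing a NULL pointer. Below is a minimal stand-alone C sketch of that "optional callback with fallback" pattern; the names (power_ops, device_ctx, query_power_state) are hypothetical illustrations, not the actual amdgpu structures or API.

/*
 * Sketch only: optional ops callback with a cached-state fallback.
 * Hypothetical names, not part of the amdgpu driver.
 */
#include <stdio.h>

struct power_ops {
	/* May legitimately be NULL on some firmware/SMU combinations. */
	int (*get_current_power_state)(void);
};

struct device_ctx {
	const struct power_ops *ops;
	int user_state;		/* last state explicitly requested by the user */
};

static int query_power_state(const struct device_ctx *dev)
{
	/* Prefer the callback when present, otherwise report the cached state. */
	if (dev->ops && dev->ops->get_current_power_state)
		return dev->ops->get_current_power_state();
	return dev->user_state;
}

int main(void)
{
	struct device_ctx dev = { .ops = NULL, .user_state = 2 };

	printf("power state = %d\n", query_power_state(&dev));	/* prints 2 */
	return 0;
}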
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index ec9c1b7d3103..8989f8af716b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler) scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); do { cpu_relax(); - } while (retry > 1 && + } while (--retry > 1 && scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); do { cpu_relax(); scaler_write(1, SCALER_INT_EN); - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); return retry ? 0 : -EIO; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 4a0fe8a25ad7..a56eef3cfee7 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1267,7 +1267,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 847b7866137d..bdaf5ffd2504 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -766,16 +766,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); int slots; - /* When restoring duplicated states, we need to make sure that the - * bw remains the same and avoid recalculating it, as the connector's - * bpc may have changed after the state was duplicated - */ - if (!state->duplicated) - asyh->dp.pbn = - drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, - connector->display_info.bpc * 3); + if (crtc_state->mode_changed || crtc_state->connectors_changed) { + /* + * When restoring duplicated states, we need to make sure that + * the bw remains the same and avoid recalculating it, as the + * connector's bpc may have changed after the state was + * duplicated + */ + if (!state->duplicated) { + const int bpp = connector->display_info.bpc * 3; + const int clock = crtc_state->adjusted_mode.clock; + + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); + } - if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index b3d502421b79..0a38e8e9bc78 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, /* Locate the boot interface, to receive the LED change events */ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0); + struct hid_device *boot_hid; + struct hid_input *boot_hid_input; - struct hid_device *boot_hid = usb_get_intfdata(boot_interface); - struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs, + if (unlikely(boot_interface == NULL)) + return -ENODEV; + + boot_hid = usb_get_intfdata(boot_interface); + boot_hid_input = list_first_entry(&boot_hid->inputs, struct hid_input, list); return boot_hid_input->input->event(boot_hid_input->input, type, code, diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 55b72573066b..4e11cc6fc34b 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct 
file *file) spin_unlock_irq(&list->hiddev->list_lock); mutex_lock(&hiddev->existancelock); + /* + * recheck exist with existance lock held to + * avoid opening a disconnected device + */ + if (!list->hiddev->exist) { + res = -ENODEV; + goto bail_unlock; + } if (!list->hiddev->open++) if (list->hiddev->exist) { struct hid_device *hid = hiddev->hid; @@ -300,6 +308,10 @@ bail_normal_power: hid_hw_power(hid, PM_HINT_NORMAL); bail_unlock: mutex_unlock(&hiddev->existancelock); + + spin_lock_irq(&list->hiddev->list_lock); + list_del(&list->node); + spin_unlock_irq(&list->hiddev->list_lock); bail: file->private_data = NULL; vfree(list); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index fd70b110e8f4..87564010ddbe 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, } /* Functions for DMA support */ -static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, - dma_addr_t phy_addr) +static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, + dma_addr_t phy_addr) { struct imx_i2c_dma *dma; struct dma_slave_config dma_sconfig; @@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); if (!dma) - return -ENOMEM; + return; dma->chan_tx = dma_request_chan(dev, "tx"); if (IS_ERR(dma->chan_tx)) { @@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); - return 0; + return; fail_rx: dma_release_channel(dma->chan_rx); @@ -336,8 +336,6 @@ fail_tx: dma_release_channel(dma->chan_tx); fail_al: devm_kfree(dev, dma); - /* return successfully if there is no dma support */ - return ret == -ENODEV ? 
0 : ret; } static void i2c_imx_dma_callback(void *arg) @@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev) dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); + dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); /* Init DMA config if supported */ - ret = i2c_imx_dma_request(i2c_imx, phy_addr); - if (ret < 0) - goto del_adapter; + i2c_imx_dma_request(i2c_imx, phy_addr); - dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ -del_adapter: - i2c_del_adapter(&i2c_imx->adapter); clk_notifier_unregister: clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); rpm_disable: diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index 0e3c6529fc4c..da073d72f649 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611) if (ret) return ret; - regval = ret & MAX9611_TEMP_MASK; + regval &= MAX9611_TEMP_MASK; if ((regval > MAX9611_TEMP_MAX_POS && regval < MAX9611_TEMP_MIN_NEG) || diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index ff40a450b5d2..ff9e0d7fb4f3 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -292,7 +292,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, struct ib_udata *udata, struct ib_uobject *uobj) { + enum ib_qp_type qp_type = attr->qp_type; struct ib_qp *qp; + bool is_xrc; if (!dev->ops.create_qp) return ERR_PTR(-EOPNOTSUPP); @@ -310,7 +312,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, * and more importantly they are created internaly by driver, * see mlx5 create_dev_resources() as an example. 
*/ - if (attr->qp_type < IB_QPT_XRC_INI) { + is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT; + if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) { qp->res.type = RDMA_RESTRACK_QP; if (uobj) rdma_restrack_uadd(&qp->res); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index cc99479b2c09..9947d16edef2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device, if (has_smi) cq_size *= 2; + port_priv->pd = ib_alloc_pd(device, 0); + if (IS_ERR(port_priv->pd)) { + dev_err(&device->dev, "Couldn't create ib_mad PD\n"); + ret = PTR_ERR(port_priv->pd); + goto error3; + } + port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, IB_POLL_UNBOUND_WORKQUEUE); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); - goto error3; - } - - port_priv->pd = ib_alloc_pd(device, 0); - if (IS_ERR(port_priv->pd)) { - dev_err(&device->dev, "Couldn't create ib_mad PD\n"); - ret = PTR_ERR(port_priv->pd); goto error4; } @@ -3278,11 +3278,11 @@ error8: error7: destroy_mad_qp(&port_priv->qp_info[0]); error6: - ib_dealloc_pd(port_priv->pd); -error4: ib_free_cq(port_priv->cq); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); +error4: + ib_dealloc_pd(port_priv->pd); error3: kfree(port_priv); @@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) destroy_workqueue(port_priv->wq); destroy_mad_qp(&port_priv->qp_info[1]); destroy_mad_qp(&port_priv->qp_info[0]); - ib_dealloc_pd(port_priv->pd); ib_free_cq(port_priv->cq); + ib_dealloc_pd(port_priv->pd); cleanup_recv_queue(&port_priv->qp_info[1]); cleanup_recv_queue(&port_priv->qp_info[0]); /* XXX: Handle deallocation of MAD registration tables */ diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 671f07ba1fad..025b6d86a61f 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -883,11 +884,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) if (get_user(id, arg)) return -EFAULT; + if (id >= IB_UMAD_MAX_AGENTS) + return -EINVAL; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); - if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { + id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); + if (!__get_agent(file, id)) { ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 0c6c1fe87705..d60453e98db7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, struct ib_udata *udata, unsigned long virt, struct hns_roce_db *db) { + unsigned long page_addr = virt & PAGE_MASK; struct hns_roce_user_db_page *page; + unsigned int offset; int ret = 0; mutex_lock(&context->page_mutex); list_for_each_entry(page, &context->page_list, list) - if (page->user_virt == (virt & PAGE_MASK)) + if (page->user_virt == page_addr) goto found; page = kmalloc(sizeof(*page), GFP_KERNEL); @@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, } refcount_set(&page->refcount, 1); - page->user_virt = (virt & PAGE_MASK); - page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); + page->user_virt = page_addr; + 
page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0); if (IS_ERR(page->umem)) { ret = PTR_ERR(page->umem); kfree(page); @@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, list_add(&page->list, &context->page_list); found: - db->dma = sg_dma_address(page->umem->sg_head.sgl) + - (virt & ~PAGE_MASK); - page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; - db->virt_addr = sg_virt(page->umem->sg_head.sgl); + offset = virt - page_addr; + db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; + db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; db->u.user_page = page; refcount_inc(&page->refcount); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index e068a02122f5..9496c69fff3a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -745,8 +745,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) ibdev = &hr_dev->ib_dev; pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) + if (!pd) { + ret = -ENOMEM; goto alloc_mem_failed; + } pd->device = ibdev; ret = hns_roce_alloc_pd(pd, NULL); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a6713a3b6c80..9ab276a8bc81 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -5687,13 +5687,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, return; } - if (mpi->mdev_events.notifier_call) - mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); - mpi->mdev_events.notifier_call = NULL; - mpi->ibdev = NULL; spin_unlock(&port->mp.mpi_lock); + if (mpi->mdev_events.notifier_call) + mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); + mpi->mdev_events.notifier_call = NULL; mlx5_remove_netdev_notifier(ibdev, port_num); spin_lock(&port->mp.mpi_lock); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index e54bec2c2965..d239fc58c002 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); -static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) -{ - return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); -} static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) { return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); } -static bool use_umr(struct mlx5_ib_dev *dev, int order) -{ - return order <= mr_cache_max_order(dev) && - umr_can_modify_entity_size(dev); -} - static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); @@ -1271,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr = NULL; - bool populate_mtts = false; + bool use_umr; struct ib_umem *umem; int page_shift; int npages; @@ -1303,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(dev, order)) { + use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && + (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || + !MLX5_CAP_GEN(dev->mdev, atomic)); + + if (order <= mr_cache_max_order(dev) && use_umr) { mr = 
alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d\n", order); mr = NULL; } - populate_mtts = false; } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { if (access_flags & IB_ACCESS_ON_DEMAND) { err = -EINVAL; pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); goto error; } - populate_mtts = true; + use_umr = false; } if (!mr) { - if (!umr_can_modify_entity_size(dev)) - populate_mtts = true; mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags, populate_mtts); + page_shift, access_flags, !use_umr); mutex_unlock(&dev->slow_path_mutex); } @@ -1341,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, update_odp_mr(mr); - if (!populate_mtts) { + if (use_umr) { int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; if (access_flags & IB_ACCESS_ON_DEMAND) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 91507a2e9290..f6e5351ba4d5 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1765,7 +1765,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, w->num_sge, 0); - kfree(w); + kvfree(w); } int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, @@ -1807,7 +1807,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (valid_req) queue_work(system_unbound_wq, &work->work); else - kfree(work); + kvfree(work); srcu_read_unlock(&dev->mr_srcu, srcu_key); diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 083c2c00a8e9..dfdd1e16de7f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, struct qedr_dev *dev = rdma_device_to_drv_device(device, struct qedr_dev, ibdev); - return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver); } static DEVICE_ATTR_RO(hw_rev); static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); + struct qedr_dev *dev = + rdma_device_to_drv_device(device, struct qedr_dev, ibdev); + + return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n", + dev->pdev->device, + rdma_protocol_iwarp(&dev->ibdev, 1) ? 
+ "iWARP" : "RoCE"); } static DEVICE_ATTR_RO(hca_type); diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index f1569ae8381b..a0a686f56ac4 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -129,7 +129,12 @@ static int iforce_usb_probe(struct usb_interface *intf, return -ENODEV; epirq = &interface->endpoint[0].desc; + if (!usb_endpoint_is_int_in(epirq)) + return -ENODEV; + epout = &interface->endpoint[1].desc; + if (!usb_endpoint_is_int_out(epout)) + return -ENODEV; if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL))) goto fail; diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 0afffe8d824f..77110f3ec21d 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -158,7 +158,8 @@ struct trackpoint_data { #ifdef CONFIG_MOUSE_PS2_TRACKPOINT int trackpoint_detect(struct psmouse *psmouse, bool set_properties); #else -inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties) +static inline int trackpoint_detect(struct psmouse *psmouse, + bool set_properties) { return -ENOSYS; } diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 04b85571f41e..aa577898e952 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c @@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; + endpoint = &intf->cur_altsetting->endpoint[0].desc; + if (!usb_endpoint_is_int_in(endpoint)) + return -ENODEV; + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); input_dev = input_allocate_device(); if (!kbtab || !input_dev) @@ -155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0); - endpoint = &intf->cur_altsetting->endpoint[0].desc; - usb_fill_int_urb(kbtab->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), kbtab->data, 8, diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 35500801dc2b..20e5482d91b9 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe) if (!its_alloc_vpe_table(vpe_id)) { its_vpe_id_free(vpe_id); - its_free_pending_table(vpe->vpt_page); + its_free_pending_table(vpt_page); return -ENOMEM; } diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index bf2237ac5d09..4f74c15c4755 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { .irq_unmask = imx_gpcv2_irq_unmask, .irq_set_wake = imx_gpcv2_irq_set_wake, .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_type = irq_chip_set_type_parent, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b0aab3a0a1bf..f183cadd14e3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1113,6 +1113,8 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; 
bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4039a9599d79..9d582b3ebc88 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ - if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_clear_vlan_info(bp); bnx2x_vfpf_close_vf(bp); - else if (unload_mode != UNLOAD_RECOVERY) + } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip*/ bnx2x_chip_cleanup(bp, unload_mode, keep_link); - else { + } else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c2f6e44e9a3f..8b08cb18e363 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp); void bnx2x_disable_close_the_gate(struct bnx2x *bp); int bnx2x_init_hw_func_cnic(struct bnx2x *bp); +void bnx2x_clear_vlan_info(struct bnx2x *bp); + /** * bnx2x_sp_event - handle ramrods completion. * diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2cc14db8f0ec..192ff8d5da32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, return rc; } +void bnx2x_clear_vlan_info(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + + /* Mark that hw forgot all entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) + vlan->hw = false; + + bp->vlan_cnt = 0; +} + static int bnx2x_del_all_vlans(struct bnx2x *bp) { struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; unsigned long ramrod_flags = 0, vlan_flags = 0; - struct bnx2x_vlan_entry *vlan; int rc; __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); @@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp) if (rc) return rc; - /* Mark that hw forgot all entries */ - list_for_each_entry(vlan, &bp->vlan_reg, link) - vlan->hw = false; - bp->vlan_cnt = 0; + bnx2x_clear_vlan_info(bp); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7afae9d80e75..36fe4f161cf1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2015,9 +2015,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) if (bnapi->events & BNXT_RX_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (bnapi->events & BNXT_AGG_EVENT) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); } bnapi->events = 0; } @@ -5011,6 +5011,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); int i, rc = 0; u32 type; @@ -5086,7 +5087,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) if (rc) goto err_out; bnxt_set_db(bp, &rxr->rx_db, type, map_idx, 
ring->fw_ring_id); - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + /* If we have agg rings, post agg buffers first. */ + if (!agg_rings) + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -5105,7 +5108,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } } - if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (agg_rings) { type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; @@ -5121,6 +5124,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, ring->fw_ring_id); bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } @@ -6963,19 +6967,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) bnxt_hwrm_vnic_set_rss(bp, i, false); } -static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, - bool irq_re_init) +static void bnxt_clear_vnic(struct bnxt *bp) { - if (bp->vnic_info) { - bnxt_hwrm_clear_vnic_filter(bp); + if (!bp->vnic_info) + return; + + bnxt_hwrm_clear_vnic_filter(bp); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { /* clear all RSS setting before free vnic ctx */ bnxt_hwrm_clear_vnic_rss(bp); bnxt_hwrm_vnic_ctx_free(bp); - /* before free the vnic, undo the vnic tpa settings */ - if (bp->flags & BNXT_FLAG_TPA) - bnxt_set_tpa(bp, false); - bnxt_hwrm_vnic_free(bp); } + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + bnxt_hwrm_vnic_ctx_free(bp); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + bnxt_clear_vnic(bp); bnxt_hwrm_ring_free(bp, close_path); bnxt_hwrm_ring_grp_free(bp); if (irq_re_init) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 549c90d3e465..c05d663212b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); - if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) + if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { memcpy(data_addr, buf, bytesize); - - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + } else { + rc = hwrm_send_message_silent(bp, msg, msg_len, + HWRM_CMD_TIMEOUT); + } if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) memcpy(buf, data_addr, bytesize); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a6c7baf38036..b761a2e28a10 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev, mutex_lock(&bp->hwrm_cmd_lock); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; - - if (resp->error_code) { + if (hwrm_err) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; - if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { + if (resp->error_code && 
error_code == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), INSTALL_PACKAGE_TIMEOUT); - if (hwrm_err) - goto flash_pkg_exit; } + if (hwrm_err) + goto flash_pkg_exit; } if (resp->result) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 44d6c5743fb9..434470a6b9f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, u16 src_fid) { - flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; + flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; } static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, @@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, goto free_node; bnxt_tc_set_src_fid(bp, flow, src_fid); - - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - bnxt_tc_set_flow_dir(bp, flow, src_fid); + bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); if (!bnxt_tc_can_offload(bp, flow)) { rc = -EOPNOTSUPP; @@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp, * 2. 15th bit of flow_handle must specify the flow * direction (TX/RX). */ - if (flow_node->flow.dir == BNXT_DIR_RX) + if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 8a0968967bc5..8b0f1510bdc4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -23,6 +23,9 @@ struct bnxt_tc_l2_key { __be16 inner_vlan_tci; __be16 ether_type; u8 num_vlans; + u8 dir; +#define BNXT_DIR_RX 1 +#define BNXT_DIR_TX 0 }; struct bnxt_tc_l3_key { @@ -98,9 +101,6 @@ struct bnxt_tc_flow { /* flow applicable to pkts ingressing on this fid */ u16 src_fid; - u8 dir; -#define BNXT_DIR_RX 1 -#define BNXT_DIR_TX 0 struct bnxt_tc_l2_key l2_key; struct bnxt_tc_l2_key l2_mask; struct bnxt_tc_l3_key l3_key; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 6c01314e87b0..db3552f2d087 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); - goto rss_err; + goto qp_alloc_err; } rss_map->indir_qp->event = mlx4_en_sqp_event; @@ -1241,6 +1241,7 @@ indir_err: MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); mlx4_qp_remove(mdev->dev, rss_map->indir_qp); mlx4_qp_free(mdev->dev, rss_map->indir_qp); +qp_alloc_err: kfree(rss_map->indir_qp); rss_map->indir_qp = NULL; rss_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index f3d98748b211..c1caf14bc334 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -76,9 +76,6 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) u8 state; int err; - if (!test_bit(MLX5E_SQ_STATE_RECOVERING, 
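The bnxt_tc hunks above move the flow direction out of struct bnxt_tc_flow and into struct bnxt_tc_l2_key, so the direction becomes part of the key that identifies a flow. As an illustration only (the struct and helper below are invented, not the driver's), a field placed inside the key struct participates in equality checks, while a field kept outside it cannot distinguish two otherwise identical flows:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical lookup key, loosely modelled on bnxt_tc_l2_key. */
struct l2_key {
        uint8_t dmac[6];
        uint8_t smac[6];
        uint8_t dir;            /* inside the key: participates in matching */
};

/* Two flows are the same entry iff their keys compare equal. */
static int same_flow(const struct l2_key *a, const struct l2_key *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
        struct l2_key rx = { .dmac = {1, 2, 3, 4, 5, 6},
                             .smac = {7, 8, 9, 10, 11, 12},
                             .dir  = 1 };
        struct l2_key tx = rx;

        tx.dir = 0;     /* same addresses, opposite direction */

        /* With dir inside the key the two flows stay distinct. */
        printf("rx matches tx: %s\n", same_flow(&rx, &tx) ? "yes" : "no");
        return 0;
}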
&sq->state)) - return 0; - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); if (err) { netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", @@ -86,10 +83,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) return err; } - if (state != MLX5_SQC_STATE_ERR) { - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); - return -EINVAL; - } + if (state != MLX5_SQC_STATE_ERR) + return 0; mlx5e_tx_disable_queue(sq->txq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 8657e0f26995..2c75b2752f58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port, return &arfs_t->rules_hash[bucket_idx]; } -static u8 arfs_get_ip_proto(const struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) ? - ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; -} - static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, u8 ip_proto, __be16 etype) { @@ -602,31 +596,9 @@ out: arfs_may_expire_flow(priv); } -/* return L4 destination port from ip4/6 packets */ -static __be16 arfs_get_dst_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->dest; - return ((struct udphdr *)transport_header)->dest; -} - -/* return L4 source port from ip4/6 packets */ -static __be16 arfs_get_src_port(const struct sk_buff *skb) -{ - char *transport_header; - - transport_header = skb_transport_header(skb); - if (arfs_get_ip_proto(skb) == IPPROTO_TCP) - return ((struct tcphdr *)transport_header)->source; - return ((struct udphdr *)transport_header)->source; -} - static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, struct arfs_table *arfs_t, - const struct sk_buff *skb, + const struct flow_keys *fk, u16 rxq, u32 flow_id) { struct arfs_rule *rule; @@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, INIT_WORK(&rule->arfs_work, arfs_handle_work); tuple = &rule->tuple; - tuple->etype = skb->protocol; + tuple->etype = fk->basic.n_proto; + tuple->ip_proto = fk->basic.ip_proto; if (tuple->etype == htons(ETH_P_IP)) { - tuple->src_ipv4 = ip_hdr(skb)->saddr; - tuple->dst_ipv4 = ip_hdr(skb)->daddr; + tuple->src_ipv4 = fk->addrs.v4addrs.src; + tuple->dst_ipv4 = fk->addrs.v4addrs.dst; } else { - memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src, sizeof(struct in6_addr)); - memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, sizeof(struct in6_addr)); } - tuple->ip_proto = arfs_get_ip_proto(skb); - tuple->src_port = arfs_get_src_port(skb); - tuple->dst_port = arfs_get_dst_port(skb); + tuple->src_port = fk->ports.src; + tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; @@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, return rule; } -static bool arfs_cmp_ips(struct arfs_tuple *tuple, - const struct sk_buff *skb) +static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk) { - if (tuple->etype == htons(ETH_P_IP) && - tuple->src_ipv4 == ip_hdr(skb)->saddr && - tuple->dst_ipv4 == ip_hdr(skb)->daddr) - return true; - if (tuple->etype == htons(ETH_P_IPV6) && - 
(!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, - sizeof(struct in6_addr))) && - (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, - sizeof(struct in6_addr)))) - return true; + if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst) + return false; + if (tuple->etype != fk->basic.n_proto) + return false; + if (tuple->etype == htons(ETH_P_IP)) + return tuple->src_ipv4 == fk->addrs.v4addrs.src && + tuple->dst_ipv4 == fk->addrs.v4addrs.dst; + if (tuple->etype == htons(ETH_P_IPV6)) + return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); return false; } static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, - const struct sk_buff *skb) + const struct flow_keys *fk) { struct arfs_rule *arfs_rule; struct hlist_head *head; - __be16 src_port = arfs_get_src_port(skb); - __be16 dst_port = arfs_get_dst_port(skb); - head = arfs_hash_bucket(arfs_t, src_port, dst_port); + head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); hlist_for_each_entry(arfs_rule, head, hlist) { - if (arfs_rule->tuple.src_port == src_port && - arfs_rule->tuple.dst_port == dst_port && - arfs_cmp_ips(&arfs_rule->tuple, skb)) { + if (arfs_cmp(&arfs_rule->tuple, fk)) return arfs_rule; - } } return NULL; @@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; - if (skb->protocol != htons(ETH_P_IP) && - skb->protocol != htons(ETH_P_IPV6)) + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; if (skb->encapsulation) return -EPROTONOSUPPORT; - arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto); if (!arfs_t) return -EPROTONOSUPPORT; spin_lock_bh(&arfs->arfs_lock); - arfs_rule = arfs_find_rule(arfs_t, skb); + arfs_rule = arfs_find_rule(arfs_t, &fk); if (arfs_rule) { if (arfs_rule->rxq == rxq_index) { spin_unlock_bh(&arfs->arfs_lock); @@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } arfs_rule->rxq = rxq_index; } else { - arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, - rxq_index, flow_id); + arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id); if (!arfs_rule) { spin_unlock_bh(&arfs->arfs_lock); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index f637d81f08bc..06f9bd6a45e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1060,6 +1060,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, link_modes = autoneg == AUTONEG_ENABLE ? 
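The en_arfs.c rework above stops parsing IP/TCP/UDP headers by hand and extracts the 5-tuple once with the generic flow dissector. A kernel-style sketch of that pattern, loosely following the new mlx5e_rx_flow_steer() shape (this is not the driver's exact code):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/errno.h>
#include <net/flow_dissector.h>

/* Dissect once, then reuse the result for table selection, lookup and
 * rule allocation instead of re-parsing headers in every helper.
 */
static int example_steer(const struct sk_buff *skb)
{
        struct flow_keys fk;

        if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
                return -EPROTONOSUPPORT;

        if (fk.basic.n_proto != htons(ETH_P_IP) &&
            fk.basic.n_proto != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        /* fk.basic.ip_proto, fk.ports.src/dst and fk.addrs.v4addrs /
         * fk.addrs.v6addrs now describe the flow; no direct ip_hdr() or
         * tcp_hdr() access is needed.
         */
        return 0;
}

One dissection call covers IPv4 and IPv6, TCP and UDP, which is why the old arfs_get_ip_proto(), arfs_get_src_port() and arfs_get_dst_port() helpers can be deleted.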
ethtool2ptys_adver_func(adver) : mlx5e_port_speed2linkmodes(mdev, speed, !ext); + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && + autoneg != AUTONEG_ENABLE) { + netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", + __func__); + err = -EINVAL; + goto out; + } + link_modes = link_modes & eproto.cap; if (!link_modes) { netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", @@ -1317,6 +1325,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; int err; + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return -EOPNOTSUPP; + if (pauseparam->autoneg) return -EINVAL; diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index b509b941d5ca..6825254eb882 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -71,46 +71,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port) debugfs_remove_recursive(nsim_dev_port->ddir); } +static struct net *nsim_devlink_net(struct devlink *devlink) +{ + return &init_net; +} + static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); } static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); } static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); } static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv; - return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); } static int nsim_dev_resources_register(struct devlink *devlink) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); struct devlink_resource_size_params params = { .size_max = (u64)-1, .size_granularity = 1, .unit = DEVLINK_RESOURCE_UNIT_ENTRY }; + struct net *net = nsim_devlink_net(devlink); int err; u64 n; @@ -124,8 +125,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4, ¶ms); @@ -134,8 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV4, ¶ms); @@ -154,8 +153,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) goto out; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6, ¶ms); @@ -164,8 +162,7 @@ static int 
nsim_dev_resources_register(struct devlink *devlink) return err; } - n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_IPV6, ¶ms); @@ -177,19 +174,19 @@ static int nsim_dev_resources_register(struct devlink *devlink) devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB, nsim_dev_ipv4_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB_RULES, nsim_dev_ipv4_fib_rules_res_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB, nsim_dev_ipv6_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_dev_ipv6_fib_rules_res_occ_get, - nsim_dev); + net); out: return err; } @@ -197,11 +194,11 @@ out: static int nsim_dev_reload(struct devlink *devlink, struct netlink_ext_ack *extack) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES }; + struct net *net = nsim_devlink_net(devlink); int i; for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { @@ -210,8 +207,7 @@ static int nsim_dev_reload(struct devlink *devlink, err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(nsim_dev->fib_data, - res_ids[i], val, extack); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } @@ -241,15 +237,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) INIT_LIST_HEAD(&nsim_dev->port_list); mutex_init(&nsim_dev->port_list_lock); - nsim_dev->fib_data = nsim_fib_create(); - if (IS_ERR(nsim_dev->fib_data)) { - err = PTR_ERR(nsim_dev->fib_data); - goto err_devlink_free; - } - err = nsim_dev_resources_register(devlink); if (err) - goto err_fib_destroy; + goto err_devlink_free; err = devlink_register(devlink, &nsim_bus_dev->dev); if (err) @@ -271,8 +261,6 @@ err_dl_unregister: devlink_unregister(devlink); err_resources_unregister: devlink_resources_unregister(devlink, NULL); -err_fib_destroy: - nsim_fib_destroy(nsim_dev->fib_data); err_devlink_free: devlink_free(devlink); return ERR_PTR(err); @@ -286,7 +274,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev) nsim_dev_debugfs_exit(nsim_dev); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); - nsim_fib_destroy(nsim_dev->fib_data); mutex_destroy(&nsim_dev->port_list_lock); devlink_free(devlink); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 8c57ba747772..f61d094746c0 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "netdevsim.h" @@ -32,14 +33,15 @@ struct nsim_per_fib_data { }; struct nsim_fib_data { - struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; }; -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max) +static unsigned int nsim_fib_net_id; + +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; switch (res_id) { @@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, return max ? 
entry->max : entry->num; } -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack) { + struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; int err = 0; @@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_rule_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_event(struct nsim_fib_data *data, - struct fib_notifier_info *info, bool add) +static int nsim_fib_event(struct fib_notifier_info *info, bool add) { + struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data, static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); struct fib_notifier_info *info = ptr; int err = 0; switch (event) { case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - err = nsim_fib_rule_event(data, info, - event == FIB_EVENT_RULE_ADD); + err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); break; case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - err = nsim_fib_event(data, info, - event == FIB_EVENT_ENTRY_ADD); + err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); break; } @@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, /* inconsistent dump, trying again */ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) { - struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, - fib_nb); + struct nsim_fib_data *data; + struct net *net; + + rcu_read_lock(); + for_each_net_rcu(net) { + data = net_generic(net, nsim_fib_net_id); + + data->ipv4.fib.num = 0ULL; + data->ipv4.rules.num = 0ULL; - data->ipv4.fib.num = 0ULL; - data->ipv4.rules.num = 0ULL; - data->ipv6.fib.num = 0ULL; - data->ipv6.rules.num = 0ULL; + data->ipv6.fib.num = 0ULL; + data->ipv6.rules.num = 0ULL; + } + rcu_read_unlock(); } -struct nsim_fib_data *nsim_fib_create(void) -{ - struct nsim_fib_data *data; - int err; +static struct notifier_block nsim_fib_nb = { + .notifier_call = nsim_fib_event_nb, +}; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return ERR_PTR(-ENOMEM); +/* Initialize per network namespace state */ +static int __net_init nsim_fib_netns_init(struct net *net) +{ + struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id); data->ipv4.fib.max = (u64)-1; data->ipv4.rules.max = (u64)-1; @@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void) data->ipv6.fib.max = (u64)-1; data->ipv6.rules.max = (u64)-1; - data->fib_nb.notifier_call = nsim_fib_event_nb; - err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent); - if (err) { - pr_err("Failed to register fib notifier\n"); - goto err_out; - } + return 0; +} - return data; +static struct pernet_operations nsim_fib_net_ops = { + .init = nsim_fib_netns_init, + .id = 
&nsim_fib_net_id, + .size = sizeof(struct nsim_fib_data), +}; -err_out: - kfree(data); - return ERR_PTR(err); +void nsim_fib_exit(void) +{ + unregister_pernet_subsys(&nsim_fib_net_ops); + unregister_fib_notifier(&nsim_fib_nb); } -void nsim_fib_destroy(struct nsim_fib_data *data) +int nsim_fib_init(void) { - unregister_fib_notifier(&data->fib_nb); - kfree(data); + int err; + + err = register_pernet_subsys(&nsim_fib_net_ops); + if (err < 0) { + pr_err("Failed to register pernet subsystem\n"); + goto err_out; + } + + err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); + if (err < 0) { + pr_err("Failed to register fib notifier\n"); + goto err_out; + } + +err_out: + return err; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index e5c8aa08e1cd..533a182eefca 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -370,12 +370,18 @@ static int __init nsim_module_init(void) if (err) goto err_dev_exit; - err = rtnl_link_register(&nsim_link_ops); + err = nsim_fib_init(); if (err) goto err_bus_exit; + err = rtnl_link_register(&nsim_link_ops); + if (err) + goto err_fib_exit; + return 0; +err_fib_exit: + nsim_fib_exit(); err_bus_exit: nsim_bus_exit(); err_dev_exit: @@ -386,6 +392,7 @@ err_dev_exit: static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); + nsim_fib_exit(); nsim_bus_exit(); nsim_dev_exit(); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 3f398797c2bc..f9253fe68c31 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -168,12 +168,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_index); -struct nsim_fib_data *nsim_fib_create(void); -void nsim_fib_destroy(struct nsim_fib_data *fib_data); -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_init(void); +void nsim_fib_exit(void); +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack); #if IS_ENABLED(CONFIG_XFRM_OFFLOAD) diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index b9d4145781ca..58bb25e4af10 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; + if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (val < 0) + return val; + + /* Autoneg is being started, therefore disregard current + * link status and report link as down. + */ + if (val & MDIO_AN_CTRL1_RESTART) { + phydev->link = 0; + return 0; + } + } + while (mmd_mask && link) { devad = __ffs(mmd_mask); mmd_mask &= ~BIT(devad); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ffa402732aea..3af0af495cf1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1708,7 +1708,17 @@ EXPORT_SYMBOL(genphy_aneg_done); */ int genphy_update_link(struct phy_device *phydev) { - int status; + int status = 0, bmcr; + + bmcr = phy_read(phydev, MII_BMCR); + if (bmcr < 0) + return bmcr; + + /* Autoneg is being started, therefore disregard BMSR value and + * report link as down. 
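The netdevsim fib.c conversion above replaces a single driver-owned nsim_fib_data with state attached to each network namespace. A minimal, hypothetical module showing the same register_pernet_subsys()/net_generic() pattern (all example_* names are made up):

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_pernet_state {
        u64 counter;
};

static unsigned int example_net_id;

static int __net_init example_netns_init(struct net *net)
{
        struct example_pernet_state *st = net_generic(net, example_net_id);

        st->counter = 0;        /* per-namespace, allocated by the core */
        return 0;
}

static struct pernet_operations example_net_ops = {
        .init = example_netns_init,
        .id   = &example_net_id,
        .size = sizeof(struct example_pernet_state),
};

static int __init example_init(void)
{
        /* .size tells the core how much zeroed storage to attach to each
         * struct net; net_generic(net, example_net_id) retrieves it later.
         */
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With .size set, the core allocates and zeroes the per-namespace block for every struct net, so the driver no longer has to kzalloc() or free it by hand.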
+ */ + if (bmcr & BMCR_ANRESTART) + goto done; /* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 36916bf51ee6..d1b4c7d8e2bc 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6d25dea5ad4b..f7d117d80cfb 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { int i; - __u8 tmp; + __u8 tmp = 0; __le16 retdatai; int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 405038ce98d6..7573af2d88ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -97,7 +97,7 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, - int data_size) + int data_size, int *tbl_rev) { int i; union acpi_object *wifi_pkg; @@ -113,16 +113,19 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid - * (i.e. it is an integer set to 0). + * (i.e. it is an integer smaller than 2, as we currently support only + * 2 revisions). 
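Both PHY hunks above (genphy_c45_read_link() and genphy_update_link()) apply the same rule: while an autonegotiation restart is pending, the latched status bits are stale, so the link is reported as down rather than trusted. A hedged sketch of the clause-22 (BMCR) variant, mirroring the shape of the phy_device.c change rather than replacing it:

#include <linux/phy.h>
#include <linux/mii.h>

/* Sketch only: follows the genphy_update_link() change, it is not a
 * drop-in replacement for it.
 */
static int example_update_link(struct phy_device *phydev)
{
        int bmcr = phy_read(phydev, MII_BMCR);

        if (bmcr < 0)
                return bmcr;

        /* Autoneg restart still pending: BMSR contents are not
         * meaningful yet, so report the link as down for now.
         */
        if (bmcr & BMCR_ANRESTART) {
                phydev->link = 0;
                return 0;
        }

        /* ...otherwise fall through to the usual BMSR double-read. */
        return 0;
}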
*/ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || data->package.elements[0].type != ACPI_TYPE_INTEGER || - data->package.elements[0].integer.value != 0) { + data->package.elements[0].integer.value > 1) { IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n"); return ERR_PTR(-EINVAL); } + *tbl_rev = data->package.elements[0].integer.value; + /* loop through all the packages to find the one for WiFi */ for (i = 1; i < data->package.count; i++) { union acpi_object *domain; @@ -151,14 +154,15 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc) { union acpi_object *wifi_pkg, *data; u32 mcc_val; - int ret; + int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg)) { + wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } @@ -185,6 +189,7 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev) { union acpi_object *data, *wifi_pkg; u64 dflt_pwr_limit; + int tbl_rev; data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD); if (IS_ERR(data)) { @@ -193,8 +198,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev) } wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, - ACPI_SPLC_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg) || + ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev != 0 || wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) { dflt_pwr_limit = 0; goto out_free; @@ -211,14 +216,15 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { union acpi_object *wifi_pkg, *data; - int ret; + int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg)) { + wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index f5704e16643f..991a23450999 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -97,7 +97,7 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, - int data_size); + int data_size, int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available @@ -131,7 +131,8 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, - int data_size) + int data_size, + int *tbl_rev) { return ERR_PTR(-ENOENT); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 01f003c6cff9..f195db398bed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -419,14 +419,26 @@ struct iwl_per_chain_offset_group { struct iwl_per_chain_offset hb; } __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ +/** + * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. 
+ */ +struct iwl_geo_tx_power_profiles_cmd_v1 { + __le32 ops; + struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; +} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */ + /** * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. + * @table_revision: BIOS table revision. */ struct iwl_geo_tx_power_profiles_cmd { __le32 ops; struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; + __le32 table_revision; } __packed; /* GEO_TX_POWER_LIMIT */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index de9243d30135..a74f34a8dffb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -286,6 +286,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S + * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar + * version tables. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -318,6 +320,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, + IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5af9959d05e5..8892707050d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -682,15 +682,15 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *table, *data; bool enabled; - int ret; + int ret, tbl_rev; data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_WRDS_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg)) { + ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } @@ -719,15 +719,15 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *data; bool enabled; - int i, n_profiles, ret; + int i, n_profiles, ret, tbl_rev; data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_EWRD_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg)) { + ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } @@ -778,7 +778,7 @@ out_free: static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *data; - int i, j, ret; + int i, j, ret, tbl_rev; int idx = 1; data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD); @@ -786,12 +786,13 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_WGDS_WIFI_DATA_SIZE); - if (IS_ERR(wifi_pkg)) { + ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev); + if (IS_ERR(wifi_pkg) || tbl_rev > 1) { ret = PTR_ERR(wifi_pkg); goto out_free; } + mvm->geo_rev = tbl_rev; for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) { union acpi_object *entry; @@ -894,15 +895,29 @@ int 
iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { struct iwl_geo_tx_power_profiles_resp *resp; int ret; + u16 len; + void *data; + struct iwl_geo_tx_power_profiles_cmd geo_cmd; + struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1; + struct iwl_host_cmd cmd; + + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { + geo_cmd.ops = + cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); + len = sizeof(geo_cmd); + data = &geo_cmd; + } else { + geo_cmd_v1.ops = + cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); + len = sizeof(geo_cmd_v1); + data = &geo_cmd_v1; + } - struct iwl_geo_tx_power_profiles_cmd geo_cmd = { - .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE), - }; - struct iwl_host_cmd cmd = { + cmd = (struct iwl_host_cmd){ .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), - .len = { sizeof(geo_cmd), }, + .len = { len, }, .flags = CMD_WANT_SKB, - .data = { &geo_cmd }, + .data = { data }, }; if (!iwl_mvm_sar_geo_support(mvm)) @@ -969,6 +984,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) i, j, value[1], value[2], value[0]); } } + + cmd.table_revision = cpu_to_le32(mvm->geo_rev); + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SAR_TABLE_VER)) { + return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, + sizeof(struct iwl_geo_tx_power_profiles_cmd_v1), + &cmd); + } + return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 88af1f0ba3f0..ed8fc9a9204c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1184,6 +1184,7 @@ struct iwl_mvm { #ifdef CONFIG_ACPI struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; + u32 geo_rev; #endif }; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1d9940d4e8c7..c9262ffeefe4 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) @@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. 
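The iwl_mvm_get_sar_geo_profile() hunk above picks the command layout at runtime: firmware that advertises IWL_UCODE_TLV_API_SAR_TABLE_VER gets the new struct with table_revision, older firmware gets the v1 layout and a shorter length. A generic sketch of that pattern with invented struct names (not the driver's definitions):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Old command layout, without the trailing revision word. */
struct example_cmd_v1 {
        __le32 ops;
};

/* Newer layout adds a field; older firmware must not see it. */
struct example_cmd_v2 {
        __le32 ops;
        __le32 table_revision;
};

static int example_send(bool fw_supports_v2,
                        int (*send)(const void *data, u16 len))
{
        struct example_cmd_v2 v2 = { .ops = cpu_to_le32(1) };
        struct example_cmd_v1 v1 = { .ops = cpu_to_le32(1) };
        const void *data;
        u16 len;

        if (fw_supports_v2) {
                v2.table_revision = cpu_to_le32(0);
                data = &v2;
                len = sizeof(v2);
        } else {
                data = &v1;
                len = sizeof(v1);
        }

        /* Both layouts share a common prefix, so sending the shorter
         * v1 length to old firmware is safe.
         */
        return send(data, len);
}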
*/ + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); kfree_skb(nskb); break; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 1d902230ba61..be6cda89dcf5 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -815,6 +815,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), + INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), {} }; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index c1ca931e1fab..7a8cbfb5d213 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); -MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); -MODULE_SOFTDEP("pre: platform:leds-gpio"); -MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); +MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled"); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8068520cf89e..152de392f9aa 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; @@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) + c2->error_data.status == 0)) { + cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); + } /* * Any RAID offload error results in retry which will use @@ -5638,6 +5642,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) } c = cmd_tagged_alloc(h, cmd); + /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; + /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 54772d4c377f..6a4c719497ca 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) ql_log(ql_log_warn, vha, 0xd049, "Failed to allocate ct_sns request.\n"); kfree(fcport); - fcport = NULL; + return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index 2edf3ee91300..caf4d4df4bd3 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d) static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, unsigned int flags) { - int divider, base, prescale; + unsigned int divider, base, prescale; - /* This function needs improvment */ + /* This function needs improvement */ /* Don't know if divider==0 works. 
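The dt3k_ns_to_timer() changes that continue just below switch the CMDF_ROUND_UP case from truncating division to DIV_ROUND_UP. A standalone illustration of why truncation rounds the wrong way for that flag (DIV_ROUND_UP is defined locally here; in the kernel it comes from linux/kernel.h):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int nanosec = 1001;    /* requested period */
        unsigned int base = 10;         /* timer tick, arbitrary units */

        /* Truncating division picks a divider whose period is shorter
         * than requested (1000 here), the wrong direction for a
         * round-up request.
         */
        printf("truncated: %u ticks -> %u\n",
               nanosec / base, (nanosec / base) * base);

        /* DIV_ROUND_UP never undershoots the requested period. */
        printf("round up : %u ticks -> %u\n",
               DIV_ROUND_UP(nanosec, base),
               DIV_ROUND_UP(nanosec, base) * base);
        return 0;
}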
*/ for (prescale = 0; prescale < 16; prescale++) { @@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, divider = (*nanosec) / base; break; case CMDF_ROUND_UP: - divider = (*nanosec) / base; + divider = DIV_ROUND_UP(*nanosec, base); break; } if (divider < 65536) { @@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, } prescale = 15; - base = timer_base * (1 << prescale); + base = timer_base * (prescale + 1); divider = 65535; *nanosec = divider * base; return (prescale << 16) | (divider); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 183b41753c98..62f4fb9b362f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1301,10 +1301,6 @@ made_compressed_probe: tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; - minor = acm_alloc_minor(acm); - if (minor < 0) - goto alloc_fail1; - ctrlsize = usb_endpoint_maxp(epctrl); readsize = usb_endpoint_maxp(epread) * (quirks == SINGLE_RX_URB ? 1 : 2); @@ -1312,6 +1308,13 @@ made_compressed_probe: acm->writesize = usb_endpoint_maxp(epwrite) * 20; acm->control = control_interface; acm->data = data_interface; + + usb_get_intf(acm->control); /* undone in destruct() */ + + minor = acm_alloc_minor(acm); + if (minor < 0) + goto alloc_fail1; + acm->minor = minor; acm->dev = usb_dev; if (h.usb_cdc_acm_descriptor) @@ -1458,7 +1461,6 @@ skip_countries: usb_driver_claim_interface(&acm_driver, data_interface, acm); usb_set_intfdata(data_interface, acm); - usb_get_intf(control_interface); tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, &control_interface->dev); if (IS_ERR(tty_dev)) { diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 65de6f73b672..558890ada0e5 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf, intf->minor = minor; break; } - up_write(&minor_rwsem); - if (intf->minor < 0) + if (intf->minor < 0) { + up_write(&minor_rwsem); return -EXFULL; + } /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf, MKDEV(USB_MAJOR, minor), class_driver, "%s", kbasename(name)); if (IS_ERR(intf->usb_dev)) { - down_write(&minor_rwsem); usb_minors[minor] = NULL; intf->minor = -1; - up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } + up_write(&minor_rwsem); return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); @@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf, return; dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); + device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); down_write(&minor_rwsem); usb_minors[intf->minor] = NULL; up_write(&minor_rwsem); - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); intf->usb_dev = NULL; intf->minor = -1; destroy_usb_class(); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 94d22551fc1b..82e41179fb2d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -101,11 +101,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock); /* wait queue for synchronous unlinks */ DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); -static inline int is_root_hub(struct usb_device *udev) -{ - return (udev->parent == NULL); -} - /*-------------------------------------------------------------------------*/ /* @@ -878,101 +873,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd 
*hcd, struct urb *urb, int status) } - -/* - * Show & store the current value of authorized_default - */ -static ssize_t authorized_default_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct usb_device *rh_usb_dev = to_usb_device(dev); - struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *hcd; - - hcd = bus_to_hcd(usb_bus); - return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy); -} - -static ssize_t authorized_default_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - unsigned val; - struct usb_device *rh_usb_dev = to_usb_device(dev); - struct usb_bus *usb_bus = rh_usb_dev->bus; - struct usb_hcd *hcd; - - hcd = bus_to_hcd(usb_bus); - result = sscanf(buf, "%u\n", &val); - if (result == 1) { - hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ? - val : USB_DEVICE_AUTHORIZE_ALL; - result = size; - } else { - result = -EINVAL; - } - return result; -} -static DEVICE_ATTR_RW(authorized_default); - -/* - * interface_authorized_default_show - show default authorization status - * for USB interfaces - * - * note: interface_authorized_default is the default value - * for initializing the authorized attribute of interfaces - */ -static ssize_t interface_authorized_default_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct usb_device *usb_dev = to_usb_device(dev); - struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); - - return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd)); -} - -/* - * interface_authorized_default_store - store default authorization status - * for USB interfaces - * - * note: interface_authorized_default is the default value - * for initializing the authorized attribute of interfaces - */ -static ssize_t interface_authorized_default_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct usb_device *usb_dev = to_usb_device(dev); - struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); - int rc = count; - bool val; - - if (strtobool(buf, &val) != 0) - return -EINVAL; - - if (val) - set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); - else - clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); - - return rc; -} -static DEVICE_ATTR_RW(interface_authorized_default); - -/* Group all the USB bus attributes */ -static struct attribute *usb_bus_attrs[] = { - &dev_attr_authorized_default.attr, - &dev_attr_interface_authorized_default.attr, - NULL, -}; - -static const struct attribute_group usb_bus_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = usb_bus_attrs, -}; - - - /*-------------------------------------------------------------------------*/ /** @@ -2895,32 +2795,11 @@ int usb_add_hcd(struct usb_hcd *hcd, if (retval != 0) goto err_register_root_hub; - retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group); - if (retval < 0) { - printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n", - retval); - goto error_create_attr_group; - } if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) usb_hcd_poll_rh_status(hcd); return retval; -error_create_attr_group: - clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); - if (HC_IS_RUNNING(hcd->state)) - hcd->state = HC_STATE_QUIESCING; - spin_lock_irq(&hcd_root_hub_lock); - hcd->rh_registered = 0; - spin_unlock_irq(&hcd_root_hub_lock); - -#ifdef CONFIG_PM - cancel_work_sync(&hcd->wakeup_work); -#endif - cancel_work_sync(&hcd->died_work); - mutex_lock(&usb_bus_idr_lock); - usb_disconnect(&rhdev); /* Sets rhdev to NULL */ - 
mutex_unlock(&usb_bus_idr_lock); err_register_root_hub: hcd->rh_pollable = 0; clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -2964,8 +2843,6 @@ void usb_remove_hcd(struct usb_hcd *hcd) dev_info(hcd->self.controller, "remove, state %x\n", hcd->state); usb_get_dev(rhdev); - sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); - clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); if (HC_IS_RUNNING (hcd->state)) hcd->state = HC_STATE_QUIESCING; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index e844bb7b5676..5adf489428aa 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) + if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 7e88fdfe3cf5..f19694e69f5c 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include "usb.h" @@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = { .size = 18 + 65535, /* dev descr + max-size raw descriptor */ }; +/* + * Show & store the current value of authorized_default + */ +static ssize_t authorized_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *rh_usb_dev = to_usb_device(dev); + struct usb_bus *usb_bus = rh_usb_dev->bus; + struct usb_hcd *hcd; + + hcd = bus_to_hcd(usb_bus); + return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy); +} + +static ssize_t authorized_default_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + unsigned int val; + struct usb_device *rh_usb_dev = to_usb_device(dev); + struct usb_bus *usb_bus = rh_usb_dev->bus; + struct usb_hcd *hcd; + + hcd = bus_to_hcd(usb_bus); + result = sscanf(buf, "%u\n", &val); + if (result == 1) { + hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ? 
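The cdc_parse_cdc_header() hunk above replaces sizeof(struct usb_cdc_mdlm_desc *) with sizeof(struct usb_cdc_mdlm_desc): the former is only the size of a pointer, so the length check accepted truncated descriptors. A tiny standalone demonstration of the difference, using an illustrative struct rather than the real descriptor definition:

#include <stdio.h>

struct example_mdlm_desc {
        unsigned char bLength;
        unsigned char bDescriptorType;
        unsigned char bDescriptorSubType;
        unsigned char bcdVersion[2];
        unsigned char bGUID[16];
};

int main(void)
{
        /* A check written against sizeof(struct ... *) compares the
         * buffer length with 4 or 8, not with the descriptor size.
         */
        printf("sizeof(struct *) = %zu\n", sizeof(struct example_mdlm_desc *));
        printf("sizeof(struct)   = %zu\n", sizeof(struct example_mdlm_desc));
        return 0;
}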
+ val : USB_DEVICE_AUTHORIZE_ALL; + result = size; + } else { + result = -EINVAL; + } + return result; +} +static DEVICE_ATTR_RW(authorized_default); + +/* + * interface_authorized_default_show - show default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + + return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd)); +} + +/* + * interface_authorized_default_store - store default authorization status + * for USB interfaces + * + * note: interface_authorized_default is the default value + * for initializing the authorized attribute of interfaces + */ +static ssize_t interface_authorized_default_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_device *usb_dev = to_usb_device(dev); + struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus); + int rc = count; + bool val; + + if (strtobool(buf, &val) != 0) + return -EINVAL; + + if (val) + set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + else + clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags); + + return rc; +} +static DEVICE_ATTR_RW(interface_authorized_default); + +/* Group all the USB bus attributes */ +static struct attribute *usb_bus_attrs[] = { + &dev_attr_authorized_default.attr, + &dev_attr_interface_authorized_default.attr, + NULL, +}; + +static const struct attribute_group usb_bus_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = usb_bus_attrs, +}; + + +static int add_default_authorized_attributes(struct device *dev) +{ + int rc = 0; + + if (is_usb_device(dev)) + rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group); + + return rc; +} + +static void remove_default_authorized_attributes(struct device *dev) +{ + if (is_usb_device(dev)) { + sysfs_remove_group(&dev->kobj, &usb_bus_attr_group); + } +} + int usb_create_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; @@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev) retval = add_power_attributes(dev); if (retval) goto error; + + if (is_root_hub(udev)) { + retval = add_default_authorized_attributes(dev); + if (retval) + goto error; + } return retval; + error: usb_remove_sysfs_dev_files(udev); return retval; @@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev) { struct device *dev = &udev->dev; + if (is_root_hub(udev)) + remove_default_authorized_attributes(dev); + remove_power_attributes(dev); remove_persist_attributes(dev); device_remove_bin_file(dev, &dev_bin_attr_descriptors); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index d95a5358f73d..d5ac492f441b 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -153,6 +153,11 @@ static inline int is_usb_port(const struct device *dev) return dev->type == &usb_port_device_type; } +static inline int is_root_hub(struct usb_device *udev) +{ + return (udev->parent == NULL); +} + /* Do the same for device drivers and interface drivers. 
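The usb core rework above moves the authorized_default attributes into usb_create_sysfs_dev_files() and creates them only for root hubs, which is why is_root_hub() moves into usb.h. A compact sketch of that gating pattern with a placeholder attribute group (the example_* names are not the kernel's):

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/usb.h>

/* Placeholder group; the real attributes live in drivers/usb/core/sysfs.c. */
static struct attribute *example_attrs[] = { NULL };
static const struct attribute_group example_group = {
        .attrs = example_attrs,
};

static int example_create(struct usb_device *udev)
{
        /* Root hubs have no parent, so the per-bus defaults are only
         * exposed on them; ordinary devices skip the group entirely.
         */
        if (udev->parent)
                return 0;

        return sysfs_create_group(&udev->dev.kobj, &example_group);
}

static void example_remove(struct usb_device *udev)
{
        if (!udev->parent)
                sysfs_remove_group(&udev->dev.kobj, &example_group);
}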
*/ static inline int is_usb_device_driver(struct device_driver *drv) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 7dc248546fd4..b6eec81b6a40 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, if (usb3->forced_b_device) return -EBUSY; - if (!strncmp(buf, "host", strlen("host"))) + if (sysfs_streq(buf, "host")) new_mode_is_host = true; - else if (!strncmp(buf, "peripheral", strlen("peripheral"))) + else if (sysfs_streq(buf, "peripheral")) new_mode_is_host = false; else return -EINVAL; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c1582fbd1150..38e920ac7f82 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) }, + /* Motorola devices */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */ + { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, @@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */ + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 73427d8e0116..e5694133ebe5 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -116,13 
+116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value, { int err; u16 old_value; - pci_power_t new_state, old_state; + pci_power_t new_state; err = pci_read_config_word(dev, offset, &old_value); if (err) goto out; - old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK); new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); new_value &= PM_OK_BITS; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 982152d3f920..69f8ab4d91f2 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1488,7 +1488,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr) goto out; } - trans = btrfs_attach_transaction(root); + trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { ret = PTR_ERR(trans); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1aa3f6d6d775..2db14fdd6bff 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | - __TRANS_JOIN), + __TRANS_JOIN | + __TRANS_JOIN_NOSTART), [TRANS_STATE_UNBLOCKED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), [TRANS_STATE_COMPLETED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | - __TRANS_JOIN_NOLOCK), + __TRANS_JOIN_NOLOCK | + __TRANS_JOIN_NOSTART), }; void btrfs_put_transaction(struct btrfs_transaction *transaction) @@ -525,7 +528,8 @@ again: ret = join_transaction(fs_info, type); if (ret == -EBUSY) { wait_current_trans(fs_info); - if (unlikely(type == TRANS_ATTACH)) + if (unlikely(type == TRANS_ATTACH || + type == TRANS_JOIN_NOSTART)) ret = -ENOENT; } } while (ret == -EBUSY); @@ -641,6 +645,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root BTRFS_RESERVE_NO_FLUSH, true); } +/* + * Similar to regular join but it never starts a transaction when none is + * running or after waiting for the current one to finish. 
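The btrfs hunks around this point introduce TRANS_JOIN_NOSTART and add it to the blocked-type masks, so such joiners wait out a commit in progress instead of piling onto it and, per the start_transaction() hunk, end up with -ENOENT like TRANS_ATTACH. A toy, standalone illustration of the blocked-type bitmask idea with made-up flag values:

#include <stdio.h>

#define T_START        (1u << 0)
#define T_ATTACH       (1u << 1)
#define T_JOIN         (1u << 2)
#define T_JOIN_NOSTART (1u << 3)

/* Adding T_JOIN_NOSTART here means such callers are blocked once a
 * commit has started, rather than joining a committing transaction.
 */
static const unsigned int blocked_in_commit =
        T_START | T_ATTACH | T_JOIN | T_JOIN_NOSTART;

static int may_join(unsigned int type)
{
        return !(blocked_in_commit & type);
}

int main(void)
{
        printf("JOIN_NOSTART may join during commit: %s\n",
               may_join(T_JOIN_NOSTART) ? "yes" : "no");
        return 0;
}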
+ */ +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) +{ + return start_transaction(root, 0, TRANS_JOIN_NOSTART, + BTRFS_RESERVE_NO_FLUSH, true); +} + /* * btrfs_attach_transaction() - catch the running transaction * diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 78c446c222b7..2f695587f828 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -94,11 +94,13 @@ struct btrfs_transaction { #define __TRANS_JOIN (1U << 11) #define __TRANS_JOIN_NOLOCK (1U << 12) #define __TRANS_DUMMY (1U << 13) +#define __TRANS_JOIN_NOSTART (1U << 14) #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE) #define TRANS_ATTACH (__TRANS_ATTACH) #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE) #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK) +#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART) #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH) @@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( int min_factor); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_attach_transaction_barrier( struct btrfs_root *root); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 963fb4571fd9..bb6fd5a506d3 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -794,6 +794,29 @@ static int move_data_block(struct inode *inode, block_t bidx, if (lfs_mode) down_write(&fio.sbi->io_order_lock); + mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), + fio.old_blkaddr, false); + if (!mpage) + goto up_out; + + fio.encrypted_page = mpage; + + /* read source block in mpage */ + if (!PageUptodate(mpage)) { + err = f2fs_submit_page_bio(&fio); + if (err) { + f2fs_put_page(mpage, 1); + goto up_out; + } + lock_page(mpage); + if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || + !PageUptodate(mpage))) { + err = -EIO; + f2fs_put_page(mpage, 1); + goto up_out; + } + } + f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, &sum, CURSEG_COLD_DATA, NULL, false); @@ -801,44 +824,18 @@ static int move_data_block(struct inode *inode, block_t bidx, newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); if (!fio.encrypted_page) { err = -ENOMEM; - goto recover_block; - } - - mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), - fio.old_blkaddr, FGP_LOCK, GFP_NOFS); - if (mpage) { - bool updated = false; - - if (PageUptodate(mpage)) { - memcpy(page_address(fio.encrypted_page), - page_address(mpage), PAGE_SIZE); - updated = true; - } f2fs_put_page(mpage, 1); - invalidate_mapping_pages(META_MAPPING(fio.sbi), - fio.old_blkaddr, fio.old_blkaddr); - if (updated) - goto write_page; - } - - err = f2fs_submit_page_bio(&fio); - if (err) - goto put_page_out; - - /* write page */ - lock_page(fio.encrypted_page); - - if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { - err = -EIO; - goto put_page_out; - } - if (unlikely(!PageUptodate(fio.encrypted_page))) { - err = -EIO; - goto put_page_out; + goto recover_block; } -write_page: + /* write target block */ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); + memcpy(page_address(fio.encrypted_page), + page_address(mpage), PAGE_SIZE); + f2fs_put_page(mpage, 1); + invalidate_mapping_pages(META_MAPPING(fio.sbi), + fio.old_blkaddr, fio.old_blkaddr); + 
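The new __TRANS_JOIN_NOSTART bit above is simply added to the per-state masks of join types that must wait once a transaction commit has reached COMMIT_DOING or later. A minimal userspace sketch of that table lookup follows; the __TRANS_JOIN* values come from this hunk, while the __TRANS_START and __TRANS_ATTACH bit positions are assumptions, since they are not shown here:

#include <stdio.h>

/* bit values for the join types; START/ATTACH positions are assumed */
#define __TRANS_START         (1U << 9)
#define __TRANS_ATTACH        (1U << 10)
#define __TRANS_JOIN          (1U << 11)
#define __TRANS_JOIN_NOLOCK   (1U << 12)
#define __TRANS_JOIN_NOSTART  (1U << 14)

enum trans_state { COMMIT_START, COMMIT_DOING, UNBLOCKED, COMPLETED, STATE_MAX };

static const unsigned int blocked[STATE_MAX] = {
	[COMMIT_START] = __TRANS_START | __TRANS_ATTACH,
	[COMMIT_DOING] = __TRANS_START | __TRANS_ATTACH | __TRANS_JOIN |
			 __TRANS_JOIN_NOSTART,
	[UNBLOCKED]    = __TRANS_START | __TRANS_ATTACH | __TRANS_JOIN |
			 __TRANS_JOIN_NOLOCK | __TRANS_JOIN_NOSTART,
	[COMPLETED]    = __TRANS_START | __TRANS_ATTACH | __TRANS_JOIN |
			 __TRANS_JOIN_NOLOCK | __TRANS_JOIN_NOSTART,
};

static int type_blocked(enum trans_state s, unsigned int type)
{
	return (blocked[s] & type) != 0;
}

int main(void)
{
	/* a NOSTART join may still enter during COMMIT_START, but not later */
	printf("JOIN_NOSTART blocked in COMMIT_START: %d\n",
	       type_blocked(COMMIT_START, __TRANS_JOIN_NOSTART));
	printf("JOIN_NOSTART blocked in COMMIT_DOING: %d\n",
	       type_blocked(COMMIT_DOING, __TRANS_JOIN_NOSTART));
	return 0;
}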
set_page_dirty(fio.encrypted_page); if (clear_page_dirty_for_io(fio.encrypted_page)) dec_page_count(fio.sbi, F2FS_DIRTY_META); @@ -869,11 +866,12 @@ write_page: put_page_out: f2fs_put_page(fio.encrypted_page, 1); recover_block: - if (lfs_mode) - up_write(&fio.sbi->io_order_lock); if (err) f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, true, true); +up_out: + if (lfs_mode) + up_write(&fio.sbi->io_order_lock); put_out: f2fs_put_dnode(&dn); out: diff --git a/fs/io_uring.c b/fs/io_uring.c index 3e887a09533b..61018559e8fe 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1032,10 +1032,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, iter->bvec = bvec + seg_skip; iter->nr_segs -= seg_skip; - iter->count -= (seg_skip << PAGE_SHIFT); + iter->count -= bvec->bv_len + offset; iter->iov_offset = offset & ~PAGE_MASK; - if (iter->iov_offset) - iter->count -= iter->iov_offset; } } diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 385f3aaa2448..90c830e3758e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); int low_bucket = 0, bucket, high_bucket; struct ocfs2_xattr_bucket *search; - u32 last_hash; u64 blkno, lower_blkno = 0; search = ocfs2_xattr_bucket_new(inode); @@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode, if (xh->xh_count) xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; - last_hash = le32_to_cpu(xe->xe_name_hash); - /* record lower_blkno which may be the insert place. */ lower_blkno = blkno; diff --git a/fs/seq_file.c b/fs/seq_file.c index abe27ec43176..225bf9239b32 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset) } if (seq_has_overflowed(m)) goto Eoverflow; + p = m->op->next(m, p, &m->index); if (pos + m->count > offset) { m->from = offset - pos; m->count -= m->from; @@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset) } pos += m->count; m->count = 0; - p = m->op->next(m, p, &m->index); if (pos == offset) break; } diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h index c64bea7a52be..e9f20b813a69 100644 --- a/include/asm-generic/getorder.h +++ b/include/asm-generic/getorder.h @@ -7,24 +7,6 @@ #include #include -/* - * Runtime evaluation of get_order() - */ -static inline __attribute_const__ -int __get_order(unsigned long size) -{ - int order; - - size--; - size >>= PAGE_SHIFT; -#if BITS_PER_LONG == 32 - order = fls(size); -#else - order = fls64(size); -#endif - return order; -} - /** * get_order - Determine the allocation order of a memory size * @size: The size for which to get the order @@ -43,19 +25,27 @@ int __get_order(unsigned long size) * to hold an object of the specified size. * * The result is undefined if the size is 0. - * - * This function may be used to initialise variables with compile time - * evaluations of constants. */ -#define get_order(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ - (((n) < (1UL << PAGE_SHIFT)) ? 
0 : \ - ilog2((n) - 1) - PAGE_SHIFT + 1) \ - ) : \ - __get_order(n) \ -) +static inline __attribute_const__ int get_order(unsigned long size) +{ + if (__builtin_constant_p(size)) { + if (!size) + return BITS_PER_LONG - PAGE_SHIFT; + + if (size < (1UL << PAGE_SHIFT)) + return 0; + + return ilog2((size) - 1) - PAGE_SHIFT + 1; + } + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + return fls(size); +#else + return fls64(size); +#endif +} #endif /* __ASSEMBLY__ */ diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 1dda31825ec4..71283739ffd2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -32,6 +32,7 @@ #endif /* CONFIG_SPARSEMEM */ +#ifndef BUILD_VDSO32_64 /* * page->flags layout: * @@ -76,20 +77,22 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ + <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_TAG_WIDTH 8 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ > BITS_PER_LONG - NR_PAGEFLAGS -#error "KASAN: not enough bits in page flags for tag" -#endif -#else -#define KASAN_TAG_WIDTH 0 +#error "Not enough bits in page flags" #endif /* @@ -104,4 +107,5 @@ #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif +#endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 056f557d5194..64fa59b2c8d5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1358,6 +1358,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_copy_decrypted(struct sk_buff *to, + const struct sk_buff *from) +{ +#ifdef CONFIG_TLS_DEVICE + to->decrypted = from->decrypted; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { diff --git a/include/linux/socket.h b/include/linux/socket.h index b57cd8bf96e2..810d5ec0ada3 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -291,6 +291,9 @@ struct ucred { #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ +#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry + * plain text and require encryption + */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 395b4406f4b0..222af2046086 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -680,9 +680,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, const struct nla_policy *policy, struct netlink_ext_ack *extack) { - return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), - nlmsg_attrlen(nlh, hdrlen), policy, - NL_VALIDATE_STRICT, extack); + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + NL_VALIDATE_STRICT, extack); } /** diff --git a/include/net/sock.h b/include/net/sock.h index 6cbc16136357..526de911cd91 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2482,6 +2482,7 @@ static 
inline bool sk_fullsock(const struct sock *sk) /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. + * Check decrypted mark in case skb_orphan() cleared socket. */ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) @@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, #ifdef CONFIG_SOCK_VALIDATE_XMIT struct sock *sk = skb->sk; - if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) + if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); +#ifdef CONFIG_TLS_DEVICE + } else if (unlikely(skb->decrypted)) { + pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); + kfree_skb(skb); + skb = NULL; +#endif + } #endif return skb; diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 2212adda8f77..64e92d56c6a8 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -2,7 +2,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM dma_fence -#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_DMA_FENCE_H #include diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index f3a12566bed0..6678cf8b235b 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -3,7 +3,7 @@ #define TRACE_SYSTEM napi #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_NAPI_H_ +#define _TRACE_NAPI_H #include #include @@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll, #undef NO_DEV -#endif /* _TRACE_NAPI_H_ */ +#endif /* _TRACE_NAPI_H */ /* This part must be outside protection */ #include diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 60d0d8bd336d..0d1a9ebf55ba 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -2,7 +2,7 @@ #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_QDISC_H_ +#define _TRACE_QDISC_H #include #include @@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); -#endif /* _TRACE_QDISC_H_ */ +#endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h index 0818f6286110..971cd02d2daf 100644 --- a/include/trace/events/tegra_apb_dma.h +++ b/include/trace/events/tegra_apb_dma.h @@ -1,5 +1,5 @@ #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_TEGRA_APM_DMA_H +#define _TRACE_TEGRA_APB_DMA_H #include #include @@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr, TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) ); -#endif /* _TRACE_TEGRADMA_H */ +#endif /* _TRACE_TEGRA_APB_DMA_H */ /* This part must be outside protection */ #include diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index f7afdadb6770..3401382bbca2 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, int ret; if (!dev_is_dma_coherent(dev)) { + unsigned long pfn; + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; - page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr, - dma_addr)); + /* If the PFN is not valid, we do not have a struct page */ + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, 
dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; + page = pfn_to_page(pfn); } else { page = virt_to_page(cpu_addr); } @@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, if (!dev_is_dma_coherent(dev)) { if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) return -ENXIO; + + /* If the PFN is not valid, we do not have a struct page */ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + if (!pfn_valid(pfn)) + return -ENXIO; } else { pfn = page_to_pfn(virt_to_page(cpu_addr)); } diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 962cf343f798..ae3ec77bb92f 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -40,6 +40,7 @@ struct sugov_policy { struct task_struct *thread; bool work_in_progress; + bool limits_changed; bool need_freq_update; }; @@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) !cpufreq_this_cpu_can_update(sg_policy->policy)) return false; - if (unlikely(sg_policy->need_freq_update)) + if (unlikely(sg_policy->limits_changed)) { + sg_policy->limits_changed = false; + sg_policy->need_freq_update = true; return true; + } delta_ns = time - sg_policy->last_freq_update_time; @@ -427,7 +431,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) { if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) - sg_policy->need_freq_update = true; + sg_policy->limits_changed = true; } static void sugov_update_single(struct update_util_data *hook, u64 time, @@ -447,7 +451,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, if (!sugov_should_update_freq(sg_policy, time)) return; - busy = sugov_cpu_is_busy(sg_cpu); + /* Limits may have changed, don't skip frequency update */ + busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu); util = sugov_get_util(sg_cpu); max = sg_cpu->max; @@ -821,6 +826,7 @@ static int sugov_start(struct cpufreq_policy *policy) sg_policy->last_freq_update_time = 0; sg_policy->next_freq = 0; sg_policy->work_in_progress = false; + sg_policy->limits_changed = false; sg_policy->need_freq_update = false; sg_policy->cached_raw_freq = 0; @@ -869,7 +875,7 @@ static void sugov_limits(struct cpufreq_policy *policy) mutex_unlock(&sg_policy->work_lock); } - sg_policy->need_freq_update = true; + sg_policy->limits_changed = true; } struct cpufreq_governor schedutil_gov = { diff --git a/mm/hmm.c b/mm/hmm.c index 4c405dfbd2b3..27dd9a881627 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -995,7 +995,7 @@ EXPORT_SYMBOL(hmm_range_unregister); * @range: range * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid * permission (for instance asking for write and range is read only), - * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid + * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid * vma or it is illegal to access that range), number of valid pages * in range->pfns[] (from range start address). * @@ -1019,7 +1019,7 @@ long hmm_range_snapshot(struct hmm_range *range) do { /* If range is no longer valid force retry. */ if (!range->valid) - return -EAGAIN; + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) @@ -1117,10 +1117,8 @@ long hmm_range_fault(struct hmm_range *range, bool block) do { /* If range is no longer valid force retry. 
*/ - if (!range->valid) { - up_read(&hmm->mm->mmap_sem); - return -EAGAIN; - } + if (!range->valid) + return -EBUSY; vma = find_vma(hmm->mm, start); if (vma == NULL || (vma->vm_flags & device_vma)) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 3e147ea83182..3afb01bce736 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -114,7 +114,7 @@ /* GFP bitmask for kmemleak internal allocations */ #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ __GFP_NORETRY | __GFP_NOMEMALLOC | \ - __GFP_NOWARN | __GFP_NOFAIL) + __GFP_NOWARN) /* scanning area inside a memory block */ struct kmemleak_scan_area { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 902d020aa70e..8f5dabfaf94d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1126,26 +1126,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root, css_put(&prev->css); } -static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) +static void __invalidate_reclaim_iterators(struct mem_cgroup *from, + struct mem_cgroup *dead_memcg) { - struct mem_cgroup *memcg = dead_memcg; struct mem_cgroup_reclaim_iter *iter; struct mem_cgroup_per_node *mz; int nid; int i; - for (; memcg; memcg = parent_mem_cgroup(memcg)) { - for_each_node(nid) { - mz = mem_cgroup_nodeinfo(memcg, nid); - for (i = 0; i <= DEF_PRIORITY; i++) { - iter = &mz->iter[i]; - cmpxchg(&iter->position, - dead_memcg, NULL); - } + for_each_node(nid) { + mz = mem_cgroup_nodeinfo(from, nid); + for (i = 0; i <= DEF_PRIORITY; i++) { + iter = &mz->iter[i]; + cmpxchg(&iter->position, + dead_memcg, NULL); } } } +static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) +{ + struct mem_cgroup *memcg = dead_memcg; + struct mem_cgroup *last; + + do { + __invalidate_reclaim_iterators(memcg, dead_memcg); + last = memcg; + } while ((memcg = parent_mem_cgroup(memcg))); + + /* + * When cgruop1 non-hierarchy mode is used, + * parent_mem_cgroup() does not walk all the way up to the + * cgroup root (root_mem_cgroup). So we have to handle + * dead_memcg from cgroup root separately. + */ + if (last != root_mem_cgroup) + __invalidate_reclaim_iterators(root_mem_cgroup, + dead_memcg); +} + /** * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy * @memcg: hierarchy root diff --git a/mm/mempolicy.c b/mm/mempolicy.c index fdcb73536319..ca3f443c8fc1 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { }, }; -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); struct queue_pages { @@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page, } /* - * queue_pages_pmd() has three possible return values: - * 1 - pages are placed on the right node or queued successfully. - * 0 - THP was split. - * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing - * page was already on a node that does not follow the policy. + * queue_pages_pmd() has four possible return values: + * 0 - pages are placed on the right node or queued successfully. + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * 2 - THP was split. + * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an + * existing page was already on a node that does not follow the + * policy. 
*/ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, if (is_huge_zero_page(page)) { spin_unlock(ptl); __split_huge_pmd(walk->vma, pmd, addr, false, NULL); + ret = 2; goto out; } - if (!queue_pages_required(page, qp)) { - ret = 1; + if (!queue_pages_required(page, qp)) goto unlock; - } - ret = 1; flags = qp->flags; /* go to thp migration */ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - if (!vma_migratable(walk->vma)) { - ret = -EIO; + if (!vma_migratable(walk->vma) || + migrate_page_add(page, qp->pagelist, flags)) { + ret = 1; goto unlock; } - - migrate_page_add(page, qp->pagelist, flags); } else ret = -EIO; unlock: @@ -479,6 +479,13 @@ out: /* * Scan through pages checking if pages follow certain conditions, * and move them to the pagelist if they do. + * + * queue_pages_pte_range() has three possible return values: + * 0 - pages are placed on the right node or queued successfully. + * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * -EIO - only MPOL_MF_STRICT was specified and an existing page was already + * on a node that does not follow the policy. */ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, struct queue_pages *qp = walk->private; unsigned long flags = qp->flags; int ret; + bool has_unmovable = false; pte_t *pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { ret = queue_pages_pmd(pmd, ptl, addr, end, walk); - if (ret > 0) - return 0; - else if (ret < 0) + if (ret != 2) return ret; } + /* THP was split, fall through to pte walk */ if (pmd_trans_unstable(pmd)) return 0; @@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, if (!queue_pages_required(page, qp)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - if (!vma_migratable(vma)) + /* MPOL_MF_STRICT must be specified if we get here */ + if (!vma_migratable(vma)) { + has_unmovable = true; break; - migrate_page_add(page, qp->pagelist, flags); + } + + /* + * Do not abort immediately since there may be + * temporary off LRU pages in the range. Still + * need migrate other LRU pages. + */ + if (migrate_page_add(page, qp->pagelist, flags)) + has_unmovable = true; } else break; } pte_unmap_unlock(pte - 1, ptl); cond_resched(); + + if (has_unmovable) + return 1; + return addr != end ? -EIO : 0; } @@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, * * If pages found in a given range are on a set of nodes (determined by * @nodes and @flags,) it's isolated and queued to the pagelist which is - * passed via @private.) + * passed via @private. + * + * queue_pages_range() has three possible return values: + * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were + * specified. + * 0 - queue pages successfully or no misplaced page. + * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified. */ static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, @@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, /* * page migration, thp tail pages can be passed. 
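The mempolicy changes in this region shift queue_pages_pte_range() from aborting on the first unmovable page to remembering it, finishing the walk, and returning 1 so that do_mbind() can decide later whether MPOL_MF_STRICT turns it into -EIO. A toy model of that contract, with invented names and the MPOL_MF_* flag combinations left out, might look like:

#include <stdio.h>
#include <stdbool.h>

struct toy_page { int nid; bool movable; };

/* queue pages on the wrong node; an unmovable page no longer stops the
 * scan, it is only remembered and reported via the return value */
static int queue_pages(const struct toy_page *pages, int n, int allowed_nid)
{
	bool has_unmovable = false;
	int queued = 0;

	for (int i = 0; i < n; i++) {
		if (pages[i].nid == allowed_nid)
			continue;		/* already placed correctly */
		if (!pages[i].movable) {
			has_unmovable = true;	/* keep walking the rest */
			continue;
		}
		queued++;
	}

	printf("queued %d page(s) for migration\n", queued);
	return has_unmovable ? 1 : 0;
}

int main(void)
{
	struct toy_page pages[] = {
		{ .nid = 1, .movable = true },
		{ .nid = 1, .movable = false },	/* must not abort the walk */
		{ .nid = 1, .movable = true },
	};

	int ret = queue_pages(pages, 3, 0);
	printf("return %d (1 => caller may report -EIO under MPOL_MF_STRICT)\n", ret);
	return 0;
}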
*/ -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { struct page *head = compound_head(page); @@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist, mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_cache(head), hpage_nr_pages(head)); + } else if (flags & MPOL_MF_STRICT) { + /* + * Non-movable page may reach here. And, there may be + * temporary off LRU pages or non-LRU movable pages. + * Treat them as unmovable pages since they can't be + * isolated, so they can't be moved at the moment. It + * should return -EIO for this case too. + */ + return -EIO; } } + + return 0; } /* page allocation callback for NUMA node migration */ @@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start) } #else -static void migrate_page_add(struct page *page, struct list_head *pagelist, +static int migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { + return -EIO; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, @@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len, struct mempolicy *new; unsigned long end; int err; + int ret; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) @@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len, if (err) goto mpol_out; - err = queue_pages_range(mm, start, end, nmask, + ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); - if (!err) - err = mbind_range(mm, start, end, new); + + if (ret < 0) { + err = -EIO; + goto up_out; + } + + err = mbind_range(mm, start, end, new); if (!err) { int nr_failed = 0; @@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len, putback_movable_pages(&pagelist); } - if (nr_failed && (flags & MPOL_MF_STRICT)) + if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) err = -EIO; } else putback_movable_pages(&pagelist); +up_out: up_write(&mm->mmap_sem); - mpol_out: +mpol_out: mpol_put(new); return err; } diff --git a/mm/rmap.c b/mm/rmap.c index e5dfe2ae6b0d..003377e24232 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* * No need to invalidate here it will synchronize on * against the special swap migration pte. + * + * The assignment to subpage above was computed from a + * swap PTE which results in an invalid pointer. + * Since only PAGE_SIZE pages can currently be + * migrated, just set it to page. This will need to be + * changed when hugepage migrations to device private + * memory are supported. */ + subpage = page; goto discard; } diff --git a/mm/usercopy.c b/mm/usercopy.c index 2a09796edef8..98e924864554 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n, bool to_user) { /* Reject if object wraps past end of memory. */ - if (ptr + n < ptr) + if (ptr + (n - 1) < ptr) usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); /* Reject if NULL or ZERO-allocation. */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 4ebf20152328..c8f58f5695a9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -88,9 +88,6 @@ struct scan_control { /* Can pages be swapped as part of reclaim? */ unsigned int may_swap:1; - /* e.g. 
boosted watermark reclaim leaves slabs alone */ - unsigned int may_shrinkslab:1; - /* * Cgroups are not reclaimed below their configured memory.low, * unless we threaten to OOM. If any cgroups are skipped due to @@ -2669,10 +2666,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) shrink_node_memcg(pgdat, memcg, sc, &lru_pages); node_lru_pages += lru_pages; - if (sc->may_shrinkslab) { - shrink_slab(sc->gfp_mask, pgdat->node_id, - memcg, sc->priority); - } + shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, + sc->priority); /* Record the group's reclaim efficiency */ vmpressure(sc->gfp_mask, memcg, false, @@ -3149,7 +3144,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = 1, - .may_shrinkslab = 1, }; /* @@ -3191,7 +3185,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, .may_unmap = 1, .reclaim_idx = MAX_NR_ZONES - 1, .may_swap = !noswap, - .may_shrinkslab = 1, }; unsigned long lru_pages; @@ -3236,7 +3229,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = may_swap, - .may_shrinkslab = 1, }; /* @@ -3545,7 +3537,6 @@ restart: */ sc.may_writepage = !laptop_mode && !nr_boost_reclaim; sc.may_swap = !nr_boost_reclaim; - sc.may_shrinkslab = !nr_boost_reclaim; /* * Do some background aging of the anon list, to give diff --git a/mm/z3fold.c b/mm/z3fold.c index 3b27094dc42e..c4debbe683eb 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -819,9 +819,19 @@ out: static void z3fold_destroy_pool(struct z3fold_pool *pool) { kmem_cache_destroy(pool->c_handle); - z3fold_unregister_migration(pool); - destroy_workqueue(pool->release_wq); + + /* + * We need to destroy pool->compact_wq before pool->release_wq, + * as any pending work on pool->compact_wq will call + * queue_work(pool->release_wq, &pool->work). + * + * There are still outstanding pages until both workqueues are drained, + * so we cannot unregister migration until then. 
+ */ + destroy_workqueue(pool->compact_wq); + destroy_workqueue(pool->release_wq); + z3fold_unregister_migration(pool); kfree(pool); } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 963dfdc14827..1fa9ac483173 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e, return 0; } +static int ebt_compat_init_offsets(unsigned int number) +{ + if (number > INT_MAX) + return -EINVAL; + + /* also count the base chain policies */ + number += NF_BR_NUMHOOKS; + + return xt_compat_init_offsets(NFPROTO_BRIDGE, number); +} static int compat_table_info(const struct ebt_table_info *info, struct compat_ebt_replace *newinfo) { unsigned int size = info->entries_size; const void *entries = info->entries; + int ret; newinfo->entries_size = size; - if (info->nentries) { - int ret = xt_compat_init_offsets(NFPROTO_BRIDGE, - info->nentries); - if (ret) - return ret; - } + ret = ebt_compat_init_offsets(info->nentries); + if (ret) + return ret; return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); @@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user, xt_compat_lock(NFPROTO_BRIDGE); - if (tmp.nentries) { - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); - if (ret < 0) - goto out_unlock; - } + ret = ebt_compat_init_offsets(tmp.nentries); + if (ret < 0) + goto out_unlock; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) diff --git a/net/core/filter.c b/net/core/filter.c index f681fb772940..534c310bb089 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7325,12 +7325,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, gso_segs): /* si->dst_reg = skb_shinfo(SKB); */ #ifdef NET_SKBUFF_DATA_USES_OFFSET - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), - si->dst_reg, si->src_reg, - offsetof(struct sk_buff, head)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), BPF_REG_AX, si->src_reg, offsetof(struct sk_buff, end)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, head)); *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); #else *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), diff --git a/net/core/sock.c b/net/core/sock.c index aa4a00d381e3..df7b38b60164 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1988,6 +1988,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) } EXPORT_SYMBOL(skb_set_owner_w); +static bool can_skb_orphan_partial(const struct sk_buff *skb) +{ +#ifdef CONFIG_TLS_DEVICE + /* Drivers depend on in-order delivery for crypto offload, + * partial orphan breaks out-of-order-OK logic. + */ + if (skb->decrypted) + return false; +#endif + return (skb->destructor == sock_wfree || + (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); +} + /* This helper is used by netem, as it can hold packets in its * delay queue. We want to allow the owner socket to send more * packets, as if they were already TX completed by a typical driver. 
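The can_skb_orphan_partial() helper added above folds the old destructor checks together with a TLS-specific bail-out: a clear-text skb produced for device crypto offload has to keep its socket association so the driver can still find the TLS state. A rough userspace model of that predicate, using invented stand-in types and destructor stubs purely for illustration:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the destructors the real helper recognises */
static void sock_wfree_stub(void) { }
static void tcp_wfree_stub(void) { }
static void other_destructor(void) { }

struct fake_skb {
	void (*destructor)(void);
	bool decrypted;		/* models skb->decrypted under CONFIG_TLS_DEVICE */
};

/* mirrors the shape of the new check: never partially orphan a clear-text
 * TLS skb, otherwise allow it only for the two known destructors */
static bool can_orphan_partial(const struct fake_skb *skb)
{
	if (skb->decrypted)
		return false;
	return skb->destructor == sock_wfree_stub ||
	       skb->destructor == tcp_wfree_stub;
}

int main(void)
{
	struct fake_skb tls   = { .destructor = tcp_wfree_stub, .decrypted = true };
	struct fake_skb plain = { .destructor = tcp_wfree_stub, .decrypted = false };
	struct fake_skb other = { .destructor = other_destructor, .decrypted = false };

	printf("tls skb:   %d (keep socket)\n", can_orphan_partial(&tls));
	printf("plain skb: %d\n", can_orphan_partial(&plain));
	printf("other:     %d\n", can_orphan_partial(&other));
	return 0;
}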
@@ -1999,11 +2012,7 @@ void skb_orphan_partial(struct sk_buff *skb) if (skb_is_tcp_pure_ack(skb)) return; - if (skb->destructor == sock_wfree -#ifdef CONFIG_INET - || skb->destructor == tcp_wfree -#endif - ) { + if (can_skb_orphan_partial(skb)) { struct sock *sk = skb->sk; if (refcount_inc_not_zero(&sk->sk_refcnt)) { diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 4ec5b7f85d51..09d9286b27cc 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds, { int port; + if (!ds->ops->port_mdb_add) + return; + for_each_set_bit(port, bitmap, ds->num_ports) ds->ops->port_mdb_add(ds, port, mdb); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5264f064a87e..b30f7f877181 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -984,6 +984,9 @@ new_segment: if (!skb) goto wait_for_memory; +#ifdef CONFIG_TLS_DEVICE + skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); +#endif skb_entail(sk, skb); copy = size_goal; } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 3d1e15401384..8a56e09cfb0e 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -398,10 +398,14 @@ more_data: static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_msg tmp, *msg_tx = NULL; - int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS; int copied = 0, err = 0; struct sk_psock *psock; long timeo; + int flags; + + /* Don't let internal do_tcp_sendpages() flags through */ + flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); + flags |= MSG_NO_SHARED_FRAGS; psock = sk_psock_get(sk); if (unlikely(!psock)) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7d0be046cbc1..359d298348c7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1318,6 +1318,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, buff = sk_stream_alloc_skb(sk, nsize, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. */ + skb_copy_decrypted(buff, skb); sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@ -1872,6 +1873,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, buff = sk_stream_alloc_skb(sk, 0, gfp, true); if (unlikely(!buff)) return -ENOMEM; + skb_copy_decrypted(buff, skb); sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@ -2141,6 +2143,7 @@ static int tcp_mtu_probe(struct sock *sk) sk_mem_charge(sk, nskb->truesize); skb = tcp_send_head(sk); + skb_copy_decrypted(nskb, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f4f9b8344a32..e343a030ec26 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); * table location, we assume id gets exposed to userspace. * * Following nf_conn items do not change throughout lifetime - * of the nf_conn after it has been committed to main hash table: + * of the nf_conn: * * 1. nf_conn address - * 2. nf_conn->ext address - * 3. nf_conn->master address (normally NULL) - * 4. tuple - * 5. the associated net namespace + * 2. nf_conn->master address (normally NULL) + * 3. the associated net namespace + * 4. 
the original direction tuple */ u32 nf_ct_get_id(const struct nf_conn *ct) { @@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct) net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); a = (unsigned long)ct; - b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct)); - c = (unsigned long)ct->ext; - d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash), + b = (unsigned long)ct->master; + c = (unsigned long)nf_ct_net(ct); + d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple), &ct_id_seed); #ifdef CONFIG_64BIT return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5f78df080573..bad144dfabc5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2607,6 +2607,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) mutex_lock(&po->pg_vec_lock); + /* packet_sendmsg() check on tx_ring.pg_vec was lockless, + * we need to confirm it under protection of pg_vec_lock. + */ + if (unlikely(!po->tx_ring.pg_vec)) { + err = -EBUSY; + goto out; + } if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index b100870f02a6..37dced00b63d 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } +static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) +{ + return nla_total_size(sizeof(struct tc_skbedit)) + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */ + + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */ + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */ + + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */ + + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */ + + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ +} + static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .id = TCA_ID_SKBEDIT, @@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = { .init = tcf_skbedit_init, .cleanup = tcf_skbedit_cleanup, .walk = tcf_skbedit_walker, + .get_fill_size = tcf_skbedit_get_fill_size, .lookup = tcf_skbedit_search, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 9ecfb8f5902a..8be89aa52b6e 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -849,7 +849,8 @@ unlock: spin_unlock_bh(qdisc_lock(sch)); free_sched: - kfree(new_admin); + if (new_admin) + call_rcu(&new_admin->rcu, taprio_free_sched_cb); return err; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index a554d6d15d1b..1cf5bb5b73c4 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands, */ if (net->sctp.pf_enable && (transport->state == SCTP_ACTIVE) && - (asoc->pf_retrans < transport->pathmaxrxt) && + (transport->error_count < transport->pathmaxrxt) && (transport->error_count > asoc->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 25946604af85..e83cdaa2ab76 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc, nstr_list[i] = htons(str_list[i]); if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { + 
kfree(nstr_list); retval = -EAGAIN; goto out; } diff --git a/net/tipc/addr.c b/net/tipc/addr.c index b88d48d00913..0f1eaed1bd1b 100644 --- a/net/tipc/addr.c +++ b/net/tipc/addr.c @@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr) tipc_set_node_id(net, node_id); } tn->trial_addr = addr; + tn->addr_trial_end = jiffies; pr_info("32-bit node address hash set to %x\n", addr); } diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index eb8f24f420f0..4cfcce211c2f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -342,9 +342,9 @@ static int tls_push_data(struct sock *sk, struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; + int tls_push_record_flags; struct page_frag *pfrag; size_t orig_size = size; u32 max_open_record_len; @@ -359,6 +359,9 @@ static int tls_push_data(struct sock *sk, if (sk->sk_err) return -sk->sk_err; + flags |= MSG_SENDPAGE_DECRYPTED; + tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if (tls_is_partially_sent_record(tls_ctx)) { rc = tls_push_partial_record(sk, tls_ctx, flags); @@ -545,7 +548,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx) gfp_t sk_allocation = sk->sk_allocation; sk->sk_allocation = GFP_ATOMIC; - tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); + tls_push_partial_record(sk, ctx, + MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_SENDPAGE_DECRYPTED); sk->sk_allocation = sk_allocation; } } diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index 8a5c4d645eb1..4bbf4fc163a2 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y) # $(cc-option,) # Return y if the compiler supports , n otherwise -cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null) +cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null) # $(ld-option,) # Return y if the linker supports , n otherwise diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 38d77353c66a..cea276955147 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \ $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \ $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \ $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \ - $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \ + $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \ $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ $(if $(KBUILD_MODPOST_WARN),-w) diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 9a94672e7adc..ade699131065 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1228,24 +1228,11 @@ hashalg_fail: static int __init init_digests(void) { - u8 digest[TPM_MAX_DIGEST_SIZE]; - int ret; - int i; - - ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE); - if (ret < 0) - return ret; - if (ret < TPM_MAX_DIGEST_SIZE) - return -EFAULT; - digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), GFP_KERNEL); if (!digests) return -ENOMEM; - for (i = 0; i < chip->nr_allocated_banks; i++) - memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE); - return 0; } diff --git 
a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 12dd9b318db1..703857aab00f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, if (!to_check) break; /* all drained */ init_waitqueue_entry(&wait, current); + set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&to_check->sleep, &wait); snd_pcm_stream_unlock_irq(substream); if (runtime->no_period_wakeup) @@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, } tout = msecs_to_jiffies(tout * 1000); } - tout = schedule_timeout_interruptible(tout); + tout = schedule_timeout(tout); snd_pcm_stream_lock_irq(substream); group = snd_pcm_stream_group_ref(substream); diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 485edaba0037..5bf24fb819d2 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec) } EXPORT_SYMBOL_GPL(snd_hda_gen_free); +/** + * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting + * @codec: the HDA codec + * + * This can be put as patch_ops reboot_notify function. + */ +void snd_hda_gen_reboot_notify(struct hda_codec *codec) +{ + /* Make the codec enter D3 to avoid spurious noises from the internal + * speaker during (and after) reboot + */ + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); + snd_hda_codec_write(codec, codec->core.afg, 0, + AC_VERB_SET_POWER_STATE, AC_PWRST_D3); + msleep(10); +} +EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify); + #ifdef CONFIG_PM /** * snd_hda_gen_check_power_status - check the loopback power save state @@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = { .init = snd_hda_gen_init, .free = snd_hda_gen_free, .unsol_event = snd_hda_jack_unsol_event, + .reboot_notify = snd_hda_gen_reboot_notify, #ifdef CONFIG_PM .check_power_status = snd_hda_gen_check_power_status, #endif @@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec) err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); if (err < 0) - return err; + goto error; err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); if (err < 0) diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 35a670a71c42..5f199dcb0d18 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, struct auto_pin_cfg *cfg); int snd_hda_gen_build_controls(struct hda_codec *codec); int snd_hda_gen_build_pcms(struct hda_codec *codec); +void snd_hda_gen_reboot_notify(struct hda_codec *codec); /* standard jack event callbacks */ void snd_hda_gen_hp_automute(struct hda_codec *codec, diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fb8f452a1c78..5732c31c4167 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2505,6 +2505,9 @@ static const struct pci_device_id azx_ids[] = { /* AMD, X370 & co */ { PCI_DEVICE(0x1022, 0x1457), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, + /* AMD, X570 & co */ + { PCI_DEVICE(0x1022, 0x1487), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, /* AMD Stoney */ { PCI_DEVICE(0x1022, 0x157a), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index f299f137eaea..14298ef45b21 100644 --- a/sound/pci/hda/patch_conexant.c +++ 
b/sound/pci/hda/patch_conexant.c @@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; - switch (codec->core.vendor_id) { - case 0x14f12008: /* CX8200 */ - case 0x14f150f2: /* CX20722 */ - case 0x14f150f4: /* CX20724 */ - break; - default: - return; - } - /* Turn the problematic codec into D3 to avoid spurious noises from the internal speaker during (and after) reboot */ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); - - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); - snd_hda_codec_write(codec, codec->core.afg, 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); - msleep(10); + snd_hda_gen_reboot_notify(codec); } static void cx_auto_free(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index de224cbea7a0..e333b3e30e31 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec) alc_shutup(codec); } -/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */ -static void alc_d3_at_reboot(struct hda_codec *codec) -{ - snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); - snd_hda_codec_write(codec, codec->core.afg, 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); - msleep(10); -} - #define alc_free snd_hda_gen_free #ifdef CONFIG_PM @@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ + spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; codec->power_save_node = 0; /* avoid click noises */ snd_hda_apply_pincfgs(codec, pincfgs); @@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 7498b5191b68..b5927c3d5bc0 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -68,6 +68,7 @@ struct mixer_build { unsigned char *buffer; unsigned int buflen; DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); + DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS); struct usb_audio_term oterm; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; @@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, return -EINVAL; if (!desc->bNrInPins) return -EINVAL; + if (desc->bLength < sizeof(*desc) + desc->bNrInPins) + return -EINVAL; switch (state->mixer->protocol) { case UAC_VERSION_1: @@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, * parse the source unit recursively until it reaches to a terminal * or a branched unit. 
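The usb-audio mixer change below splits check_input_term() into a __check_input_term() worker plus a wrapper that clears a per-parse termbitmap, so a malformed descriptor chain that points back at an already-visited unit id is rejected instead of looping forever. A stripped-down sketch of the same guard, using a generic next[] table rather than the real USB descriptors, could look like:

#include <stdio.h>
#include <string.h>
#include <limits.h>

#define MAX_ID_ELEMS	256
#define LONG_BITS	(CHAR_BIT * sizeof(unsigned long))

static unsigned long termbitmap[MAX_ID_ELEMS / LONG_BITS];

static int test_and_set_id(int id)
{
	unsigned long mask = 1UL << (id % LONG_BITS);
	unsigned long *word = &termbitmap[id / LONG_BITS];
	int old = !!(*word & mask);

	*word |= mask;
	return old;
}

/* follow a unit's source chain; return -22 (-EINVAL style) on a loop */
static int walk_chain(const int *next, int id)
{
	memset(termbitmap, 0, sizeof(termbitmap));	/* what the wrapper now does */
	while (id != 0) {
		if (test_and_set_id(id))
			return -22;	/* id seen before: descriptor loop */
		id = next[id];
	}
	return 0;
}

int main(void)
{
	int next[MAX_ID_ELEMS] = { 0 };

	next[5] = 3; next[3] = 1; next[1] = 0;	/* well formed: 5 -> 3 -> 1 -> end */
	printf("linear chain: %d\n", walk_chain(next, 5));

	next[1] = 5;				/* 5 -> 3 -> 1 -> 5: a loop */
	printf("looped chain: %d\n", walk_chain(next, 5));
	return 0;
}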
*/ -static int check_input_term(struct mixer_build *state, int id, +static int __check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term) { int protocol = state->mixer->protocol; int err; void *p1; + unsigned char *hdr; memset(term, 0, sizeof(*term)); - while ((p1 = find_audio_control_unit(state, id)) != NULL) { - unsigned char *hdr = p1; + for (;;) { + /* a loop in the terminal chain? */ + if (test_and_set_bit(id, state->termbitmap)) + return -EINVAL; + + p1 = find_audio_control_unit(state, id); + if (!p1) + break; + + hdr = p1; term->id = id; if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) { @@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id, /* call recursively to verify that the * referenced clock entity is valid */ - err = check_input_term(state, d->bCSourceID, term); + err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; @@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id, case UAC2_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ @@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id, /* call recursively to verify that the * referenced clock entity is valid */ - err = check_input_term(state, d->bCSourceID, term); + err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; @@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id, case UAC3_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ @@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id, return -EINVAL; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); + err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; @@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id, return -ENODEV; } + +static int check_input_term(struct mixer_build *state, int id, + struct usb_audio_term *term) +{ + memset(term, 0, sizeof(*term)); + memset(state->termbitmap, 0, sizeof(state->termbitmap)); + return __check_input_term(state, id, term); +} + /* * Feature Unit */ diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh index 930b80f422e8..aa597ae53747 100755 --- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh +++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh @@ -3,10 +3,13 @@ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ +# also as: +# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) + printf "static const char *usbdevfs_ioctl_cmds[] = {\n" -regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" -egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ - sed -r "s/$regex/\2 \1/g" | \ 
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" +egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \ + sed -r "s/$regex/\4 \1/g" | \ sort | xargs printf "\t[%s] = \"%s\",\n" printf "};\n\n" printf "#if 0\n" diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e84b70be3fc1..abe9af867967 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -3478,6 +3478,13 @@ int perf_session__read_header(struct perf_session *session) data->file.path); } + if (f_header.attr_size == 0) { + pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n" + "Was the 'perf record' command properly terminated?\n", + data->file.path); + return -EINVAL; + } + nr_attrs = f_header.attrs.size / f_header.attr_size; lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -3558,7 +3565,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, size += sizeof(struct perf_event_header); size += ids * sizeof(u64); - ev = malloc(size); + ev = zalloc(size); if (ev == NULL) return -ENOMEM; diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json index ecd96eda7f6a..e11b7c1efda3 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json @@ -509,5 +509,52 @@ "teardown": [ "$TC actions flush action skbedit" ] + }, + { + "id": "630c", + "name": "Add batch of 32 skbedit actions with all parameters and cookie", + "category": [ + "actions", + "skbedit" + ], + "setup": [ + [ + "$TC actions flush action skbedit", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"", + "expExitCode": "0", + "verifyCmd": "$TC actions list action skbedit", + "matchPattern": "^[ \t]+index [0-9]+ ref", + "matchCount": "32", + "teardown": [ + "$TC actions flush action skbedit" + ] + }, + { + "id": "706d", + "name": "Delete batch of 32 skbedit actions with all parameters", + "category": [ + "actions", + "skbedit" + ], + "setup": [ + [ + "$TC actions flush action skbedit", + 0, + 1, + 255 + ], + "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\"" + ], + "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"", + "expExitCode": "0", + "verifyCmd": "$TC actions list action skbedit", + "matchPattern": "^[ \t]+index [0-9]+ ref", + "matchCount": "0", + "teardown": [] } ]