diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index 1649117e6087d..9cd7926c87814 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -2993,10 +2993,10 @@
            65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
               ...
           127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
-          128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
-          129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+          192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+          193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
               ...
-          159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+          223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
 
 232 char  Biometric Devices
             0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index c059ab74ed886..a4a75fa795249 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -122,7 +122,7 @@ on various other factors also like;
 	so the device should have enough free bytes available its OOB/Spare
 	area to accommodate ECC for entire page. In general following expression
 	helps in determining if given device can accommodate ECC syndrome:
-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
+	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
 	where
 		OOBSIZE number of bytes in OOB/spare area
 		PAGESIZE number of bytes in main-area of device page
diff --git a/Makefile b/Makefile
index 3a3eea3ab10a5..77dd62aa0bbe5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 206
+SUBLEVEL = 207
 EXTRAVERSION =
 NAME = "People's Front"
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index cf9619d4efb4f..c5254c5967ed6 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1112,7 +1112,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 	clear_page(to);
 	clear_bit(PG_dc_clean, &page->flags);
 }
-
+EXPORT_SYMBOL(clear_user_page);
 
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index ec2327a3796d5..1b3a4144646b0 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -90,6 +90,8 @@ $(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
 	$(addprefix $(obj)/,$(libfdt_hdrs))
 
 ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
+CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
+CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
 OBJS += $(libfdt_objs) atags_to_fdt.o
 endif
 
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index f202396e3f2a8..f346673d34ead 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -70,6 +70,12 @@
 		clock-frequency = <11289600>;
 	};
 
+	achc_24M: achc-clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+	};
+
 	sgtlsound: sound {
 		compatible = "fsl,imx53-cpuvo-sgtl5000",
 			     "fsl,imx-audio-sgtl5000";
@@ -287,16 +293,13 @@
 			   &gpio4 12 GPIO_ACTIVE_LOW>;
 	status = "okay";
 
-	spidev0: spi@0 {
-		compatible = "ge,achc";
-		reg = <0>;
-		spi-max-frequency = <1000000>;
-	};
-
-	spidev1: spi@1 {
-		compatible = "ge,achc";
-		reg = <1>;
-		spi-max-frequency = <1000000>;
+	spidev0: spi@1 {
+		compatible = "ge,achc", "nxp,kinetis-k20";
+		reg = <1>, <0>;
+		vdd-supply = <&reg_3v3>;
+		vdda-supply = <&reg_3v3>;
+		clocks = <&achc_24M>;
+		reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
 	};
 
 	gpioxra0: gpio@2 {
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 4a99c92551049..d0153bbbdbeb8 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1296,9 +1296,9 @@
 				 <&mmcc DSI1_BYTE_CLK>,
 				 <&mmcc DSI_PIXEL_CLK>,
 				 <&mmcc DSI1_ESC_CLK>;
-			clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
-					"src_clk", "byte_clk", "pixel_clk",
-					"core_clk";
+			clock-names = "iface", "bus", "core_mmss",
+					"src", "byte", "pixel",
+					"core";
 
 			assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
 					  <&mmcc DSI1_ESC_SRC>,
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 20137fc578b1b..394a6b4dc69d5 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -185,8 +185,9 @@
 				nvidia,pins = "ata", "atb", "atc", "atd", "ate",
 					"cdev1", "cdev2", "dap1", "dtb", "gma",
 					"gmb", "gmc", "gmd", "gme", "gpu7",
-					"gpv", "i2cp", "pta", "rm", "slxa",
-					"slxk", "spia", "spib", "uac";
+					"gpv", "i2cp", "irrx", "irtx", "pta",
+					"rm", "slxa", "slxk", "spia", "spib",
+					"uac";
 				nvidia,pull = ;
 				nvidia,tristate = ;
 			};
@@ -211,7 +212,7 @@
 			conf_ddc {
 				nvidia,pins = "ddc", "dta", "dtd", "kbca",
 					"kbcb", "kbcc", "kbcd", "kbce", "kbcf",
-					"sdc";
+					"sdc", "uad", "uca";
 				nvidia,pull = ;
 				nvidia,tristate = ;
 			};
@@ -221,10 +222,9 @@
 					"lvp0", "owc", "sdb";
 				nvidia,tristate = ;
 			};
-			conf_irrx {
-				nvidia,pins = "irrx", "irtx", "sdd", "spic",
-					"spie", "spih", "uaa", "uab", "uad",
-					"uca", "ucb";
+			conf_sdd {
+				nvidia,pins = "sdd", "spic", "spie", "spih",
+					"uaa", "uab", "ucb";
 				nvidia,pull = ;
 				nvidia,tristate = ;
 			};
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8cad59465af39..8b679e2ca3c3d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \ - process.o ptrace.o reboot.o return_address.o \ + process.o ptrace.o reboot.o \ setup.o signal.o sigreturn_codes.o \ stacktrace.o sys_arm.o time.o traps.o +ifneq ($(CONFIG_ARM_UNWIND),y) +obj-$(CONFIG_FRAME_POINTER) += return_address.o +endif + obj-$(CONFIG_ATAGS) += atags_parse.o obj-$(CONFIG_ATAGS_PROC) += atags_proc.o obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 36ed35073289b..f945742dea449 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -10,8 +10,6 @@ */ #include #include - -#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #include #include @@ -56,6 +54,4 @@ void *return_address(unsigned int level) return NULL; } -#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ - EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index ae0a61c61a6e1..14be73ca107a5 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -109,6 +109,7 @@ struct mmdc_pmu { struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; struct hlist_node node; struct fsl_mmdc_devtype_data *devtype_data; + struct clk *mmdc_ipg_clk; }; /* @@ -474,11 +475,13 @@ static int imx_mmdc_remove(struct platform_device *pdev) cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); perf_pmu_unregister(&pmu_mmdc->pmu); iounmap(pmu_mmdc->mmdc_base); + clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk); kfree(pmu_mmdc); return 0; } -static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base) +static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base, + struct clk *mmdc_ipg_clk) { struct mmdc_pmu *pmu_mmdc; char *name; @@ -506,6 +509,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b } mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); + pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; if (mmdc_num == 0) name = "mmdc"; else @@ -541,7 +545,7 @@ pmu_free: #else #define imx_mmdc_remove NULL -#define imx_mmdc_perf_init(pdev, mmdc_base) 0 +#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0 #endif static int imx_mmdc_probe(struct platform_device *pdev) @@ -579,9 +583,11 @@ static int imx_mmdc_probe(struct platform_device *pdev) val &= ~(1 << BP_MMDC_MAPSR_PSD); writel_relaxed(val, reg); - err = imx_mmdc_perf_init(pdev, mmdc_base); - if (err) + err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk); + if (err) { iounmap(mmdc_base); + clk_disable_unprepare(mmdc_ipg_clk); + } return err; } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 328ced7bfaf26..79b12e7445373 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1578,6 +1578,9 @@ exit: rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 5c5e57026c275..c607297922fdb 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -91,7 +91,7 @@ #address-cells = <0>; interrupt-controller; reg = <0x11001000 0x1000>, - <0x11002000 0x1000>, + <0x11002000 0x2000>, <0x11004000 0x2000>, <0x11006000 0x2000>; }; diff --git 
a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts index c13ddee8262b9..58acf21d8d333 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts +++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts @@ -28,7 +28,7 @@ stdout-path = "serial0"; }; - memory { + memory@40000000 { device_type = "memory"; reg = <0x0 0x40000000 0x0 0x20000000>; }; diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index a780f6714b445..74ab40b76ad53 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -76,8 +76,8 @@ #define EARLY_KASLR (0) #endif -#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \ - - ((vstart) >> (shift)) + 1 + EARLY_KASLR) +#define EARLY_ENTRIES(vstart, vend, shift) \ + ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR) #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT)) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 19c87b52c001f..89ab68cb35bbd 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -434,7 +434,7 @@ size_t sve_state_size(struct task_struct const *task) void sve_alloc(struct task_struct *task) { if (task->thread.sve_state) { - memset(task->thread.sve_state, 0, sve_state_size(current)); + memset(task->thread.sve_state, 0, sve_state_size(task)); return; } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c85ea70b92936..ea7f059dbcb6c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -202,7 +202,7 @@ ENDPROC(preserve_boot_args) * to be composed of multiple pages. (This effectively scales the end index). * * vstart: virtual address of start of range - * vend: virtual address of end of range + * vend: virtual address of end of range - we map [vstart, vend] * shift: shift used to transform virtual address into index * ptrs: number of entries in page table * istart: index in table corresponding to vstart @@ -239,17 +239,18 @@ ENDPROC(preserve_boot_args) * * tbl: location of page table * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE) - * vstart: start address to map - * vend: end address to map - we map [vstart, vend] + * vstart: virtual address of start of range + * vend: virtual address of end of range - we map [vstart, vend - 1] * flags: flags to use to map last level entries * phys: physical address corresponding to vstart - physical memory is contiguous * pgds: the number of pgd entries * * Temporaries: istart, iend, tmp, count, sv - these need to be different registers - * Preserves: vstart, vend, flags - * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv + * Preserves: vstart, flags + * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv */ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv + sub \vend, \vend, #1 add \rtbl, \tbl, #PAGE_SIZE mov \sv, \rtbl mov \count, #0 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 7f0258ed1f5fe..6876e8205042a 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -685,6 +685,19 @@ emit_cond_jmp: } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + /* + * Nothing required here. + * + * In case of arm64, we rely on the firmware mitigation of + * Speculative Store Bypass as controlled via the ssbd kernel + * parameter. 
Whenever the mitigation is enabled, it works + * for all of the kernel code with no need to provide any + * additional instructions. + */ + break; + /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index e45ce4243aaa3..76262dc40e791 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -258,8 +258,8 @@ static void __exit nfeth_cleanup(void) for (i = 0; i < MAX_UNIT; i++) { if (nfeth_dev[i]) { - unregister_netdev(nfeth_dev[0]); - free_netdev(nfeth_dev[0]); + unregister_netdev(nfeth_dev[i]); + free_netdev(nfeth_dev[i]); } } free_irq(nfEtherIRQ, nfeth_interrupt); diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c index 7859b6e498634..5b5d78a7882a4 100644 --- a/arch/mips/mti-malta/malta-dtshim.c +++ b/arch/mips/mti-malta/malta-dtshim.c @@ -26,7 +26,7 @@ #define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8 #define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8) -static unsigned char fdt_buf[16 << 10] __initdata; +static unsigned char fdt_buf[16 << 10] __initdata __aligned(8); /* determined physical memory size, not overridden by command line args */ extern unsigned long physical_memsize; diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 3832c46286082..947a7172c814e 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -1282,6 +1282,9 @@ jeq_common: } break; + case BPF_ST | BPF_NOSPEC: /* speculation barrier */ + break; + case BPF_ST | BPF_B | BPF_MEM: case BPF_ST | BPF_H | BPF_MEM: case BPF_ST | BPF_W | BPF_MEM: diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 01b59d2ce1747..c2c3ce8a0f84e 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -551,6 +551,7 @@ EXCEPTION_ENTRY(_external_irq_handler) l.bnf 1f // ext irq enabled, all ok. l.nop +#ifdef CONFIG_PRINTK l.addi r1,r1,-0x8 l.movhi r3,hi(42f) l.ori r3,r3,lo(42f) @@ -564,6 +565,7 @@ EXCEPTION_ENTRY(_external_irq_handler) .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r" .align 4 .previous +#endif l.ori r4,r4,SPR_SR_IEE // fix the bug // l.sw PT_SR(r1),r4 diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 342073f44d3f1..79b05eac2b33c 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -239,6 +239,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, #endif usp = (regs->gr[30] & ~(0x01UL)); +#ifdef CONFIG_64BIT + if (is_compat_task()) { + /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */ + usp = (compat_uint_t)usp; + } +#endif /*FIXME: frame_size parameter is unused, remove it. */ frame = get_sigframe(&ksig->ka, usp, sizeof(*frame)); diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index 9b9d17437373b..0247b8e6cb1b7 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -49,9 +49,6 @@ p_end: .long _end p_pstack: .long _platform_stack_top #endif - .globl _zimage_start - /* Clang appears to require the .weak directive to be after the symbol - * is defined. 
See https://bugs.llvm.org/show_bug.cgi?id=38921 */ .weak _zimage_start _zimage_start: .globl _zimage_start_lib diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 8661eea78503f..4bf81a111179f 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -719,7 +719,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, /* * If found, replace it with: * addis r2, r12, (.TOC.-func)@ha - * addi r2, r12, (.TOC.-func)@l + * addi r2, r2, (.TOC.-func)@l */ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value); ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value); diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 23b5f755e419b..dadcc8bab336a 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -8,6 +8,7 @@ * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp. */ +#include #include #include #include diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 7e3ab477f67fe..e7d56ddba43aa 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -596,6 +596,12 @@ emit_clear: } break; + /* + * BPF_ST NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; + /* * BPF_ST(X) */ diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 43fabb3cae0fa..160b86d9d8199 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -168,7 +168,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index, */ count = 0; for (i = offset; i < offset + length; i++) - count |= arg->bytes[i] << (i - offset); + count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8); *value = count; out: diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 68f415e334a59..10009a0cdb37c 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -41,7 +41,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected, unsigned char *ipe = (unsigned char *)expected; unsigned char *ipn = (unsigned char *)new; - pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); + pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc); pr_emerg("Found: %6ph\n", ipc); pr_emerg("Expected: %6ph\n", ipe); pr_emerg("New: %6ph\n", ipn); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 3515f2b55eb9e..dc5ecaea30d71 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -318,13 +318,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu) { kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); - set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); + set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); } static void __unset_cpu_idle(struct kvm_vcpu *vcpu) { kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); - clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); + clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); } static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 981e3ba974616..0a2ffd5378be2 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) static inline int is_vcpu_idle(struct kvm_vcpu *vcpu) { - return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); + return test_bit(kvm_vcpu_get_idx(vcpu), 
vcpu->kvm->arch.float_int.idle_mask); } static inline int kvm_is_ucontrol(struct kvm *kvm) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index e42354b15e0bc..8508c2c0e2a3a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -595,8 +595,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ if (!imm) break; - /* agfi %dst,-imm */ - EMIT6_IMM(0xc2080000, dst_reg, -imm); + if (imm == -0x80000000) { + /* algfi %dst,0x80000000 */ + EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); + } else { + /* agfi %dst,-imm */ + EMIT6_IMM(0xc2080000, dst_reg, -imm); + } break; /* * BPF_MUL @@ -883,6 +888,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i break; } break; + /* + * BPF_NOSPEC (speculation barrier) + */ + case BPF_ST | BPF_NOSPEC: + break; /* * BPF_ST(X) */ diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index ec4da4dc98f12..1bb1e64d4377d 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1261,6 +1261,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit(opcode | RS1(src) | rs2 | RD(dst), ctx); break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 2410bd4bb48f3..2d9e1372b070c 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -90,6 +90,7 @@ struct perf_ibs { unsigned long offset_mask[1]; int offset_max; unsigned int fetch_count_reset_broken : 1; + unsigned int fetch_ignore_if_zero_rip : 1; struct cpu_perf_ibs __percpu *pcpu; struct attribute **format_attrs; @@ -674,6 +675,10 @@ fail: if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) { regs.flags &= ~PERF_EFLAGS_EXACT; } else { + /* Workaround for erratum #1197 */ + if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1])) + goto out; + set_linear_ip(®s, ibs_data.regs[1]); regs.flags |= PERF_EFLAGS_EXACT; } @@ -767,6 +772,9 @@ static __init void perf_event_ibs_init(void) if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18) perf_ibs_fetch.fetch_count_reset_broken = 1; + if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10) + perf_ibs_fetch.fetch_ignore_if_zero_rip = 1; + perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); if (ibs_caps & IBS_CAPS_OPCNT) { diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index f03100bc5fd12..849f0ba53a9b9 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -69,7 +69,7 @@ static struct pt_cap_desc { PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)), PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)), PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)), - PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3), + PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7), PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000), PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff), PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000), diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c index 6eeb17dfde48e..5fee674fe59d9 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c @@ -252,6 +252,12 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr) case QOS_L3_MBM_LOCAL_EVENT_ID: m = &rr->d->mbm_local[rmid]; break; + default: + /* + 
* Code would never reach here because an invalid + * event id would fail the __rmid_read. + */ + return RMID_VAL_ERROR; } if (rr->first) { diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 6489cc19ed069..b0f3a996df15f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { }, { /* Handle problems with rebooting on the OptiPlex 990. */ .callback = set_pci_reboot, - .ident = "Dell OptiPlex 990", + .ident = "Dell OptiPlex 990 BIOS A0x", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), + DMI_MATCH(DMI_BIOS_VERSION, "A0"), }, }, { /* Handle problems with rebooting on Dell 300's */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f913127e942a1..417abc9ba1ad4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2511,6 +2511,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) { s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; adjust_tsc_offset_guest(vcpu, adj); + /* Before back to guest, tsc_timestamp must be adjusted + * as well, otherwise guest's percpu pvclock time could jump. + */ + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } vcpu->arch.ia32_tsc_adjust_msr = data; } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 81e85a8dd3005..4b25a1ad18ffd 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1289,18 +1289,18 @@ int kern_addr_valid(unsigned long addr) return 0; p4d = p4d_offset(pgd, addr); - if (p4d_none(*p4d)) + if (!p4d_present(*p4d)) return 0; pud = pud_offset(p4d, addr); - if (pud_none(*pud)) + if (!pud_present(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + if (!pmd_present(*pmd)) return 0; if (pmd_large(*pmd)) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 924ca27a6139b..81c3d4b4c7e2c 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -731,6 +731,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } break; + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: if (is_ereg(dst_reg)) diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index adee990abab14..f48300988bc2a 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1683,6 +1683,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, i++; break; } + /* speculation barrier */ + case BPF_ST | BPF_NOSPEC: + if (boot_cpu_has(X86_FEATURE_XMM2)) + /* Emit 'lfence' */ + EMIT3(0x0F, 0xAE, 0xE8); + break; /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 1c3e9185934c4..7966136352b56 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1187,6 +1187,11 @@ static void __init xen_dom0_set_legacy_features(void) x86_platform.legacy.rtc = 1; } +static void __init xen_domu_set_legacy_features(void) +{ + x86_platform.legacy.rtc = 0; +} + /* First C function to be called on Xen boot */ asmlinkage __visible void __init xen_start_kernel(void) { @@ -1354,6 +1359,8 @@ asmlinkage __visible void __init xen_start_kernel(void) 
add_preferred_console("xenboot", 0, NULL); if (pci_xen) x86_init.pci.arch_init = pci_xen_init; + x86_platform.set_legacy_features = + xen_domu_set_legacy_features; } else { const struct dom0_vga_console_info *info = (void *)((char *)xen_start_info + diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 82577eec6d0a7..f9b31eb6846c4 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -613,8 +613,8 @@ int xen_alloc_p2m_entry(unsigned long pfn) } /* Expanded the p2m? */ - if (pfn > xen_p2m_last_pfn) { - xen_p2m_last_pfn = pfn; + if (pfn >= xen_p2m_last_pfn) { + xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE); HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; } diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index b9ad83a0ee5db..d9b0709d6c9ce 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -25,7 +25,7 @@ config XTENSA select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD select HAVE_FUNCTION_TRACER - select HAVE_FUTEX_CMPXCHG if !MMU + select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING select HAVE_MEMBLOCK diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index af81a62faba64..e7faea3d73d3b 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -168,9 +168,13 @@ static const struct tty_operations serial_ops = { int __init rs_init(void) { - tty_port_init(&serial_port); + int ret; serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); + if (!serial_driver) + return -ENOMEM; + + tty_port_init(&serial_port); pr_info("%s %s\n", serial_name, serial_version); @@ -190,8 +194,15 @@ int __init rs_init(void) tty_set_operations(serial_driver, &serial_ops); tty_port_link_device(&serial_port, serial_driver, 0); - if (tty_register_driver(serial_driver)) - panic("Couldn't register serial driver\n"); + ret = tty_register_driver(serial_driver); + if (ret) { + pr_err("Couldn't register serial driver\n"); + tty_driver_kref_put(serial_driver); + tty_port_destroy(&serial_port); + + return ret; + } + return 0; } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 5b3e5483c657c..c8c94e8e0f721 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2137,6 +2137,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * are likely to increase the throughput. */ bfqq->new_bfqq = new_bfqq; + /* + * The above assignment schedules the following redirections: + * each time some I/O for bfqq arrives, the process that + * generated that I/O is disassociated from bfqq and + * associated with new_bfqq. Here we increases new_bfqq->ref + * in advance, adding the number of processes that are + * expected to be associated with new_bfqq as they happen to + * issue I/O. + */ new_bfqq->ref += process_refs; return new_bfqq; } @@ -2196,6 +2205,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_queue *in_service_bfqq, *new_bfqq; + /* if a merge has already been setup, then proceed with that first */ + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + /* * Prevent bfqq from being merged if it has been created too * long ago. 
The idea is that true cooperating processes, and @@ -2210,9 +2223,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_too_late_for_merging(bfqq)) return NULL; - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) return NULL; @@ -4241,7 +4251,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) if (bfqq->new_ioprio >= IOPRIO_BE_NR) { pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", bfqq->new_ioprio); - bfqq->new_ioprio = IOPRIO_BE_NR; + bfqq->new_ioprio = IOPRIO_BE_NR - 1; } bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index c461cf63f1f40..d33e89d28dbeb 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -319,9 +319,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, if (!blk_queue_is_zoned(q)) return -ENOTTY; - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report))) return -EFAULT; @@ -380,9 +377,6 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, if (!blk_queue_is_zoned(q)) return -ENOTTY; - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (!(mode & FMODE_WRITE)) return -EBADF; diff --git a/certs/Makefile b/certs/Makefile index 5d0999b9e21b1..ca3c71e3a3d9f 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -46,11 +46,19 @@ endif redirect_openssl = 2>&1 quiet_redirect_openssl = 2>&1 silent_redirect_openssl = 2>/dev/null +openssl_available = $(shell openssl help 2>/dev/null && echo yes) # We do it this way rather than having a boolean option for enabling an # external private key, because 'make randconfig' might enable such a # boolean option and we unfortunately can't make it depend on !RANDCONFIG. ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem") + +ifeq ($(openssl_available),yes) +X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null) + +$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem")) +endif + $(obj)/signing_key.pem: $(obj)/x509.genkey @$(kecho) "###" @$(kecho) "### Now generating an X.509 key pair to be used for signing modules." 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index db1d86af21b4d..b95a4194a68db 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4574,6 +4574,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -6412,7 +6416,7 @@ int ata_host_start(struct ata_host *host) have_stop = 1; } - if (host->ops->host_stop) + if (host->ops && host->ops->host_stop) have_stop = 1; if (have_stop) { diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 6f142aa54f5f9..8487048c5ec9b 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -1253,24 +1253,20 @@ static int sata_dwc_probe(struct platform_device *ofdev) irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { dev_err(&ofdev->dev, "no SATA DMA irq\n"); - err = -ENODEV; - goto error_out; + return -ENODEV; } #ifdef CONFIG_SATA_DWC_OLD_DMA if (!of_find_property(np, "dmas", NULL)) { err = sata_dwc_dma_init_old(ofdev, hsdev); if (err) - goto error_out; + return err; } #endif hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy"); - if (IS_ERR(hsdev->phy)) { - err = PTR_ERR(hsdev->phy); - hsdev->phy = NULL; - goto error_out; - } + if (IS_ERR(hsdev->phy)) + return PTR_ERR(hsdev->phy); err = phy_init(hsdev->phy); if (err) diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 1cda505d6a852..9664cce49109b 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user) const char *file = *(const char **)(tracedata + 2); unsigned int user_hash_value, file_hash_value; + if (!x86_platform.legacy.rtc) + return; + user_hash_value = user % USERHASH; file_hash_value = hash_string(lineno, file, FILEHASH); set_magic_time(user_hash_value, file_hash_value, dev_hash_value); @@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = { static int early_resume_init(void) { + if (!x86_platform.legacy.rtc) + return 0; + hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; @@ -277,6 +284,9 @@ static int late_resume_init(void) unsigned int val = hash_value_early_read; unsigned int user, file, dev; + if (!x86_platform.legacy.rtc) + return 0; + user = val % USERHASH; val = val / USERHASH; file = val % FILEHASH; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index e8b3353c18eb8..330ab9c85d1b8 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1479,7 +1479,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, if (ret) { dev_err(map->dev, "Error in caching of register: %x ret: %d\n", - reg + i, ret); + reg + regmap_get_offset(map, i), ret); return ret; } } diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index fc1f4acdd189e..c0f203deaf0b5 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq); void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) { + device_initialize(&core->dev); core->dev.release = bcma_release_core_dev; 
core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); @@ -299,11 +300,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) { int err; - err = device_register(&core->dev); + err = device_add(&core->dev); if (err) { bcma_err(bus, "Could not register dev for core 0x%03X\n", core->id.id); - put_device(&core->dev); return; } core->dev_registered = true; @@ -394,7 +394,7 @@ void bcma_unregister_cores(struct bcma_bus *bus) /* Now noone uses internally-handled cores, we can free them */ list_for_each_entry_safe(core, tmp, &bus->cores, list) { list_del(&core->list); - kfree(core); + put_device(&core->dev); } } diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e101f286ac353..60662771bd465 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -242,7 +242,7 @@ config BLK_DEV_LOOP_MIN_COUNT dynamically allocated with the /dev/loop-control interface. config BLK_DEV_CRYPTOLOOP - tristate "Cryptoloop Support" + tristate "Cryptoloop Support (DEPRECATED)" select CRYPTO select CRYPTO_CBC depends on BLK_DEV_LOOP @@ -254,7 +254,7 @@ config BLK_DEV_CRYPTOLOOP WARNING: This device is not safe for journaled file systems like ext3 or Reiserfs. Please use the Device Mapper crypto module instead, which can be configured to be on-disk compatible with the - cryptoloop device. + cryptoloop device. cryptoloop support will be removed in Linux 5.16. source "drivers/block/drbd/Kconfig" diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 7033a4beda669..1b84105dfe62d 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -201,6 +201,8 @@ init_cryptoloop(void) if (rc) printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n"); + else + pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n"); return rc; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 3806fd8fef0b1..a9490c8e82a70 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -46,11 +46,6 @@ static struct hlist_head *all_lists[] = { NULL, }; -static struct hlist_head *orphan_list[] = { - &clk_orphan_list, - NULL, -}; - /*** private data structures ***/ struct clk_core { @@ -2629,6 +2624,11 @@ static int inited = 0; static DEFINE_MUTEX(clk_debug_lock); static HLIST_HEAD(clk_debug_list); +static struct hlist_head *orphan_list[] = { + &clk_orphan_list, + NULL, +}; + static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) { diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 890ebf623261b..38612cd9092eb 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -254,6 +254,7 @@ static const char *powersave_parents[] = { static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = { { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents), 11, 1, 0 }, + { } }; static struct clk *clk_muxing_get_src( diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index cec90a4c79b34..7a6d4c4c0feba 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -576,7 +576,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag) ch->flags |= flag; /* setup timeout if no clockevent */ - if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT))) + if (ch->cmt->num_channels == 1 && + flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT))) __sh_cmt_set_next(ch, ch->max_match_value); out: raw_spin_unlock_irqrestore(&ch->lock, 
flags); @@ -612,20 +613,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) static u64 sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - unsigned long flags; u32 has_wrapped; - u64 value; - u32 raw; - raw_spin_lock_irqsave(&ch->lock, flags); - value = ch->total_cycles; - raw = sh_cmt_get_counter(ch, &has_wrapped); + if (ch->cmt->num_channels == 1) { + unsigned long flags; + u64 value; + u32 raw; - if (unlikely(has_wrapped)) - raw += ch->match_value + 1; - raw_spin_unlock_irqrestore(&ch->lock, flags); + raw_spin_lock_irqsave(&ch->lock, flags); + value = ch->total_cycles; + raw = sh_cmt_get_counter(ch, &has_wrapped); + + if (unlikely(has_wrapped)) + raw += ch->match_value + 1; + raw_spin_unlock_irqrestore(&ch->lock, flags); + + return value + raw; + } - return value + raw; + return sh_cmt_get_counter(ch, &has_wrapped); } static int sh_cmt_clocksource_enable(struct clocksource *cs) @@ -688,7 +694,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, cs->disable = sh_cmt_clocksource_disable; cs->suspend = sh_cmt_clocksource_suspend; cs->resume = sh_cmt_clocksource_resume; - cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8); + cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 5da985604692f..3ebadabf54110 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -46,6 +46,7 @@ #define MAX_PSTATE_SHIFT 32 #define LPSTATE_SHIFT 48 #define GPSTATE_SHIFT 56 +#define MAX_NR_CHIPS 32 #define MAX_RAMP_DOWN_TIME 5120 /* @@ -1051,12 +1052,20 @@ static int init_chip_info(void) unsigned int *chip; unsigned int cpu, i; unsigned int prev_chip_id = UINT_MAX; + cpumask_t *chip_cpu_mask; int ret = 0; chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; + /* Allocate a chip cpu mask large enough to fit mask for all chips */ + chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL); + if (!chip_cpu_mask) { + ret = -ENOMEM; + goto free_and_return; + } + for_each_possible_cpu(cpu) { unsigned int id = cpu_to_chip_id(cpu); @@ -1064,22 +1073,25 @@ static int init_chip_info(void) prev_chip_id = id; chip[nr_chips++] = id; } + cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]); } chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL); if (!chips) { ret = -ENOMEM; - goto free_and_return; + goto out_free_chip_cpu_mask; } for (i = 0; i < nr_chips; i++) { chips[i].id = chip[i]; - cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i])); + cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]); INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn); for_each_cpu(cpu, &chips[i].mask) per_cpu(chip_info, cpu) = &chips[i]; } +out_free_chip_cpu_mask: + kfree(chip_cpu_mask); free_and_return: kfree(chip); return ret; diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index b0c592073a4a3..da834ae3586b6 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -167,15 +167,19 @@ static struct dcp *global_sdcp; static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) { + int dma_err; struct dcp *sdcp = global_sdcp; const int chan = actx->chan; uint32_t stat; unsigned long ret; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; - dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), DMA_TO_DEVICE); + dma_err = dma_mapping_error(sdcp->dev, 
desc_phys); + if (dma_err) + return dma_err; + reinit_completion(&sdcp->completion[chan]); /* Clear status register. */ @@ -213,18 +217,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct ablkcipher_request *req, int init) { + dma_addr_t key_phys, src_phys, dst_phys; struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; - dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, - 2 * AES_KEYSIZE_128, - DMA_TO_DEVICE); - dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, - DCP_BUF_SZ, DMA_TO_DEVICE); - dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, - DCP_BUF_SZ, DMA_FROM_DEVICE); + key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, + 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); + ret = dma_mapping_error(sdcp->dev, key_phys); + if (ret) + return ret; + + src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, + DCP_BUF_SZ, DMA_TO_DEVICE); + ret = dma_mapping_error(sdcp->dev, src_phys); + if (ret) + goto err_src; + + dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, + DCP_BUF_SZ, DMA_FROM_DEVICE); + ret = dma_mapping_error(sdcp->dev, dst_phys); + if (ret) + goto err_dst; if (actx->fill % AES_BLOCK_SIZE) { dev_err(sdcp->dev, "Invalid block size!\n"); @@ -262,10 +277,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, ret = mxs_dcp_start_dma(actx); aes_done_run: + dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); +err_dst: + dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); +err_src: dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); - dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); - dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); return ret; } @@ -280,21 +297,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) struct scatterlist *dst = req->dst; struct scatterlist *src = req->src; - const int nents = sg_nents(req->src); + int dst_nents = sg_nents(dst); const int out_off = DCP_BUF_SZ; uint8_t *in_buf = sdcp->coh->aes_in_buf; uint8_t *out_buf = sdcp->coh->aes_out_buf; - uint8_t *out_tmp, *src_buf, *dst_buf = NULL; uint32_t dst_off = 0; + uint8_t *src_buf = NULL; uint32_t last_out_len = 0; uint8_t *key = sdcp->coh->aes_key; int ret = 0; - int split = 0; - unsigned int i, len, clen, rem = 0, tlen = 0; + unsigned int i, len, clen, tlen = 0; int init = 0; bool limit_hit = false; @@ -312,7 +328,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, nents, i) { + for_each_sg(req->src, src, sg_nents(src), i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; @@ -337,34 +353,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * submit the buffer. 
*/ if (actx->fill == out_off || sg_is_last(src) || - limit_hit) { + limit_hit) { ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; - out_tmp = out_buf; + sg_pcopy_from_buffer(dst, dst_nents, out_buf, + actx->fill, dst_off); + dst_off += actx->fill; last_out_len = actx->fill; - while (dst && actx->fill) { - if (!split) { - dst_buf = sg_virt(dst); - dst_off = 0; - } - rem = min(sg_dma_len(dst) - dst_off, - actx->fill); - - memcpy(dst_buf + dst_off, out_tmp, rem); - out_tmp += rem; - dst_off += rem; - actx->fill -= rem; - - if (dst_off == sg_dma_len(dst)) { - dst = sg_next(dst); - split = 0; - } else { - split = 1; - } - } + actx->fill = 0; } } while (len); @@ -565,6 +564,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req) dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, DCP_BUF_SZ, DMA_TO_DEVICE); + ret = dma_mapping_error(sdcp->dev, buf_phys); + if (ret) + return ret; + /* Fill in the DMA descriptor. */ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | MXS_DCP_CONTROL0_INTERRUPT | @@ -597,6 +600,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req) if (rctx->fini) { digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); + ret = dma_mapping_error(sdcp->dev, digest_phys); + if (ret) + goto done_run; + desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; desc->payload = digest_phys; } diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4d31ef4724366..180f2f61b8fbc 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1739,7 +1739,7 @@ static void omap_sham_done_task(unsigned long data) if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) goto finish; } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { - if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { + if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); if (dd->err) { err = dd->err; diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index d2d0ae445fd89..7c7d49a8a4034 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -123,10 +123,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) hw_data->enable_error_correction = adf_vf_void_noop; hw_data->init_admin_comms = adf_vf_int_noop; hw_data->exit_admin_comms = adf_vf_void_noop; - hw_data->send_admin_init = adf_vf2pf_init; + hw_data->send_admin_init = adf_vf2pf_notify_init; hw_data->init_arb = adf_vf_int_noop; hw_data->exit_arb = adf_vf_void_noop; - hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->disable_iov = adf_vf2pf_notify_shutdown; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 38e4bc04f407b..90e8a7564756b 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -123,10 +123,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) hw_data->enable_error_correction = adf_vf_void_noop; hw_data->init_admin_comms = adf_vf_int_noop; hw_data->exit_admin_comms = adf_vf_void_noop; - hw_data->send_admin_init = adf_vf2pf_init; + hw_data->send_admin_init = adf_vf2pf_notify_init; hw_data->init_arb = adf_vf_int_noop; hw_data->exit_arb = adf_vf_void_noop; - hw_data->disable_iov = 
adf_vf2pf_shutdown; + hw_data->disable_iov = adf_vf2pf_notify_shutdown; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index d78f8d5c89c3f..289dd7e48d4a4 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -239,8 +239,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); -int adf_vf2pf_init(struct adf_accel_dev *accel_dev); -void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev); +int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev); +void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev); int adf_init_pf_wq(void); void adf_exit_pf_wq(void); int adf_init_vf_wq(void); @@ -263,12 +263,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { } -static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev) { return 0; } -static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) { } diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 26556c7130497..7a7d43c475342 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -105,6 +105,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) struct service_hndl *service; struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int ret; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -171,9 +172,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) } hw_data->enable_error_correction(accel_dev); - hw_data->enable_vf2pf_comms(accel_dev); + ret = hw_data->enable_vf2pf_comms(accel_dev); - return 0; + return ret; } EXPORT_SYMBOL_GPL(adf_dev_init); diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index 4898ef41fd9fd..7d319c5c071c4 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -59,6 +59,8 @@ #include "adf_transport_access_macros.h" #include "adf_transport_internal.h" +#define ADF_MAX_NUM_VFS 32 + static int adf_enable_msix(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; @@ -111,7 +113,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *pmisc_bar_addr = pmisc->virt_addr; - u32 vf_mask; + unsigned long vf_mask; /* Get the interrupt sources triggered by VFs */ vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) & @@ -132,8 +134,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) * unless the VF is malicious and is attempting to * flood the host OS with VF2PF interrupts. 
*/ - for_each_set_bit(i, (const unsigned long *)&vf_mask, - (sizeof(vf_mask) * BITS_PER_BYTE)) { + for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) { vf_info = accel_dev->pf.vf_info + i; if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index b3875fdf6cd72..c64481160b711 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -231,7 +231,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) return ret; } -EXPORT_SYMBOL_GPL(adf_iov_putmsg); void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info) { @@ -361,6 +360,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT; BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255); + reinit_completion(&accel_dev->vf.iov_msg_completion); + /* Send request from VF to PF */ ret = adf_iov_putmsg(accel_dev, msg, 0); if (ret) { diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index cd5f37dffe8a6..1830194567e84 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -49,14 +49,14 @@ #include "adf_pf2vf_msg.h" /** - * adf_vf2pf_init() - send init msg to PF + * adf_vf2pf_notify_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * * Function sends an init messge from the VF to a PF * * Return: 0 on success, error code otherwise. */ -int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev) { u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); @@ -69,17 +69,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); return 0; } -EXPORT_SYMBOL_GPL(adf_vf2pf_init); +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init); /** - * adf_vf2pf_shutdown() - send shutdown msg to PF + * adf_vf2pf_notify_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. 
* * Function sends a shutdown messge from the VF to a PF * * Return: void */ -void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) { u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); @@ -89,4 +89,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) dev_err(&GET_DEV(accel_dev), "Failed to send Shutdown event to PF\n"); } -EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown); +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown); diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index df9a1f35b8320..ef90902c8200d 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -203,6 +203,7 @@ static irqreturn_t adf_isr(int irq, void *privdata) struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *pmisc_bar_addr = pmisc->virt_addr; + bool handled = false; u32 v_int; /* Read VF INT source CSR to determine the source of VF interrupt */ @@ -215,7 +216,7 @@ static irqreturn_t adf_isr(int irq, void *privdata) /* Schedule tasklet to handle interrupt BH */ tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet); - return IRQ_HANDLED; + handled = true; } /* Check bundle interrupt */ @@ -227,10 +228,10 @@ static irqreturn_t adf_isr(int irq, void *privdata) WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); tasklet_hi_schedule(&bank->resp_handler); - return IRQ_HANDLED; + handled = true; } - return IRQ_NONE; + return handled ? IRQ_HANDLED : IRQ_NONE; } static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a3b4dd8099a7b..3a8361c83f0b1 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -123,10 +123,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) hw_data->enable_error_correction = adf_vf_void_noop; hw_data->init_admin_comms = adf_vf_int_noop; hw_data->exit_admin_comms = adf_vf_void_noop; - hw_data->send_admin_init = adf_vf2pf_init; + hw_data->send_admin_init = adf_vf2pf_notify_init; hw_data->init_arb = adf_vf_int_noop; hw_data->exit_arb = adf_vf_void_noop; - hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->disable_iov = adf_vf2pf_notify_shutdown; hw_data->get_accel_mask = get_accel_mask; hw_data->get_ae_mask = get_ae_mask; hw_data->get_num_accels = get_num_accels; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 7a55baa861e58..07e1a286ee431 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -853,7 +853,11 @@ static void talitos_unregister_rng(struct device *dev) * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP */ #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) +#ifdef CONFIG_CRYPTO_DEV_TALITOS_SEC2 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) +#else +#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE) +#endif #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ struct talitos_ctx { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index f2739995c335a..199eccee0b0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -338,7 +338,7 @@ static void 
amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, void amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector) { - u8 val; + u8 val = 0; if (!amdgpu_connector->router.ddc_valid) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b14ce112703f0..e25952d516e22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -216,7 +216,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } - BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS); + BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS); placement->num_placement = c; placement->placement = places; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 0d9e410ca01e2..dbfe5623997d4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -92,29 +92,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, rd_buf_ptr = rd_buf; - str_len = strlen("Current: %d %d %d "); - snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ", + str_len = strlen("Current: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ", link->cur_link_settings.lane_count, link->cur_link_settings.link_rate, link->cur_link_settings.link_spread); rd_buf_ptr += str_len; - str_len = strlen("Verified: %d %d %d "); - snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ", + str_len = strlen("Verified: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ", link->verified_link_cap.lane_count, link->verified_link_cap.link_rate, link->verified_link_cap.link_spread); rd_buf_ptr += str_len; - str_len = strlen("Reported: %d %d %d "); - snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ", + str_len = strlen("Reported: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ", link->reported_link_cap.lane_count, link->reported_link_cap.link_rate, link->reported_link_cap.link_spread); rd_buf_ptr += str_len; - str_len = strlen("Preferred: %d %d %d "); - snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n", + str_len = strlen("Preferred: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n", link->preferred_link_setting.lane_count, link->preferred_link_setting.link_rate, link->preferred_link_setting.link_spread); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 79bafea663542..a40ea5c685728 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -296,10 +296,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx) int i; for (i = 0; i < ctx->mixer_count; i++) { - DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0); - DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0); - DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0); - DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0); + enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id; + + DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0); + DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0); + DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0); + DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0); } } diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ff8164cc6738d..822cef472a7e9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -34,8 +34,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) } phy_pdev = of_find_device_by_node(phy_node); - 
if (phy_pdev) + if (phy_pdev) { msm_dsi->phy = platform_get_drvdata(phy_pdev); + msm_dsi->phy_dev = &phy_pdev->dev; + } of_node_put(phy_node); @@ -44,8 +46,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) return -EPROBE_DEFER; } - msm_dsi->phy_dev = get_device(&phy_pdev->dev); - return 0; } diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index a9d2501500a19..170371770dd41 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -545,21 +545,21 @@ static const struct ipu_rgb def_bgra_16 = { .bits_per_pixel = 16, }; -#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y)) -#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * ((y) / 2) / 2) + (x) / 2) -#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * pix->height / 4) + \ - (pix->width * ((y) / 2) / 2) + (x) / 2) -#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * (y) / 2) + (x) / 2) -#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * pix->height / 2) + \ - (pix->width * (y) / 2) + (x) / 2) -#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * ((y) / 2)) + (x)) -#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \ - (pix->width * y) + (x)) +#define Y_OFFSET(pix, x, y) ((x) + pix->bytesperline * (y)) +#define U_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * ((y) / 2) / 2) + (x) / 2) +#define V_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * pix->height / 4) + \ + (pix->bytesperline * ((y) / 2) / 2) + (x) / 2) +#define U2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * (y) / 2) + (x) / 2) +#define V2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * pix->height / 2) + \ + (pix->bytesperline * (y) / 2) + (x) / 2) +#define UV_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * ((y) / 2)) + (x)) +#define UV2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \ + (pix->bytesperline * y) + (x)) #define NUM_ALPHA_CHANNELS 7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 4dd151b2924e2..d56ef395eb693 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -427,8 +427,6 @@ static int hidinput_get_battery_property(struct power_supply *psy, if (dev->battery_status == HID_BATTERY_UNKNOWN) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; - else if (dev->battery_capacity == 100) - val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index ff340d7ae2e52..6a880c2623808 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); dev->irq = platform_get_irq(pdev, 0); - if (iic_force_poll) + if (dev->irq < 0 || iic_force_poll) dev->irq = 0; if (dev->irq) { diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 85cbe4b555786..d4fe7ccccb226 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -456,16 +456,14 @@ iop3xx_i2c_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - ret = -ENXIO; + ret = irq; goto unmap; } ret = request_irq(irq, iop3xx_i2c_irq_handler, 0, pdev->name, adapter_data); - if (ret) { - ret = -EIO; + if 
(ret) goto unmap; - } memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); new_adapter->owner = THIS_MODULE; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 2bb4d20ead32b..e09b065a6aff0 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -804,7 +804,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->pdmabase); irq = platform_get_irq(pdev, 0); - if (irq <= 0) + if (irq < 0) return irq; init_completion(&i2c->msg_complete); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index d3603e261a847..4c60369203882 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1179,7 +1179,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) */ if (!(i2c->quirks & QUIRK_POLL)) { i2c->irq = ret = platform_get_irq(pdev, 0); - if (ret <= 0) { + if (ret < 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); clk_unprepare(i2c->clk); return ret; diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 13fdb4dfe3562..cc3a24c43e574 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -230,7 +230,7 @@ static int ad5624r_probe(struct spi_device *spi) if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vcc"); + st->reg = devm_regulator_get_optional(&spi->dev, "vref"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) @@ -241,6 +241,22 @@ static int ad5624r_probe(struct spi_device *spi) goto error_disable_reg; voltage_uv = ret; + } else { + if (PTR_ERR(st->reg) != -ENODEV) + return PTR_ERR(st->reg); + /* Backwards compatibility. This naming is not correct */ + st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); + if (!IS_ERR(st->reg)) { + ret = regulator_enable(st->reg); + if (ret) + return ret; + + ret = regulator_get_voltage(st->reg); + if (ret < 0) + goto error_disable_reg; + + voltage_uv = ret; + } } spi_set_drvdata(spi, indio_dev); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 99dd8452724de..57aec656ab7fb 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -1173,29 +1173,34 @@ static int __init iw_cm_init(void) ret = iwpm_init(RDMA_NL_IWCM); if (ret) - pr_err("iw_cm: couldn't init iwpm\n"); - else - rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table); + return ret; + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); if (!iwcm_wq) - return -ENOMEM; + goto err_alloc; iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", iwcm_ctl_table); if (!iwcm_ctl_table_hdr) { pr_err("iw_cm: couldn't register sysctl paths\n"); - destroy_workqueue(iwcm_wq); - return -ENOMEM; + goto err_sysctl; } + rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table); return 0; + +err_sysctl: + destroy_workqueue(iwcm_wq); +err_alloc: + iwpm_exit(RDMA_NL_IWCM); + return -ENOMEM; } static void __exit iw_cm_cleanup(void) { + rdma_nl_unregister(RDMA_NL_IWCM); unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); - rdma_nl_unregister(RDMA_NL_IWCM); iwpm_exit(RDMA_NL_IWCM); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7787ec42f81e1..2df75db52e917 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -824,20 +824,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); if 
(!d->full_dirty_stripes) - return -ENOMEM; + goto out_free_stripe_sectors_dirty; idx = ida_simple_get(&bcache_device_idx, 0, BCACHE_DEVICE_IDX_MAX, GFP_KERNEL); if (idx < 0) - return idx; + goto out_free_full_dirty_stripes; if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) - goto err; + goto out_ida_remove; d->disk = alloc_disk(BCACHE_MINORS); if (!d->disk) - goto err; + goto out_bioset_exit; set_capacity(d->disk, sectors); snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); @@ -872,8 +872,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, return 0; -err: +out_bioset_exit: + bioset_exit(&d->bio_split); +out_ida_remove: ida_simple_remove(&bcache_device_idx, idx); +out_free_full_dirty_stripes: + kvfree(d->full_dirty_stripes); +out_free_stripe_sectors_dirty: + kvfree(d->stripe_sectors_dirty); return -ENOMEM; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 85559f772d0d6..a6a26f8e4d8e1 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2181,7 +2181,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data) struct crypt_config *cc = pool_data; struct page *page; - if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) && + /* + * Note, percpu_counter_read_positive() may over (and under) estimate + * the current usage by at most (batch - 1) * num_online_cpus() pages, + * but avoids potential spinlock contention of an exact result. + */ + if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) && likely(gfp_mask & __GFP_NORETRY)) return NULL; diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 85077f4d257a7..a6a5cee6b9430 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -901,7 +901,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) return -EBUSY; } - if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { + if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 492a3f8ac1199..0401daa0f7fbd 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -494,7 +494,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); @@ -563,7 +563,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); @@ -603,7 +603,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock); int dm_bm_flush(struct dm_block_manager *bm) { - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; return dm_bufio_write_dirty_buffers(bm->bufio); @@ -617,19 +617,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) bool dm_bm_is_read_only(struct dm_block_manager *bm) { - return bm->read_only; + return (bm ? 
bm->read_only : true); } EXPORT_SYMBOL_GPL(dm_bm_is_read_only); void dm_bm_set_read_only(struct dm_block_manager *bm) { - bm->read_only = true; + if (bm) + bm->read_only = true; } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); void dm_bm_set_read_write(struct dm_block_manager *bm) { - bm->read_only = false; + if (bm) + bm->read_only = false; } EXPORT_SYMBOL_GPL(dm_bm_set_read_write); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 3c3f8cb148451..5fa787e023c7e 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -2110,32 +2110,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an dib8000_write_word(state, 117 + mode, ana_fe[mode]); } -static const u16 lut_prbs_2k[14] = { - 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213 +static const u16 lut_prbs_2k[13] = { + 0x423, 0x009, 0x5C7, + 0x7A6, 0x3D8, 0x527, + 0x7FF, 0x79B, 0x3D6, + 0x3A2, 0x53B, 0x2F4, + 0x213 }; -static const u16 lut_prbs_4k[14] = { - 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E + +static const u16 lut_prbs_4k[13] = { + 0x208, 0x0C3, 0x7B9, + 0x423, 0x5C7, 0x3D8, + 0x7FF, 0x3D6, 0x53B, + 0x213, 0x029, 0x0D0, + 0x48E }; -static const u16 lut_prbs_8k[14] = { - 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684 + +static const u16 lut_prbs_8k[13] = { + 0x740, 0x069, 0x7DD, + 0x208, 0x7B9, 0x5C7, + 0x7FF, 0x53B, 0x029, + 0x48E, 0x4C4, 0x367, + 0x684 }; static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel) { int sub_channel_prbs_group = 0; + int prbs_group; - sub_channel_prbs_group = (subchannel / 3) + 1; - dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]); + sub_channel_prbs_group = subchannel / 3; + if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k)) + return 0; switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_2K: - return lut_prbs_2k[sub_channel_prbs_group]; + prbs_group = lut_prbs_2k[sub_channel_prbs_group]; + break; case TRANSMISSION_MODE_4K: - return lut_prbs_4k[sub_channel_prbs_group]; + prbs_group = lut_prbs_4k[sub_channel_prbs_group]; + break; default: case TRANSMISSION_MODE_8K: - return lut_prbs_8k[sub_channel_prbs_group]; + prbs_group = lut_prbs_8k[sub_channel_prbs_group]; } + + dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", + sub_channel_prbs_group, subchannel, prbs_group); + + return prbs_group; } static void dib8000_set_13seg_channel(struct dib8000_state *state) @@ -2412,10 +2435,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq /* TSB or ISDBT ? 
apply it now */ if (c->isdbt_sb_mode) { dib8000_set_sb_channel(state); - if (c->isdbt_sb_subchannel < 14) - init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel); - else - init_prbs = 0; + init_prbs = dib8000_get_init_prbs(state, + c->isdbt_sb_subchannel); } else { dib8000_set_13seg_channel(state); init_prbs = 0xfff; @@ -3007,6 +3028,7 @@ static int dib8000_tune(struct dvb_frontend *fe) unsigned long *timeout = &state->timeout; unsigned long now = jiffies; + u16 init_prbs; #ifdef DIB8000_AGC_FREEZE u16 agc1, agc2; #endif @@ -3305,8 +3327,10 @@ static int dib8000_tune(struct dvb_frontend *fe) break; case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */ - if (state->subchannel <= 41) { - dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel)); + init_prbs = dib8000_get_init_prbs(state, state->subchannel); + + if (init_prbs) { + dib8000_set_subchannel_prbs(state, init_prbs); *tune_state = CT_DEMOD_STEP_9; } else { *tune_state = CT_DEMOD_STOP; diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c index 31a1e2294843a..85395813c0f2e 100644 --- a/drivers/media/i2c/imx258.c +++ b/drivers/media/i2c/imx258.c @@ -22,7 +22,7 @@ #define IMX258_CHIP_ID 0x0258 /* V_TIMING internal */ -#define IMX258_VTS_30FPS 0x0c98 +#define IMX258_VTS_30FPS 0x0c50 #define IMX258_VTS_30FPS_2K 0x0638 #define IMX258_VTS_30FPS_VGA 0x034c #define IMX258_VTS_MAX 0xffff @@ -46,7 +46,7 @@ /* Analog gain control */ #define IMX258_REG_ANALOG_GAIN 0x0204 #define IMX258_ANA_GAIN_MIN 0 -#define IMX258_ANA_GAIN_MAX 0x1fff +#define IMX258_ANA_GAIN_MAX 480 #define IMX258_ANA_GAIN_STEP 1 #define IMX258_ANA_GAIN_DEFAULT 0x0 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index d114ac5243eca..4f8dc3f56785a 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings) { struct tda1997x_state *state = to_state(sd); + int ret; v4l_dbg(1, debug, state->client, "%s\n", __func__); memset(timings, 0, sizeof(struct v4l2_dv_timings)); mutex_lock(&state->lock); - tda1997x_detect_std(state, timings); + ret = tda1997x_detect_std(state, timings); mutex_unlock(&state->lock); - return 0; + return ret; } static const struct v4l2_subdev_video_ops tda1997x_video_ops = { @@ -2229,6 +2230,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd) /* get initial HDMI status */ state->hdmi_status = io_read(sd, REG_HDMI_FLAGS); + io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN); return 0; } diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 420897241248c..4197b311cff46 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -316,6 +316,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) else return NULL; fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; } pixmp->width = clamp(pixmp->width, frame_width_min(inst), diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c index aba488cd0e645..a2c20ca799c44 100644 --- a/drivers/media/platform/tegra-cec/tegra_cec.c +++ b/drivers/media/platform/tegra-cec/tegra_cec.c @@ -383,7 +383,11 @@ static int tegra_cec_probe(struct platform_device *pdev) return -ENOENT; } - clk_prepare_enable(cec->clk); + ret = clk_prepare_enable(cec->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to prepare clock for 
CEC\n"); + return ret; + } /* set context info. */ cec->dev = &pdev->dev; @@ -462,9 +466,7 @@ static int tegra_cec_resume(struct platform_device *pdev) dev_notice(&pdev->dev, "Resuming\n"); - clk_prepare_enable(cec->clk); - - return 0; + return clk_prepare_enable(cec->clk); } #endif diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index 3822d9ebcb46c..5abbde7e5d5b3 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c @@ -52,7 +52,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask) if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) { dprintk("invalid tx mask: %u\n", mask); - return -EINVAL; + return 2; } dprintk("setting tx mask: %u\n", mask); diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c index 43e0e0fd715b9..705c2901a89e8 100644 --- a/drivers/media/usb/dvb-usb/nova-t-usb2.c +++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c @@ -133,7 +133,7 @@ ret: static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) { - int i; + int i, ret; u8 b; mac[0] = 0x00; @@ -142,7 +142,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) /* this is a complete guess, but works for my box */ for (i = 136; i < 139; i++) { - dibusb_read_eeprom_byte(d,i, &b); + ret = dibusb_read_eeprom_byte(d, i, &b); + if (ret) + return ret; mac[5 - (i - 136)] = b; } diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c index c3529ea59da95..fcd66757b34dc 100644 --- a/drivers/media/usb/dvb-usb/vp702x.c +++ b/drivers/media/usb/dvb-usb/vp702x.c @@ -294,16 +294,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6]) { u8 i, *buf; + int ret; struct vp702x_device_state *st = d->priv; mutex_lock(&st->buf_mutex); buf = st->buf; - for (i = 6; i < 12; i++) - vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1); + for (i = 6; i < 12; i++) { + ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, + &buf[i - 6], 1); + if (ret < 0) + goto err; + } memcpy(mac, buf, 6); +err: mutex_unlock(&st->buf_mutex); - return 0; + return ret; } static int vp702x_frontend_attach(struct dvb_usb_adapter *adap) diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index 3612f0d730ddd..92007a225d8ea 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -865,7 +865,6 @@ error: kfree(ir); ref_put: em28xx_shutdown_buttons(dev); - kref_put(&dev->ref, em28xx_free_device); return err; } diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c index 62aeebcdd7f71..c7b5a3321cd74 100644 --- a/drivers/media/usb/go7007/go7007-driver.c +++ b/drivers/media/usb/go7007/go7007-driver.c @@ -699,49 +699,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board, struct device *dev) { struct go7007 *go; - int i; go = kzalloc(sizeof(struct go7007), GFP_KERNEL); if (go == NULL) return NULL; go->dev = dev; go->board_info = board; - go->board_id = 0; go->tuner_type = -1; - go->channel_number = 0; - go->name[0] = 0; mutex_init(&go->hw_lock); init_waitqueue_head(&go->frame_waitq); spin_lock_init(&go->spinlock); go->status = STATUS_INIT; - memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter)); - go->i2c_adapter_online = 0; - go->interrupt_available = 0; init_waitqueue_head(&go->interrupt_waitq); - go->input = 0; go7007_update_board(go); - go->encoder_h_halve = 0; - 
go->encoder_v_halve = 0; - go->encoder_subsample = 0; go->format = V4L2_PIX_FMT_MJPEG; go->bitrate = 1500000; go->fps_scale = 1; - go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; - go->gop_size = 0; - go->ipb = 0; - go->closed_gop = 0; - go->repeat_seqhead = 0; - go->seq_header_enable = 0; - go->gop_header_enable = 0; - go->dvd_mode = 0; - go->interlace_coding = 0; - for (i = 0; i < 4; ++i) - go->modet[i].enable = 0; - for (i = 0; i < 1624; ++i) - go->modet_map[i] = 0; - go->audio_deliver = NULL; - go->audio_enabled = 0; return go; } diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index e33fa78ef98dd..f46db1b6a27c4 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -1355,7 +1355,7 @@ static int stk_camera_probe(struct usb_interface *interface, if (!dev->isoc_ep) { pr_err("Could not find isoc-in endpoint\n"); err = -ENODEV; - goto error; + goto error_put; } dev->vsettings.palette = V4L2_PIX_FMT_RGB565; dev->vsettings.mode = MODE_VGA; @@ -1368,10 +1368,12 @@ static int stk_camera_probe(struct usb_interface *interface, err = stk_register_video_device(dev); if (err) - goto error; + goto error_put; return 0; +error_put: + usb_put_intf(interface); error: v4l2_ctrl_handler_free(hdl); v4l2_device_unregister(&dev->v4l2_dev); diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 06167c51af128..2ca1e8ce6159d 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -900,8 +900,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input) { struct uvc_fh *handle = fh; struct uvc_video_chain *chain = handle->chain; + u8 *buf; int ret; - u8 i; if (chain->selector == NULL || (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) { @@ -909,22 +909,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input) return 0; } + buf = kmalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id, chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL, - &i, 1); - if (ret < 0) - return ret; + buf, 1); + if (!ret) + *input = *buf - 1; - *input = i - 1; - return 0; + kfree(buf); + + return ret; } static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input) { struct uvc_fh *handle = fh; struct uvc_video_chain *chain = handle->chain; + u8 *buf; int ret; - u32 i; ret = uvc_acquire_privileges(handle); if (ret < 0) @@ -940,10 +945,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input) if (input >= chain->selector->bNrInPins) return -EINVAL; - i = input + 1; - return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id, - chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL, - &i, 1); + buf = kmalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + *buf = input + 1; + ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id, + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL, + buf, 1); + kfree(buf); + + return ret; } static int uvc_ioctl_queryctrl(struct file *file, void *fh, diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index a24b40dfec97a..af38c989ff336 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle)) return false; - for (i = 0; i < 
v4l2_dv_timings_presets[i].bt.width; i++) { + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap, fnc, fnc_handle) && v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i, @@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic) { unsigned int i; - for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) { + for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) { const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt; diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 11ab17f64c649..f0527e7698677 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500, if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F) line += 1; - handle_nested_irq(irq_create_mapping(ab8500->domain, line)); + handle_nested_irq(irq_find_mapping(ab8500->domain, line)); } return 0; diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index aa65931142ba7..dcb341d627582 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -127,12 +127,13 @@ static const struct regmap_range axp288_writeable_ranges[] = { static const struct regmap_range axp288_volatile_ranges[] = { regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON), + regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT), regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL), regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT), regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL), regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L), regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL), - regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE), + regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE), regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L), regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG), }; diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 566caca4efd8e..722ad2c368a56 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -1035,7 +1035,7 @@ static irqreturn_t stmpe_irq(int irq, void *data) if (variant->id_val == STMPE801_ID || variant->id_val == STMPE1600_ID) { - int base = irq_create_mapping(stmpe->domain, 0); + int base = irq_find_mapping(stmpe->domain, 0); handle_nested_irq(base); return IRQ_HANDLED; @@ -1063,7 +1063,7 @@ static irqreturn_t stmpe_irq(int irq, void *data) while (status) { int bit = __ffs(status); int line = bank * 8 + bit; - int nestedirq = irq_create_mapping(stmpe->domain, line); + int nestedirq = irq_find_mapping(stmpe->domain, line); handle_nested_irq(nestedirq); status &= ~(1 << bit); diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index cc9e563f23aa6..7062baf606858 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -187,7 +187,7 @@ again: while (status) { int bit = __ffs(status); - int virq = irq_create_mapping(tc3589x->domain, bit); + int virq = irq_find_mapping(tc3589x->domain, bit); handle_nested_irq(virq); status &= ~(1 << bit); diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c index 18710f3b5c534..2c58d9b99a394 100644 --- a/drivers/mfd/wm8994-irq.c +++ b/drivers/mfd/wm8994-irq.c @@ -159,7 +159,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data) struct wm8994 *wm8994 = data; while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio)) - handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0)); + handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0)); return 
IRQ_HANDLED; } diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/misc/aspeed-lpc-ctrl.c index a024f8042259a..870ab0dfcde06 100644 --- a/drivers/misc/aspeed-lpc-ctrl.c +++ b/drivers/misc/aspeed-lpc-ctrl.c @@ -50,7 +50,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma) unsigned long vsize = vma->vm_end - vma->vm_start; pgprot_t prot = vma->vm_page_prot; - if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size) + if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT) return -EINVAL; /* ast2400/2500 AHB accesses are not cache coherent */ diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 9bc97d6a651db..db433d285effa 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -2249,7 +2249,8 @@ int vmci_qp_broker_map(struct vmci_handle handle, result = VMCI_SUCCESS; - if (context_id != VMCI_HOST_CONTEXT_ID) { + if (context_id != VMCI_HOST_CONTEXT_ID && + !QPBROKERSTATE_HAS_MEM(entry)) { struct vmci_qp_page_store page_store; page_store.pages = guest_mem; @@ -2356,7 +2357,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle, goto out; } - if (context_id != VMCI_HOST_CONTEXT_ID) { + if (context_id != VMCI_HOST_CONTEXT_ID && + QPBROKERSTATE_HAS_MEM(entry)) { qp_acquire_queue_mutex(entry->produce_q); result = qp_save_headers(entry); if (result < VMCI_SUCCESS) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7b2bb32e35554..d1cc0fdbc51c8 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -592,6 +592,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, } mmc_wait_for_req(card->host, &mrq); + memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", @@ -641,8 +642,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); - memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Ensure RPMB/R1B command has completed by polling CMD13 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 8e09586f880f1..e3991df078efb 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -808,6 +808,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, int ret = 0; /* Set external dma config: burst size, burst width */ + memset(&cfg, 0, sizeof(cfg)); cfg.dst_addr = host->phy_regs + fifo_offset; cfg.src_addr = cfg.dst_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index a0670e9cd0127..5553a5643f405 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -631,6 +631,7 @@ static int moxart_probe(struct platform_device *pdev) host->dma_chan_tx, host->dma_chan_rx); host->have_dma = true; + memset(&cfg, 0, sizeof(cfg)); cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 02de6a5701d6c..c1de8fa50fe8f 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -551,9 +551,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host, return 0; } +static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ 
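Editor's note: several of the MFD hunks above (ab8500, stmpe, tc3589x, wm8994) switch their interrupt handlers from irq_create_mapping() to irq_find_mapping(): the mapping is created once when the irq domain is set up, and the hot path only looks it up. A rough sketch of that dispatch shape, assuming a chip whose status register reports one bit per child interrupt; the structure and callback names are made up.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct demo_chip {
        struct irq_domain *domain;
        unsigned int (*read_status)(struct demo_chip *chip);
};

static irqreturn_t demo_irq_thread(int irq, void *data)
{
        struct demo_chip *chip = data;
        unsigned int status = chip->read_status(chip);
        irqreturn_t ret = status ? IRQ_HANDLED : IRQ_NONE;

        while (status) {
                int bit = __ffs(status);

                /* Look up the existing mapping; never create one in IRQ context. */
                handle_nested_irq(irq_find_mapping(chip->domain, bit));
                status &= ~BIT(bit);
        }

        return ret;
}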
+ rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) { struct mmc_data *data = mrq->data; + int err; if (host->sg_count < 0) { data->error = host->sg_count; @@ -562,22 +575,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) return data->error; } - if (data->flags & MMC_DATA_READ) - return sd_read_long_data(host, mrq); + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); - return sd_write_long_data(host, mrq); -} + err = sd_read_long_data(host, mrq); -static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); -} + if (host->initial_mode) + sd_enable_initial_mode(host); -static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); + return err; + } + + return sd_write_long_data(host, mrq); } static void sd_normal_rw(struct realtek_pci_sdmmc *host, diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 9c77bfe4334f3..d1a2418b0c5e5 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -185,7 +185,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) * through low speeds without power cycling. */ sdhci_set_clock(host, host->max_clk); - phy_power_on(sdhci_arasan->phy); + if (phy_power_on(sdhci_arasan->phy)) { + pr_err("%s: Cannot power on phy.\n", + mmc_hostname(host->mmc)); + return; + } + sdhci_arasan->is_phy_on = true; /* @@ -221,7 +226,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock) msleep(20); if (ctrl_phy) { - phy_power_on(sdhci_arasan->phy); + if (phy_power_on(sdhci_arasan->phy)) { + pr_err("%s: Cannot power on phy.\n", + mmc_hostname(host->mmc)); + return; + } + sdhci_arasan->is_phy_on = true; } } @@ -395,7 +405,9 @@ static int sdhci_arasan_suspend(struct device *dev) ret = phy_power_off(sdhci_arasan->phy); if (ret) { dev_err(dev, "Cannot power off phy.\n"); - sdhci_resume_host(host); + if (sdhci_resume_host(host)) + dev_err(dev, "Cannot resume host.\n"); + return ret; } sdhci_arasan->is_phy_on = false; diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 3304594177c6d..1fee298d5680c 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -758,7 +758,7 @@ static int cafe_nand_probe(struct pci_dev *pdev, "CAFE NAND", mtd); if (err) { dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq); - goto out_ior; + goto out_free_rs; } /* Disable master reset, enable NAND clock */ @@ -802,6 +802,8 @@ static int cafe_nand_probe(struct pci_dev *pdev, /* Disable NAND IRQ in global IRQ mask register */ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); free_irq(pdev->irq, mtd); + out_free_rs: + free_rs(cafe->rs); out_ior: pci_iounmap(pdev, cafe->mmio); out_free_mtd: diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c814b266af794..d6c5f41b17f7e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1912,7 +1912,6 @@ static int 
__bond_release_one(struct net_device *bond_dev, /* recompute stats just before removing the slave */ bond_get_stats(bond->dev, &bond->bond_stats); - bond_upper_dev_unlink(bond, slave); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ @@ -1921,6 +1920,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); + bond_upper_dev_unlink(bond, slave); + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, slave); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7eaeab65d39f5..451121f47c89a 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2135,9 +2135,8 @@ static int b53_switch_init(struct b53_device *dev) dev->cpu_port = 5; } - /* cpu port is always last */ - dev->num_ports = dev->cpu_port + 1; dev->enabled_ports |= BIT(dev->cpu_port); + dev->num_ports = fls(dev->enabled_ports); /* Include non standard CPU port built-in PHYs to be probed */ if (is539x(dev) || is531x5(dev)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 77005f6366eb1..b3ff8d13c31af 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1245,7 +1245,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* SR-IOV capability was enabled but there are no VFs*/ if (iov->total == 0) { - err = -EINVAL; + err = 0; goto failed; } diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 8f912de44defc..ecb0fb1ddde7c 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -286,6 +286,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, if (GEM_BFEXT(DMA_RXVALID, desc->addr)) { desc_ptp = macb_ptp_desc(bp, desc); + /* Unlikely but check */ + if (!desc_ptp) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not supported in BD\n"); + return; + } gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); @@ -318,8 +324,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) return -ENOMEM; - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc_ptp = macb_ptp_desc(queue->bp, desc); + /* Unlikely but check */ + if (!desc_ptp) + return -EINVAL; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 0ccdde366ae17..540d99f59226e 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!adapter->registered_device_map) { pr_err("%s: could not register any net devices\n", pci_name(pdev)); + err = -EINVAL; goto out_release_adapter_res; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 19165a3548bfd..4fb80ed897f22 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -29,6 +29,8 @@ static const char 
hns3_driver_string[] = static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; static struct hnae3_client client; +#define HNS3_MIN_TUN_PKT_LEN 65U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -792,8 +794,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L4T_TCP); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - return skb_checksum_help(skb); + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? ret : skb_checksum_help(skb); + } hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hnae3_set_field(*type_cs_vlan_tso, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4008007c2e340..d97641b9928bb 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4038,6 +4038,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + if (adapter->failover_pending) { + adapter->init_done_rc = -EAGAIN; + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); + complete(&adapter->init_done); + /* login response buffer will be released on reset */ + return 0; + } + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 774f0a619a6da..f0aa7f0e54803 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1558,9 +1558,9 @@ static int build_match_list(struct match_list_head *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { + rcu_read_unlock(); free_match_list(match_head); - err = -ENOMEM; - goto out; + return -ENOMEM; } if (!tree_get_node(&g->node)) { kfree(curr_match); @@ -1569,7 +1569,6 @@ static int build_match_list(struct match_list_head *match_head, curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } -out: rcu_read_unlock(); return err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 049a83b40e469..9d77f318d11ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -439,7 +439,12 @@ static int qed_enable_msix(struct qed_dev *cdev, rc = cnt; } - if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. 
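Editor's note: the qed hunk above only accepts an MSI-X allocation for a VF when exactly the requested number of vectors came back. With the generic pci_alloc_irq_vectors() helper the same policy can be expressed by passing the same value as both minimum and maximum; this is only an illustration of the idea, not qed's code.

#include <linux/pci.h>

/* Request exactly nvec MSI-X vectors; anything less is treated as failure. */
static int demo_enable_exact_msix(struct pci_dev *pdev, unsigned int nvec)
{
        int rc;

        rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (rc < 0)
                return rc;      /* could not get nvec vectors */

        return 0;               /* rc == nvec here by construction */
}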
+ */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 0d62db3241bed..d16cadca2f7ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2841,6 +2841,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; + int rc; u32 i; /* Translate image_id into MFW definitions */ @@ -2866,7 +2867,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - qed_mcp_nvm_info_populate(p_hwfn); + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1aabb2e7a38b5..756c5943f5e01 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1676,6 +1676,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev) } edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) @@ -2193,7 +2194,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index c48a0e2d4d7ef..6a009d51ec510 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -440,7 +440,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); - qlcnic_rom_unlock(adapter); /* big hammer don't reset CAM block on reset */ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 9d188931bc09e..afd49c7fd87fe 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -413,7 +413,7 @@ qcaspi_receive(struct qcaspi *qca) skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index db6068cd7a1f2..466e9d07697a1 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data, skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index aa11b70b9ca48..2199bd08f4d6a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ 
b/drivers/net/ethernet/rdc/r6040.c @@ -133,6 +133,8 @@ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ +#define MD_CSC 0xb6 /* MDC speed control register */ +#define MD_CSC_DEFAULT 0x0030 #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -368,8 +370,9 @@ static void r6040_reset_mac(struct r6040_private *lp) { void __iomem *ioaddr = lp->base; int limit = MAC_DEF_TIMEOUT; - u16 cmd; + u16 cmd, md_csc; + md_csc = ioread16(ioaddr + MD_CSC); iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); @@ -381,6 +384,10 @@ static void r6040_reset_mac(struct r6040_private *lp) iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); + + /* Restore MDIO clock frequency */ + if (md_csc != MD_CSC_DEFAULT) + iowrite16(md_csc, ioaddr + MD_CSC); } static void r6040_init_mac_regs(struct net_device *dev) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 394ab9cdfe2c7..c44aea47c1208 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2547,6 +2547,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) else txdesc->status |= cpu_to_le32(TD_TACT); + wmb(); /* cur_tx must be incremented after TACT bit was set */ mdp->cur_tx++; if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 0f56f8e336917..03b11f191c262 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -288,10 +288,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -308,10 +305,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -328,8 +322,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); break; default: - /* We don't get here; the switch above will have errored out */ - unreachable(); + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); @@ -360,6 +353,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) return 0; +err_unsupported_phy: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + err = -EINVAL; + err_remove_config_dt: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index d8ba512f166ad..41040756307a1 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1059,6 +1059,8 @@ static int w5100_mmio_probe(struct platform_device *pdev) mac_addr = data->mac_addr; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; if (resource_size(mem) < W5100_BUS_DIRECT_SIZE) ops = 
&w5100_mmio_indirect_ops; else diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 939de185bc6b8..178234e94cd16 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -736,10 +736,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Kick off the transfer */ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ - if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { - netdev_info(ndev, "%s -> netif_stop_queue\n", __func__); + if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) netif_stop_queue(ndev); - } return NETDEV_TX_OK; } diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h index 21aa24c741b96..daae7fa58fb82 100644 --- a/drivers/net/phy/dp83640_reg.h +++ b/drivers/net/phy/dp83640_reg.h @@ -5,7 +5,7 @@ #ifndef HAVE_DP83640_REGISTERS #define HAVE_DP83640_REGISTERS -#define PAGE0 0x0000 +/* #define PAGE0 0x0000 */ #define PHYCR2 0x001c /* PHY Control Register 2 */ #define PAGE4 0x0004 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 0362acd5cdcaa..cdd1b193fd4fe 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -655,6 +655,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit LN920 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 987ebae8ea0e1..afa7a82ffd5d3 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2513,8 +2513,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) goto free_data_skb; for (index = 0; index < num_pri_streams; index++) { - if (WARN_ON(!data_sync_bufs[index].skb)) + if (WARN_ON(!data_sync_bufs[index].skb)) { + ret = -ENOMEM; goto free_data_skb; + } ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, data_sync_bufs[index]. 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 983e1abbd9e43..4d45d5a8ad2ed 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3351,7 +3351,8 @@ found: "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || - (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { + (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) || + (length > cptr)) { ath_dbg(common, EEPROM, "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 9f438d8e59f2f..daad9e7b17cf5 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1622,7 +1622,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) ath9k_hw_gpio_request_out(ah, i, NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); - ath9k_hw_gpio_free(ah, i); } } @@ -2729,14 +2728,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out, const char *label) { + int err; + if (ah->caps.gpio_requested & BIT(gpio)) return; - /* may be requested by BSP, free anyway */ - gpio_free(gpio); - - if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label)) + err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label); + if (err) { + ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n", + gpio, err); return; + } ah->caps.gpio_requested |= BIT(gpio); } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index ad5d3919435c9..87a41d0ededc1 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -600,6 +600,7 @@ static int perf_setup_inbuf(struct perf_peer *peer) return -ENOMEM; } if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) { + ret = -EINVAL; dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n"); goto err_free_inbuf; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 8798274dc3ba7..ffd6a7204509a 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -643,13 +643,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) if (ret) return ret; - ctrl->ctrl.queue_count = nr_io_queues + 1; - if (ctrl->ctrl.queue_count < 2) { + if (nr_io_queues == 0) { dev_err(ctrl->ctrl.device, "unable to set any I/O queues\n"); return -ENOMEM; } + ctrl->ctrl.queue_count = nr_io_queues + 1; dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c index a32e60b024b8d..6675b5e56960c 100644 --- a/drivers/of/kobj.c +++ b/drivers/of/kobj.c @@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np) struct property *pp; int rc; - if (!of_kset) + if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset) return 0; np->kobj.kset = of_kset; diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 5d41dda6da4e7..75daa16f38b7f 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -535,7 +535,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, goto out; /* Yield the port for a while. 
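Editor's note: several hunks in this series (ath6kl's WARN_ON branch, ntb_perf's alignment check below, and the bcache/iwcm init paths earlier) share one theme: every failure branch must load a meaningful error code before jumping to the common unwind labels, and the labels release resources in reverse order of acquisition. A condensed sketch of that shape; demo_init and its buffers are placeholders, not a real API.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int demo_init(void **a, void **b)
{
        int ret;

        *a = kzalloc(64, GFP_KERNEL);
        if (!*a)
                return -ENOMEM;

        *b = kzalloc(64, GFP_KERNEL);
        if (!*b) {
                ret = -ENOMEM;          /* set ret on every failure path */
                goto err_free_a;
        }

        if (!IS_ALIGNED((unsigned long)*b, 64)) {
                ret = -EINVAL;          /* never fall through with a stale ret */
                goto err_free_b;
        }

        return 0;

err_free_b:
        kfree(*b);
err_free_a:
        kfree(*a);
        return ret;
}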
*/ - if (count && dev->port->irq != PARPORT_IRQ_NONE) { + if (dev->port->irq != PARPORT_IRQ_NONE) { parport_release (dev); schedule_timeout_interruptible(msecs_to_jiffies(40)); parport_claim_or_block (dev); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 74aa9da85aa26..a9669b28c2a6d 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -166,7 +166,7 @@ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) -#define PIO_RETRY_CNT 500 +#define PIO_RETRY_CNT 750000 /* 1.5 s */ #define PIO_RETRY_DELAY 2 /* 2 us*/ #define LINK_WAIT_MAX_RETRIES 10 @@ -181,6 +181,7 @@ struct advk_pcie { struct list_head resources; struct irq_domain *irq_domain; struct irq_chip irq_chip; + raw_spinlock_t irq_lock; struct irq_domain *msi_domain; struct irq_domain *msi_inner_domain; struct irq_chip msi_bottom_irq_chip; @@ -603,22 +604,28 @@ static void advk_pcie_irq_mask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; u32 mask; + raw_spin_lock_irqsave(&pcie->irq_lock, flags); mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); mask |= PCIE_ISR1_INTX_ASSERT(hwirq); advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static void advk_pcie_irq_unmask(struct irq_data *d) { struct advk_pcie *pcie = d->domain->host_data; irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; u32 mask; + raw_spin_lock_irqsave(&pcie->irq_lock, flags); mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); + raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); } static int advk_pcie_irq_map(struct irq_domain *h, @@ -701,6 +708,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) struct device_node *pcie_intc_node; struct irq_chip *irq_chip; + raw_spin_lock_init(&pcie->irq_lock); + pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 4850a1b8eec12..a86bd9660dae9 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -6,6 +6,7 @@ * (C) Copyright 2014 - 2015, Xilinx, Inc. 
*/ +#include #include #include #include @@ -169,6 +170,7 @@ struct nwl_pcie { u8 root_busno; struct nwl_msi msi; struct irq_domain *legacy_irq_domain; + struct clk *clk; raw_spinlock_t leg_mask_lock; }; @@ -849,6 +851,16 @@ static int nwl_pcie_probe(struct platform_device *pdev) return err; } + pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->clk)) + return PTR_ERR(pcie->clk); + + err = clk_prepare_enable(pcie->clk); + if (err) { + dev_err(dev, "can't enable PCIe ref clock\n"); + return err; + } + err = nwl_pcie_bridge_init(pcie); if (err) { dev_err(dev, "HW Initialization failed\n"); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index bc80b0f0ea1ba..f65800d63856b 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -754,6 +754,9 @@ static void msix_mask_all(void __iomem *base, int tsize) u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; int i; + if (pci_msi_ignore_mask) + return; + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 9ebf32de85757..97d69b9be1d49 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -224,7 +224,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, *endptr = strchrnul(path, ';'); - wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL); + wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); if (!wpath) return -ENOMEM; @@ -1591,11 +1591,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). */ - if (dev->pm_cap) { - u16 pmcsr; - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - } + pci_update_current_state(dev, dev->current_state); if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ @@ -2170,7 +2166,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable if (enable) { int error; - if (pci_pme_capable(dev, state)) + /* + * Enable PME signaling if the device can signal PME from + * D3cold regardless of whether or not it can signal PME from + * the current target state, because that will allow it to + * signal PME when the hierarchy above it goes into D3cold and + * the device itself ends up in D3cold as a result of that. + */ + if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) pci_pme_active(dev, true); else ret = 1; @@ -2274,16 +2277,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) if (dev->current_state == PCI_D3cold) target_state = PCI_D3cold; - if (wakeup) { + if (wakeup && dev->pme_support) { + pci_power_t state = target_state; + /* * Find the deepest state from which the device can generate * PME#. 
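 *
 * (Worked example with an assumed value: pme_support is a bitmask indexed
 *  by power state, bit 0 = D0 ... bit 4 = D3cold. With pme_support = 0x19,
 *  i.e. PME from D0, D3hot and D3cold, and a target state of PCI_D2, the
 *  loop below steps 2 -> 1 -> 0 without finding a set bit, and the trailing
 *  "pme_support & 1" check then returns PCI_D0 instead of a state that
 *  cannot signal PME#.)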
*/ - if (dev->pme_support) { - while (target_state - && !(dev->pme_support & (1 << target_state))) - target_state--; - } + while (state && !(dev->pme_support & (1 << state))) + state--; + + if (state) + return state; + else if (dev->pme_support & 1) + return PCI_D0; } return target_state; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 7e873b6b7d558..4eb8900b9a5cd 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3152,12 +3152,13 @@ static void fixup_mpss_256(struct pci_dev *dev) { dev->pcie_mpss = 1; /* 256 bytes */ } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256); /* * Intel 5000 and 5100 Memory controllers have an erratum with read completion @@ -4777,6 +4778,10 @@ static const struct pci_dev_acs_enabled { { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ /* Cavium ThunderX */ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, + /* Cavium multi-function devices */ + { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs }, /* APM X-Gene */ { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, /* Ampere Computing */ @@ -5253,7 +5258,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); /* - * Create device link for NVIDIA GPU with integrated USB xHCI Host + * Create device link for GPUs with integrated USB xHCI Host * controller to VGA. */ static void quirk_gpu_usb(struct pci_dev *usb) @@ -5262,9 +5267,11 @@ static void quirk_gpu_usb(struct pci_dev *usb) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); /* - * Create device link for NVIDIA GPU with integrated Type-C UCSI controller + * Create device link for GPUs with integrated Type-C UCSI controller * to VGA. Currently there is no class code defined for UCSI device over PCI * so using UNKNOWN class for now and it will be updated when UCSI * over PCI gets a class code. 
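 * The device link ties runtime PM of these companion functions to the
 * GPU's VGA function, so they are powered up and suspended in a consistent
 * order with the GPU itself. The ATI entries added below extend what was
 * previously an NVIDIA-only quirk to AMD GPUs that expose the same
 * integrated xHCI and UCSI functions.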
@@ -5277,6 +5284,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN, 8, quirk_gpu_usb_typec_ucsi); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_UNKNOWN, 8, + quirk_gpu_usb_typec_ucsi); /* * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index a7bdd10fccf33..68ac8a0f5c72e 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -21,8 +21,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, long err; int cfg_ret; + err = -EPERM; + dev = NULL; if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + goto error; err = -ENODEV; dev = pci_get_domain_bus_and_slot(0, bus, dfn); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 04a4e761e9a9c..c2f807bf34899 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1201,6 +1201,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF) { dev_err(pcs->dev, "pinconf not supported\n"); + res = -ENOTSUPP; goto free_pingroups; } diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index c05217edcb0e0..82407e4a16427 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev, pin_bank->grange.pin_base = drvdata->pin_base + pin_bank->pin_base; pin_bank->grange.base = pin_bank->grange.pin_base; - pin_bank->grange.npins = pin_bank->gpio_chip.ngpio; + pin_bank->grange.npins = pin_bank->nr_pins; pin_bank->grange.gc = &pin_bank->gpio_chip; pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange); } diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index ac784ac66ac34..2b807c8aa869c 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -219,6 +219,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev, msg->insize = sizeof(struct ec_response_get_protocol_info); ret = send_command(ec_dev, msg); + /* + * Send command once again when timeout occurred. + * Fingerprint MCU (FPMCU) is restarted during system boot which + * introduces small window in which FPMCU won't respond for any + * messages sent by kernel. There is no need to wait before next + * attempt because we waited at least EC_MSG_DEADLINE_MS. 
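+ * Note: by the time send_command() returns -ETIMEDOUT it has already
+ * waited at least EC_MSG_DEADLINE_MS, so the single retry below roughly
+ * doubles the time spent on a query that never gets an answer; any error
+ * other than -ETIMEDOUT is handled as before, without a second attempt.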
+ */ + if (ret == -ETIMEDOUT) + ret = send_command(ec_dev, msg); if (ret < 0) { dev_dbg(ec_dev->dev, diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index ccccce9b67efe..0ebcf412f6ca0 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -72,6 +72,7 @@ static int run_smbios_call(struct wmi_device *wdev) if (obj->type == ACPI_TYPE_INTEGER) dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n", obj->integer.value); + kfree(output.pointer); return -EIO; } memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length); diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 157cf5ec6b023..1641868c345c2 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -158,7 +158,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg) } if (ret < 0) { - dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret); + dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret); return ret; } @@ -172,7 +172,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val) ret = regmap_write(info->regmap, reg, (unsigned int)val); if (ret < 0) - dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret); + dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret); return ret; } diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index 00a3a581e0795..a1518eb6f6c0d 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -740,7 +740,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip) struct max17042_config_data *config = chip->pdata->config_data; max17042_override_por(map, MAX17042_TGAIN, config->tgain); - max17042_override_por(map, MAx17042_TOFF, config->toff); + max17042_override_por(map, MAX17042_TOFF, config->toff); max17042_override_por(map, MAX17042_CGAIN, config->cgain); max17042_override_por(map, MAX17042_COFF, config->coff); @@ -856,8 +856,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev) { struct max17042_chip *chip = dev; u32 val; + int ret; + + ret = regmap_read(chip->regmap, MAX17042_STATUS, &val); + if (ret) + return IRQ_HANDLED; - regmap_read(chip->regmap, MAX17042_STATUS, &val); if ((val & STATUS_INTR_SOCMIN_BIT) || (val & STATUS_INTR_SOCMAX_BIT)) { dev_info(&chip->client->dev, "SOC threshold INTR\n"); diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index a9bbd022aeefe..9f10af7ac6efd 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -470,6 +470,6 @@ static struct platform_driver tps65910_rtc_driver = { }; module_platform_driver(tps65910_rtc_driver); -MODULE_ALIAS("platform:rtc-tps65910"); +MODULE_ALIAS("platform:tps65910-rtc"); MODULE_AUTHOR("Venu Byravarasu "); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 825a8f2703b4f..6efe50d70c4bf 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -364,9 +364,26 @@ static ssize_t pimpampom_show(struct device *dev, } static DEVICE_ATTR_RO(pimpampom); +static ssize_t dev_busid_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + struct pmcw *pmcw = &sch->schib.pmcw; + + if ((pmcw->st == SUBCHANNEL_TYPE_IO || + pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv) + return sysfs_emit(buf, "0.%x.%04x\n", 
sch->schid.ssid, + pmcw->dev); + else + return sysfs_emit(buf, "none\n"); +} +static DEVICE_ATTR_RO(dev_busid); + static struct attribute *io_subchannel_type_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, + &dev_attr_dev_busid.attr, NULL, }; ATTRIBUTE_GROUPS(io_subchannel_type); diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 79b5c5457cc22..b8dd9986809b4 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -3605,7 +3605,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, if (buf[0] != '\n' || len > 1) printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); } else - printk("%s", buf); + pr_cont("%s", buf); } else { if (begin) { if (adapter != NULL && adapter->adapter_initd) @@ -3613,7 +3613,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, else printk("%s%s", blogic_msglevelmap[msglevel], buf); } else - printk("%s", buf); + pr_cont("%s", buf); } begin = (buf[len - 1] == '\n'); } diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 7665fd641886e..ab66e1f0fdfa3 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1507,7 +1507,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) { u32 *list; int i; - int status = 0, rc; + int status; u32 *pbl; dma_addr_t page; int num_pages; @@ -1518,14 +1518,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) */ if (!qedi->num_queues) { QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n"); - return 1; + return -ENOMEM; } /* Make sure we allocated the PBL that will contain the physical * addresses of our queues */ if (!qedi->p_cpuq) { - status = 1; + status = -EINVAL; goto mem_alloc_failure; } @@ -1540,13 +1540,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) "qedi->global_queues=%p.\n", qedi->global_queues); /* Allocate DMA coherent buffers for BDQ */ - rc = qedi_alloc_bdq(qedi); - if (rc) + status = qedi_alloc_bdq(qedi); + if (status) goto mem_alloc_failure; /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */ - rc = qedi_alloc_nvm_iscsi_cfg(qedi); - if (rc) + status = qedi_alloc_nvm_iscsi_cfg(qedi); + if (status) goto mem_alloc_failure; /* Allocate a CQ and an associated PBL for each MSI-X diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 7821c1695e824..dcd0f058f23e0 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -88,8 +88,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, struct qla_hw_data *ha; struct qla_qpair *qpair; - if (!qidx) - qidx++; + /* Map admin queue and 1st IO queue to index 0 */ + if (qidx) + qidx--; vha = (struct scsi_qla_host *)lport->private; ha = vha->hw; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index 50214b620865e..2b49d2c212dab 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -117,7 +117,7 @@ struct smsm_entry { DECLARE_BITMAP(irq_enabled, 32); DECLARE_BITMAP(irq_rising, 32); DECLARE_BITMAP(irq_falling, 32); - u32 last_value; + unsigned long last_value; u32 *remote_state; u32 *subscription; @@ -212,8 +212,7 @@ static irqreturn_t smsm_intr(int irq, void *data) u32 val; val = readl(entry->remote_state); - changed = val ^ entry->last_value; - entry->last_value = val; + changed = val ^ xchg(&entry->last_value, val); for_each_set_bit(i, entry->irq_enabled, 32) { if (!(changed & BIT(i))) @@ -274,6 +273,12 @@ static void smsm_unmask_irq(struct irq_data *irqd) struct qcom_smsm *smsm = entry->smsm; 
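	/* Aside on the earlier type change: last_value was widened from u32
	 * to unsigned long because set_bit()/clear_bit() below and the xchg()
	 * in smsm_intr() operate on unsigned long words; together they let
	 * both paths update the cached state without a separate lock. */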
u32 val; + /* Make sure our last cached state is up-to-date */ + if (readl(entry->remote_state) & BIT(irq)) + set_bit(irq, &entry->last_value); + else + clear_bit(irq, &entry->last_value); + set_bit(irq, entry->irq_enabled); if (entry->subscription) { diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig index 20da55d9cbb1e..d483b0e29b81f 100644 --- a/drivers/soc/rockchip/Kconfig +++ b/drivers/soc/rockchip/Kconfig @@ -5,8 +5,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST # config ROCKCHIP_GRF - bool - default y + bool "Rockchip General Register Files support" if COMPILE_TEST + default y if ARCH_ROCKCHIP help The General Register Files are a central component providing special additional settings registers for a lot of soc-components. diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 25486ee8379b6..cfbf1ffb61bff 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -430,6 +430,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) goto err_rx_dma_buf; } + memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = phy_addr + SPI_POPR; cfg.dst_addr = phy_addr + SPI_PUSHR; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c index 661a40c653e90..d8cdb13ce3e4a 100644 --- a/drivers/spi/spi-pic32.c +++ b/drivers/spi/spi-pic32.c @@ -369,6 +369,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width) struct dma_slave_config cfg; int ret; + memset(&cfg, 0, sizeof(cfg)); cfg.device_fc = true; cfg.src_addr = pic32s->dma_base + buf_offset; cfg.dst_addr = pic32s->dma_base + buf_offset; diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index e41976010dc48..97f44458ee7b7 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -99,7 +99,7 @@ #define HWRST_STATUS_SPRDISK 0xc0 /* Use default timeout 50 ms that converts to watchdog values */ -#define WDG_LOAD_VAL ((50 * 1000) / 32768) +#define WDG_LOAD_VAL ((50 * 32768) / 1000) #define WDG_LOAD_MASK GENMASK(15, 0) #define WDG_UNLOCK_KEY 0xe551 diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c index cb6feb34dd401..f980af0373452 100644 --- a/drivers/staging/board/board.c +++ b/drivers/staging/board/board.c @@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc) static int board_staging_add_dev_domain(struct platform_device *pdev, const char *domain) { + struct device *dev = &pdev->dev; struct of_phandle_args pd_args; struct device_node *np; @@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev, pd_args.np = np; pd_args.args_count = 0; - return of_genpd_add_device(&pd_args, &pdev->dev); + /* Initialization similar to device_pm_init_common() */ + spin_lock_init(&dev->power.lock); + dev->power.early_init = true; + + return of_genpd_add_device(&pd_args, dev); } #else static inline int board_staging_add_dev_domain(struct platform_device *pdev, diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c index 74551eb717fc7..79d0513bd2828 100644 --- a/drivers/staging/ks7010/ks7010_sdio.c +++ b/drivers/staging/ks7010/ks7010_sdio.c @@ -938,9 +938,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv, memset(&priv->wstats, 0, sizeof(priv->wstats)); /* sleep mode */ + atomic_set(&priv->sleepstatus.status, 0); atomic_set(&priv->sleepstatus.doze_request, 0); atomic_set(&priv->sleepstatus.wakeup_request, 0); - 
atomic_set(&priv->sleepstatus.wakeup_request, 0); trx_device_init(priv); hostif_init(priv); diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c index c9a6d97938f63..68889d082c3c7 100644 --- a/drivers/staging/rts5208/rtsx_scsi.c +++ b/drivers/staging/rts5208/rtsx_scsi.c @@ -2841,10 +2841,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) } if (dev_info_id == 0x15) { - buf_len = 0x3A; + buf_len = 0x3C; data_len = 0x3A; } else { - buf_len = 0x6A; + buf_len = 0x6C; data_len = 0x6A; } @@ -2895,11 +2895,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip) } rtsx_stor_set_xfer_buf(buf, buf_len, srb); - - if (dev_info_id == 0x15) - scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C); - else - scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C); + scsi_set_resid(srb, scsi_bufflen(srb) - buf_len); kfree(buf); return STATUS_SUCCESS; diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c index 66f95f758be05..73226337f5610 100644 --- a/drivers/tty/hvc/hvsi.c +++ b/drivers/tty/hvc/hvsi.c @@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = { static int __init hvsi_init(void) { - int i; + int i, ret; hvsi_driver = alloc_tty_driver(hvsi_count); if (!hvsi_driver) @@ -1069,12 +1069,25 @@ static int __init hvsi_init(void) } hvsi_wait = wait_for_state; /* irqs active now */ - if (tty_register_driver(hvsi_driver)) - panic("Couldn't register hvsi console driver\n"); + ret = tty_register_driver(hvsi_driver); + if (ret) { + pr_err("Couldn't register hvsi console driver\n"); + goto err_free_irq; + } printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count); return 0; +err_free_irq: + hvsi_wait = poll_for_state; + for (i = 0; i < hvsi_count; i++) { + struct hvsi_struct *hp = &hvsi_ports[i]; + + free_irq(hp->virq, hp); + } + tty_driver_kref_put(hvsi_driver); + + return ret; } device_initcall(hvsi_init); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 725e5842b8acc..f54c18e4ae909 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -70,7 +70,7 @@ static void moan_device(const char *str, struct pci_dev *dev) static int setup_port(struct serial_private *priv, struct uart_8250_port *port, - int bar, int offset, int regshift) + u8 bar, unsigned int offset, int regshift) { struct pci_dev *dev = priv->dev; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 68f71298c11b5..39e821d6e5376 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -132,7 +132,8 @@ static const struct serial8250_config uart_config[] = { .name = "16C950/954", .fifo_size = 128, .tx_loadsz = 128, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01, + .rxtrig_bytes = {16, 32, 112, 120}, /* UART_CAP_EFR breaks billionon CF bluetooth card. 
*/ .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index deb9d4fa9cb09..b757fd1bdbfa5 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2164,7 +2164,7 @@ static int lpuart_probe(struct platform_device *pdev) return PTR_ERR(sport->port.membase); sport->port.membase += sdata->reg_off; - sport->port.mapbase = res->start; + sport->port.mapbase = res->start + sdata->reg_off; sport->port.dev = &pdev->dev; sport->port.type = PORT_LPUART; ret = platform_get_irq(pdev, 0); diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c index bf0e2a4cb0cef..c6f927a76c3be 100644 --- a/drivers/tty/serial/jsm/jsm_neo.c +++ b/drivers/tty/serial/jsm/jsm_neo.c @@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port) /* Parse any modem signal changes */ jsm_dbg(INTR, &ch->ch_bd->pci_dev, "MOD_STAT: sending to parse_modem_sigs\n"); + spin_lock_irqsave(&ch->uart_port.lock, lock_flags); neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); + spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags); } } diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c index 689774c073ca4..8438454ca653f 100644 --- a/drivers/tty/serial/jsm/jsm_tty.c +++ b/drivers/tty/serial/jsm/jsm_tty.c @@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state) static int jsm_tty_open(struct uart_port *port) { + unsigned long lock_flags; struct jsm_board *brd; struct jsm_channel *channel = container_of(port, struct jsm_channel, uart_port); @@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port) channel->ch_cached_lsr = 0; channel->ch_stops_sent = 0; + spin_lock_irqsave(&port->lock, lock_flags); termios = &port->state->port.tty->termios; channel->ch_c_cflag = termios->c_cflag; channel->ch_c_iflag = termios->c_iflag; @@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port) jsm_carrier(channel); channel->ch_open_count++; + spin_unlock_irqrestore(&port->lock, lock_flags); jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n"); return 0; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index db5b11879910c..6f44c5f0ef3aa 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1750,6 +1750,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr) /* Handle BREAKs */ sci_handle_breaks(port); + + /* drop invalid character received before break was detected */ + serial_port_in(port, SCxRDR); + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port)); return IRQ_HANDLED; @@ -1829,7 +1833,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) ret = sci_er_interrupt(irq, ptr); /* Break Interrupt */ - if ((ssr_status & SCxSR_BRK(port)) && err_enabled) + if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] && + (ssr_status & SCxSR_BRK(port)) && err_enabled) ret = sci_br_interrupt(irq, ptr); /* Overrun Interrupt */ diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 9e9343adc2b46..b6f42d0ee6269 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2173,8 +2173,6 @@ static int tty_fasync(int fd, struct file *filp, int on) * Locking: * Called functions take tty_ldiscs_lock * current->signal->tty check is safe without locks - * - * FIXME: may race normal receive processing */ static int tiocsti(struct tty_struct *tty, char __user *p) @@ -2190,8 +2188,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p) ld = tty_ldisc_ref_wait(tty); if (!ld) return -EIO; + 
tty_buffer_lock_exclusive(tty->port); if (ld->ops->receive_buf) ld->ops->receive_buf(tty, &ch, &mbz, 1); + tty_buffer_unlock_exclusive(tty->port); tty_ldisc_deref(ld); return 0; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d85bb3ba8263f..a76ed4acb5700 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -481,7 +481,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed, { unsigned val; - if (c->MaxPower) + if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)) val = c->MaxPower; else val = CONFIG_USB_GADGET_VBUS_DRAW; @@ -891,7 +891,11 @@ static int set_config(struct usb_composite_dev *cdev, } /* when we return, be sure our power usage is valid */ - power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; + if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)) + power = c->MaxPower; + else + power = CONFIG_USB_GADGET_VBUS_DRAW; + if (gadget->speed < USB_SPEED_SUPER) power = min(power, 500U); else diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 156651df6b4da..d7a12161e5531 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -491,8 +491,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, } spin_unlock_irqrestore(&dev->lock, flags); - if (skb && !in) { - dev_kfree_skb_any(skb); + if (!in) { + if (skb) + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c index 03959dc86cfd8..dd5cdcdfa4030 100644 --- a/drivers/usb/gadget/udc/at91_udc.c +++ b/drivers/usb/gadget/udc/at91_udc.c @@ -1879,7 +1879,9 @@ static int at91udc_probe(struct platform_device *pdev) clk_disable(udc->iclk); /* request UDC and maybe VBUS irqs */ - udc->udp_irq = platform_get_irq(pdev, 0); + udc->udp_irq = retval = platform_get_irq(pdev, 0); + if (retval < 0) + goto err_unprepare_iclk; retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0, driver_name, udc); if (retval) { diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index e174b1b889da5..d04de117bf639 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -568,7 +568,8 @@ static int bdc_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "No suitable DMA config available, abort\n"); - return -ENOTSUPP; + ret = -ENOTSUPP; + goto phycleanup; } dev_dbg(dev, "Using 32-bit address\n"); } diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index 35e02a8d0091a..bdba3f48c0527 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ -1922,14 +1922,6 @@ static int mv_u3d_probe(struct platform_device *dev) goto err_get_irq; } u3d->irq = r->start; - if (request_irq(u3d->irq, mv_u3d_irq, - IRQF_SHARED, driver_name, u3d)) { - u3d->irq = 0; - dev_err(&dev->dev, "Request irq %d for u3d failed\n", - u3d->irq); - retval = -ENODEV; - goto err_request_irq; - } /* initialize gadget structure */ u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */ @@ -1942,6 +1934,15 @@ static int mv_u3d_probe(struct platform_device *dev) mv_u3d_eps_init(u3d); + if (request_irq(u3d->irq, mv_u3d_irq, + IRQF_SHARED, driver_name, u3d)) { + u3d->irq = 0; + dev_err(&dev->dev, "Request irq %d for u3d failed\n", + u3d->irq); + retval = -ENODEV; + goto err_request_irq; + } + /* external vbus detection */ if (u3d->vbus) { u3d->clock_gating = 1; @@ -1965,8 +1966,8 @@ 
static int mv_u3d_probe(struct platform_device *dev) err_unregister: free_irq(u3d->irq, u3d); -err_request_irq: err_get_irq: +err_request_irq: kfree(u3d->status_req); err_alloc_status_req: kfree(u3d->eps); diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 1ad72647a0691..da0f36af0b380 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -250,8 +250,11 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) * the clock does not exists. */ priv->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); + if (!IS_ERR(priv->clk)) { + err = clk_prepare_enable(priv->clk); + if (err) + goto err_put_hcd; + } priv->phy = devm_phy_optional_get(&pdev->dev, "usb"); if (IS_ERR(priv->phy)) { @@ -312,6 +315,7 @@ err_phy_init: err_phy_get: if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); +err_put_hcd: usb_put_hcd(hcd); err: dev_err(&pdev->dev, "init %s fail, %d\n", diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 226b38274a6ef..1577424319613 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -2509,11 +2509,6 @@ retry_xacterr: return count; } -/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ -#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -/* ... and packet size, for any kind of endpoint descriptor */ -#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) - /* reverse of qh_urb_transaction: free a list of TDs. * used for cleanup after errors, before HC sees an URB's TDs. */ @@ -2599,7 +2594,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210, token |= (1 /* "in" */ << 8); /* else it's already initted to "out" pid (0 << 8) */ - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input); /* * buffer gets wrapped in one or more qtds; @@ -2713,9 +2708,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, gfp_t flags) { struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags); + struct usb_host_endpoint *ep; u32 info1 = 0, info2 = 0; int is_input, type; int maxp = 0; + int mult; struct usb_tt *tt = urb->dev->tt; struct fotg210_qh_hw *hw; @@ -2730,14 +2727,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, is_input = usb_pipein(urb->pipe); type = usb_pipetype(urb->pipe); - maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input); + ep = usb_pipe_endpoint(urb->dev, urb->pipe); + maxp = usb_endpoint_maxp(&ep->desc); + mult = usb_endpoint_maxp_mult(&ep->desc); /* 1024 byte maxpacket is a hardware ceiling. High bandwidth * acts like up to 3KB, but is built from smaller packets. */ - if (max_packet(maxp) > 1024) { - fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", - max_packet(maxp)); + if (maxp > 1024) { + fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp); goto done; } @@ -2751,8 +2749,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, */ if (type == PIPE_INTERRUPT) { qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, - is_input, 0, - hb_mult(maxp) * max_packet(maxp))); + is_input, 0, mult * maxp)); qh->start = NO_FRAME; if (urb->dev->speed == USB_SPEED_HIGH) { @@ -2789,7 +2786,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, think_time = tt ? 
tt->think_time : 0; qh->tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(urb->dev->speed, - is_input, 0, max_packet(maxp))); + is_input, 0, maxp)); qh->period = urb->interval; if (qh->period > fotg210->periodic_size) { qh->period = fotg210->periodic_size; @@ -2852,11 +2849,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb, * to help them do so. So now people expect to use * such nonconformant devices with Linux too; sigh. */ - info1 |= max_packet(maxp) << 16; + info1 |= maxp << 16; info2 |= (FOTG210_TUNE_MULT_HS << 30); } else { /* PIPE_INTERRUPT */ - info1 |= max_packet(maxp) << 16; - info2 |= hb_mult(maxp) << 30; + info1 |= maxp << 16; + info2 |= mult << 30; } break; default: @@ -3926,6 +3923,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210, int is_input; long bandwidth; unsigned multi; + struct usb_host_endpoint *ep; /* * this might be a "high bandwidth" highspeed endpoint, @@ -3933,14 +3931,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210, */ epnum = usb_pipeendpoint(pipe); is_input = usb_pipein(pipe) ? USB_DIR_IN : 0; - maxp = usb_maxpacket(dev, pipe, !is_input); + ep = usb_pipe_endpoint(dev, pipe); + maxp = usb_endpoint_maxp(&ep->desc); if (is_input) buf1 = (1 << 11); else buf1 = 0; - maxp = max_packet(maxp); - multi = hb_mult(maxp); + multi = usb_endpoint_maxp_mult(&ep->desc); buf1 |= maxp; maxp *= multi; @@ -4461,13 +4459,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) /* HC need not update length with this error */ if (!(t & FOTG210_ISOC_BABBLE)) { - desc->actual_length = - fotg210_itdlen(urb, desc, t); + desc->actual_length = FOTG210_ITD_LENGTH(t); urb->actual_length += desc->actual_length; } } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) { desc->status = 0; - desc->actual_length = fotg210_itdlen(urb, desc, t); + desc->actual_length = FOTG210_ITD_LENGTH(t); urb->actual_length += desc->actual_length; } else { /* URB was too late */ diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h index 7fcd785c7bc85..0f1da9503bc67 100644 --- a/drivers/usb/host/fotg210.h +++ b/drivers/usb/host/fotg210.h @@ -683,11 +683,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210) return fotg210_readl(fotg210, &fotg210->regs->frame_index); } -#define fotg210_itdlen(urb, desc, t) ({ \ - usb_pipein((urb)->pipe) ? \ - (desc)->length - FOTG210_ITD_LENGTH(t) : \ - FOTG210_ITD_LENGTH(t); \ -}) /*-------------------------------------------------------------------------*/ #endif /* __LINUX_FOTG210_H */ diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index a631dbb369d76..983a00e2988dc 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -199,6 +199,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev) if (!cell) return -EINVAL; + if (irq < 0) + return irq; + hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev)); if (!hcd) { ret = -ENOMEM; diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 2b0ccd150209f..4ebbe2c232926 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -143,6 +143,13 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd) const struct soc_device_attribute *attr; const char *firmware_name; + /* + * According to the datasheet, "Upon the completion of FW Download, + * there is no need to write or reload FW". 
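+ * The RCAR_USB3_DL_CTRL_FW_SUCCESS status bit checked below is set once a
+ * download has completed, so a later call (for example when the controller
+ * is re-initialised on resume) becomes a no-op instead of re-writing
+ * firmware that the datasheet says there is no need to reload.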
+ */ + if (readl(regs + RCAR_USB3_DL_CTRL) & RCAR_USB3_DL_CTRL_FW_SUCCESS) + return 0; + attr = soc_device_match(rcar_quirks_match); if (attr) quirks = (uintptr_t)attr->data; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index c4e3760abd5b4..bebab0ec29786 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -4580,19 +4580,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - if (xhci->quirks & XHCI_INTEL_HOST) - timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); - else - timeout_ns = udev->u1_params.sel; - /* Prevent U1 if service interval is shorter than U1 exit latency */ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) { dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); return USB3_LPM_DISABLED; } } + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); + else + timeout_ns = udev->u1_params.sel; + /* The U1 timeout is encoded in 1us intervals. * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */ @@ -4644,19 +4644,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, { unsigned long long timeout_ns; - if (xhci->quirks & XHCI_INTEL_HOST) - timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); - else - timeout_ns = udev->u2_params.sel; - /* Prevent U2 if service interval is shorter than U2 exit latency */ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { - if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); return USB3_LPM_DISABLED; } } + if (xhci->quirks & XHCI_INTEL_HOST) + timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); + else + timeout_ns = udev->u2_params.sel; + /* The U2 timeout is encoded in 256us intervals */ timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); /* If the necessary timeout value is bigger than what we can set in the diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c index 3828ed6d38a21..270721cf229f7 100644 --- a/drivers/usb/mtu3/mtu3_gadget.c +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -69,14 +69,12 @@ static int mtu3_ep_enable(struct mtu3_ep *mep) u32 interval = 0; u32 mult = 0; u32 burst = 0; - int max_packet; int ret; desc = mep->desc; comp_desc = mep->comp_desc; mep->type = usb_endpoint_type(desc); - max_packet = usb_endpoint_maxp(desc); - mep->maxp = max_packet & GENMASK(10, 0); + mep->maxp = usb_endpoint_maxp(desc); switch (mtu->g.speed) { case USB_SPEED_SUPER: @@ -97,7 +95,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep) usb_endpoint_xfer_int(desc)) { interval = desc->bInterval; interval = clamp_val(interval, 1, 16) - 1; - burst = (max_packet & GENMASK(12, 11)) >> 11; + mult = usb_endpoint_maxp_mult(desc) - 1; } break; default: diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 403eb97915f8a..2f6708b8b5c2f 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -892,23 +892,22 @@ static int dsps_probe(struct platform_device *pdev) if (!glue->usbss_base) return -ENXIO; - if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { - ret = dsps_setup_optional_vbus_irq(pdev, glue); - if (ret) - goto err_iounmap; - } - platform_set_drvdata(pdev, glue); pm_runtime_enable(&pdev->dev); ret = 
dsps_create_musb_pdev(glue, pdev); if (ret) goto err; + if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { + ret = dsps_setup_optional_vbus_irq(pdev, glue); + if (ret) + goto err; + } + return 0; err: pm_runtime_disable(&pdev->dev); -err_iounmap: iounmap(glue->usbss_base); return ret; } diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index f7c96d209eda7..981db219234e4 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -873,6 +873,8 @@ int usb_otg_start(struct platform_device *pdev) /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); + if (p_otg->irq < 0) + return p_otg->irq; status = request_irq(p_otg->irq, fsl_otg_isr, IRQF_SHARED, driver_name, p_otg); if (status) { diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 0981abc3d1adc..60d390e28289f 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -396,7 +396,9 @@ static int tahvo_usb_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, tu); - tu->irq = platform_get_irq(pdev, 0); + tu->irq = ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, IRQF_ONESHOT, "tahvo-vbus", tu); diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index dade34d704198..859af6113b450 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -342,6 +342,11 @@ static int twl6030_usb_probe(struct platform_device *pdev) twl->irq2 = platform_get_irq(pdev, 1); twl->linkstat = MUSB_UNKNOWN; + if (twl->irq1 < 0) + return twl->irq1; + if (twl->irq2 < 0) + return twl->irq2; + twl->comparator.set_vbus = twl6030_set_vbus; twl->comparator.start_srp = twl6030_start_srp; diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 1e3ee2bfbcd03..d0c8d3800254d 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -226,8 +226,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, int status; buf = kmalloc(1, GFP_KERNEL); - if (!buf) + if (!buf) { + *data = 0; return -ENOMEM; + } status = usb_control_msg(usbdev, pipe, request, requesttype, value, index, buf, 1, MOS_WDR_TIMEOUT); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index b13ed5a7618d8..202dc76f7beb9 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET); vhci_hcd->re_timeout = 0; + /* + * A few drivers do usb reset during probe when + * the device could be in VDEV_ST_USED state + */ if (vhci_hcd->vdev[rhport].ud.status == - VDEV_ST_NOTASSIGNED) { + VDEV_ST_NOTASSIGNED || + vhci_hcd->vdev[rhport].ud.status == + VDEV_ST_USED) { usbip_dbg_vhci_rh( " enable rhport %d (status %u)\n", rhport, @@ -957,8 +963,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev) spin_lock(&vdev->priv_lock); list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { + struct urb *urb; + + /* give back urb of unsent unlink request */ pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum); + + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); + if (!urb) { + list_del(&unlink->list); + kfree(unlink); + continue; + } + + urb->status = -ENODEV; + + usb_hcd_unlink_urb_from_ep(hcd, urb); + list_del(&unlink->list); + + spin_unlock(&vdev->priv_lock); + 
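+			/* usb_hcd_giveback_urb() below invokes the URB's
+			 * completion handler, which may re-enter the HCD
+			 * (e.g. to resubmit or unlink other URBs), so both
+			 * locks are dropped around the call and re-taken in
+			 * the same order before the unlink entry is freed. */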
spin_unlock_irqrestore(&vhci->lock, flags); + + usb_hcd_giveback_urb(hcd, urb, urb->status); + + spin_lock_irqsave(&vhci->lock, flags); + spin_lock(&vdev->priv_lock); + kfree(unlink); } diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index c84333eb5eb59..b7765271d0fb6 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -29,7 +29,7 @@ menuconfig VFIO If you don't know what to do here, say N. -menuconfig VFIO_NOIOMMU +config VFIO_NOIOMMU bool "VFIO No-IOMMU support" depends on VFIO help diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 3a3098d4873be..6a8dd2f16b780 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -400,6 +400,33 @@ int pwm_backlight_brightness_default(struct device *dev, static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb) { struct device_node *node = pb->dev->of_node; + bool active = true; + + /* + * If the enable GPIO is present, observable (either as input + * or output) and off then the backlight is not currently active. + * */ + if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0) + active = false; + + if (!regulator_is_enabled(pb->power_supply)) + active = false; + + if (!pwm_is_enabled(pb->pwm)) + active = false; + + /* + * Synchronize the enable_gpio with the observed state of the + * hardware. + */ + if (pb->enable_gpio) + gpiod_direction_output(pb->enable_gpio, active); + + /* + * Do not change pb->enabled here! pb->enabled essentially + * tells us if we own one of the regulator's use counts and + * right now we do not. + */ /* Not booted with device tree or no phandle link to the node */ if (!node || !node->phandle) @@ -411,20 +438,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb) * assume that another driver will enable the backlight at the * appropriate time. Therefore, if it is disabled, keep it so. */ - - /* if the enable GPIO is disabled, do not enable the backlight */ - if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0) - return FB_BLANK_POWERDOWN; - - /* The regulator is disabled, do not enable the backlight */ - if (!regulator_is_enabled(pb->power_supply)) - return FB_BLANK_POWERDOWN; - - /* The PWM is disabled, keep it like this */ - if (!pwm_is_enabled(pb->pwm)) - return FB_BLANK_POWERDOWN; - - return FB_BLANK_UNBLANK; + return active ? FB_BLANK_UNBLANK: FB_BLANK_POWERDOWN; } static int pwm_backlight_probe(struct platform_device *pdev) @@ -494,18 +508,6 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->enable_gpio = gpio_to_desc(data->enable_gpio); } - /* - * If the GPIO is not known to be already configured as output, that - * is, if gpiod_get_direction returns either 1 or -EINVAL, change the - * direction to output and set the GPIO as active. - * Do not force the GPIO to active when it was already output as it - * could cause backlight flickering or we would enable the backlight too - * early. Leave the decision of the initial backlight state for later. 
- */ - if (pb->enable_gpio && - gpiod_get_direction(pb->enable_gpio) != 0) - gpiod_direction_output(pb->enable_gpio, 1); - pb->power_supply = devm_regulator_get(&pdev->dev, "power"); if (IS_ERR(pb->power_supply)) { ret = PTR_ERR(pb->power_supply); diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index ea31054a28ca8..c1d6e63362259 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var, { unsigned long Ftarget, ratio, remainder; + if (!var->pixclock) + return -EINVAL; + ratio = 1000000 / var->pixclock; remainder = 1000000 % var->pixclock; Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index de04c097d67c7..2297dfb494d6a 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -971,6 +971,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) if ((var->activate & FB_ACTIVATE_FORCE) || memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) { u32 activate = var->activate; + u32 unused; /* When using FOURCC mode, make sure the red, green, blue and * transp fields are set to 0. @@ -995,6 +996,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) if (var->xres < 8 || var->yres < 8) return -EINVAL; + /* Too huge resolution causes multiplication overflow. */ + if (check_mul_overflow(var->xres, var->yres, &unused) || + check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused)) + return -EINVAL; + ret = info->fbops->fb_check_var(var, info); if (ret) diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index a7bd9f25911b5..74bf26b527b91 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight /* probably haven't called CreateOverlay yet */ return -EINVAL; + if (ulWidth == 0 || ulWidth == 0xffffffff || + ulHeight == 0 || ulHeight == 0xffffffff || + (x < 2 && ulWidth + 2 == 0)) + return -EINVAL; + /* Stop Ramdac Output */ DisableRamdacOutput(deviceInfo.pSTGReg); @@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct kyrofb_info *par = info->par; + if (!var->pixclock) + return -EINVAL; + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) { printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel); return -EINVAL; diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index cc242ba057d3e..dfa81b641f9fe 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -1088,6 +1088,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) int mode_valid = 0; NVTRACE_ENTER(); + if (!var->pixclock) + return -EINVAL; + switch (var->bits_per_pixel) { case 1 ... 8: var->red.offset = var->green.offset = var->blue.offset = 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6f02a3f77fa83..f314b2c2d1487 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -530,7 +530,7 @@ again: * inode has not been flagged as nocompress. This flag can * change at any time if we discover bad compression ratios. 
*/ - if (nr_pages > 1 && inode_need_compress(inode, start, end)) { + if (inode_need_compress(inode, start, end)) { WARN_ON(pages); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!pages) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5a19f5ee70386..6cb4896256106 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1048,6 +1048,9 @@ static void btrfs_close_one_device(struct btrfs_device *device) fs_devices->rw_devices--; } + if (device->devid == BTRFS_DEV_REPLACE_DEVID) + clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) fs_devices->missing_devices--; diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 9986817532b10..7932e20555d2b 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -371,14 +371,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen, if (!dst) return NULL; cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage, - NO_MAP_UNI_RSVD); + NO_MAP_UNI_RSVD); } else { - len = strnlen(src, maxlen); - len++; - dst = kmalloc(len, GFP_KERNEL); - if (!dst) - return NULL; - strlcpy(dst, src, len); + dst = kstrndup(src, maxlen, GFP_KERNEL); } return dst; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index aa23c00367ec4..0113dba28eb09 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -602,7 +602,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct) return 0; out_free_smb_buf: - kfree(smb_buf); + cifs_small_buf_release(smb_buf); sess_data->iov[0].iov_base = NULL; sess_data->iov[0].iov_len = 0; sess_data->buf0_type = CIFS_NO_BUFFER; diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index aa86cb2db8236..ae7a413c0bdde 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -279,3 +279,47 @@ err_kfree: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(fscrypt_get_symlink); + +/** + * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks + * @path: the path for the encrypted symlink being queried + * @stat: the struct being filled with the symlink's attributes + * + * Override st_size of encrypted symlinks to be the length of the decrypted + * symlink target (or the no-key encoded symlink target, if the key is + * unavailable) rather than the length of the encrypted symlink target. This is + * necessary for st_size to match the symlink target that userspace actually + * sees. POSIX requires this, and some userspace programs depend on it. + * + * This requires reading the symlink target from disk if needed, setting up the + * inode's encryption key if possible, and then decrypting or encoding the + * symlink target. This makes lstat() more heavyweight than is normally the + * case. However, decrypted symlink targets will be cached in ->i_link, so + * usually the symlink won't have to be read and decrypted again later if/when + * it is actually followed, readlink() is called, or lstat() is called again. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat) +{ + struct dentry *dentry = path->dentry; + struct inode *inode = d_inode(dentry); + const char *link; + DEFINE_DELAYED_CALL(done); + + /* + * To get the symlink target that userspace will see (whether it's the + * decrypted target or the no-key encoded target), we can just get it in + * the same way the VFS does during path resolution and readlink(). 
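+ *
+ * For example, a common userspace pattern is to allocate st_size + 1 bytes
+ * and readlink() into that buffer; if st_size reflected the on-disk
+ * ciphertext length rather than the target actually presented to
+ * userspace, such programs could truncate the target or wrongly conclude
+ * that the symlink changed between lstat() and readlink().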
+ */ + link = READ_ONCE(inode->i_link); + if (!link) { + link = inode->i_op->get_link(dentry, inode, &done); + if (IS_ERR(link)) + return PTR_ERR(link); + } + stat->size = strlen(link); + do_delayed_call(&done); + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index c952461876595..c64e4a1be2f29 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -750,6 +750,12 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, ext4_write_lock_xattr(inode, &no_expand); BUG_ON(!ext4_has_inline_data(inode)); + /* + * ei->i_inline_off may have changed since ext4_write_begin() + * called ext4_try_to_write_inline_data() + */ + (void) ext4_find_inline_data_nolock(inode); + kaddr = kmap_atomic(page); ext4_write_inline_data(inode, &iloc, kaddr, pos, len); kunmap_atomic(kaddr); diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index dd05af983092d..a9457fed351ed 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -52,10 +52,19 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry, return paddr; } +static int ext4_encrypted_symlink_getattr(const struct path *path, + struct kstat *stat, u32 request_mask, + unsigned int query_flags) +{ + ext4_getattr(path, stat, request_mask, query_flags); + + return fscrypt_symlink_getattr(path, stat); +} + const struct inode_operations ext4_encrypted_symlink_inode_operations = { .get_link = ext4_encrypted_get_link, .setattr = ext4_setattr, - .getattr = ext4_getattr, + .getattr = ext4_encrypted_symlink_getattr, .listxattr = ext4_listxattr, }; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 95330dfdbb1af..2a7249496c57e 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -957,7 +957,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) } if (pg_start < pg_end) { - struct address_space *mapping = inode->i_mapping; loff_t blk_start, blk_end; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -969,8 +968,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); - truncate_inode_pages_range(mapping, blk_start, - blk_end - 1); + truncate_pagecache_range(inode, blk_start, blk_end - 1); f2fs_lock_op(sbi); ret = f2fs_truncate_hole(inode, pg_start, pg_end); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 700c39ec99f5a..38299ccfdf6ef 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -998,8 +998,10 @@ next_step: bool locked = false; if (S_ISREG(inode->i_mode)) { - if (!down_write_trylock(&fi->i_gc_rwsem[READ])) + if (!down_write_trylock(&fi->i_gc_rwsem[READ])) { + sbi->skipped_gc_rwsem++; continue; + } if (!down_write_trylock( &fi->i_gc_rwsem[WRITE])) { sbi->skipped_gc_rwsem++; diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index e20a0f9e68455..edc80855974ac 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1219,9 +1219,18 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, return target; } +static int f2fs_encrypted_symlink_getattr(const struct path *path, + struct kstat *stat, u32 request_mask, + unsigned int query_flags) +{ + f2fs_getattr(path, stat, request_mask, query_flags); + + return fscrypt_symlink_getattr(path, stat); +} + const struct inode_operations f2fs_encrypted_symlink_inode_operations = { .get_link = f2fs_encrypted_get_link, - .getattr = f2fs_getattr, + .getattr = f2fs_encrypted_symlink_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, diff --git a/fs/fcntl.c b/fs/fcntl.c index 
e039af1872ab2..dffb5245ae728 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -993,13 +993,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) { while (fa) { struct fown_struct *fown; + unsigned long flags; if (fa->magic != FASYNC_MAGIC) { printk(KERN_ERR "kill_fasync: bad magic number in " "fasync_struct!\n"); return; } - read_lock(&fa->fa_lock); + read_lock_irqsave(&fa->fa_lock, flags); if (fa->fa_file) { fown = &fa->fa_file->f_owner; /* Don't send SIGURG to processes which have not set a @@ -1008,7 +1009,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) if (!(sig == SIGURG && fown->signum == 0)) send_sigio(fown, fa->fa_fd, band); } - read_unlock(&fa->fa_lock); + read_unlock_irqrestore(&fa->fa_lock, flags); fa = rcu_dereference(fa->fa_next); } } diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index c550512ce3350..2ff05adfc22a4 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -78,10 +78,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie) static int fscache_set_key(struct fscache_cookie *cookie, const void *index_key, size_t index_key_len) { - unsigned long long h; u32 *buf; int bufs; - int i; bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf)); @@ -95,17 +93,7 @@ static int fscache_set_key(struct fscache_cookie *cookie, } memcpy(buf, index_key, index_key_len); - - /* Calculate a hash and combine this with the length in the first word - * or first half word - */ - h = (unsigned long)cookie->parent; - h += index_key_len + cookie->type; - - for (i = 0; i < bufs; i++) - h += buf[i]; - - cookie->key_hash = h ^ (h >> 32); + cookie->key_hash = fscache_hash(0, buf, bufs); return 0; } diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index d6209022e9658..cc87288a5448c 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -101,6 +101,8 @@ extern struct workqueue_struct *fscache_object_wq; extern struct workqueue_struct *fscache_op_wq; DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); +extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n); + static inline bool fscache_object_congested(void) { return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq); diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 30ad89db1efcc..aa49234e9520a 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -98,6 +98,45 @@ static struct ctl_table fscache_sysctls_root[] = { }; #endif +/* + * Mixing scores (in bits) for (7,20): + * Input delta: 1-bit 2-bit + * 1 round: 330.3 9201.6 + * 2 rounds: 1246.4 25475.4 + * 3 rounds: 1907.1 31295.1 + * 4 rounds: 2042.3 31718.6 + * Perfect: 2048 31744 + * (32*64) (32*31/2 * 64) + */ +#define HASH_MIX(x, y, a) \ + ( x ^= (a), \ + y ^= x, x = rol32(x, 7),\ + x += y, y = rol32(y,20),\ + y *= 9 ) + +static inline unsigned int fold_hash(unsigned long x, unsigned long y) +{ + /* Use arch-optimized multiply if one exists */ + return __hash_32(y ^ __hash_32(x)); +} + +/* + * Generate a hash. This is derived from full_name_hash(), but we want to be + * sure it is arch independent and that it doesn't change as bits of the + * computed hash value might appear on disk. The caller also guarantees that + * the hashed data will be a series of aligned 32-bit words. 
+ */ +unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n) +{ + unsigned int a, x = 0, y = salt; + + for (; n; n--) { + a = *data++; + HASH_MIX(x, y, a); + } + return fold_hash(x, y); +} + /* * initialise the fs caching module */ diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 56dddc1f8dddc..9e90e42c495ed 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -295,6 +295,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); + /* don't want to call dlm if we've unmounted the lock protocol */ + if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { + gfs2_glock_free(gl); + return; + } /* don't want to skip dlm_unlock writing the lvb when lock has one */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 488a9e7f8f660..2355ad62b81fa 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -157,7 +157,6 @@ struct iso9660_options{ unsigned int overriderockperm:1; unsigned int uid_set:1; unsigned int gid_set:1; - unsigned int utf8:1; unsigned char map; unsigned char check; unsigned int blocksize; @@ -357,7 +356,6 @@ static int parse_options(char *options, struct iso9660_options *popt) popt->gid = GLOBAL_ROOT_GID; popt->uid = GLOBAL_ROOT_UID; popt->iocharset = NULL; - popt->utf8 = 0; popt->overriderockperm = 0; popt->session=-1; popt->sbsector=-1; @@ -390,10 +388,13 @@ static int parse_options(char *options, struct iso9660_options *popt) case Opt_cruft: popt->cruft = 1; break; +#ifdef CONFIG_JOLIET case Opt_utf8: - popt->utf8 = 1; + kfree(popt->iocharset); + popt->iocharset = kstrdup("utf8", GFP_KERNEL); + if (!popt->iocharset) + return 0; break; -#ifdef CONFIG_JOLIET case Opt_iocharset: kfree(popt->iocharset); popt->iocharset = match_strdup(&args[0]); @@ -496,7 +497,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root) if (sbi->s_nocompress) seq_puts(m, ",nocompress"); if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm"); if (sbi->s_showassoc) seq_puts(m, ",showassoc"); - if (sbi->s_utf8) seq_puts(m, ",utf8"); if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check); if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping); @@ -519,9 +519,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root) seq_printf(m, ",fmode=%o", sbi->s_fmode); #ifdef CONFIG_JOLIET - if (sbi->s_nls_iocharset && - strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0) + if (sbi->s_nls_iocharset) seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset); + else + seq_puts(m, ",iocharset=utf8"); #endif return 0; } @@ -865,14 +866,13 @@ root_found: sbi->s_nls_iocharset = NULL; #ifdef CONFIG_JOLIET - if (joliet_level && opt.utf8 == 0) { + if (joliet_level) { char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT; - sbi->s_nls_iocharset = load_nls(p); - if (! sbi->s_nls_iocharset) { - /* Fail only if explicit charset specified */ - if (opt.iocharset) + if (strcmp(p, "utf8") != 0) { + sbi->s_nls_iocharset = opt.iocharset ? 
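[Editor's aside, not part of the patch] The fscache_hash() helper added to fs/fscache/main.c above must stay byte-for-byte stable across architectures because its output can end up on disk. A standalone userspace sketch of the same mixing rounds; the 0x61C88647 multiplier is assumed to match the kernel's GOLDEN_RATIO_32 / __hash_32() and is not quoted from this patch:

/*
 * Userspace sketch of the word-at-a-time hash added to fs/fscache/main.c.
 * Handy for checking that the value is identical on different hosts.
 * The golden-ratio multiplier below is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t x, unsigned int r)
{
        return (x << r) | (x >> (32 - r));
}

#define HASH_MIX(x, y, a)               \
        (x ^= (a),                      \
         y ^= x, x = rol32(x, 7),       \
         x += y, y = rol32(y, 20),      \
         y *= 9)

static inline uint32_t hash32(uint32_t val)
{
        return val * 0x61C88647u;       /* GOLDEN_RATIO_32 (assumed) */
}

static uint32_t fscache_hash(uint32_t salt, const uint32_t *data, unsigned int n)
{
        uint32_t a, x = 0, y = salt;

        for (; n; n--) {
                a = *data++;
                HASH_MIX(x, y, a);
        }
        return hash32(y ^ hash32(x));   /* fold_hash() */
}

int main(void)
{
        uint32_t key[2] = { 0x12345678, 0x9abcdef0 };

        printf("0x%08x\n", fscache_hash(0, key, 2));
        return 0;
}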
+ load_nls(opt.iocharset) : load_nls_default(); + if (!sbi->s_nls_iocharset) goto out_freesbi; - sbi->s_nls_iocharset = load_nls_default(); } } #endif @@ -888,7 +888,6 @@ root_found: sbi->s_gid = opt.gid; sbi->s_uid_set = opt.uid_set; sbi->s_gid_set = opt.gid_set; - sbi->s_utf8 = opt.utf8; sbi->s_nocompress = opt.nocompress; sbi->s_overriderockperm = opt.overriderockperm; /* diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 055ec6c586f7f..dcdc191ed1834 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -44,7 +44,6 @@ struct isofs_sb_info { unsigned char s_session; unsigned int s_high_sierra:1; unsigned int s_rock:2; - unsigned int s_utf8:1; unsigned int s_cruft:1; /* Broken disks with high byte of length * containing junk */ unsigned int s_nocompress:1; diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c index be8b6a9d0b926..c0f04a1e7f695 100644 --- a/fs/isofs/joliet.c +++ b/fs/isofs/joliet.c @@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls) int get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode) { - unsigned char utf8; struct nls_table *nls; unsigned char len = 0; - utf8 = ISOFS_SB(inode->i_sb)->s_utf8; nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset; - if (utf8) { + if (!nls) { len = utf16s_to_utf8s((const wchar_t *) de->name, de->name_len[0] >> 1, UTF16_BIG_ENDIAN, outname, PAGE_SIZE); diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index a87a562734077..57558a8d92e9b 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -991,7 +991,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp) out_invalidcred: pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n"); - return rpc_autherr_badcred; + return svc_return_autherr(rqstp, rpc_autherr_badcred); } /* diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 25be2ab6ff6cc..0b4ee1ab25df0 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -517,8 +517,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, goto out_cleanup; } err = ovl_instantiate(dentry, inode, newdentry, hardlink); - if (err) - goto out_cleanup; + if (err) { + ovl_cleanup(udir, newdentry); + dput(newdentry); + } out_dput: dput(upper); out_unlock: diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index d7d2fdda4bbd0..3dbb5ac630e42 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1642,6 +1642,16 @@ static const char *ubifs_get_link(struct dentry *dentry, return fscrypt_get_symlink(inode, ui->data, ui->data_len, done); } +static int ubifs_symlink_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + ubifs_getattr(path, stat, request_mask, query_flags); + + if (IS_ENCRYPTED(d_inode(path->dentry))) + return fscrypt_symlink_getattr(path, stat); + return 0; +} + const struct address_space_operations ubifs_file_address_operations = { .readpage = ubifs_readpage, .writepage = ubifs_writepage, @@ -1669,7 +1679,7 @@ const struct inode_operations ubifs_file_inode_operations = { const struct inode_operations ubifs_symlink_inode_operations = { .get_link = ubifs_get_link, .setattr = ubifs_setattr, - .getattr = ubifs_getattr, + .getattr = ubifs_symlink_getattr, #ifdef CONFIG_UBIFS_FS_XATTR .listxattr = ubifs_listxattr, #endif diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 401e64cde1be0..853bcff51043f 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c @@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, else 
offset = le32_to_cpu(eahd->appAttrLocation); - while (offset < iinfo->i_lenEAttr) { + while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) { + uint32_t attrLength; + gaf = (struct genericFormat *)&ea[offset]; + attrLength = le32_to_cpu(gaf->attrLength); + + /* Detect undersized elements and buffer overflows */ + if ((attrLength < sizeof(*gaf)) || + (attrLength > (iinfo->i_lenEAttr - offset))) + break; + if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) return gaf; else - offset += le32_to_cpu(gaf->attrLength); + offset += attrLength; } } diff --git a/fs/udf/super.c b/fs/udf/super.c index c7f6243f318bb..9c71246e6d602 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -112,16 +112,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) return NULL; lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data; partnum = le32_to_cpu(lvid->numOfPartitions); - if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) - - offsetof(struct logicalVolIntegrityDesc, impUse)) / - (2 * sizeof(uint32_t)) < partnum) { - udf_err(sb, "Logical volume integrity descriptor corrupted " - "(numOfPartitions = %u)!\n", partnum); - return NULL; - } /* The offset is to skip freeSpaceTable and sizeTable arrays */ offset = partnum * 2 * sizeof(uint32_t); - return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); + return (struct logicalVolIntegrityDescImpUse *) + (((uint8_t *)(lvid + 1)) + offset); } /* UDF filesystem type */ @@ -1529,6 +1523,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; int indirections = 0; + u32 parts, impuselen; while (++indirections <= UDF_MAX_LVID_NESTING) { final_bh = NULL; @@ -1555,15 +1550,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; if (lvid->nextIntegrityExt.extLength == 0) - return; + goto check; loc = leea_to_cpu(lvid->nextIntegrityExt); } udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", UDF_MAX_LVID_NESTING); +out_err: brelse(sbi->s_lvid_bh); sbi->s_lvid_bh = NULL; + return; +check: + parts = le32_to_cpu(lvid->numOfPartitions); + impuselen = le32_to_cpu(lvid->lengthOfImpUse); + if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize || + sizeof(struct logicalVolIntegrityDesc) + impuselen + + 2 * parts * sizeof(u32) > sb->s_blocksize) { + udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), " + "ignoring.\n", parts, impuselen); + goto out_err; + } } /* diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index d269d1139f7ff..23c8efc967af2 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -32,11 +32,6 @@ static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly; -enum userfaultfd_state { - UFFD_STATE_WAIT_API, - UFFD_STATE_RUNNING, -}; - /* * Start with fault_pending_wqh and fault_wqh so they're more likely * to be in the same cacheline. 
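[Editor's aside, not part of the patch] The udf_get_extendedattr() and LVID checks above follow one pattern: never trust a length field read from disk while walking variable-length records. A generic userspace sketch of that pattern; the record layout here is invented for the example and is not the on-disk UDF format:

/*
 * Walk variable-length records inside a buffer while rejecting
 * undersized lengths and lengths that run past the buffer, mirroring
 * the checks added to udf_get_extendedattr() above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct record_hdr {
        uint32_t type;
        uint32_t length;        /* total record length, header included */
};

static const struct record_hdr *find_record(const uint8_t *buf, size_t buf_len,
                                             uint32_t wanted_type)
{
        size_t offset = 0;

        while (offset + sizeof(struct record_hdr) <= buf_len) {
                struct record_hdr hdr;

                memcpy(&hdr, buf + offset, sizeof(hdr));

                /* Detect undersized records and buffer overflows */
                if (hdr.length < sizeof(hdr) || hdr.length > buf_len - offset)
                        return NULL;

                if (hdr.type == wanted_type)
                        return (const struct record_hdr *)(buf + offset);

                offset += hdr.length;
        }
        return NULL;
}

int main(void)
{
        uint8_t buf[64] = { 0 };
        struct record_hdr a = { .type = 1, .length = 16 };
        struct record_hdr b = { .type = 2, .length = 12 };

        memcpy(buf, &a, sizeof(a));
        memcpy(buf + 16, &b, sizeof(b));

        printf("found type 2: %s\n",
               find_record(buf, sizeof(buf), 2) ? "yes" : "no");
        return 0;
}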
@@ -68,8 +63,6 @@ struct userfaultfd_ctx { unsigned int flags; /* features requested from the userspace */ unsigned int features; - /* state machine */ - enum userfaultfd_state state; /* released */ bool released; /* memory mappings are changing because of non-cooperative event */ @@ -103,6 +96,14 @@ struct userfaultfd_wake_range { unsigned long len; }; +/* internal indication that UFFD_API ioctl was successfully executed */ +#define UFFD_FEATURE_INITIALIZED (1u << 31) + +static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) +{ + return ctx->features & UFFD_FEATURE_INITIALIZED; +} + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, int wake_flags, void *key) { @@ -700,7 +701,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) atomic_set(&ctx->refcount, 1); ctx->flags = octx->flags; - ctx->state = UFFD_STATE_RUNNING; ctx->features = octx->features; ctx->released = false; ctx->mmap_changing = false; @@ -981,38 +981,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) poll_wait(file, &ctx->fd_wqh, wait); - switch (ctx->state) { - case UFFD_STATE_WAIT_API: + if (!userfaultfd_is_initialized(ctx)) return EPOLLERR; - case UFFD_STATE_RUNNING: - /* - * poll() never guarantees that read won't block. - * userfaults can be waken before they're read(). - */ - if (unlikely(!(file->f_flags & O_NONBLOCK))) - return EPOLLERR; - /* - * lockless access to see if there are pending faults - * __pollwait last action is the add_wait_queue but - * the spin_unlock would allow the waitqueue_active to - * pass above the actual list_add inside - * add_wait_queue critical section. So use a full - * memory barrier to serialize the list_add write of - * add_wait_queue() with the waitqueue_active read - * below. - */ - ret = 0; - smp_mb(); - if (waitqueue_active(&ctx->fault_pending_wqh)) - ret = EPOLLIN; - else if (waitqueue_active(&ctx->event_wqh)) - ret = EPOLLIN; - - return ret; - default: - WARN_ON_ONCE(1); + + /* + * poll() never guarantees that read won't block. + * userfaults can be waken before they're read(). + */ + if (unlikely(!(file->f_flags & O_NONBLOCK))) return EPOLLERR; - } + /* + * lockless access to see if there are pending faults + * __pollwait last action is the add_wait_queue but + * the spin_unlock would allow the waitqueue_active to + * pass above the actual list_add inside + * add_wait_queue critical section. So use a full + * memory barrier to serialize the list_add write of + * add_wait_queue() with the waitqueue_active read + * below. + */ + ret = 0; + smp_mb(); + if (waitqueue_active(&ctx->fault_pending_wqh)) + ret = EPOLLIN; + else if (waitqueue_active(&ctx->event_wqh)) + ret = EPOLLIN; + + return ret; } static const struct file_operations userfaultfd_fops; @@ -1206,7 +1201,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf, struct uffd_msg msg; int no_wait = file->f_flags & O_NONBLOCK; - if (ctx->state == UFFD_STATE_WAIT_API) + if (!userfaultfd_is_initialized(ctx)) return -EINVAL; for (;;) { @@ -1808,9 +1803,10 @@ out: static inline unsigned int uffd_ctx_features(__u64 user_features) { /* - * For the current set of features the bits just coincide + * For the current set of features the bits just coincide. Set + * UFFD_FEATURE_INITIALIZED to mark the features as enabled. 
*/ - return (unsigned int)user_features; + return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; } /* @@ -1823,12 +1819,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, { struct uffdio_api uffdio_api; void __user *buf = (void __user *)arg; + unsigned int ctx_features; int ret; __u64 features; - ret = -EINVAL; - if (ctx->state != UFFD_STATE_WAIT_API) - goto out; ret = -EFAULT; if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) goto out; @@ -1845,9 +1839,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, ret = -EFAULT; if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) goto out; - ctx->state = UFFD_STATE_RUNNING; + /* only enable the requested features for this uffd context */ - ctx->features = uffd_ctx_features(features); + ctx_features = uffd_ctx_features(features); + ret = -EINVAL; + if (cmpxchg(&ctx->features, 0, ctx_features) != 0) + goto err_out; + ret = 0; out: return ret; @@ -1864,7 +1862,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd, int ret = -EINVAL; struct userfaultfd_ctx *ctx = file->private_data; - if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) + if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) return -EINVAL; switch(cmd) { @@ -1962,7 +1960,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) atomic_set(&ctx->refcount, 1); ctx->flags = flags; ctx->features = 0; - ctx->state = UFFD_STATE_WAIT_API; ctx->released = false; ctx->mmap_changing = false; ctx->mm = current->mm; diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index e0b681a717bac..052e26fda2e6c 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -35,9 +35,9 @@ extern void public_key_free(struct public_key *key); struct public_key_signature { struct asymmetric_key_id *auth_ids[2]; u8 *s; /* Signature */ - u32 s_size; /* Number of bytes in signature */ u8 *digest; - u8 digest_size; /* Number of bytes in digest */ + u32 s_size; /* Number of bytes in signature */ + u32 digest_size; /* Number of bytes in digest */ const char *pkey_algo; const char *hash_algo; }; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1c8517320ea64..4acd06cca703d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -41,6 +41,7 @@ enum bpf_reg_liveness { }; struct bpf_reg_state { + /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ @@ -62,7 +63,6 @@ struct bpf_reg_state { * came from, when one is tested for != NULL. */ u32 id; - /* Ordering of fields matters. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -79,15 +79,15 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like * R1=fp-8 and R2=fp-8, but one of them points to this function stack * while another to the caller's stack. To differentiate them 'frameno' * is used which is an index in bpf_verifier_state->frame[] array * pointing to bpf_func_state. - * This field must be second to last, for states_equal() reasons. */ u32 frameno; - /* This field must be last, for states_equal() reasons. 
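[Editor's aside, not part of the patch] The userfaultfd rework above drops the explicit state machine in favour of an UFFD_FEATURE_INITIALIZED bit, but the userspace contract is unchanged: UFFDIO_API must succeed once before poll(), read(), or any other ioctl. A minimal handshake sketch (Linux-only, may require privileges; the expectation that a second UFFDIO_API fails is inferred from the cmpxchg() above):

/*
 * Minimal userfaultfd handshake. UFFDIO_API must be issued once before
 * the fd is usable, which is what UFFD_FEATURE_INITIALIZED now tracks.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct uffdio_api api;
        int uffd;

        uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) {
                perror("userfaultfd");
                return 1;
        }

        memset(&api, 0, sizeof(api));
        api.api = UFFD_API;
        api.features = 0;               /* request no optional features */

        if (ioctl(uffd, UFFDIO_API, &api) != 0) {
                perror("UFFDIO_API");
                return 1;
        }

        /* With the cmpxchg-based check above, a second UFFDIO_API on the
         * same fd is expected to fail with -EINVAL. */
        if (ioctl(uffd, UFFDIO_API, &api) == 0)
                fprintf(stderr, "unexpected: second UFFDIO_API succeeded\n");

        printf("features granted: 0x%llx\n", (unsigned long long)api.features);
        close(uffd);
        return 0;
}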
*/ enum bpf_reg_liveness live; }; @@ -110,7 +110,6 @@ struct bpf_stack_state { */ struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; - struct bpf_verifier_state *parent; /* index of call instruction that called into this func */ int callsite; /* stack frame number of this function state from pov of @@ -128,11 +127,17 @@ struct bpf_func_state { struct bpf_stack_state *stack; }; +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +/* Maximum number of register states that can exist at once */ +#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; - struct bpf_verifier_state *parent; u32 curframe; bool speculative; }; @@ -160,8 +165,8 @@ struct bpf_insn_aux_data { u32 alu_limit; /* limit for add/sub register with pointer */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ u8 alu_state; /* used in combination with alu_limit */ }; @@ -210,11 +215,13 @@ struct bpf_verifier_env { struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; u32 subprog_cnt; }; diff --git a/include/linux/filter.h b/include/linux/filter.h index 7c84762cb59e5..e981bd92a4e3a 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -64,6 +64,11 @@ struct sock_reuseport; /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. 
@@ -354,6 +359,16 @@ struct sock_reuseport; .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 93304cfeb6016..6e95c5a9b7fa1 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -234,4 +234,10 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, return ERR_PTR(-EOPNOTSUPP); } +static inline int fscrypt_symlink_getattr(const struct path *path, + struct kstat *stat) +{ + return -EOPNOTSUPP; +} + #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index 0409c14ae1de4..f4cb4871a987f 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -231,5 +231,6 @@ extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); #endif /* _LINUX_FSCRYPT_SUPP_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 2df83a6598182..cb7dc38e9c779 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -513,6 +513,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); +static inline void hugetlb_count_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->hugetlb_usage, 0); +} + static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); @@ -593,6 +598,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, return &mm->page_table_lock; } +static inline void hugetlb_count_init(struct mm_struct *mm) +{ +} + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } diff --git a/include/linux/list.h b/include/linux/list.h index de04cc5ed5367..d2c12ef7a4e32 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -484,6 +484,15 @@ static inline void list_splice_tail_init(struct list_head *list, pos != (head); \ pos = n, n = pos->prev) +/** + * list_entry_is_head - test if the entry points to the head of the list + * @pos: the type * to cursor + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. 
@@ -492,7 +501,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -503,7 +512,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -528,7 +537,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -542,7 +551,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -554,7 +563,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; &pos->member != (head); \ + for (; !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -567,7 +576,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate backwards over list of given type, continuing from current position. */ #define list_for_each_entry_from_reverse(pos, head, member) \ - for (; &pos->member != (head); \ + for (; !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -580,7 +589,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -596,7 +605,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_next_entry(pos, member), \ n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -611,7 +620,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -627,7 +636,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member), \ n = list_prev_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_prev_entry(n, member)) /** diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d17d45c41a0b0..5653178768227 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -344,6 +344,6 @@ extern struct page 
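[Editor's aside, not part of the patch] list_entry_is_head() above only names the "&pos->member == head" termination test that the iteration macros already used. A self-contained userspace sketch of the same intrusive-list pattern; this is a simplified re-implementation for illustration, not <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member)   container_of(ptr, type, member)
#define list_first_entry(head, type, member) \
        list_entry((head)->next, type, member)
#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)

/* test if the entry points to the head of the list */
#define list_entry_is_head(pos, head, member)   (&(pos)->member == (head))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_first_entry(head, typeof(*pos), member);        \
             !list_entry_is_head(pos, head, member);                    \
             pos = list_next_entry(pos, member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct item { int value; struct list_head node; };

int main(void)
{
        struct list_head head = { &head, &head };
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *pos;

        list_add_tail(&a.node, &head);
        list_add_tail(&b.node, &head);

        list_for_each_entry(pos, &head, node)
                printf("%d\n", pos->value);     /* prints 1 then 2 */
        return 0;
}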
*sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type); -extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, - unsigned long nr_pages); +extern struct zone *zone_for_pfn_range(int online_type, int nid, + unsigned long start_pfn, unsigned long nr_pages); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 2fda9893962d1..6f9ca2f278b32 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1644,8 +1644,9 @@ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } static inline void pci_disable_device(struct pci_dev *dev) { } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __pci_register_driver(struct pci_driver *drv, - struct module *owner) +static inline int __must_check __pci_register_driver(struct pci_driver *drv, + struct module *owner, + const char *mod_name) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index a7ed29baf44a6..86e5ad8aeee4e 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -82,7 +82,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAx17042_TOFF = 0x2D, + MAX17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5f2e6451ece54..f97734f34746a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1761,7 +1761,7 @@ static inline void __skb_insert(struct sk_buff *newsk, WRITE_ONCE(newsk->prev, prev); WRITE_ONCE(next->prev, newsk); WRITE_ONCE(prev->next, newsk); - list->qlen++; + WRITE_ONCE(list->qlen, list->qlen + 1); } static inline void __skb_queue_splice(const struct sk_buff_head *list, diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fdb6b317d9747..c46abf35c9bb6 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -271,6 +271,7 @@ struct svc_rqst { #define RQ_VICTIM (5) /* about to be shut down */ #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ +#define RQ_AUTHERR (8) /* Request status is auth error */ unsigned long rq_flags; /* flags field */ ktime_t rq_qtime; /* enqueue time */ @@ -504,6 +505,7 @@ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); +__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err); #define RPC_MAX_ADDRBUFLEN (63U) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8975fd1a1421f..24ce2aab89469 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -779,6 +779,8 @@ struct tc_codel_xstats { /* FQ_CODEL */ +#define FQ_CODEL_QUANTUM_MAX (1 << 20) + enum { TCA_FQ_CODEL_UNSPEC, TCA_FQ_CODEL_TARGET, diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index be07b5470f4bb..f51bc8f368134 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -62,6 +62,7 @@ * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 * TI16C752: 8 16 56 60 8 16 32 56 + * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950 * 
Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA */ #define UART_FCR_R_TRIG_00 0x00 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d2b6d2459aad4..341402bc1202d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -33,6 +33,7 @@ #include #include +#include #include /* Registers */ @@ -1050,6 +1051,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) /* Non-UAPI available opcodes. */ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, + [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC, }; #undef BPF_INSN_3_LBL #undef BPF_INSN_2_LBL @@ -1356,7 +1358,21 @@ out: JMP_EXIT: return BPF_R0; - /* STX and ST and LDX*/ + /* ST, STX and LDX*/ + ST_NOSPEC: + /* Speculation barrier for mitigating Speculative Store Bypass. + * In case of arm64, we rely on the firmware mitigation as + * controlled via the ssbd kernel parameter. Whenever the + * mitigation is enabled, it works for all of the kernel code + * with no need to provide any additional instructions here. + * In case of x86, we use 'lfence' insn for mitigation. We + * reuse preexisting logic from Spectre v1 mitigation that + * happens to produce the required code on x86 for v4 as well. + */ +#ifdef CONFIG_X86 + barrier_nospec(); +#endif + CONT; #define LDST(SIZEOP, SIZE) \ STX_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index d6b76377cb6ee..cbd75dd5992ef 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -171,15 +171,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, else verbose(cbs->private_data, "BUG_%02x\n", insn->code); } else if (class == BPF_ST) { - if (BPF_MODE(insn->code) != BPF_MEM) { + if (BPF_MODE(insn->code) == BPF_MEM) { + verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->imm); + } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) { + verbose(cbs->private_data, "(%02x) nospec\n", insn->code); + } else { verbose(cbs->private_data, "BUG_st_%02x\n", insn->code); - return; } - verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", - insn->code, - bpf_ldst_string[BPF_SIZE(insn->code) >> 3], - insn->dst_reg, - insn->off, insn->imm); } else if (class == BPF_LDX) { if (BPF_MODE(insn->code) != BPF_MEM) { verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index abdc9eca463c5..9a671f604ebfe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -380,9 +380,9 @@ static int copy_stack_state(struct bpf_func_state *dst, /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. - * Note there is a non-zero 'parent' pointer inside bpf_verifier_state - * which this function copies over. It points to previous bpf_verifier_state - * which is never reallocated + * Note there is a non-zero parent pointer inside each reg of bpf_verifier_state + * which this function copies over. 
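[Editor's aside, not part of the patch] The BPF_NOSPEC handling above reduces to a speculation barrier: on x86 the interpreter calls barrier_nospec(), which per the comment comes down to lfence. A hedged userspace sketch of such a barrier; the kernel selects the real instruction via alternatives at boot, so the hard-coded lfence below is an illustration under that assumption, not the kernel implementation:

/*
 * Illustrative store-bypass barrier comparable in spirit to what the
 * interpreter emits for BPF_ST | BPF_NOSPEC on x86 (x86-64 only here).
 */
#include <stdio.h>

static inline void nospec_barrier(void)
{
#if defined(__x86_64__)
        __asm__ __volatile__("lfence" ::: "memory");
#else
        __asm__ __volatile__("" ::: "memory");  /* compiler barrier only */
#endif
}

int main(void)
{
        volatile int slot = 0;

        slot = 42;              /* store that must not be bypassed ...      */
        nospec_barrier();       /* ... by speculative execution of later loads */
        printf("%d\n", slot);
        return 0;
}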
It points to corresponding reg in previous + * bpf_verifier_state which is never reallocated */ static int realloc_func_state(struct bpf_func_state *state, int size, bool copy_old) @@ -467,7 +467,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, } dst_state->speculative = src->speculative; dst_state->curframe = src->curframe; - dst_state->parent = src->parent; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -739,6 +738,7 @@ static void init_reg_state(struct bpf_verifier_env *env, for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; + regs[i].parent = NULL; } /* frame pointer */ @@ -883,74 +883,21 @@ next: return 0; } -static -struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) -{ - struct bpf_verifier_state *tmp = NULL; - - /* 'parent' could be a state of caller and - * 'state' could be a state of callee. In such case - * parent->curframe < state->curframe - * and it's ok for r1 - r5 registers - * - * 'parent' could be a callee's state after it bpf_exit-ed. - * In such case parent->curframe > state->curframe - * and it's ok for r0 only - */ - if (parent->curframe == state->curframe || - (parent->curframe < state->curframe && - regno >= BPF_REG_1 && regno <= BPF_REG_5) || - (parent->curframe > state->curframe && - regno == BPF_REG_0)) - return parent; - - if (parent->curframe > state->curframe && - regno >= BPF_REG_6) { - /* for callee saved regs we have to skip the whole chain - * of states that belong to callee and mark as LIVE_READ - * the registers before the call - */ - tmp = parent; - while (tmp && tmp->curframe != state->curframe) { - tmp = tmp->parent; - } - if (!tmp) - goto bug; - parent = tmp; - } else { - goto bug; - } - return parent; -bug: - verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); - verbose(env, "regno %d parent frame %d current frame %d\n", - regno, parent->curframe, state->curframe); - return NULL; -} - +/* Parentage chain of this register (or stack slot) should take care of all + * issues like callee-saved registers, stack slot allocation time, etc. + */ static int mark_reg_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) + const struct bpf_reg_state *state, + struct bpf_reg_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ - if (regno == BPF_REG_FP) - /* We don't need to worry about FP liveness because it's read-only */ - return 0; - while (parent) { /* if read wasn't screened by an earlier write ... */ - if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) + if (writes && state->live & REG_LIVE_WRITTEN) break; - parent = skip_callee(env, state, parent, regno); - if (!parent) - return -EFAULT; /* ... 
then we depend on parent's value */ - parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; + parent->live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; @@ -976,7 +923,10 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, verbose(env, "R%d !read_ok\n", regno); return -EACCES; } - return mark_reg_read(env, vstate, vstate->parent, regno); + /* We don't need to worry about FP liveness because it's read-only */ + if (regno != BPF_REG_FP) + return mark_reg_read(env, ®s[regno], + regs[regno].parent); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { @@ -1013,6 +963,23 @@ static bool register_is_null(struct bpf_reg_state *reg) return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } +static bool register_is_const(struct bpf_reg_state *reg) +{ + return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); +} + +static void save_register_state(struct bpf_func_state *state, + int spi, struct bpf_reg_state *reg) +{ + int i; + + state->stack[spi].spilled_ptr = *reg; + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + + for (i = 0; i < BPF_REG_SIZE; i++) + state->stack[spi].slot_type[i] = STACK_SPILL; +} + /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ @@ -1022,7 +989,7 @@ static int check_stack_write(struct bpf_verifier_env *env, { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; - enum bpf_reg_type type; + struct bpf_reg_state *reg = NULL; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), true); @@ -1039,56 +1006,45 @@ static int check_stack_write(struct bpf_verifier_env *env, } cur = env->cur_state->frame[env->cur_state->curframe]; - if (value_regno >= 0 && - is_spillable_regtype((type = cur->regs[value_regno].type))) { + if (value_regno >= 0) + reg = &cur->regs[value_regno]; + if (!env->allow_ptr_leaks) { + bool sanitize = reg && is_spillable_regtype(reg->type); + for (i = 0; i < size; i++) { + if (state->stack[spi].slot_type[i] == STACK_INVALID) { + sanitize = true; + break; + } + } + + if (sanitize) + env->insn_aux_data[insn_idx].sanitize_stack_spill = true; + } + + if (reg && size == BPF_REG_SIZE && register_is_const(reg) && + !register_is_null(reg) && env->allow_ptr_leaks) { + save_register_state(state, spi, reg); + } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } - - if (state != cur && type == PTR_TO_STACK) { + if (state != cur && reg->type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - - /* save register state */ - state->stack[spi].spilled_ptr = cur->regs[value_regno]; - state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; - - for (i = 0; i < BPF_REG_SIZE; i++) { - if (state->stack[spi].slot_type[i] == STACK_MISC && - !env->allow_ptr_leaks) { - int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; - int soff = (-spi - 1) * BPF_REG_SIZE; - - /* detected reuse of integer stack slot with a pointer - * which means either llvm is reusing stack slot or - * an attacker is trying to exploit CVE-2018-3639 - * (speculative store bypass) - * Have to sanitize that slot with preemptive - * store of zero. 
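[Editor's aside, not part of the patch] The reworked mark_reg_read() above walks the per-register parentage chain and stops as soon as the read is screened by a state that already wrote the register. The same loop, stripped to a standalone sketch with invented types and flag values:

#include <stdio.h>

#define LIVE_WRITTEN    0x1
#define LIVE_READ       0x2

struct reg_node {
        struct reg_node *parent;        /* parentage chain */
        unsigned int live;              /* LIVE_* flags */
};

static void mark_reg_read(const struct reg_node *state, struct reg_node *parent)
{
        int writes = (parent == state->parent); /* observe write marks */

        while (parent) {
                /* if the read wasn't screened by an earlier write ... */
                if (writes && (state->live & LIVE_WRITTEN))
                        break;
                /* ... then we depend on the parent's value */
                parent->live |= LIVE_READ;
                state = parent;
                parent = state->parent;
                writes = 1;
        }
}

int main(void)
{
        struct reg_node grandparent = { .parent = NULL, .live = 0 };
        struct reg_node parent = { .parent = &grandparent, .live = LIVE_WRITTEN };
        struct reg_node child = { .parent = &parent, .live = 0 };

        mark_reg_read(&child, child.parent);
        /* parent wrote the register, so the read never reaches grandparent */
        printf("parent READ=%d grandparent READ=%d\n",
               !!(parent.live & LIVE_READ), !!(grandparent.live & LIVE_READ));
        return 0;
}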
- */ - if (*poff && *poff != soff) { - /* disallow programs where single insn stores - * into two different stack slots, since verifier - * cannot sanitize them - */ - verbose(env, - "insn %d cannot access two stack slots fp%d and fp%d", - insn_idx, *poff, soff); - return -EINVAL; - } - *poff = soff; - } - state->stack[spi].slot_type[i] = STACK_SPILL; - } + save_register_state(state, spi, reg); } else { u8 type = STACK_MISC; - /* regular write of data into stack */ - state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; + /* regular write of data into stack destroys any spilled ptr */ + state->stack[spi].spilled_ptr.type = NOT_INIT; + /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ + if (state->stack[spi].slot_type[0] == STACK_SPILL) + for (i = 0; i < BPF_REG_SIZE; i++) + state->stack[spi].slot_type[i] = STACK_MISC; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -1102,10 +1058,10 @@ static int check_stack_write(struct bpf_verifier_env *env, state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; /* when we zero initialize stack slots mark them as such */ - if (value_regno >= 0 && - register_is_null(&cur->regs[value_regno])) + if (reg && register_is_null(reg)) type = STACK_ZERO; + /* Mark slots affected by this stack write. */ for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; @@ -1113,61 +1069,6 @@ static int check_stack_write(struct bpf_verifier_env *env, return 0; } -/* registers of every function are unique and mark_reg_read() propagates - * the liveness in the following cases: - * - from callee into caller for R1 - R5 that were used as arguments - * - from caller into callee for R0 that used as result of the call - * - from caller to the same caller skipping states of the callee for R6 - R9, - * since R6 - R9 are callee saved by implicit function prologue and - * caller's R6 != callee's R6, so when we propagate liveness up to - * parent states we need to skip callee states for R6 - R9. - * - * stack slot marking is different, since stacks of caller and callee are - * accessible in both (since caller can pass a pointer to caller's stack to - * callee which can pass it to another function), hence mark_stack_slot_read() - * has to propagate the stack liveness to all parent states at given frame number. - * Consider code: - * f1() { - * ptr = fp - 8; - * *ptr = ctx; - * call f2 { - * .. = *ptr; - * } - * .. = *ptr; - * } - * First *ptr is reading from f1's stack and mark_stack_slot_read() has - * to mark liveness at the f1's frame and not f2's frame. - * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has - * to propagate liveness to f2 states at f1's frame level and further into - * f1 states at f1's frame level until write into that stack slot - */ -static void mark_stack_slot_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - int slot, int frameno) -{ - bool writes = parent == state->parent; /* Observe write marks */ - - while (parent) { - if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE) - /* since LIVE_WRITTEN mark is only done for full 8-byte - * write the read marks are conservative and parent - * state may not even have the stack allocated. In such case - * end the propagation, since the loop reached beginning - * of the function - */ - break; - /* if read wasn't screened by an earlier write ... 
*/ - if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) - break; - /* ... then we depend on parent's value */ - parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ; - state = parent; - parent = state->parent; - writes = true; - } -} - static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) @@ -1175,6 +1076,7 @@ static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; + struct bpf_reg_state *reg; u8 *stype; if (reg_state->allocated_stack <= slot) { @@ -1183,11 +1085,20 @@ static int check_stack_read(struct bpf_verifier_env *env, return -EACCES; } stype = reg_state->stack[spi].slot_type; + reg = ®_state->stack[spi].spilled_ptr; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { - verbose(env, "invalid size of register spill\n"); - return -EACCES; + if (reg->type != SCALAR_VALUE) { + verbose(env, "invalid size of register fill\n"); + return -EACCES; + } + if (value_regno >= 0) { + mark_reg_unknown(env, state->regs, value_regno); + state->regs[value_regno].live |= REG_LIVE_WRITTEN; + } + mark_reg_read(env, reg, reg->parent); + return 0; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { @@ -1198,16 +1109,14 @@ static int check_stack_read(struct bpf_verifier_env *env, if (value_regno >= 0) { /* restore register state from stack */ - state->regs[value_regno] = reg_state->stack[spi].spilled_ptr; + state->regs[value_regno] = *reg; /* mark reg as written since spilled pointer state likely * has its liveness marks cleared by is_state_visited() * which resets stack/reg liveness for state transitions */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); - return 0; + mark_reg_read(env, reg, reg->parent); } else { int zeros = 0; @@ -1222,8 +1131,7 @@ static int check_stack_read(struct bpf_verifier_env *env, off, i, size); return -EACCES; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); + mark_reg_read(env, reg, reg->parent); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, @@ -1236,8 +1144,8 @@ static int check_stack_read(struct bpf_verifier_env *env, } state->regs[value_regno].live |= REG_LIVE_WRITTEN; } - return 0; } + return 0; } static int check_stack_access(struct bpf_verifier_env *env, @@ -1855,6 +1763,29 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins BPF_SIZE(insn->code), BPF_WRITE, -1, true); } +static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, + int off, int access_size, + bool zero_size_allowed) +{ + struct bpf_reg_state *reg = cur_regs(env) + regno; + + if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || + access_size < 0 || (access_size == 0 && !zero_size_allowed)) { + if (tnum_is_const(reg->var_off)) { + verbose(env, "invalid stack type R%d off=%d access_size=%d\n", + regno, off, access_size); + } else { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", + regno, tn_buf, access_size); + } + return -EACCES; + } + return 0; +} + /* when register 'regno' is passed into function that will read 
'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. @@ -1867,7 +1798,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, { struct bpf_reg_state *reg = cur_regs(env) + regno; struct bpf_func_state *state = func(env, reg); - int off, i, slot, spi; + int err, min_off, max_off, i, j, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ @@ -1881,21 +1812,57 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, return -EACCES; } - /* Only allow fixed-offset stack reads */ - if (!tnum_is_const(reg->var_off)) { - char tn_buf[48]; + if (tnum_is_const(reg->var_off)) { + min_off = max_off = reg->var_off.value + reg->off; + err = __check_stack_boundary(env, regno, min_off, access_size, + zero_size_allowed); + if (err) + return err; + } else { + /* Variable offset is prohibited for unprivileged mode for + * simplicity since it requires corresponding support in + * Spectre masking for stack ALU. + * See also retrieve_ptr_limit(). + */ + if (!env->allow_ptr_leaks) { + char tn_buf[48]; - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(env, "invalid variable stack read R%d var_off=%s\n", - regno, tn_buf); - return -EACCES; - } - off = reg->off + reg->var_off.value; - if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || - access_size < 0 || (access_size == 0 && !zero_size_allowed)) { - verbose(env, "invalid stack type R%d off=%d access_size=%d\n", - regno, off, access_size); - return -EACCES; + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", + regno, tn_buf); + return -EACCES; + } + /* Only initialized buffer on stack is allowed to be accessed + * with variable offset. With uninitialized buffer it's hard to + * guarantee that whole memory is marked as initialized on + * helper return since specific bounds are unknown what may + * cause uninitialized stack leaking. 
+ */ + if (meta && meta->raw_mode) + meta = NULL; + + if (reg->smax_value >= BPF_MAX_VAR_OFF || + reg->smax_value <= -BPF_MAX_VAR_OFF) { + verbose(env, "R%d unbounded indirect variable offset stack access\n", + regno); + return -EACCES; + } + min_off = reg->smin_value + reg->off; + max_off = reg->smax_value + reg->off; + err = __check_stack_boundary(env, regno, min_off, access_size, + zero_size_allowed); + if (err) { + verbose(env, "R%d min value is outside of stack bound\n", + regno); + return err; + } + err = __check_stack_boundary(env, regno, max_off, access_size, + zero_size_allowed); + if (err) { + verbose(env, "R%d max value is outside of stack bound\n", + regno); + return err; + } } if (meta && meta->raw_mode) { @@ -1904,10 +1871,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, return 0; } - for (i = 0; i < access_size; i++) { + for (i = min_off; i < max_off + access_size; i++) { u8 *stype; - slot = -(off + i) - 1; + slot = -i - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; @@ -1919,18 +1886,34 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, *stype = STACK_MISC; goto mark; } + if (state->stack[spi].slot_type[0] == STACK_SPILL && + state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { + __mark_reg_unknown(&state->stack[spi].spilled_ptr); + for (j = 0; j < BPF_REG_SIZE; j++) + state->stack[spi].slot_type[j] = STACK_MISC; + goto mark; + } + err: - verbose(env, "invalid indirect read from stack off %d+%d size %d\n", - off, i, access_size); + if (tnum_is_const(reg->var_off)) { + verbose(env, "invalid indirect read from stack off %d+%d size %d\n", + min_off, i - min_off, access_size); + } else { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n", + tn_buf, i - min_off, access_size); + } return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ - mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, - spi, state->frameno); + mark_reg_read(env, &state->stack[spi].spilled_ptr, + state->stack[spi].spilled_ptr.parent); } - return update_stack_depth(env, state, off); + return update_stack_depth(env, state, min_off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, @@ -2384,11 +2367,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); - /* copy r1 - r5 args that callee can access */ + /* copy r1 - r5 args that callee can access. The copy includes parent + * pointers, which connects us up to the liveness chain + */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; - /* after the call regsiters r0 - r5 were scratched */ + /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, caller->regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); @@ -2886,6 +2871,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; alu_state |= ptr_is_dst_reg ? BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + + /* Limit pruning on unknown scalars to enable deep search for + * potential masking differences from other program paths. 
+ */ + if (!off_is_imm) + env->explore_alu_limits = true; } err = update_alu_sanitation_state(aux, alu_state, alu_limit); @@ -4798,13 +4789,6 @@ static bool range_within(struct bpf_reg_state *old, old->smax_value >= cur->smax_value; } -/* Maximum number of register states that can exist at once */ -#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) -struct idpair { - u32 old; - u32 cur; -}; - /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. @@ -4815,11 +4799,11 @@ struct idpair { * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ -static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) +static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) { unsigned int i; - for (i = 0; i < ID_MAP_SIZE; i++) { + for (i = 0; i < BPF_ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; @@ -4835,8 +4819,8 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) } /* Returns true if (rold safe implies rcur safe) */ -static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, - struct idpair *idmap) +static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, + struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) { bool equal; @@ -4844,7 +4828,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, /* explored state didn't use this */ return true; - equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; + equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to @@ -4862,6 +4846,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; switch (rold->type) { case SCALAR_VALUE: + if (env->explore_alu_limits) + return false; if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && @@ -4938,9 +4924,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; } -static bool stacksafe(struct bpf_func_state *old, - struct bpf_func_state *cur, - struct idpair *idmap) +static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, + struct bpf_func_state *cur, struct bpf_id_pair *idmap) { int i, spi; @@ -4982,9 +4967,8 @@ static bool stacksafe(struct bpf_func_state *old, continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; - if (!regsafe(&old->stack[spi].spilled_ptr, - &cur->stack[spi].spilled_ptr, - idmap)) + if (!regsafe(env, &old->stack[spi].spilled_ptr, + &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. 
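[Editor's aside, not part of the patch] check_ids() above records old->cur id pairings in the scratch map and insists that a given old id always maps to the same cur id. A tiny standalone demo of that contract; the map size and the driver in main() are made up for the example:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ID_MAP_SIZE 16          /* arbitrary size for the demo */

struct id_pair { unsigned int old, cur; };

static bool check_ids(unsigned int old_id, unsigned int cur_id,
                      struct id_pair *idmap)
{
        unsigned int i;

        for (i = 0; i < ID_MAP_SIZE; i++) {
                if (!idmap[i].old) {
                        /* empty slot: haven't seen this id before, record it */
                        idmap[i].old = old_id;
                        idmap[i].cur = cur_id;
                        return true;
                }
                if (idmap[i].old == old_id)
                        return idmap[i].cur == cur_id;
        }
        /* ran out of slots: be conservative */
        return false;
}

int main(void)
{
        struct id_pair map[ID_MAP_SIZE];

        memset(map, 0, sizeof(map));
        printf("%d\n", check_ids(1, 7, map));   /* 1: new pairing 1->7 */
        printf("%d\n", check_ids(1, 7, map));   /* 1: consistent */
        printf("%d\n", check_ids(1, 9, map));   /* 0: 1 already maps to 7 */
        return 0;
}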
@@ -5026,29 +5010,21 @@ static bool stacksafe(struct bpf_func_state *old, * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ -static bool func_states_equal(struct bpf_func_state *old, +static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur) { - struct idpair *idmap; - bool ret = false; int i; - idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); - /* If we failed to allocate the idmap, just say it's not safe */ - if (!idmap) - return false; + memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); + for (i = 0; i < MAX_BPF_REG; i++) + if (!regsafe(env, &old->regs[i], &cur->regs[i], + env->idmap_scratch)) + return false; - for (i = 0; i < MAX_BPF_REG; i++) { - if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) - goto out_free; - } + if (!stacksafe(env, old, cur, env->idmap_scratch)) + return false; - if (!stacksafe(old, cur, idmap)) - goto out_free; - ret = true; -out_free: - kfree(idmap); - return ret; + return true; } static bool states_equal(struct bpf_verifier_env *env, @@ -5072,7 +5048,7 @@ static bool states_equal(struct bpf_verifier_env *env, for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; - if (!func_states_equal(old->frame[i], cur->frame[i])) + if (!func_states_equal(env, old->frame[i], cur->frame[i])) return false; } return true; @@ -5083,7 +5059,7 @@ static bool states_equal(struct bpf_verifier_env *env, * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison - * in mark_reg_read() and mark_stack_slot_read() is for. + * in mark_reg_read() is for. 
*/ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, @@ -5104,7 +5080,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { - err = mark_reg_read(env, vstate, vparent, i); + err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], + &vparent->frame[vstate->curframe]->regs[i]); if (err) return err; } @@ -5119,7 +5096,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) - mark_stack_slot_read(env, vstate, vparent, i, frame); + mark_reg_read(env, &state->stack[i].spilled_ptr, + &parent->stack[i].spilled_ptr); } } return err; @@ -5129,7 +5107,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; - struct bpf_verifier_state *cur = env->cur_state; + struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; sl = env->explored_states[insn_idx]; @@ -5175,16 +5153,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return -ENOMEM; /* add new state to the head of linked list */ - err = copy_verifier_state(&new_sl->state, cur); + new = &new_sl->state; + err = copy_verifier_state(new, cur); if (err) { - free_verifier_state(&new_sl->state, false); + free_verifier_state(new, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ - cur->parent = &new_sl->state; + for (i = 0; i < BPF_REG_FP; i++) + cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i]; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. 
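mark_reg_read() walks the parent pointers that the surrounding hunks wire up, and a write mark "screens off" older states from a child's reads. Below is a toy model of that walk; LIVE_READ/LIVE_WRITTEN are simplified stand-ins for the REG_LIVE_* bits, and all of the verifier's per-frame bookkeeping is omitted.

#include <stdio.h>

#define LIVE_READ	0x1
#define LIVE_WRITTEN	0x2

struct reg_state {
	int live;			/* LIVE_* flags */
	struct reg_state *parent;	/* state this one branched off from */
};

/*
 * Propagate a read up the parentage chain.  A state that wrote the register
 * satisfies the read itself, so its own ancestors must not be marked: the
 * write "screens them off".
 */
static void mark_reg_read(struct reg_state *state)
{
	struct reg_state *parent = state->parent;

	while (parent) {
		if (state->live & LIVE_WRITTEN)
			break;
		parent->live |= LIVE_READ;
		state = parent;
		parent = state->parent;
	}
}

int main(void)
{
	struct reg_state grandparent = { 0, NULL };
	struct reg_state parent = { LIVE_WRITTEN, &grandparent };
	struct reg_state child = { 0, &parent };

	mark_reg_read(&child);
	/* parent is read (child used its value), grandparent is screened off */
	printf("parent read=%d grandparent read=%d\n",
	       !!(parent.live & LIVE_READ), !!(grandparent.live & LIVE_READ));
	return 0;
}

This is also why the comment above insists on clearing write marks in the current state before linking it in: keeping them would wrongly screen the child's reads from the new parent.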
* (There are no read marks in current state, because reads always mark @@ -5197,9 +5177,13 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; + struct bpf_func_state *newframe = new->frame[j]; - for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) + for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; + frame->stack[i].spilled_ptr.parent = + &newframe->stack[i].spilled_ptr; + } } return 0; } @@ -5850,34 +5834,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { + bool ctx_access; + if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || - insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) + insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { type = BPF_READ; - else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || - insn->code == (BPF_STX | BPF_MEM | BPF_H) || - insn->code == (BPF_STX | BPF_MEM | BPF_W) || - insn->code == (BPF_STX | BPF_MEM | BPF_DW)) + ctx_access = true; + } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || + insn->code == (BPF_STX | BPF_MEM | BPF_H) || + insn->code == (BPF_STX | BPF_MEM | BPF_W) || + insn->code == (BPF_STX | BPF_MEM | BPF_DW) || + insn->code == (BPF_ST | BPF_MEM | BPF_B) || + insn->code == (BPF_ST | BPF_MEM | BPF_H) || + insn->code == (BPF_ST | BPF_MEM | BPF_W) || + insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { type = BPF_WRITE; - else + ctx_access = BPF_CLASS(insn->code) == BPF_STX; + } else { continue; + } if (type == BPF_WRITE && - env->insn_aux_data[i + delta].sanitize_stack_off) { + env->insn_aux_data[i + delta].sanitize_stack_spill) { struct bpf_insn patch[] = { - /* Sanitize suspicious stack slot with zero. 
- * There are no memory dependencies for this store, - * since it's only using frame pointer and immediate - * constant of zero - */ - BPF_ST_MEM(BPF_DW, BPF_REG_FP, - env->insn_aux_data[i + delta].sanitize_stack_off, - 0), - /* the original STX instruction will immediately - * overwrite the same stack slot with appropriate value - */ *insn, + BPF_ST_NOSPEC(), }; cnt = ARRAY_SIZE(patch); @@ -5891,6 +5874,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } + if (!ctx_access) + continue; + if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; diff --git a/kernel/events/core.c b/kernel/events/core.c index dd740f91de478..4a8c3f5313f96 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8914,7 +8914,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event) return; if (ifh->nr_file_filters) { - mm = get_task_mm(event->ctx->task); + mm = get_task_mm(task); if (!mm) goto restart; diff --git a/kernel/fork.c b/kernel/fork.c index cf535b9d5db75..b658716005077 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -964,6 +964,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm->pmd_huge_pte = NULL; #endif mm_init_uprobes_state(mm); + hugetlb_count_init(mm); if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 354151fef06ae..fbc62d360419d 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -911,7 +911,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) { struct mutex_waiter waiter; - bool first = false; struct ww_mutex *ww; int ret; @@ -986,6 +985,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, set_current_state(state); for (;;) { + bool first; + /* * Once we hold wait_lock, we're serialized against * mutex_unlock() handing the lock off to us, do a trylock @@ -1014,15 +1015,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, spin_unlock(&lock->wait_lock); schedule_preempt_disabled(); - /* - * ww_mutex needs to always recheck its position since its waiter - * list is not FIFO ordered. - */ - if (ww_ctx || !first) { - first = __mutex_waiter_is_first(lock, &waiter); - if (first) - __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); - } + first = __mutex_waiter_is_first(lock, &waiter); + if (first) + __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); set_current_state(state); /* diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 95271f180687e..33de14435c1f6 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -52,7 +52,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level) mutex_lock(&pid_caches_mutex); /* Name collision forces to do allocation under mutex. */ if (!*pkc) - *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0); + *pkc = kmem_cache_create(name, len, 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0); mutex_unlock(&pid_caches_mutex); /* current can fail, but someone else can succeed. 
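The create_pid_cachep() hunk relies on a create-once pattern: check, allocate under the mutex, and return whatever ends up published, because the current caller's allocation can fail while a concurrent caller's succeeds. A userspace sketch of the same shape, assuming a hypothetical struct cache / get_or_create_cache(), and using C11 atomics where the kernel uses READ_ONCE() plus the mutex:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a kmem_cache. */
struct cache { int unused; };

static _Atomic(struct cache *) the_cache;
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Create-once lookup: allocate under the mutex, publish the result, and
 * return whatever is published - our own allocation may fail while a
 * concurrent caller's succeeds.
 */
static struct cache *get_or_create_cache(void)
{
	struct cache *c = atomic_load_explicit(&the_cache, memory_order_acquire);

	if (c)
		return c;

	pthread_mutex_lock(&cache_mutex);
	c = atomic_load_explicit(&the_cache, memory_order_relaxed);
	if (!c) {
		c = calloc(1, sizeof(*c));	/* may fail and return NULL */
		if (c)
			atomic_store_explicit(&the_cache, c,
					      memory_order_release);
	}
	pthread_mutex_unlock(&cache_mutex);

	/* Re-read: even if our attempt failed, someone else's may not have. */
	return atomic_load_explicit(&the_cache, memory_order_acquire);
}

int main(void)
{
	printf("%p\n", (void *)get_or_create_cache());
	printf("%p\n", (void *)get_or_create_cache());	/* same pointer */
	return 0;
}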
*/ return READ_ONCE(*pkc); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index aa592dc3cb401..beec5081a55af 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1654,6 +1654,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused */ raw_spin_lock(&rq->lock); if (p->dl.dl_non_contending) { + update_rq_clock(rq); sub_running_bw(&p->dl, &rq->dl); p->dl.dl_non_contending = 0; /* @@ -2615,7 +2616,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) dl_se->dl_runtime = attr->sched_runtime; dl_se->dl_deadline = attr->sched_deadline; dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; - dl_se->flags = attr->sched_flags; + dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS; dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); } @@ -2628,7 +2629,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr) attr->sched_runtime = dl_se->dl_runtime; attr->sched_deadline = dl_se->dl_deadline; attr->sched_period = dl_se->dl_period; - attr->sched_flags = dl_se->flags; + attr->sched_flags &= ~SCHED_DL_FLAGS; + attr->sched_flags |= dl_se->flags; } /* @@ -2703,7 +2705,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) if (dl_se->dl_runtime != attr->sched_runtime || dl_se->dl_deadline != attr->sched_deadline || dl_se->dl_period != attr->sched_period || - dl_se->flags != attr->sched_flags) + dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS)) return true; return false; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7b7ba91e319bb..55e695080fc6b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -209,6 +209,8 @@ static inline int task_has_dl_policy(struct task_struct *p) */ #define SCHED_FLAG_SUGOV 0x10000000 +#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) + static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) { #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 0e04b24cec818..32ee24f5142ab 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1020,12 +1020,13 @@ static void __remove_hrtimer(struct hrtimer *timer, * remove hrtimer, called with base lock held */ static inline int -remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart) +remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, + bool restart, bool keep_local) { u8 state = timer->state; if (state & HRTIMER_STATE_ENQUEUED) { - int reprogram; + bool reprogram; /* * Remove the timer and force reprogramming when high @@ -1038,8 +1039,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest debug_deactivate(timer); reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases); + /* + * If the timer is not restarted then reprogramming is + * required if the timer is local. If it is local and about + * to be restarted, avoid programming it twice (on removal + * and a moment later when it's requeued). 
+ */ if (!restart) state = HRTIMER_STATE_INACTIVE; + else + reprogram &= !keep_local; __remove_hrtimer(timer, base, state, reprogram); return 1; @@ -1093,9 +1102,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, struct hrtimer_clock_base *base) { struct hrtimer_clock_base *new_base; + bool force_local, first; - /* Remove an active timer from the queue: */ - remove_hrtimer(timer, base, true); + /* + * If the timer is on the local cpu base and is the first expiring + * timer then this might end up reprogramming the hardware twice + * (on removal and on enqueue). To avoid that by prevent the + * reprogram on removal, keep the timer local to the current CPU + * and enforce reprogramming after it is queued no matter whether + * it is the new first expiring timer again or not. + */ + force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); + force_local &= base->cpu_base->next_timer == timer; + + /* + * Remove an active timer from the queue. In case it is not queued + * on the current CPU, make sure that remove_hrtimer() updates the + * remote data correctly. + * + * If it's on the current CPU and the first expiring timer, then + * skip reprogramming, keep the timer local and enforce + * reprogramming later if it was the first expiring timer. This + * avoids programming the underlying clock event twice (once at + * removal and once after enqueue). + */ + remove_hrtimer(timer, base, true, force_local); if (mode & HRTIMER_MODE_REL) tim = ktime_add_safe(tim, base->get_time()); @@ -1105,9 +1136,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, hrtimer_set_expires_range_ns(timer, tim, delta_ns); /* Switch the timer base, if necessary: */ - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); + if (!force_local) { + new_base = switch_hrtimer_base(timer, base, + mode & HRTIMER_MODE_PINNED); + } else { + new_base = base; + } + + first = enqueue_hrtimer(timer, new_base, mode); + if (!force_local) + return first; - return enqueue_hrtimer(timer, new_base, mode); + /* + * Timer was forced to stay on the current CPU to avoid + * reprogramming on removal and enqueue. Force reprogram the + * hardware by evaluating the new first expiring timer. 
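The force_local/keep_local logic exists only to collapse two clock-event reprograms (one on removal, one on enqueue) into a single one when the first expiring timer on the local CPU is restarted; hrtimer_force_reprogram() is what issues that single reprogram afterwards. A deliberately stripped-down counter model of the difference, where clockevent_program() and the lone first_expiry value stand in for the real cpu_base bookkeeping:

#include <stdbool.h>
#include <stdio.h>

/* Toy clock event device: only count how often it gets reprogrammed. */
static int reprogram_count;

static void clockevent_program(long expires)
{
	(void)expires;
	reprogram_count++;
}

/* Toy per-CPU queue: just the earliest expiry currently armed. */
static long first_expiry = 100;

/* Old behaviour: reprogram on removal of the first timer, then again on
 * enqueue when it becomes the new first timer. */
static void restart_first_timer_old(long new_expiry)
{
	clockevent_program(0);		/* next remaining timer, elided */
	first_expiry = new_expiry;
	clockevent_program(first_expiry);
}

/* New behaviour: keep the timer local, skip the reprogram on removal and
 * force exactly one reprogram once it is queued again. */
static void restart_first_timer_new(long new_expiry)
{
	bool force_local = true;	/* first expiring timer on this CPU */

	first_expiry = new_expiry;
	if (force_local)
		clockevent_program(first_expiry);
}

int main(void)
{
	restart_first_timer_old(200);
	printf("old path: %d reprogram calls\n", reprogram_count);	/* 2 */

	reprogram_count = 0;
	restart_first_timer_new(300);
	printf("new path: %d reprogram calls\n", reprogram_count);	/* 1 */
	return 0;
}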
+ */ + hrtimer_force_reprogram(new_base->cpu_base, 1); + return 0; } /** @@ -1168,7 +1214,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer) base = lock_hrtimer_base(timer, &flags); if (!hrtimer_callback_running(timer)) - ret = remove_hrtimer(timer, base, false); + ret = remove_hrtimer(timer, base, false, false); unlock_hrtimer_base(timer, &flags); diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c index 20ed0f7667871..00825028cc847 100644 --- a/lib/mpi/mpiutil.c +++ b/lib/mpi/mpiutil.c @@ -91,7 +91,7 @@ int mpi_resize(MPI a, unsigned nlimbs) return 0; /* no need to do it */ if (a->d) { - p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL); + p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL); if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 08d3d59dca173..49d79079e8b3e 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -4293,8 +4293,8 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_LD_IMM64(R0, 0), BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_STX_MEM(BPF_W, R10, R1, -40), - BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_STX_MEM(BPF_DW, R10, R1, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, @@ -6687,7 +6687,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) u64 duration; u32 ret; - if (test->test[i].data_size == 0 && + /* + * NOTE: Several sub-tests may be present, in which case + * a zero {data_size, result} tuple indicates the end of + * the sub-test array. The first test is always run, + * even if both data_size and result happen to be zero. + */ + if (i > 0 && + test->test[i].data_size == 0 && test->test[i].result == 0) break; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e60e28131f679..20f079c81b335 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -783,8 +783,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn return movable_node_enabled ? movable_zone : kernel_zone; } -struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, - unsigned long nr_pages) +struct zone *zone_for_pfn_range(int online_type, int nid, + unsigned long start_pfn, unsigned long nr_pages) { if (online_type == MMOP_ONLINE_KERNEL) return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4446a523e684e..afcaa657a0229 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -807,7 +807,7 @@ static inline void __free_one_page(struct page *page, struct page *buddy; unsigned int max_order; - max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); + max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); @@ -820,7 +820,7 @@ static inline void __free_one_page(struct page *page, VM_BUG_ON_PAGE(bad_range(zone, page), page); continue_merging: - while (order < max_order - 1) { + while (order < max_order) { buddy_pfn = __find_buddy_pfn(pfn, order); buddy = page + (buddy_pfn - pfn); @@ -844,7 +844,7 @@ continue_merging: pfn = combined_pfn; order++; } - if (max_order < MAX_ORDER) { + if (order < MAX_ORDER - 1) { /* If we are here, it means order is >= pageblock_order. * We want to prevent merge between freepages on isolate * pageblock and normal pageblock. 
Without this, pageblock @@ -865,7 +865,7 @@ continue_merging: is_migrate_isolate(buddy_mt))) goto done_merging; } - max_order++; + max_order = order + 1; goto continue_merging; } diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 9daab0dd833b3..21132bf3d8503 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size) static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req) { - struct xen_9pfs_front_priv *priv = NULL; + struct xen_9pfs_front_priv *priv; RING_IDX cons, prod, masked_cons, masked_prod; unsigned long flags; u32 size = p9_req->tc.size; @@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req) break; } read_unlock(&xen_9pfs_lock); - if (!priv || priv->client != client) + if (list_entry_is_head(priv, &xen_9pfs_devs, list)) return -EINVAL; num = p9_req->tc.tag % priv->num_rings; diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h index c32638dddbf94..f6b9dc4e408f2 100644 --- a/net/bluetooth/cmtp/cmtp.h +++ b/net/bluetooth/cmtp/cmtp.h @@ -26,7 +26,7 @@ #include #include -#define BTNAMSIZ 18 +#define BTNAMSIZ 21 /* CMTP ioctl defines */ #define CMTPCONNADD _IOW('C', 200, int) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 7a85f215da45c..26acacb2fa95f 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1296,6 +1296,12 @@ int hci_inquiry(void __user *arg) goto done; } + /* Restrict maximum inquiry length to 60 seconds */ + if (ir.length > 60) { + err = -EINVAL; + goto done; + } + hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { @@ -1622,6 +1628,14 @@ int hci_dev_do_close(struct hci_dev *hdev) hci_request_cancel_all(hdev); hci_req_sync_lock(hdev); + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + hdev->shutdown(hdev); + } + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); hci_req_sync_unlock(hdev); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 45cc864cf2b38..937cada5595ee 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4083,6 +4083,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, switch (ev->status) { case 0x00: + /* The synchronous connection complete event should only be + * sent once per new connection. Receiving a successful + * complete event when the connection status is already + * BT_CONNECTED means that the device is misbehaving and sent + * multiple complete event packets for the same new connection. + * + * Registering the device more than once can corrupt kernel + * memory, hence upon detecting this invalid event, we report + * an error and ignore the packet. 
+ */ + if (conn->state == BT_CONNECTED) { + bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); + goto unlock; + } + conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; conn->type = ev->link_type; @@ -4786,9 +4801,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, } #endif +static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, + u8 bdaddr_type, bdaddr_t *local_rpa) +{ + if (conn->out) { + conn->dst_type = bdaddr_type; + conn->resp_addr_type = bdaddr_type; + bacpy(&conn->resp_addr, bdaddr); + + /* Check if the controller has set a Local RPA then it must be + * used instead or hdev->rpa. + */ + if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, local_rpa); + } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, &conn->hdev->rpa); + } else { + hci_copy_identity_address(conn->hdev, &conn->init_addr, + &conn->init_addr_type); + } + } else { + conn->resp_addr_type = conn->hdev->adv_addr_type; + /* Check if the controller has set a Local RPA then it must be + * used instead or hdev->rpa. + */ + if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { + conn->resp_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->resp_addr, local_rpa); + } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { + /* In case of ext adv, resp_addr will be updated in + * Adv Terminated event. + */ + if (!ext_adv_capable(conn->hdev)) + bacpy(&conn->resp_addr, + &conn->hdev->random_addr); + } else { + bacpy(&conn->resp_addr, &conn->hdev->bdaddr); + } + + conn->init_addr_type = bdaddr_type; + bacpy(&conn->init_addr, bdaddr); + + /* For incoming connections, set the default minimum + * and maximum connection interval. They will be used + * to check if the parameters are in range and if not + * trigger the connection update procedure. + */ + conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; + conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; + } +} + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, - bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle, - u16 interval, u16 latency, u16 supervision_timeout) + bdaddr_t *bdaddr, u8 bdaddr_type, + bdaddr_t *local_rpa, u8 role, u16 handle, + u16 interval, u16 latency, + u16 supervision_timeout) { struct hci_conn_params *params; struct hci_conn *conn; @@ -4836,32 +4906,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, cancel_delayed_work(&conn->le_conn_timeout); } - if (!conn->out) { - /* Set the responder (our side) address type based on - * the advertising address type. - */ - conn->resp_addr_type = hdev->adv_addr_type; - if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { - /* In case of ext adv, resp_addr will be updated in - * Adv Terminated event. - */ - if (!ext_adv_capable(hdev)) - bacpy(&conn->resp_addr, &hdev->random_addr); - } else { - bacpy(&conn->resp_addr, &hdev->bdaddr); - } - - conn->init_addr_type = bdaddr_type; - bacpy(&conn->init_addr, bdaddr); - - /* For incoming connections, set the default minimum - * and maximum connection interval. They will be used - * to check if the parameters are in range and if not - * trigger the connection update procedure. 
- */ - conn->le_conn_min_interval = hdev->le_conn_min_interval; - conn->le_conn_max_interval = hdev->le_conn_max_interval; - } + le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); /* Lookup the identity address from the stored connection * address and address type. @@ -4959,7 +5004,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, - ev->role, le16_to_cpu(ev->handle), + NULL, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); @@ -4973,7 +5018,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, - ev->role, le16_to_cpu(ev->handle), + &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); @@ -5004,7 +5049,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn) { struct adv_info *adv_instance; - if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM) + if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || + bacmp(&conn->resp_addr, BDADDR_ANY)) return; if (!hdev->cur_adv_instance) { diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index a4ca55df73908..007a01b08dbe9 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -48,6 +48,8 @@ struct sco_conn { spinlock_t lock; struct sock *sk; + struct delayed_work timeout_work; + unsigned int mtu; }; @@ -73,9 +75,20 @@ struct sco_pinfo { #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) -static void sco_sock_timeout(struct timer_list *t) +static void sco_sock_timeout(struct work_struct *work) { - struct sock *sk = from_timer(sk, t, sk_timer); + struct sco_conn *conn = container_of(work, struct sco_conn, + timeout_work.work); + struct sock *sk; + + sco_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + sco_conn_unlock(conn); + + if (!sk) + return; BT_DBG("sock %p state %d", sk, sk->sk_state); @@ -84,20 +97,26 @@ static void sco_sock_timeout(struct timer_list *t) sk->sk_state_change(sk); bh_unlock_sock(sk); - sco_sock_kill(sk); sock_put(sk); } static void sco_sock_set_timer(struct sock *sk, long timeout) { + if (!sco_pi(sk)->conn) + return; + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); - sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); + cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); + schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout); } static void sco_sock_clear_timer(struct sock *sk) { + if (!sco_pi(sk)->conn) + return; + BT_DBG("sock %p state %d", sk, sk->sk_state); - sk_stop_timer(sk, &sk->sk_timer); + cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); } /* ---- SCO connections ---- */ @@ -176,8 +195,10 @@ static void sco_conn_del(struct hci_conn *hcon, int err) sco_sock_clear_timer(sk); sco_chan_del(sk, err); bh_unlock_sock(sk); - sco_sock_kill(sk); sock_put(sk); + + /* Ensure no more work items will run before freeing conn. 
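The reworked sco_sock_timeout() above looks the socket up under the connection lock and takes a reference before dropping the lock, so the delayed work can run safely even while the socket is being torn down. A userspace sketch of that hold-under-lock pattern, with struct sock and struct conn reduced to hypothetical stand-ins and a pthread mutex in place of the kernel locking:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, heavily reduced stand-ins for struct sock / struct sco_conn. */
struct sock {
	atomic_int refcnt;
	int state;
};

struct conn {
	pthread_mutex_t lock;
	struct sock *sk;		/* may be detached concurrently */
};

static void sock_hold(struct sock *sk)
{
	atomic_fetch_add(&sk->refcnt, 1);
}

static void sock_put(struct sock *sk)
{
	if (atomic_fetch_sub(&sk->refcnt, 1) == 1)
		free(sk);
}

/*
 * Deferred-work pattern: look the socket up under the connection lock, take
 * a reference before dropping the lock, and bail out if it is already gone.
 */
static void timeout_work(struct conn *conn)
{
	struct sock *sk;

	pthread_mutex_lock(&conn->lock);
	sk = conn->sk;
	if (sk)
		sock_hold(sk);
	pthread_mutex_unlock(&conn->lock);

	if (!sk)
		return;

	sk->state = -1;			/* "connection timed out" */
	printf("timed out sock %p\n", (void *)sk);
	sock_put(sk);
}

int main(void)
{
	struct sock *sk = calloc(1, sizeof(*sk));
	struct conn conn = { .sk = sk };

	atomic_init(&sk->refcnt, 1);
	pthread_mutex_init(&conn.lock, NULL);

	timeout_work(&conn);		/* socket still attached: handled */
	conn.sk = NULL;			/* socket detached */
	timeout_work(&conn);		/* safely does nothing */

	sock_put(sk);			/* drop the last reference */
	return 0;
}

The cancel_delayed_work_sync() call in sco_conn_del() is the other half of the contract: once it returns, no work item can still be holding or about to take such a reference.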
*/ + cancel_delayed_work_sync(&conn->timeout_work); } hcon->sco_data = NULL; @@ -192,6 +213,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, sco_pi(sk)->conn = conn; conn->sk = sk; + INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); + if (parent) bt_accept_enqueue(parent, sk, true); } @@ -211,44 +234,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk, return err; } -static int sco_connect(struct sock *sk) +static int sco_connect(struct hci_dev *hdev, struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; - struct hci_dev *hdev; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); - hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); - if (!hdev) - return -EHOSTUNREACH; - - hci_dev_lock(hdev); - if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && - (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) { - err = -EOPNOTSUPP; - goto done; - } + (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) + return -EOPNOTSUPP; hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_pi(sk)->setting); - if (IS_ERR(hcon)) { - err = PTR_ERR(hcon); - goto done; - } + if (IS_ERR(hcon)) + return PTR_ERR(hcon); conn = sco_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); - err = -ENOMEM; - goto done; + return -ENOMEM; } /* Update source addr of the socket */ @@ -256,7 +267,7 @@ static int sco_connect(struct sock *sk) err = sco_chan_add(conn, sk, NULL); if (err) - goto done; + return err; if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); @@ -266,9 +277,6 @@ static int sco_connect(struct sock *sk) sco_sock_set_timer(sk, sk->sk_sndtimeo); } -done: - hci_dev_unlock(hdev); - hci_dev_put(hdev); return err; } @@ -393,8 +401,7 @@ static void sco_sock_cleanup_listen(struct sock *parent) */ static void sco_sock_kill(struct sock *sk) { - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || - sock_flag(sk, SOCK_DEAD)) + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); @@ -446,7 +453,6 @@ static void sco_sock_close(struct sock *sk) lock_sock(sk); __sco_sock_close(sk); release_sock(sk); - sco_sock_kill(sk); } static void sco_sock_init(struct sock *sk, struct sock *parent) @@ -488,8 +494,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; - timer_setup(&sk->sk_timer, sco_sock_timeout, 0); - bt_sock_link(&sco_sk_list, sk); return sk; } @@ -554,6 +558,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; + struct hci_dev *hdev; int err; BT_DBG("sk %p", sk); @@ -568,12 +573,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; + hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR); + if (!hdev) + return -EHOSTUNREACH; + hci_dev_lock(hdev); + lock_sock(sk); /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); - err = sco_connect(sk); + err = sco_connect(hdev, sk); + hci_dev_unlock(hdev); + hci_dev_put(hdev); if (err) goto done; @@ -761,6 +773,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; + default: + /* use CVSD settings as fallback */ + cp.max_latency = 
cpu_to_le16(0xffff); + cp.retrans_effort = 0xff; + break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 13e2ae6be620c..8aeece7aa9e97 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -53,20 +53,6 @@ struct chnl_net { enum caif_states state; }; -static void robust_list_del(struct list_head *delete_node) -{ - struct list_head *list_node; - struct list_head *n; - ASSERT_RTNL(); - list_for_each_safe(list_node, n, &chnl_net_list) { - if (list_node == delete_node) { - list_del(list_node); - return; - } - } - WARN_ON(1); -} - static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) { struct sk_buff *skb; @@ -368,6 +354,7 @@ static int chnl_net_init(struct net_device *dev) ASSERT_RTNL(); priv = netdev_priv(dev); strncpy(priv->name, dev->name, sizeof(priv->name)); + INIT_LIST_HEAD(&priv->list_field); return 0; } @@ -376,7 +363,7 @@ static void chnl_net_uninit(struct net_device *dev) struct chnl_net *priv; ASSERT_RTNL(); priv = netdev_priv(dev); - robust_list_del(&priv->list_field); + list_del_init(&priv->list_field); } static const struct net_device_ops netdev_ops = { @@ -541,7 +528,7 @@ static void __exit chnl_exit_module(void) rtnl_lock(); list_for_each_safe(list_node, _tmp, &chnl_net_list) { dev = list_entry(list_node, struct chnl_net, list_field); - list_del(list_node); + list_del_init(list_node); delete_device(dev); } rtnl_unlock(); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 994dd1520f07a..949694c70cbc6 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -694,8 +694,10 @@ proto_again: FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container); - memcpy(&key_addrs->v4addrs, &iph->saddr, - sizeof(key_addrs->v4addrs)); + memcpy(&key_addrs->v4addrs.src, &iph->saddr, + sizeof(key_addrs->v4addrs.src)); + memcpy(&key_addrs->v4addrs.dst, &iph->daddr, + sizeof(key_addrs->v4addrs.dst)); key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; } @@ -744,8 +746,10 @@ proto_again: FLOW_DISSECTOR_KEY_IPV6_ADDRS, target_container); - memcpy(&key_addrs->v6addrs, &iph->saddr, - sizeof(key_addrs->v6addrs)); + memcpy(&key_addrs->v6addrs.src, &iph->saddr, + sizeof(key_addrs->v6addrs.src)); + memcpy(&key_addrs->v6addrs.dst, &iph->daddr, + sizeof(key_addrs->v6addrs.dst)); key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 939d8a31eb82a..26d70c00b0545 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -192,9 +192,9 @@ static int net_eq_idr(int id, void *net, void *peer) return 0; } -/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc - * is set to true, thus the caller knows that the new id must be notified via - * rtnl. +/* Must be called from RCU-critical section or with nsid_lock held. If + * a new id is assigned, the bool alloc is set to true, thus the + * caller knows that the new id must be notified via rtnl. 
*/ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) { @@ -218,7 +218,7 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc) return NETNSA_NSID_NOT_ASSIGNED; } -/* should be called with nsid_lock held */ +/* Must be called from RCU-critical section or with nsid_lock held */ static int __peernet2id(struct net *net, struct net *peer) { bool no = false; @@ -261,9 +261,10 @@ int peernet2id(struct net *net, struct net *peer) { int id; - spin_lock_bh(&net->nsid_lock); + rcu_read_lock(); id = __peernet2id(net, peer); - spin_unlock_bh(&net->nsid_lock); + rcu_read_unlock(); + return id; } EXPORT_SYMBOL(peernet2id); @@ -837,6 +838,7 @@ struct rtnl_net_dump_cb { int s_idx; }; +/* Runs in RCU-critical section. */ static int rtnl_net_dumpid_one(int id, void *peer, void *data) { struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; @@ -867,9 +869,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) .s_idx = cb->args[0], }; - spin_lock_bh(&net->nsid_lock); + rcu_read_lock(); idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); - spin_unlock_bh(&net->nsid_lock); + rcu_read_unlock(); cb->args[0] = net_cb.idx; return skb->len; diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index ba6fc3c1186b9..e91838a7b8497 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -98,6 +98,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackvec = NULL; newdp->dccps_service_list = NULL; + newdp->dccps_hc_rx_ccid = NULL; + newdp->dccps_hc_tx_ccid = NULL; newdp->dccps_service = dreq->dreq_service; newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index b887d9edb9c38..f7c122357a966 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1226,13 +1226,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) * use the switch internal MDIO bus instead */ ret = dsa_slave_phy_connect(slave_dev, dp->index); - if (ret) { - netdev_err(slave_dev, - "failed to connect to port %d: %d\n", - dp->index, ret); - phylink_destroy(dp->pl); - return ret; - } + } + if (ret) { + netdev_err(slave_dev, "failed to connect to PHY: %pe\n", + ERR_PTR(ret)); + phylink_destroy(dp->pl); } return ret; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index dde6cf82e9f0a..fe10a565b7d85 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -465,6 +465,23 @@ out_bh_enable: local_bh_enable(); } +/* + * The device used for looking up which routing table to use for sending an ICMP + * error is preferably the source whenever it is set, which should ensure the + * icmp error can be sent to the source host, else lookup using the routing + * table of the destination device, else use the main routing table (index 0). 
+ */ +static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) +{ + struct net_device *route_lookup_dev = NULL; + + if (skb->dev) + route_lookup_dev = skb->dev; + else if (skb_dst(skb)) + route_lookup_dev = skb_dst(skb)->dev; + return route_lookup_dev; +} + static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, @@ -473,6 +490,7 @@ static struct rtable *icmp_route_lookup(struct net *net, int type, int code, struct icmp_bxm *param) { + struct net_device *route_lookup_dev; struct rtable *rt, *rt2; struct flowi4 fl4_dec; int err; @@ -487,7 +505,8 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; - fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); + route_lookup_dev = icmp_get_route_lookup_dev(skb_in); + fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = ip_route_output_key_hash(net, fl4, skb_in); @@ -511,7 +530,7 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, + if (inet_addr_type_dev_table(net, route_lookup_dev, fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index dca7fe0ae24ad..15804cfc19a8c 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2743,6 +2743,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u rv = 1; } else if (im) { if (src_addr) { + spin_lock_bh(&im->lock); for (psf = im->sources; psf; psf = psf->sf_next) { if (psf->sf_inaddr == src_addr) break; @@ -2753,6 +2754,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u im->sfcount[MCAST_EXCLUDE]; else rv = im->sfcount[MCAST_EXCLUDE] != 0; + spin_unlock_bh(&im->lock); } else rv = 1; /* unspecified source; tentatively allow */ } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a8a37d1128201..0c431fd4b1200 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -449,8 +449,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, static int gre_handle_offloads(struct sk_buff *skb, bool csum) { - if (csum && skb_checksum_start(skb) < skb->data) - return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } @@ -682,15 +680,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, } if (dev->header_ops) { + const int pull_len = tunnel->hlen + sizeof(struct iphdr); + if (skb_cow_head(skb, 0)) goto free_skb; tnl_params = (const struct iphdr *)skb->data; + if (pull_len > skb_transport_offset(skb)) + goto free_skb; + /* Pull skb since ip_tunnel_xmit() needs skb->data pointing * to gre header. 
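The new pull_len check above rejects packets whose claimed tunnel header length would make skb_pull() advance past the headers that are actually present. A simplified sketch of the same validate-before-pull idea; struct pkt and hdr_room are invented stand-ins for the few sk_buff fields involved (skb_transport_offset() plays the hdr_room role in the real check):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the few sk_buff fields this check cares about. */
struct pkt {
	unsigned char *data;
	size_t len;
	size_t hdr_room;	/* header bytes in front of the payload */
};

/* Advance ->data only when the claimed header length actually fits. */
static bool pull_headers(struct pkt *p, size_t pull_len)
{
	if (pull_len > p->hdr_room)
		return false;	/* malformed: would pull into the payload */
	p->data += pull_len;
	p->len -= pull_len;
	p->hdr_room -= pull_len;
	return true;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct pkt p = { buf, sizeof(buf), 24 };

	printf("%d\n", pull_headers(&p, 24));	/* 1: exactly the headers */
	printf("%d\n", pull_headers(&p, 8));	/* 0: nothing left to pull */
	return 0;
}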
*/ - skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); + skb_pull(skb, pull_len); skb_reset_mac_header(skb); } else { if (skb_cow_head(skb, dev->needed_headroom)) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e63905f7f6f95..25beecee89494 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -419,8 +419,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) { BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) != offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); - memcpy(&iph->saddr, &fl4->saddr, - sizeof(fl4->saddr) + sizeof(fl4->daddr)); + + iph->saddr = fl4->saddr; + iph->daddr = fl4->daddr; } /* Note: skb->sk can be different from sk, in case of tunnels */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1491d239385e5..730a15fc497ca 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -604,18 +604,25 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe) } } -static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) +static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash) { - struct fib_nh_exception *fnhe, *oldest; + struct fib_nh_exception __rcu **fnhe_p, **oldest_p; + struct fib_nh_exception *fnhe, *oldest = NULL; - oldest = rcu_dereference(hash->chain); - for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; - fnhe = rcu_dereference(fnhe->fnhe_next)) { - if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) + for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) { + fnhe = rcu_dereference_protected(*fnhe_p, + lockdep_is_held(&fnhe_lock)); + if (!fnhe) + break; + if (!oldest || + time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) { oldest = fnhe; + oldest_p = fnhe_p; + } } fnhe_flush_routes(oldest); - return oldest; + *oldest_p = oldest->fnhe_next; + kfree_rcu(oldest, rcu); } static inline u32 fnhe_hashfun(__be32 daddr) @@ -692,16 +699,21 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, if (rt) fill_route_from_fnhe(rt, fnhe); } else { - if (depth > FNHE_RECLAIM_DEPTH) - fnhe = fnhe_oldest(hash); - else { - fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); - if (!fnhe) - goto out_unlock; - - fnhe->fnhe_next = hash->chain; - rcu_assign_pointer(hash->chain, fnhe); + /* Randomize max depth to avoid some side channels attacks. */ + int max_depth = FNHE_RECLAIM_DEPTH + + prandom_u32_max(FNHE_RECLAIM_DEPTH); + + while (depth > max_depth) { + fnhe_remove_oldest(hash); + depth--; } + + fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); + if (!fnhe) + goto out_unlock; + + fnhe->fnhe_next = hash->chain; + fnhe->fnhe_genid = genid; fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; @@ -709,6 +721,8 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_mtu_locked = lock; fnhe->fnhe_expires = max(1UL, expires); + rcu_assign_pointer(hash->chain, fnhe); + /* Exception created; mark the cached routes for the nexthop * stale, so anyone caching it rechecks if this exception * applies to them. 
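fnhe_remove_oldest() above finds both the oldest exception and the link that points at it in a single pass by carrying a pointer-to-pointer, then unlinks through that link; the randomized max_depth just varies when eviction kicks in. A userspace sketch of the single-pass unlink, without the RCU annotations or kfree_rcu():

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long stamp;		/* lower = older */
	struct entry *next;
};

/*
 * Unlink and free the oldest entry of a singly linked list.  Carrying a
 * pointer-to-pointer while scanning means the predecessor link is already
 * known when the oldest entry is found, so no second walk is needed.
 */
static void remove_oldest(struct entry **chain)
{
	struct entry **p, **oldest_p = NULL;
	struct entry *e, *oldest = NULL;

	for (p = chain; (e = *p) != NULL; p = &e->next) {
		if (!oldest || e->stamp < oldest->stamp) {
			oldest = e;
			oldest_p = p;
		}
	}
	if (!oldest)
		return;
	*oldest_p = oldest->next;	/* the kernel defers the free via kfree_rcu() */
	free(oldest);
}

static struct entry *push(struct entry *head, unsigned long stamp)
{
	struct entry *e = malloc(sizeof(*e));

	e->stamp = stamp;
	e->next = head;
	return e;
}

int main(void)
{
	struct entry *chain = NULL;

	chain = push(chain, 30);
	chain = push(chain, 10);	/* oldest */
	chain = push(chain, 20);
	remove_oldest(&chain);

	for (struct entry *e = chain; e; e = e->next)
		printf("%lu ", e->stamp);	/* 20 30 */
	printf("\n");
	return 0;
}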
@@ -2801,7 +2815,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst, udph = skb_put_zero(skb, sizeof(struct udphdr)); udph->source = sport; udph->dest = dport; - udph->len = sizeof(struct udphdr); + udph->len = htons(sizeof(struct udphdr)); udph->check = 0; break; } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 2ab371f555250..119d2c2f3b047 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -342,8 +342,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, return NULL; } - if (syn_data && - tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD)) + if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD)) goto fastopen; if (foc->len >= 0 && /* Client presents or requests a cookie */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 36bff9291530b..5117e0aeea1af 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1195,7 +1195,7 @@ static u8 tcp_sacktag_one(struct sock *sk, if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) - tp->undo_retrans--; + tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); if ((sacked & TCPCB_SACKED_ACKED) && before(start_seq, state->reord)) state->reord = start_seq; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 71236aa7388d7..de4edfbc9e466 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2177,6 +2177,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) static void *tcp_seek_last_pos(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; + int bucket = st->bucket; int offset = st->offset; int orig_num = st->num; void *rc = NULL; @@ -2187,7 +2188,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq) break; st->state = TCP_SEQ_STATE_LISTENING; rc = listening_get_next(seq, NULL); - while (offset-- && rc) + while (offset-- && rc && bucket == st->bucket) rc = listening_get_next(seq, rc); if (rc) break; @@ -2198,7 +2199,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq) if (st->bucket > tcp_hashinfo.ehash_mask) break; rc = established_get_first(seq); - while (offset-- && rc) + while (offset-- && rc && bucket == st->bucket) rc = established_get_next(seq, rc); } diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index f14de4b6d639d..58e839e2ce1d3 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -104,7 +104,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, { __be16 uninitialized_var(dport), uninitialized_var(sport); const struct in6_addr *daddr = NULL, *saddr = NULL; - struct ipv6hdr *iph = ipv6_hdr(skb); + struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var; struct sk_buff *data_skb = NULL; int doff = 0; int thoff = 0, tproto; @@ -134,8 +134,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, thoff + sizeof(*hp); } else if (tproto == IPPROTO_ICMPV6) { - struct ipv6hdr ipv6_var; - if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, &sport, &dport, &ipv6_var)) return NULL; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 9abdc0a04d993..bf2a53d455a18 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -889,8 +889,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) } if (tunnel->version == L2TP_HDR_VER_3 && - l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + l2tp_v3_ensure_opt_in_linear(session, skb, 
&ptr, &optr)) { + l2tp_session_dec_refcount(session); goto error; + } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_dec_refcount(session); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3530d1a5fc98e..5c5908127fcb5 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3142,7 +3142,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) return true; - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) + if (!ieee80211_amsdu_realloc_pad(local, skb, + sizeof(*amsdu_hdr) + + local->hw.extra_tx_headroom)) return false; data = skb_push(skb, sizeof(*amsdu_hdr)); diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 3e3494c8d42f8..e252f62bb8c20 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -156,8 +156,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, return -ENOMEM; doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL); if (doi_def->map.std == NULL) { - ret_val = -ENOMEM; - goto add_std_failure; + kfree(doi_def); + return -ENOMEM; } doi_def->type = CIPSO_V4_MAP_TRANS; @@ -198,14 +198,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, } doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, sizeof(u32), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->lvl.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size, sizeof(u32), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->lvl.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; @@ -273,7 +273,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, doi_def->map.std->cat.local = kcalloc( doi_def->map.std->cat.local_size, sizeof(u32), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->cat.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; @@ -281,7 +281,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, doi_def->map.std->cat.cipso = kcalloc( doi_def->map.std->cat.cipso_size, sizeof(u32), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->cat.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index ac3fe507bc1c4..b0fd268ed65e5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2498,13 +2498,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, /* errors reported via destination sk->sk_err, but propagate * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); + if (err == -ESRCH) + err = 0; } if (report) { int err2; err2 = nlmsg_unicast(sk, skb, portid); - if (!err || err == -ESRCH) + if (!err) err = err2; } diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index ebc3c8c7e6661..bc62e1b246539 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1616,7 +1616,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); if (err) { kfree(cl); - return err; + goto failure; } if (tca[TCA_RATE]) { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index a862d9990be74..e4f69c779b8cf 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -382,6 +382,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, { 
struct fq_codel_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; + u32 quantum = 0; int err; if (!opt) @@ -399,6 +400,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, q->flows_cnt > 65536) return -EINVAL; } + if (tb[TCA_FQ_CODEL_QUANTUM]) { + quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + if (quantum > FQ_CODEL_QUANTUM_MAX) { + NL_SET_ERR_MSG(extack, "Invalid quantum"); + return -EINVAL; + } + } sch_tree_lock(sch); if (tb[TCA_FQ_CODEL_TARGET]) { @@ -425,8 +433,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_FQ_CODEL_ECN]) q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); - if (tb[TCA_FQ_CODEL_QUANTUM]) - q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + if (quantum) + q->quantum = quantum; if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index a85d78d2bdb73..d9d03881e4de5 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1914,7 +1914,7 @@ gss_svc_init_net(struct net *net) goto out2; return 0; out2: - destroy_use_gss_proxy_proc_entry(net); + rsi_cache_destroy_net(net); out1: rsc_cache_destroy_net(net); return rv; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 11d393c047728..6a606af8de819 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1146,6 +1146,22 @@ static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, .. extern void svc_tcp_prep_reply_hdr(struct svc_rqst *); +__be32 +svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err) +{ + set_bit(RQ_AUTHERR, &rqstp->rq_flags); + return auth_err; +} +EXPORT_SYMBOL_GPL(svc_return_autherr); + +static __be32 +svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp) +{ + if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags)) + return *statp; + return rpc_auth_ok; +} + /* * Common routine for processing the RPC request. 
*/ @@ -1296,11 +1312,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) procp->pc_release(rqstp); goto dropit; } - if (*statp == rpc_autherr_badcred) { - if (procp->pc_release) - procp->pc_release(rqstp); - goto err_bad_auth; - } + auth_stat = svc_get_autherr(rqstp, statp); + if (auth_stat != rpc_auth_ok) + goto err_release_bad_auth; if (*statp == rpc_success && procp->pc_encode && !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) { dprintk("svc: failed to encode reply\n"); @@ -1359,6 +1373,9 @@ err_bad_rpc: svc_putnl(resv, 2); goto sendit; +err_release_bad_auth: + if (procp->pc_release) + procp->pc_release(rqstp); err_bad_auth: dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat)); serv->sv_stats->rpcbadauth++; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 6aead6deaa6c4..848ae6dcbd822 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1716,6 +1716,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, bool connected = !tipc_sk_type_connectionless(sk); struct tipc_sock *tsk = tipc_sk(sk); int rc, err, hlen, dlen, copy; + struct tipc_skb_cb *skb_cb; struct sk_buff_head xmitq; struct tipc_msg *hdr; struct sk_buff *skb; @@ -1739,6 +1740,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, if (unlikely(rc)) goto exit; skb = skb_peek(&sk->sk_receive_queue); + skb_cb = TIPC_SKB_CB(skb); hdr = buf_msg(skb); dlen = msg_data_sz(hdr); hlen = msg_hdr_sz(hdr); @@ -1758,18 +1760,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, /* Capture data if non-error msg, otherwise just set return value */ if (likely(!err)) { - copy = min_t(int, dlen, buflen); - if (unlikely(copy != dlen)) - m->msg_flags |= MSG_TRUNC; - rc = skb_copy_datagram_msg(skb, hlen, m, copy); + int offset = skb_cb->bytes_read; + + copy = min_t(int, dlen - offset, buflen); + rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); + if (unlikely(rc)) + goto exit; + if (unlikely(offset + copy < dlen)) { + if (flags & MSG_EOR) { + if (!(flags & MSG_PEEK)) + skb_cb->bytes_read = offset + copy; + } else { + m->msg_flags |= MSG_TRUNC; + skb_cb->bytes_read = 0; + } + } else { + if (flags & MSG_EOR) + m->msg_flags |= MSG_EOR; + skb_cb->bytes_read = 0; + } } else { copy = 0; rc = 0; - if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) + if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) { rc = -ECONNRESET; + goto exit; + } } - if (unlikely(rc)) - goto exit; /* Mark message as group event if applicable */ if (unlikely(grp_evt)) { @@ -1792,6 +1809,9 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, tipc_node_distr_xmit(sock_net(sk), &xmitq); } + if (skb_cb->bytes_read) + goto exit; + tsk_advance_rx_queue(sk); if (likely(!connected)) @@ -2203,7 +2223,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, u32 dport, struct sk_buff_head *xmitq) { - unsigned long time_limit = jiffies + 2; + unsigned long time_limit = jiffies + usecs_to_jiffies(20000); struct sk_buff *skb; unsigned int lim; atomic_t *dcnt; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 98c253afa0db2..c293a558b0d4f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2739,7 +2739,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, other = unix_peer(sk); if (other && unix_peer(other) != sk && - unix_recvq_full(other) && + unix_recvq_full_lockless(other) && unix_dgram_peer_wake_me(sk, other)) 
writable = 0; diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh index e68b9ee6814b8..35db26f736b9d 100755 --- a/samples/bpf/test_override_return.sh +++ b/samples/bpf/test_override_return.sh @@ -1,5 +1,6 @@ #!/bin/bash +rm -r tmpmnt rm -f testfile.img dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1 DEVICE=$(losetup --show -f testfile.img) diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c index ea6dae78f0dff..2ed13e9f3fcb0 100644 --- a/samples/bpf/tracex7_user.c +++ b/samples/bpf/tracex7_user.c @@ -13,6 +13,11 @@ int main(int argc, char **argv) char command[256]; int ret; + if (!argv[1]) { + fprintf(stderr, "ERROR: Run with the btrfs device argument!\n"); + return 0; + } + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 13b446328dda0..5095b2e8fceeb 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -5,7 +5,6 @@ config IMA select SECURITYFS select CRYPTO select CRYPTO_HMAC - select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_HASH_INFO select TCG_TPM if HAS_IOMEM && !UML diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c index 3e7a1523663b8..daad75ee74d9f 100644 --- a/security/integrity/ima/ima_mok.c +++ b/security/integrity/ima/ima_mok.c @@ -26,7 +26,7 @@ struct key *ima_blacklist_keyring; /* * Allocate the IMA blacklist keyring */ -__init int ima_mok_init(void) +static __init int ima_mok_init(void) { struct key_restriction *restriction; diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index a7855c61c05cd..07d23b4f76f3b 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -85,23 +85,22 @@ int log_policy = SMACK_AUDIT_DENIED; int smk_access_entry(char *subject_label, char *object_label, struct list_head *rule_list) { - int may = -ENOENT; struct smack_rule *srp; list_for_each_entry_rcu(srp, rule_list, list) { if (srp->smk_object->smk_known == object_label && srp->smk_subject->smk_known == subject_label) { - may = srp->smk_access; - break; + int may = srp->smk_access; + /* + * MAY_WRITE implies MAY_LOCK. + */ + if ((may & MAY_WRITE) == MAY_WRITE) + may |= MAY_LOCK; + return may; } } - /* - * MAY_WRITE implies MAY_LOCK. 
- */ - if ((may & MAY_WRITE) == MAY_WRITE) - may |= MAY_LOCK; - return may; + return -ENOENT; } /** diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 56295936387c3..da454eeee5c91 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1751,7 +1751,7 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, channels = params_channels(params); frame_size = snd_pcm_format_size(format, channels); if (frame_size > 0) - params->fifo_size /= (unsigned)frame_size; + params->fifo_size /= frame_size; } return 0; } diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 186c0ee059da7..c4d19b88d17dc 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -293,9 +293,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = { static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = { {"Headphone", NULL, "Platform Clock"}, {"Headset Mic", NULL, "Platform Clock"}, - {"Internal Mic", NULL, "Platform Clock"}, - {"Speaker", NULL, "Platform Clock"}, - {"Headset Mic", NULL, "MICBIAS1"}, {"IN2P", NULL, "Headset Mic"}, {"Headphone", NULL, "HPOL"}, @@ -303,19 +300,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = { }; static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = { + {"Internal Mic", NULL, "Platform Clock"}, {"DMIC1", NULL, "Internal Mic"}, }; static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = { + {"Internal Mic", NULL, "Platform Clock"}, {"DMIC2", NULL, "Internal Mic"}, }; static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = { + {"Internal Mic", NULL, "Platform Clock"}, {"Internal Mic", NULL, "MICBIAS1"}, {"IN1P", NULL, "Internal Mic"}, }; static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = { + {"Internal Mic", NULL, "Platform Clock"}, {"Internal Mic", NULL, "MICBIAS1"}, {"IN3P", NULL, "Internal Mic"}, }; @@ -357,6 +358,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = { }; static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = { + {"Speaker", NULL, "Platform Clock"}, {"Speaker", NULL, "SPOLP"}, {"Speaker", NULL, "SPOLN"}, {"Speaker", NULL, "SPORP"}, @@ -364,6 +366,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = { }; static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = { + {"Speaker", NULL, "Platform Clock"}, {"Speaker", NULL, "SPOLP"}, {"Speaker", NULL, "SPOLN"}, }; diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index b86f76c3598cd..c6d11a59310fc 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -189,7 +189,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai, { struct rk_i2s_dev *i2s = to_info(cpu_dai); unsigned int mask = 0, val = 0; + int ret = 0; + pm_runtime_get_sync(cpu_dai->dev); mask = I2S_CKR_MSS_MASK; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: @@ -202,7 +204,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai, i2s->is_master_mode = false; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_pm_put; } regmap_update_bits(i2s->regmap, I2S_CKR, mask, val); @@ -216,7 +219,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai, val = I2S_CKR_CKP_POS; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_pm_put; } regmap_update_bits(i2s->regmap, I2S_CKR, mask, val); @@ -232,14 +236,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai 
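The rockchip_i2s_set_fmt() rework above replaces the early "return -EINVAL" statements with "goto err_pm_put" so the pm_runtime reference taken at the top of the function is always dropped. A generic sketch of that single-exit pattern, with device_get()/device_put() as hypothetical stand-ins for pm_runtime_get_sync()/pm_runtime_put():

#include <errno.h>
#include <stdio.h>

static void device_get(void) { puts("get"); }
static void device_put(void) { puts("put"); }

static int set_format(int fmt)
{
	int ret = 0;

	device_get();
	switch (fmt) {
	case 0:
	case 1:
		/* program the hardware here */
		break;
	default:
		ret = -EINVAL;
		goto err_put;	/* single exit keeps get/put balanced */
	}
	/* further programming on the success path ... */
err_put:
	device_put();
	return ret;
}

int main(void)
{
	int ok = set_format(0);
	int bad = set_format(7);

	printf("ok=%d bad=%d\n", ok, bad);
	return 0;
}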
*cpu_dai, case SND_SOC_DAIFMT_I2S: val = I2S_TXCR_IBM_NORMAL; break; - case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */ - val = I2S_TXCR_TFS_PCM; - break; - case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */ + case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */ val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1); break; + case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */ + val = I2S_TXCR_TFS_PCM; + break; default: - return -EINVAL; + ret = -EINVAL; + goto err_pm_put; } regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val); @@ -255,19 +260,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai, case SND_SOC_DAIFMT_I2S: val = I2S_RXCR_IBM_NORMAL; break; - case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */ - val = I2S_RXCR_TFS_PCM; - break; - case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */ + case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */ val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1); break; + case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */ + val = I2S_RXCR_TFS_PCM; + break; default: - return -EINVAL; + ret = -EINVAL; + goto err_pm_put; } regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val); - return 0; +err_pm_put: + pm_runtime_put(cpu_dai->dev); + + return ret; } static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 4114594e57a30..f4bdcff82f5cc 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1554,6 +1554,7 @@ static const struct registration_quirk registration_quirks[] = { REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */ + REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2), /* JBL Quantum 800 */ REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */ REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */ REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */ diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index ab208400ea140..4ada233b37ed1 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1893,6 +1893,7 @@ static int add_callchain_ip(struct thread *thread, al.filtered = 0; al.sym = NULL; + al.srcline = NULL; if (!cpumode) { thread__find_cpumode_addr_location(thread, ip, &al); } else { diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 4e202217fae10..87ba89df98022 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -796,7 +796,7 @@ static void test_sockmap(int tasks, void *data) FD_ZERO(&w); FD_SET(sfd[3], &w); - to.tv_sec = 1; + to.tv_sec = 30; to.tv_usec = 0; s = select(sfd[3] + 1, &w, NULL, NULL, &to); if (s == -1) { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c7d17781dbfee..858e551432339 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -956,15 +956,45 @@ static struct bpf_test tests[] = { BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), /* mess up with R1 pointer on stack */ BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), - /* fill back into R0 should fail */ + /* fill back into R0 is fine for priv. + * R0 now becomes SCALAR_VALUE. + */ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + /* Load from R0 should fail. 
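The test_maps.c tweak stretches the select() timeout from 1 to 30 seconds so a slow sockmap run does not fail spuriously. A self-contained example of select() with such a timeout, waiting on stdin rather than the test's socket pair:

#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
	fd_set rfds;
	struct timeval to = { .tv_sec = 30, .tv_usec = 0 };
	int ret;

	FD_ZERO(&rfds);
	FD_SET(STDIN_FILENO, &rfds);

	/* Block for up to 30 seconds; an overly short timeout makes slow runs flaky. */
	ret = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &to);
	if (ret < 0)
		perror("select");
	else if (ret == 0)
		puts("timed out");
	else
		puts("stdin is readable");
	return 0;
}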
*/ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), BPF_EXIT_INSN(), }, .errstr_unpriv = "attempt to corrupt spilled", - .errstr = "corrupted spill", + .errstr = "R0 invalid mem access 'inv", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, + { + "check corrupted spill/fill, LSB", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = POINTER_VALUE, + }, + { + "check corrupted spill/fill, MSB", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = POINTER_VALUE, + }, { "invalid src register in STX", .insns = { @@ -3858,7 +3888,8 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), @@ -6530,9 +6561,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, bitwise AND, zero included", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), @@ -6547,9 +6578,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, bitwise AND + JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), @@ -6623,9 +6654,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, JMP, bounds + offset", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), @@ -6644,9 +6675,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), @@ -6664,9 +6695,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, JMP, no max check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 
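The reworked spill/fill tests above store a pointer to the stack and then overwrite only part of the 8-byte slot, which is why the verifier now treats the refilled value as a plain scalar rather than a usable pointer. A small host-side illustration, assuming a little-endian machine, of how a 2-byte store clobbers a saved pointer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	int target = 42;
	union {
		int *ptr;
		unsigned char bytes[sizeof(int *)];
	} slot;

	slot.ptr = &target;
	/* Overwrite two bytes of the saved pointer, as BPF_ST_MEM(BPF_H, ...)
	 * does to the spill slot; on little-endian these are the low-order bytes.
	 */
	memcpy(slot.bytes, "\xfe\xca", 2);
	printf("original %p, clobbered %p\n", (void *)&target, (void *)slot.ptr);
	return 0;
}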
-128), BPF_MOV64_IMM(BPF_REG_4, 0), @@ -6684,9 +6715,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, JMP, no min check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), @@ -6702,9 +6733,9 @@ static struct bpf_test tests[] = { { "helper access to variable memory: stack, JMP (signed), no min check", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), @@ -6746,6 +6777,7 @@ static struct bpf_test tests[] = { { "helper access to variable memory: map, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), @@ -6753,7 +6785,7 @@ static struct bpf_test tests[] = { BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, @@ -6765,7 +6797,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map2 = { 3 }, + .fixup_map2 = { 4 }, .errstr = "invalid access to map value, value_size=48 off=0 size=49", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, @@ -6800,6 +6832,7 @@ static struct bpf_test tests[] = { { "helper access to variable memory: map adjusted, JMP, wrong max", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), @@ -6808,7 +6841,7 @@ static struct bpf_test tests[] = { BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, @@ -6820,7 +6853,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map2 = { 3 }, + .fixup_map2 = { 4 }, .errstr = "R1 min value is outside of the array range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, @@ -6842,8 +6875,8 @@ static struct bpf_test tests[] = { { "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), @@ -7070,6 +7103,7 @@ static struct bpf_test tests[] = { { "helper access to variable memory: 8 bytes leak", .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), BPF_MOV64_IMM(BPF_REG_0, 0), @@ -7080,7 +7114,6 @@ static struct 
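These variable-memory tests load an unknown value and rely on a bitwise AND to bound it before it is used as a helper size argument. The same masking idea in plain C, using a hypothetical copy_bounded() helper with a 64-byte buffer:

#include <stdio.h>
#include <string.h>

#define BUF_SZ 64

/* Masking with (BUF_SZ - 1) bounds len to [0, 63] no matter what the caller
 * passes, so the memcpy can never overrun the fixed-size buffers.
 */
static void copy_bounded(char *dst, const char *src, unsigned long len)
{
	len &= BUF_SZ - 1;
	memcpy(dst, src, len);
}

int main(void)
{
	char dst[BUF_SZ] = { 0 };
	static const char src[BUF_SZ] = "bounded copy example";

	copy_bounded(dst, src, 1000);	/* 1000 & 63 == 40, so only 40 bytes move */
	printf("%s\n", dst);
	return 0;
}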
bpf_test tests[] = { BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 1), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), @@ -8465,7 +8498,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_LWT_IN, }, { - "indirect variable-offset stack access", + "indirect variable-offset stack access, out of bound", .insns = { /* Fill the top 8 bytes of the stack */ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), @@ -8486,10 +8519,85 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 5 }, - .errstr = "variable stack read R2", + .errstr = "invalid stack type R2 var_off", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, + { + "indirect variable-offset stack access, max_off+size > max_initialized", + .insns = { + /* Fill only the second from top 8 bytes of the stack. */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + /* Get an unknown value. */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned. */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), + /* Add it to fp. We now have either fp-12 or fp-16, but we don't know + * which. fp-12 size 8 is partially uninitialized stack. + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* Dereference it indirectly. */ + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 5 }, + .errstr = "invalid indirect read from stack var_off", .result = REJECT, .prog_type = BPF_PROG_TYPE_LWT_IN, }, + { + "indirect variable-offset stack access, min_off < min_initialized", + .insns = { + /* Fill only the top 8 bytes of the stack. */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned. */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), + /* Add it to fp. We now have either fp-12 or fp-16, but we don't know + * which. fp-16 size 8 is partially uninitialized stack. + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* Dereference it indirectly. */ + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 5 }, + .errstr = "invalid indirect read from stack var_off", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, + { + "indirect variable-offset stack access, ok", + .insns = { + /* Fill the top 16 bytes of the stack. */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value. */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned. */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), + /* Add it to fp. We now have either fp-12 or fp-16, we don't know + * which, but either way it points to initialized stack. + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* Dereference it indirectly. */ + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 6 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, { "direct stack access with 32-bit wraparound. 
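The variable-offset tests mask the unknown value with 4 and subtract 16, so the resulting stack offset can only be fp-16 or fp-12, and the verifier insists that every byte in that window be initialized. A short arithmetic check of the two possible offsets:

#include <stdio.h>

int main(void)
{
	/* Any unknown value ANDed with 4 is either 0 or 4, so after subtracting
	 * 16 the frame-pointer offset is always -16 or -12.
	 */
	for (unsigned int unknown = 0; unknown < 8; unknown++) {
		int off = (int)(unknown & 4) - 16;
		printf("unknown=%u -> fp%+d\n", unknown, off);
	}
	return 0;
}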
test1", .insns = { diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index 735a510230c3f..45751648aacda 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile @@ -10,7 +10,7 @@ CFLAGS+= -O1 ${WARNFLAGS} # Add "-fstack-protector" only if toolchain supports it. CFLAGS+= $(call cc-option,-fstack-protector) CC?= $(CROSS_COMPILE)gcc -PKG_CONFIG?= pkg-config +PKG_CONFIG?= $(CROSS_COMPILE)pkg-config CFLAGS+=-D VERSION=\"$(VERSION)\" LDFLAGS+= diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 39706799ecdf8..b943ec5345cbd 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1137,6 +1137,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(®, argp, sizeof(reg))) break; + /* + * We could owe a reset due to PSCI. Handle the pending reset + * here to ensure userspace register accesses are ordered after + * the reset. + */ + if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) + kvm_reset_vcpu(vcpu); + if (ioctl == KVM_SET_ONE_REG) r = kvm_arm_set_reg(vcpu, ®); else