diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt index d412b236a9d6f..7cf7143921a1f 100644 --- a/Documentation/filesystems/seq_file.txt +++ b/Documentation/filesystems/seq_file.txt @@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock during that time is a reasonable thing to do. The seq_file code will also avoid taking any other locks while the iterator is active. +The iterater value returned by start() or next() is guaranteed to be +passed to a subsequent next() or stop() call. This allows resources +such as locks that were taken to be reliably released. There is *no* +guarantee that the iterator will be passed to show(), though in practice +it often will be. + Formatted output diff --git a/Makefile b/Makefile index 74b9258e7d2ce..6787efa2edc90 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 177 +SUBLEVEL = 178 EXTRAVERSION = NAME = "People's Front" diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index e205bbbe27949..69e661f574a07 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -1090,9 +1090,9 @@ __armv4_mmu_cache_off: __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU - bic r0, r0, #0x000d + bic r0, r0, #0x0005 #else - bic r0, r0, #0x000c + bic r0, r0, #0x0004 #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r12, lr diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts index 705adfa8c680f..a94758090fb0d 100644 --- a/arch/arm/boot/dts/armada-388-helios4.dts +++ b/arch/arm/boot/dts/armada-388-helios4.dts @@ -70,6 +70,9 @@ system-leds { compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&helios_system_led_pins>; + status-led { label = "helios4:green:status"; gpios = <&gpio0 24 GPIO_ACTIVE_LOW>; @@ -86,6 +89,9 @@ io-leds { compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&helios_io_led_pins>; + sata1-led { label = "helios4:green:ata1"; gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; @@ -121,11 +127,15 @@ fan1: j10-pwm { compatible = "pwm-fan"; pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */ + pinctrl-names = "default"; + pinctrl-0 = <&helios_fan1_pins>; }; fan2: j17-pwm { compatible = "pwm-fan"; pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */ + pinctrl-names = "default"; + pinctrl-0 = <&helios_fan2_pins>; }; usb2_phy: usb2-phy { @@ -291,16 +301,22 @@ "mpp39", "mpp40"; marvell,function = "sd0"; }; - helios_led_pins: helios-led-pins { - marvell,pins = "mpp24", "mpp25", - "mpp49", "mpp50", + helios_system_led_pins: helios-system-led-pins { + marvell,pins = "mpp24", "mpp25"; + marvell,function = "gpio"; + }; + helios_io_led_pins: helios-io-led-pins { + marvell,pins = "mpp49", "mpp50", "mpp52", "mpp53", "mpp54"; marvell,function = "gpio"; }; - helios_fan_pins: helios-fan-pins { - marvell,pins = "mpp41", "mpp43", - "mpp48", "mpp55"; + helios_fan1_pins: helios_fan1_pins { + marvell,pins = "mpp41", "mpp43"; + marvell,function = "gpio"; + }; + helios_fan2_pins: helios_fan2_pins { + marvell,pins = "mpp48", "mpp55"; marvell,function = "gpio"; }; microsom_spi1_cs_pins: spi1-cs-pins { diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index 69f6b9d2e7e7d..2af093759143c 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -312,6 +312,7 @@ compatible = "aspeed,ast2400-ibt-bmc"; reg = <0xc0 0x18>; interrupts = <8>; + clocks = 
<&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi index f2e1015d75ab4..bcd57e6c51e51 100644 --- a/arch/arm/boot/dts/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed-g5.dtsi @@ -372,6 +372,7 @@ compatible = "aspeed,ast2500-ibt-bmc"; reg = <0xc0 0x18>; interrupts = <8>; + clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi index 7c22cbf6f3d41..6e30db644c83a 100644 --- a/arch/arm/boot/dts/exynos3250-artik5.dtsi +++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi @@ -68,7 +68,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx3>; - interrupts = <5 IRQ_TYPE_NONE>; + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps14_irq>; reg = <0x66>; diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts index 6ffedf4ed9f2b..d343dc13ceecd 100644 --- a/arch/arm/boot/dts/exynos3250-monk.dts +++ b/arch/arm/boot/dts/exynos3250-monk.dts @@ -188,7 +188,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts index 2a6b828c01b7c..29df4cfa9165f 100644 --- a/arch/arm/boot/dts/exynos3250-rinato.dts +++ b/arch/arm/boot/dts/exynos3250-rinato.dts @@ -253,7 +253,7 @@ s2mps14_pmic@66 { compatible = "samsung,s2mps14-pmic"; interrupt-parent = <&gpx0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index 3d501926c2278..2355c53164840 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -108,7 +108,7 @@ compatible = "samsung,s5m8767-pmic"; reg = <0x66>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index a370857beac0d..fbaca74cbaea4 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -84,7 +84,7 @@ reg = <0x66>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_EDGE_FALLING>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps11_irq>; diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi index d476ba0f07b6b..ba7187a74be36 100644 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi @@ -136,7 +136,7 @@ samsung,s2mps11-acokb-ground; interrupt-parent = <&gpx0>; - interrupts = <4 IRQ_TYPE_EDGE_FALLING>; + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&s2mps11_irq>; diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi index 86b9caf461dfa..6e320efd9fc1d 100644 --- a/arch/arm/boot/dts/omap443x.dtsi +++ b/arch/arm/boot/dts/omap443x.dtsi @@ -33,10 +33,12 @@ }; ocp { + /* 4430 has only gpio_86 tshut and no talert interrupt */ bandgap: bandgap@4a002260 { reg = <0x4a002260 0x4 0x4a00232C 0x4>; compatible = 
"ti,omap4430-bandgap"; + gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; }; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1fe3e5cb29278..1daefa57e2742 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -473,7 +473,7 @@ config ARM64_ERRATUM_1024718 help This option adds work around for Arm Cortex-A55 Erratum 1024718. - Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect + Affected Cortex-A55 cores (all revisions) could cause incorrect update of the hardware dirty bit when the DBM/AP bits are updated without a break-before-make. The work around is to disable the usage of hardware DBM locally on the affected cores. CPUs not affected by diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts index 897e60cbe38d1..ecb3e10c85e07 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts @@ -67,8 +67,6 @@ }; &ehci0 { - phys = <&usbphy 0>; - phy-names = "usb"; status = "okay"; }; @@ -107,6 +105,7 @@ pinctrl-0 = <&mmc2_pins>; vmmc-supply = <®_dcdc1>; vqmmc-supply = <®_eldo1>; + max-frequency = <200000000>; bus-width = <8>; non-removable; cap-mmc-hw-reset; @@ -115,8 +114,6 @@ }; &ohci0 { - phys = <&usbphy 0>; - phy-names = "usb"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi index 6723b8695e0bb..ca39084fddc0b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi @@ -51,7 +51,6 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; vmmc-supply = <®_dcdc1>; - non-removable; disable-wp; bus-width = <4>; cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 7abc4ea305410..30cc2e83a288e 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -264,7 +264,7 @@ resets = <&ccu RST_BUS_MMC2>; reset-names = "ahb"; interrupts = ; - max-frequency = <200000000>; + max-frequency = <150000000>; status = "disabled"; #address-cells = <1>; #size-cells = <0>; @@ -312,6 +312,8 @@ <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>, <&ccu RST_BUS_EHCI0>; + phys = <&usbphy 0>; + phy-names = "usb"; status = "disabled"; }; @@ -322,6 +324,8 @@ clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>; resets = <&ccu RST_BUS_OHCI0>; + phys = <&usbphy 0>; + phy-names = "usb"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index a1e3194b74837..d64f97d97c350 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -378,7 +378,7 @@ s2mps13-pmic@66 { compatible = "samsung,s2mps13-pmic"; interrupt-parent = <&gpa0>; - interrupts = <7 IRQ_TYPE_NONE>; + interrupts = <7 IRQ_TYPE_LEVEL_LOW>; reg = <0x66>; samsung,s2mps11-wrstbi-ground; diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index d991eae5202f2..2ba62118ae906 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -85,7 +85,7 @@ s2mps15_pmic@66 { compatible = "samsung,s2mps15-pmic"; reg = <0x66>; - interrupts = <2 IRQ_TYPE_NONE>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; 
interrupt-parent = <&gpa0>; pinctrl-names = "default"; pinctrl-0 = <&pmic_irq>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index de2c47bdbe646..2bcee994898a2 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -641,6 +641,8 @@ clocks = <&pericfg CLK_PERI_MSDC30_1_PD>, <&topckgen CLK_TOP_AXI_SEL>; clock-names = "source", "hclk"; + resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>; + reset-names = "hrst"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 6597c0894137a..4eecf3b024688 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -826,6 +826,7 @@ <&tegra_car 128>, /* hda2hdmi */ <&tegra_car 111>; /* hda2codec_2x */ reset-names = "hda", "hda2hdmi", "hda2codec_2x"; + power-domains = <&pd_sor>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 2c5193ae20277..ba42c62399226 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -64,7 +64,7 @@ no-map; }; - reserved@8668000 { + reserved@86680000 { reg = <0x0 0x86680000 0x0 0x80000>; no-map; }; @@ -77,7 +77,7 @@ qcom,client-id = <1>; }; - rfsa@867e00000 { + rfsa@867e0000 { reg = <0x0 0x867e0000 0x0 0x20000>; no-map; }; diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index d8c521c757e83..3f51743c01d86 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -21,6 +21,7 @@ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha1"); struct sha1_ce_state { struct sha1_state sst; diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index c47d1a28ff6bb..4022c51a377bf 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -21,6 +21,8 @@ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); struct sha256_ce_state { struct sha256_state sst; diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index a336feac0f59a..df20ab645487c 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -22,6 +22,10 @@ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha3-224"); +MODULE_ALIAS_CRYPTO("sha3-256"); +MODULE_ALIAS_CRYPTO("sha3-384"); +MODULE_ALIAS_CRYPTO("sha3-512"); asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks, int md_len); diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index f2c5f28c622ac..2871e68ae3df4 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -22,6 +22,8 @@ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel "); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index de6fa9b4abfa0..1719d21a171a9 100644 --- 
a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1012,7 +1012,7 @@ static bool cpu_has_broken_dbm(void) /* List of CPUs which have broken DBM support. */ static const struct midr_range cpus[] = { #ifdef CONFIG_ARM64_ERRATUM_1024718 - MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), #endif {}, }; diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d22ab8d9edc95..c85ea70b92936 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -867,6 +867,7 @@ __primary_switch: tlbi vmalle1 // Remove any stale TLB entries dsb nsh + isb msr sctlr_el1, x19 // re-enable the MMU isb diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 636ca0119c0ef..6aeb11aa7e283 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -41,7 +41,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, /* TODO: Currently we do not support AARCH32 instruction probing */ if (mm->context.flags & MMCF_AARCH32) - return -ENOTSUPP; + return -EOPNOTSUPP; else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) return -EINVAL; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index be63fff95b2ab..968c5765020c9 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -93,6 +93,7 @@ SECTIONS INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA + PAGE_ALIGNED_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 37caeadb2964c..0476d7e97a03f 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -244,7 +244,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc) generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); /* if this is a EBU irq, we need to ack it or get a deadlock */ - if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) + if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, LTQ_EBU_PCC_ISTAT); } diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7650edd5cf7ff..60fe72170856d 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1673,7 +1673,7 @@ static int probe_scache(void) return 1; } -static void __init loongson2_sc_init(void) +static void loongson2_sc_init(void) { struct cpuinfo_mips *c = ¤t_cpu_data; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d18ea3c1f4fac..6dd2a14e1ebcd 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -709,7 +709,7 @@ config PPC_64K_PAGES config PPC_256K_PAGES bool "256k page size" - depends on 44x && !STDBINUTILS + depends on 44x && !STDBINUTILS && !PPC_47x help Make the page size 256k. diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index dc99258f2e8c6..08c16aa0dd534 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -269,7 +269,7 @@ SystemCall: /* On the MPC8xx, this is a software emulation interrupt. It occurs * for all unimplemented and illegal instructions. */ - EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) + EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD) . 
= 0x1100 /* diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 51cd66dc1bb09..7c8354dfe80e2 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1497,7 +1497,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1515,7 +1515,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1533,7 +1533,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; @@ -1551,7 +1551,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) return result; } -int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) +static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) { union kvmppc_one_reg reg; int vmx_offset = 0; diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index c5ffcadab7302..90fd03b9d3c25 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -132,7 +132,6 @@ void dlpar_free_cc_nodes(struct device_node *dn) #define NEXT_PROPERTY 3 #define PREV_PARENT 4 #define MORE_MEMORY 5 -#define CALL_AGAIN -2 #define ERR_CFG_USE -9003 struct device_node *dlpar_configure_connector(__be32 drc_index, @@ -173,6 +172,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, spin_unlock(&rtas_data_buf_lock); + if (rtas_busy_delay(rc)) + continue; + switch (rc) { case COMPLETE: break; @@ -221,9 +223,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, last_dn = last_dn->parent; break; - case CALL_AGAIN: - break; - case MORE_MEMORY: case ERR_CFG_USE: default: diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e6f2a38d2e61e..1f1a7583fa905 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -554,7 +554,7 @@ config COMPAT bool depends on SPARC64 default y - select COMPAT_BINFMT_ELF + select COMPAT_BINFMT_ELF if BINFMT_ELF select HAVE_UID16 select ARCH_WANT_OLD_COMPAT_IPC select COMPAT_OLD_SIGACTION diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index b89d42b29e344..f427f34b8b79b 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -142,6 +142,7 @@ __bzero: ZERO_LAST_BLOCKS(%o0, 0x48, %g2) ZERO_LAST_BLOCKS(%o0, 0x08, %g2) 13: + EXT(12b, 13b, 21f) be 8f andcc %o1, 4, %g0 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 39f3cad58b6cd..a19706bee6874 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -538,29 +538,20 @@ static void emergency_vmx_disable_all(void) local_irq_disable(); /* - * We need to disable VMX on all CPUs before rebooting, otherwise - * we risk hanging up the machine, because the CPU ignore INIT - * signals when VMX is enabled. + * Disable VMX on all CPUs before rebooting, otherwise we risk hanging + * the machine, because the CPU blocks INIT when it's in VMX root. 
* - * We can't take any locks and we may be on an inconsistent - * state, so we use NMIs as IPIs to tell the other CPUs to disable - * VMX and halt. + * We can't take any locks and we may be on an inconsistent state, so + * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. * - * For safety, we will avoid running the nmi_shootdown_cpus() - * stuff unnecessarily, but we don't have a way to check - * if other CPUs have VMX enabled. So we will call it only if the - * CPU we are running on has VMX enabled. - * - * We will miss cases where VMX is not enabled on all CPUs. This - * shouldn't do much harm because KVM always enable VMX on all - * CPUs anyway. But we can miss it on the small window where KVM - * is still enabling VMX. + * Do the NMI shootdown even if VMX if off on _this_ CPU, as that + * doesn't prevent a different CPU from being in VMX root operation. */ - if (cpu_has_vmx() && cpu_vmx_enabled()) { - /* Disable VMX on this CPU. */ - cpu_vmxoff(); + if (cpu_has_vmx()) { + /* Safely force _this_ CPU out of VMX root operation. */ + __cpu_emergency_vmxoff(); - /* Halt and disable VMX on the other CPUs */ + /* Halt and exit VMX root operation on the other CPUs. */ nmi_shootdown_cpus(vmxoff_nmi); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index a3c9ea29d7cc3..324e26d0607ba 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -1131,12 +1131,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { + kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) @@ -1145,7 +1147,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), print_entry->start, print_entry->end); - kfree(print_entry); return 0; } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 5198ed1b36690..d984592b0995e 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2478,6 +2478,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, } bfqd->in_service_queue = bfqq; + bfqd->in_serv_last_pos = 0; } /* diff --git a/block/blk-core.c b/block/blk-core.c index ce3710404544c..80f3e729fdd4d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1059,6 +1059,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, mutex_init(&q->blk_trace_mutex); #endif mutex_init(&q->sysfs_lock); + mutex_init(&q->sysfs_dir_lock); spin_lock_init(&q->__queue_lock); if (!q->mq_ops) diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 5006a0d009901..5e4b7ed1e897a 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -264,7 +264,7 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - lockdep_assert_held(&q->sysfs_lock); + lockdep_assert_held(&q->sysfs_dir_lock); queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); @@ -312,7 +312,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q) int ret, i; WARN_ON_ONCE(!q->kobj.parent); - lockdep_assert_held(&q->sysfs_lock); + lockdep_assert_held(&q->sysfs_dir_lock); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) @@ -358,7 +358,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - mutex_lock(&q->sysfs_lock); + mutex_lock(&q->sysfs_dir_lock); if 
(!q->mq_sysfs_init_done) goto unlock; @@ -366,7 +366,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q) blk_mq_unregister_hctx(hctx); unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); } int blk_mq_sysfs_register(struct request_queue *q) @@ -374,7 +374,7 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; - mutex_lock(&q->sysfs_lock); + mutex_lock(&q->sysfs_dir_lock); if (!q->mq_sysfs_init_done) goto unlock; @@ -385,7 +385,7 @@ int blk_mq_sysfs_register(struct request_queue *q) } unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); return ret; } diff --git a/block/blk-settings.c b/block/blk-settings.c index 01093b8f3e624..2c01b6f651102 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -513,6 +513,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt) } EXPORT_SYMBOL(blk_queue_io_opt); +static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs) +{ + sectors = round_down(sectors, lbs >> SECTOR_SHIFT); + if (sectors < PAGE_SIZE >> SECTOR_SHIFT) + sectors = PAGE_SIZE >> SECTOR_SHIFT; + return sectors; +} + /** * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers * @t: the stacking driver (top) @@ -639,6 +647,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } + t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); + t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); + t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); + /* Discard alignment and granularity */ if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8286640d4d663..07494deb1a26f 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -892,14 +892,14 @@ int blk_register_queue(struct gendisk *disk) int ret; struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; + bool has_elevator = false; if (WARN_ON(!q)) return -ENXIO; - WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags), + WARN_ONCE(blk_queue_registered(q), "%s is registering an already registered queue\n", kobject_name(&dev->kobj)); - queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q); /* * SCSI probing may synchronously create and destroy a lot of @@ -920,8 +920,7 @@ int blk_register_queue(struct gendisk *disk) if (ret) return ret; - /* Prevent changes through sysfs until registration is completed. */ - mutex_lock(&q->sysfs_lock); + mutex_lock(&q->sysfs_dir_lock); ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); if (ret < 0) { @@ -934,26 +933,37 @@ int blk_register_queue(struct gendisk *disk) blk_mq_debugfs_register(q); } - kobject_uevent(&q->kobj, KOBJ_ADD); - - wbt_enable_default(q); - - blk_throtl_register_queue(q); - + mutex_lock(&q->sysfs_lock); + /* + * The flag of QUEUE_FLAG_REGISTERED isn't set yet, so elevator + * switch won't happen at all. 
+ */ if (q->request_fn || (q->mq_ops && q->elevator)) { - ret = elv_register_queue(q); + ret = elv_register_queue(q, false); if (ret) { mutex_unlock(&q->sysfs_lock); - kobject_uevent(&q->kobj, KOBJ_REMOVE); + mutex_unlock(&q->sysfs_dir_lock); kobject_del(&q->kobj); blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } + has_elevator = true; } + + blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); + wbt_enable_default(q); + blk_throtl_register_queue(q); + + /* Now everything is ready and send out KOBJ_ADD uevent */ + kobject_uevent(&q->kobj, KOBJ_ADD); + if (has_elevator) + kobject_uevent(&q->elevator->kobj, KOBJ_ADD); + mutex_unlock(&q->sysfs_lock); + ret = 0; unlock: - mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); return ret; } EXPORT_SYMBOL_GPL(blk_register_queue); @@ -973,7 +983,7 @@ void blk_unregister_queue(struct gendisk *disk) return; /* Return early if disk->queue was never registered. */ - if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) + if (!blk_queue_registered(q)) return; /* @@ -982,25 +992,30 @@ void blk_unregister_queue(struct gendisk *disk) * concurrent elv_iosched_store() calls. */ mutex_lock(&q->sysfs_lock); - blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); + mutex_unlock(&q->sysfs_lock); + mutex_lock(&q->sysfs_dir_lock); /* * Remove the sysfs attributes before unregistering the queue data * structures that can be modified through sysfs. */ if (q->mq_ops) blk_mq_unregister_dev(disk_to_dev(disk), q); - mutex_unlock(&q->sysfs_lock); kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); mutex_lock(&q->sysfs_lock); - if (q->request_fn || (q->mq_ops && q->elevator)) + /* + * q->kobj has been removed, so it is safe to check if elevator + * exists without holding q->sysfs_lock. + */ + if (q->request_fn || q->elevator) elv_unregister_queue(q); mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); kobject_put(&disk_to_dev(disk)->kobj); } diff --git a/block/blk-wbt.c b/block/blk-wbt.c index f1de8ba483a97..50f2abfa1a60e 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -708,7 +708,7 @@ void wbt_enable_default(struct request_queue *q) return; /* Queue not registered? Maybe shutting down... */ - if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) + if (!blk_queue_registered(q)) return; if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) || diff --git a/block/blk.h b/block/blk.h index 1a5b67b57e6b2..ae87e2a5f2bd1 100644 --- a/block/blk.h +++ b/block/blk.h @@ -244,7 +244,7 @@ int elevator_init_mq(struct request_queue *q); int elevator_switch_mq(struct request_queue *q, struct elevator_type *new_e); void elevator_exit(struct request_queue *, struct elevator_queue *); -int elv_register_queue(struct request_queue *q); +int elv_register_queue(struct request_queue *q, bool uevent); void elv_unregister_queue(struct request_queue *q); struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); diff --git a/block/elevator.c b/block/elevator.c index ddbcd36616a8d..5b51bc5fad9fe 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -833,13 +833,16 @@ static struct kobj_type elv_ktype = { .release = elevator_release, }; -int elv_register_queue(struct request_queue *q) +/* + * elv_register_queue is called from either blk_register_queue or + * elevator_switch, elevator switch is prevented from being happen + * in the two paths, so it is safe to not hold q->sysfs_lock. 
+ */ +int elv_register_queue(struct request_queue *q, bool uevent) { struct elevator_queue *e = q->elevator; int error; - lockdep_assert_held(&q->sysfs_lock); - error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); if (!error) { struct elv_fs_entry *attr = e->type->elevator_attrs; @@ -850,7 +853,9 @@ int elv_register_queue(struct request_queue *q) attr++; } } - kobject_uevent(&e->kobj, KOBJ_ADD); + if (uevent) + kobject_uevent(&e->kobj, KOBJ_ADD); + e->registered = 1; if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn) e->type->ops.sq.elevator_registered_fn(q); @@ -858,15 +863,19 @@ int elv_register_queue(struct request_queue *q) return error; } +/* + * elv_unregister_queue is called from either blk_unregister_queue or + * elevator_switch, elevator switch is prevented from being happen + * in the two paths, so it is safe to not hold q->sysfs_lock. + */ void elv_unregister_queue(struct request_queue *q) { - lockdep_assert_held(&q->sysfs_lock); - if (q) { struct elevator_queue *e = q->elevator; kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); + e->registered = 0; /* Re-enable throttling in case elevator disabled it */ wbt_enable_default(q); @@ -942,6 +951,7 @@ int elevator_switch_mq(struct request_queue *q, if (q->elevator) { if (q->elevator->registered) elv_unregister_queue(q); + ioc_clear_queue(q); elevator_exit(q, q->elevator); } @@ -951,7 +961,7 @@ int elevator_switch_mq(struct request_queue *q, goto out; if (new_e) { - ret = elv_register_queue(q); + ret = elv_register_queue(q, true); if (ret) { elevator_exit(q, q->elevator); goto out; @@ -1047,7 +1057,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (err) goto fail_init; - err = elv_register_queue(q); + err = elv_register_queue(q, true); if (err) goto fail_register; @@ -1067,7 +1077,7 @@ fail_init: /* switch failed, restore and re-register old elevator */ if (old) { q->elevator = old; - elv_register_queue(q); + elv_register_queue(q, true); blk_queue_bypass_end(q); } @@ -1083,7 +1093,7 @@ static int __elevator_change(struct request_queue *q, const char *name) struct elevator_type *e; /* Make sure queue is not in the middle of being removed */ - if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) + if (!blk_queue_registered(q)) return -ENOENT; /* diff --git a/certs/blacklist.c b/certs/blacklist.c index 3a507b9e2568a..e9f3f81c51f96 100644 --- a/certs/blacklist.c +++ b/certs/blacklist.c @@ -157,7 +157,7 @@ static int __init blacklist_init(void) KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH, KEY_ALLOC_NOT_IN_QUOTA | - KEY_FLAG_KEEP, + KEY_ALLOC_SET_KEEP, NULL, NULL); if (IS_ERR(blacklist_keyring)) panic("Can't allocate system blacklist keyring\n"); diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index d3af8e8b0b5e4..25711de445848 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c @@ -71,6 +71,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH) return -EINVAL; + if (unlikely(len < secret.len)) + return -EINVAL; + ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); if (secret.len != crypto_ecdh_key_len(params)) diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index b588503890941..c0325556a897b 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -269,7 +269,12 @@ static int __init acpi_configfs_init(void) acpi_table_group = configfs_register_default_group(root, 
"table", &acpi_tables_type); - return PTR_ERR_OR_ZERO(acpi_table_group); + if (IS_ERR(acpi_table_group)) { + configfs_unregister_subsystem(&acpi_configfs); + return PTR_ERR(acpi_table_group); + } + + return 0; } module_init(acpi_configfs_init); diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 288673cff85ea..27db1a968241b 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -720,9 +720,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, const union acpi_object *obj; int ret; - if (!val) - return -EINVAL; - if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj); if (ret) @@ -732,28 +729,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data, case DEV_PROP_U8: if (obj->integer.value > U8_MAX) return -EOVERFLOW; - *(u8 *)val = obj->integer.value; + + if (val) + *(u8 *)val = obj->integer.value; + break; case DEV_PROP_U16: if (obj->integer.value > U16_MAX) return -EOVERFLOW; - *(u16 *)val = obj->integer.value; + + if (val) + *(u16 *)val = obj->integer.value; + break; case DEV_PROP_U32: if (obj->integer.value > U32_MAX) return -EOVERFLOW; - *(u32 *)val = obj->integer.value; + + if (val) + *(u32 *)val = obj->integer.value; + break; default: - *(u64 *)val = obj->integer.value; + if (val) + *(u64 *)val = obj->integer.value; + break; } + + if (!val) + return 1; } else if (proptype == DEV_PROP_STRING) { ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj); if (ret) return ret; - *(char **)val = obj->string.pointer; + if (val) + *(char **)val = obj->string.pointer; return 1; } else { @@ -767,7 +779,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, { int ret; - if (!adev) + if (!adev || !val) return -EINVAL; ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val); @@ -861,10 +873,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, const union acpi_object *items; int ret; - if (val && nval == 1) { + if (nval == 1 || !val) { ret = acpi_data_prop_read_single(data, propname, proptype, val); - if (ret >= 0) + /* + * The overflow error means that the property is there and it is + * single-value, but its type does not match, so return. + */ + if (ret >= 0 || ret == -EOVERFLOW) return ret; + + /* + * Reading this property as a single-value one failed, but its + * value may still be represented as one-element array, so + * continue. 
+ */ } ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 41b706403ef72..2380ebd9b7fda 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -284,10 +284,11 @@ static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - int ret; + int ret = 0; pm_runtime_get_sync(dev); - ret = drv->remove(pcdev); + if (drv->remove) + ret = drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ @@ -304,7 +305,9 @@ static int amba_remove(struct device *dev) static void amba_shutdown(struct device *dev) { struct amba_driver *drv = to_amba_driver(dev->driver); - drv->shutdown(to_amba_device(dev)); + + if (drv->shutdown) + drv->shutdown(to_amba_device(dev)); } /** @@ -317,12 +320,13 @@ static void amba_shutdown(struct device *dev) */ int amba_driver_register(struct amba_driver *drv) { - drv->drv.bus = &amba_bustype; + if (!drv->probe) + return -EINVAL; -#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn - SETFN(probe); - SETFN(remove); - SETFN(shutdown); + drv->drv.bus = &amba_bustype; + drv->drv.probe = amba_probe; + drv->drv.remove = amba_remove; + drv->drv.shutdown = amba_shutdown; return driver_register(&drv->drv); } diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 0192cab1b862f..e58b336d1324c 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -370,6 +370,10 @@ static int brcm_ahci_resume(struct device *dev) if (ret) return ret; + ret = ahci_platform_enable_regulators(hpriv); + if (ret) + goto out_disable_clks; + brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); @@ -399,6 +403,8 @@ out_disable_platform_phys: ahci_platform_disable_phys(hpriv); out_disable_phys: brcm_sata_phys_disable(priv); + ahci_platform_disable_regulators(hpriv); +out_disable_clks: ahci_platform_disable_clks(hpriv); return ret; } @@ -471,6 +477,10 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (ret) goto out_reset; + ret = ahci_platform_enable_regulators(hpriv); + if (ret) + goto out_disable_clks; + /* Must be first so as to configure endianness including that * of the standard AHCI register space. 
*/ @@ -480,7 +490,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); if (!priv->port_mask) { ret = -ENODEV; - goto out_disable_clks; + goto out_disable_regulators; } /* Must be done before ahci_platform_enable_phys() */ @@ -505,6 +515,8 @@ out_disable_platform_phys: ahci_platform_disable_phys(hpriv); out_disable_phys: brcm_sata_phys_disable(priv); +out_disable_regulators: + ahci_platform_disable_regulators(hpriv); out_disable_clks: ahci_platform_disable_clks(hpriv); out_reset: diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 21393ec3b9a4a..194370ae37dd0 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv) { struct ht16k33_fbdev *fbdev = &priv->fbdev; - schedule_delayed_work(&fbdev->work, - msecs_to_jiffies(HZ / fbdev->refresh_rate)); + schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate); } /* diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index 50a66382d87d0..e75168b941d0c 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val) struct device *dev = context; struct sdw_slave *slave = dev_to_sdw_dev(dev); - return sdw_write(slave, reg, val); + return sdw_write_no_pm(slave, reg, val); } static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) @@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) struct sdw_slave *slave = dev_to_sdw_dev(dev); int read; - read = sdw_read(slave, reg); + read = sdw_read_no_pm(slave, reg); if (read < 0) return read; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index bf222c4b2f82f..56d4c7df91857 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4074,21 +4074,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; - if (!(mode & FMODE_NDELAY)) { - if (mode & (FMODE_READ|FMODE_WRITE)) { - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); - check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) - goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) - goto out; - } - res = -EROFS; - if ((mode & FMODE_WRITE) && - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + if (mode & (FMODE_READ|FMODE_WRITE)) { + UDRS->last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + check_disk_change(bdev); + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + goto out; + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) goto out; } + + res = -EROFS; + + if ((mode & FMODE_WRITE) && + !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + goto out; + mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 7df3eed1ef5e9..874172aa8e417 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -166,8 +166,10 @@ static int btqcomsmd_probe(struct platform_device *pdev) btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", btqcomsmd_cmd_callback, btq); - if (IS_ERR(btq->cmd_channel)) - return PTR_ERR(btq->cmd_channel); + if (IS_ERR(btq->cmd_channel)) { + ret = PTR_ERR(btq->cmd_channel); + goto destroy_acl_channel; + } /* The local-bd-address property is usually injected by the * 
bootloader which has access to the allocated BD address. @@ -179,8 +181,10 @@ static int btqcomsmd_probe(struct platform_device *pdev) } hdev = hci_alloc_dev(); - if (!hdev) - return -ENOMEM; + if (!hdev) { + ret = -ENOMEM; + goto destroy_cmd_channel; + } hci_set_drvdata(hdev, btq); btq->hdev = hdev; @@ -194,14 +198,21 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); - if (ret < 0) { - hci_free_dev(hdev); - return ret; - } + if (ret < 0) + goto hci_free_dev; platform_set_drvdata(pdev, btq); return 0; + +hci_free_dev: + hci_free_dev(hdev); +destroy_cmd_channel: + rpmsg_destroy_ept(btq->cmd_channel); +destroy_acl_channel: + rpmsg_destroy_ept(btq->acl_channel); + + return ret; } static int btqcomsmd_remove(struct platform_device *pdev) diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index f615684028af7..ba01f24db6db0 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -72,7 +72,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data, */ if (retval > 0) usleep_range(period_us, - period_us + min(1, period_us / 100)); + period_us + max(1, period_us / 100)); *(u32 *)data = readl(priv->io_base); retval += sizeof(u32); diff --git a/drivers/char/random.c b/drivers/char/random.c index 98925d49c96be..297a716f5a560 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2071,7 +2071,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(&primary_crng, NULL); + crng_reseed(&primary_crng, &input_pool); crng_global_init_time = jiffies - 1; return 0; default: diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 280d60cba1f8c..c9a5f34097df5 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -129,7 +129,8 @@ static bool check_locality(struct tpm_chip *chip, int l) if (rc < 0) return false; - if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == + if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID + | TPM_ACCESS_REQUEST_USE)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) { priv->locality = l; return true; @@ -138,58 +139,13 @@ static bool check_locality(struct tpm_chip *chip, int l) return false; } -static bool locality_inactive(struct tpm_chip *chip, int l) -{ - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - int rc; - u8 access; - - rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); - if (rc < 0) - return false; - - if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) - == TPM_ACCESS_VALID) - return true; - - return false; -} - static int release_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - unsigned long stop, timeout; - long rc; tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); - stop = jiffies + chip->timeout_a; - - if (chip->flags & TPM_CHIP_FLAG_IRQ) { -again: - timeout = stop - jiffies; - if ((long)timeout <= 0) - return -1; - - rc = wait_event_interruptible_timeout(priv->int_queue, - (locality_inactive(chip, l)), - timeout); - - if (rc > 0) - return 0; - - if (rc == -ERESTARTSYS && freezing(current)) { - clear_thread_flag(TIF_SIGPENDING); - goto again; - } - } else { - do { - if (locality_inactive(chip, l)) - return 0; - tpm_msleep(TPM_TIMEOUT); - } while (time_before(jiffies, stop)); - } - return -1; + return 0; } static int 
request_locality(struct tpm_chip *chip, int l) diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 3e04617ac47f6..6fdad22a583d9 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -197,7 +197,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (parent_rate == 0 || rate == 0) return -EINVAL; - old_rate = rate; + old_rate = clk_hw_get_rate(hw); pllt = meson_clk_get_pll_settings(rate, pll); if (!pllt) diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 772a08101ddf2..0ccd6b79cb5e7 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -124,7 +124,7 @@ static struct pll_vco fabia_vco[] = { static struct clk_alpha_pll gpll0 = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -134,58 +134,58 @@ static struct clk_alpha_pll gpll0 = { .name = "gpll0", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll0_out_even = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_even", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_main = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_main", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_odd = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_odd", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll0_out_test = { .offset = 0x0, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll0_out_test", .parent_names = (const char *[]){ "gpll0" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll1 = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -195,58 +195,58 @@ static struct clk_alpha_pll gpll1 = { .name = "gpll1", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll1_out_even = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_even", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - 
.ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_main = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_main", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_odd = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_odd", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll1_out_test = { .offset = 0x1000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll1_out_test", .parent_names = (const char *[]){ "gpll1" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll2 = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -256,58 +256,58 @@ static struct clk_alpha_pll gpll2 = { .name = "gpll2", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll2_out_even = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_even", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_main = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_main", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_odd = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_odd", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll2_out_test = { .offset = 0x2000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll2_out_test", .parent_names = (const char *[]){ "gpll2" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll3 = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, 
.num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -317,58 +317,58 @@ static struct clk_alpha_pll gpll3 = { .name = "gpll3", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll3_out_even = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_even", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll3_out_main = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_main", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll3_out_odd = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_odd", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll3_out_test = { .offset = 0x3000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll3_out_test", .parent_names = (const char *[]){ "gpll3" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll gpll4 = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .vco_table = fabia_vco, .num_vco = ARRAY_SIZE(fabia_vco), .clkr = { @@ -378,52 +378,52 @@ static struct clk_alpha_pll gpll4 = { .name = "gpll4", .parent_names = (const char *[]){ "xo" }, .num_parents = 1, - .ops = &clk_alpha_pll_ops, + .ops = &clk_alpha_pll_fixed_fabia_ops, } }, }; static struct clk_alpha_pll_postdiv gpll4_out_even = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_even", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll4_out_main = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_main", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct clk_alpha_pll_postdiv gpll4_out_odd = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_odd", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; static struct 
clk_alpha_pll_postdiv gpll4_out_test = { .offset = 0x77000, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], .clkr.hw.init = &(struct clk_init_data){ .name = "gpll4_out_test", .parent_names = (const char *[]){ "gpll4" }, .num_parents = 1, - .ops = &clk_alpha_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_fabia_ops, }, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index d425b47cef179..1197ace591247 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -223,7 +223,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k", static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", psi_ahb1_ahb2_parents, 0x510, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -232,19 +232,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", "psi-ahb1-ahb2", "pll-periph0" }; static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, - 0, 5, /* M */ + 0, 2, /* M */ 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -662,7 +662,7 @@ static struct ccu_mux hdmi_cec_clk = { .common = { .reg = 0xb10, - .features = CCU_FEATURE_VARIABLE_PREDIV, + .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec", hdmi_cec_parents, &ccu_mux_ops, diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index f6ddae30933f7..dae8c0c2e606f 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state) /* Clear pending interrupt */ timrot_irq_acknowledge(); - -#ifdef DEBUG - pr_info("%s: changing mode to %s\n", __func__, state) -#endif /* DEBUG */ + pr_debug("%s: changing mode to %s\n", __func__, state); } static int mxs_shutdown(struct clock_event_device *evt) diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 77b0e5d0fb134..a3c82f530d608 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -566,6 +566,16 @@ unmap_base: return ret; } +static void brcm_avs_prepare_uninit(struct platform_device *pdev) +{ + struct private_data *priv; + + priv = platform_get_drvdata(pdev); + + iounmap(priv->avs_intr_base); + iounmap(priv->base); +} + static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *freq_table; @@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev) brcm_avs_driver.driver_data = pdev; - return cpufreq_register_driver(&brcm_avs_driver); + ret = cpufreq_register_driver(&brcm_avs_driver); + if (ret) + brcm_avs_prepare_uninit(pdev); + + return ret; } static int brcm_avs_cpufreq_remove(struct platform_device *pdev) { - struct private_data *priv; int ret; ret = cpufreq_unregister_driver(&brcm_avs_driver); - if (ret) - return ret; + WARN_ON(ret); - priv = platform_get_drvdata(pdev); - iounmap(priv->base); - iounmap(priv->avs_intr_base); + brcm_avs_prepare_uninit(pdev); return 0; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 864a7e8ebdfc3..c2d222f97db79 100644 --- a/drivers/cpufreq/intel_pstate.c +++ 
b/drivers/cpufreq/intel_pstate.c @@ -1420,11 +1420,9 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { cpu->pstate.min_pstate = pstate_funcs.get_min(); - cpu->pstate.max_pstate = pstate_funcs.get_max(); cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); - cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; if (hwp_active && !hwp_mode_bdw) { unsigned int phy_max, current_max; @@ -1432,9 +1430,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max); cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; cpu->pstate.turbo_pstate = phy_max; + cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached)); } else { cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + cpu->pstate.max_pstate = pstate_funcs.get_max(); } + cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c2736274ad634..c63992fbbc988 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -52,7 +52,7 @@ /* ================= Device Structure ================== */ -struct device_private iproc_priv; +struct bcm_device_private iproc_priv; /* ==================== Parameters ===================== */ diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 763c425c41cae..36452d26c7c5c 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -431,7 +431,7 @@ struct spu_hw { u32 num_chan; }; -struct device_private { +struct bcm_device_private { struct platform_device *pdev; struct spu_hw spu; @@ -478,6 +478,6 @@ struct device_private { struct mbox_chan **mbox; }; -extern struct device_private iproc_priv; +extern struct bcm_device_private iproc_priv; #endif diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index a912c6ad3e850..f96d1dade010a 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -400,7 +400,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode) static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { - struct device_private *ipriv; + struct bcm_device_private *ipriv; char *buf; ssize_t ret, out_offset, out_count; int i; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h index 4282d8a4eae48..ef72610724348 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h @@ -53,9 +53,6 @@ #define MIN_RCV_WND (24 * 1024U) #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000)) -/* ulp_mem_io + ulptx_idata + payload + padding */ -#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8) - /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ #define TX_HEADER_LEN \ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 22e4918579254..aa3d2f439965e 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -34,6 +34,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) unsigned int ileft =
areq->cryptlen; unsigned int oleft = areq->cryptlen; unsigned int todo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and out */ unsigned long flags; @@ -53,50 +55,62 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) spin_lock_irqsave(&ss->slock, flags); - for (i = 0; i < op->keylen; i += 4) - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); - writel(v, ss->base + SS_IV0 + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); - sg_miter_start(&mi, areq->src, sg_nents(areq->src), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), - SG_MITER_TO_SG | SG_MITER_ATOMIC); - sg_miter_next(&mi); - sg_miter_next(&mo); - if (!mi.addr || !mo.addr) { - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); - err = -EINVAL; - goto release_ss; - } ileft = areq->cryptlen / 4; oleft = areq->cryptlen / 4; oi = 0; oo = 0; do { - todo = min(rx_cnt, ileft); - todo = min_t(size_t, todo, (mi.length - oi) / 4); - if (todo) { - ileft -= todo; - writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); - oi += todo * 4; - } - if (oi == mi.length) { - sg_miter_next(&mi); - oi = 0; + if (ileft) { + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } + todo = min(rx_cnt, ileft); + todo = min_t(size_t, todo, (mi.length - oi) / 4); + if (todo) { + ileft -= todo; + writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); + oi += todo * 4; + } + if (oi == mi.length) { + pi += mi.length; + oi = 0; + } + sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } todo = min(tx_cnt, oleft); todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { @@ -105,9 +119,10 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) oo += todo * 4; } if (oo == mo.length) { - sg_miter_next(&mo); oo = 0; + po += mo.length; } + sg_miter_stop(&mo); } while (oleft); if (areq->iv) { @@ -118,8 +133,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq) } release_ss: - sg_miter_stop(&mi); - sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); return err; @@ -148,6 +161,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) unsigned int oleft = areq->cryptlen; unsigned int todo; struct sg_mapping_iter mi, mo; + unsigned long pi = 0, po = 0; /* progress for in and out */ + bool miter_err; unsigned int oi, oo; /* offset for in and out */ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ @@ -174,12 +189,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) * we can use the SS optimized function */ while (in_sg && no_chunk 
== 1) { - if (in_sg->length % 4) + if ((in_sg->length | in_sg->offset) & 3u) no_chunk = 0; in_sg = sg_next(in_sg); } while (out_sg && no_chunk == 1) { - if (out_sg->length % 4) + if ((out_sg->length | out_sg->offset) & 3u) no_chunk = 0; out_sg = sg_next(out_sg); } @@ -189,28 +204,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) spin_lock_irqsave(&ss->slock, flags); - for (i = 0; i < op->keylen; i += 4) - writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); + for (i = 0; i < op->keylen / 4; i++) + writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); - writel(v, ss->base + SS_IV0 + i * 4); + writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); - sg_miter_start(&mi, areq->src, sg_nents(areq->src), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); - sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), - SG_MITER_TO_SG | SG_MITER_ATOMIC); - sg_miter_next(&mi); - sg_miter_next(&mo); - if (!mi.addr || !mo.addr) { - dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); - err = -EINVAL; - goto release_ss; - } ileft = areq->cryptlen; oleft = areq->cryptlen; oi = 0; @@ -218,6 +222,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) while (oleft) { if (ileft) { + sg_miter_start(&mi, areq->src, sg_nents(areq->src), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + if (pi) + sg_miter_skip(&mi, pi); + miter_err = sg_miter_next(&mi); + if (!miter_err || !mi.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } /* * todo is the number of consecutive 4byte word that we * can read from current SG @@ -250,31 +264,38 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) } } if (oi == mi.length) { - sg_miter_next(&mi); + pi += mi.length; oi = 0; } + sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); - dev_dbg(ss->dev, - "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n", - mode, - oi, mi.length, ileft, areq->cryptlen, rx_cnt, - oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); if (!tx_cnt) continue; + sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + if (po) + sg_miter_skip(&mo, po); + miter_err = sg_miter_next(&mo); + if (!miter_err || !mo.addr) { + dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); + err = -EINVAL; + goto release_ss; + } /* todo in 4bytes word */ todo = min(tx_cnt, oleft / 4); todo = min_t(size_t, todo, (mo.length - oo) / 4); + if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; oo += todo * 4; if (oo == mo.length) { - sg_miter_next(&mo); + po += mo.length; oo = 0; } } else { @@ -299,12 +320,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) obo += todo; oo += todo; if (oo == mo.length) { + po += mo.length; sg_miter_next(&mo); oo = 0; } } while (obo < obl); /* bufo must be fully used here */ } + sg_miter_stop(&mo); } if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { @@ -314,8 +337,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) } release_ss: - sg_miter_stop(&mi); - sg_miter_stop(&mo); writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c70a7c4f5b739..7a55baa861e58 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1066,11 +1066,12 @@ static void 
ipsec_esp_decrypt_hwauth_done(struct device *dev, */ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, unsigned int offset, int datalen, int elen, - struct talitos_ptr *link_tbl_ptr) + struct talitos_ptr *link_tbl_ptr, int align) { int n_sg = elen ? sg_count + 1 : sg_count; int count = 0; int cryptlen = datalen + elen; + int padding = ALIGN(cryptlen, align) - cryptlen; while (cryptlen && sg && n_sg--) { unsigned int len = sg_dma_len(sg); @@ -1094,7 +1095,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, offset += datalen; } to_talitos_ptr(link_tbl_ptr + count, - sg_dma_address(sg) + offset, len, 0); + sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0); to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); count++; cryptlen -= len; @@ -1117,10 +1118,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, unsigned int len, struct talitos_edesc *edesc, struct talitos_ptr *ptr, int sg_count, unsigned int offset, int tbl_off, int elen, - bool force) + bool force, int align) { struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + int aligned_len = ALIGN(len, align); if (!src) { to_talitos_ptr(ptr, 0, 0, is_sec1); @@ -1128,22 +1130,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, } to_talitos_ptr_ext_set(ptr, elen, is_sec1); if (sg_count == 1 && !force) { - to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); + to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1); return sg_count; } if (is_sec1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); + to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1); return sg_count; } sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen, - &edesc->link_tbl[tbl_off]); + &edesc->link_tbl[tbl_off], align); if (sg_count == 1 && !force) { /* Only one segment now, so no link tbl needed*/ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); return sg_count; } to_talitos_ptr(ptr, edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr), len, is_sec1); + tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1); to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); return sg_count; @@ -1155,7 +1157,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, unsigned int offset, int tbl_off) { return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, - tbl_off, 0, false); + tbl_off, 0, false, 1); } /* @@ -1224,7 +1226,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], sg_count, areq->assoclen, tbl_off, elen, - false); + false, 1); if (ret > 1) { tbl_off += ret; @@ -1244,7 +1246,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, elen = 0; ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], sg_count, areq->assoclen, tbl_off, elen, - is_ipsec_esp && !encrypt); + is_ipsec_esp && !encrypt, 1); tbl_off += ret; /* ICV data */ @@ -1554,6 +1556,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc, bool sync_needed = false; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU && + (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR; /* first DWORD empty */ @@ -1574,8 +1578,8 @@ static int common_nonsnoop(struct talitos_edesc 
*edesc, /* * cipher in */ - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, - &desc->ptr[3], sg_count, 0, 0); + sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3], + sg_count, 0, 0, 0, false, is_ctr ? 16 : 1); if (sg_count > 1) sync_needed = true; diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index cb0137e131cc8..16f96c57de341 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -377,6 +377,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* primary execution unit mode (MODE0) and derivatives */ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) +#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 1117b5123a6fc..e7ca3175dbc30 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1218,6 +1218,7 @@ static int fsldma_of_probe(struct platform_device *op) { struct fsldma_device *fdev; struct device_node *child; + unsigned int i; int err; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); @@ -1296,6 +1297,10 @@ static int fsldma_of_probe(struct platform_device *op) return 0; out_free_fdev: + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + if (fdev->chan[i]) + fsl_dma_chan_remove(fdev->chan[i]); + } irq_dispose_mapping(fdev->irq); iounmap(fdev->regs); out_free: @@ -1318,6 +1323,7 @@ static int fsldma_of_remove(struct platform_device *op) if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); } + irq_dispose_mapping(fdev->irq); iounmap(fdev->regs); kfree(fdev); diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c index ad45cd344bbae..78836526d2e07 100644 --- a/drivers/dma/hsu/pci.c +++ b/drivers/dma/hsu/pci.c @@ -29,22 +29,12 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) { struct hsu_dma_chip *chip = dev; - struct pci_dev *pdev = to_pci_dev(chip->dev); u32 dmaisr; u32 status; unsigned short i; int ret = 0; int err; - /* - * On Intel Tangier B0 and Anniedale the interrupt line, disregarding - * to have different numbers, is shared between HSU DMA and UART IPs. - * Thus on such SoCs we are expecting that IRQ handler is called in - * UART driver only. - */ - if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) - return IRQ_HANDLED; - dmaisr = readl(chip->regs + HSU_PCI_DMAISR); for (i = 0; i < chip->hsu->nr_channels; i++) { if (dmaisr & 0x1) { @@ -108,6 +98,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_register_irq; + /* + * On Intel Tangier B0 and Anniedale the interrupt line, disregarding + * to have different numbers, is shared between HSU DMA and UART IPs. + * Thus on such SoCs we are expecting that IRQ handler is called in + * UART driver only. Instead of handling the spurious interrupt + * from HSU DMA here and waste CPU time and delay HSU UART interrupt + * handling, disable the interrupt entirely. 
+ */ + if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) + disable_irq_nosync(chip->irq); + pci_set_drvdata(pdev, chip); return 0; diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 7ff04bf04b31b..da5050ab7f387 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -932,6 +932,7 @@ static int owl_dma_remove(struct platform_device *pdev) owl_dma_free(od); clk_disable_unprepare(od->clk); + dma_pool_destroy(od->lli_pool); return 0; } diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 68a35b65925ac..d3fcc0d15eb85 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -357,7 +357,7 @@ static int pcf857x_probe(struct i2c_client *client, * reset state. Otherwise it flags pins to be driven low. */ gpio->out = ~n_latch; - gpio->status = gpio->out; + gpio->status = gpio->read(gpio->client); status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio); if (status < 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index e63a253eb4255..cbb969a870f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -617,7 +617,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) { int ret; - long level; + unsigned long level; char *sub_str = NULL; char *tmp; char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; @@ -633,8 +633,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) while (tmp[0]) { sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { - ret = kstrtol(sub_str, 0, &level); - if (ret) + ret = kstrtoul(sub_str, 0, &level); + if (ret || level > 31) return -EINVAL; *mask |= 1 << level; } else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 7206a0025b17a..db9907fb99f3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _AMDGPU_TRACE_H_ #include diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index ab63d0d0304cb..6fd57cfb112f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -429,12 +429,12 @@ static void set_clamp( clamp_max = 0x3FC0; break; case COLOR_DEPTH_101010: - /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */ - clamp_max = 0x3FFC; + /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */ + clamp_max = 0x3FF0; break; case COLOR_DEPTH_121212: - /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */ - clamp_max = 0x3FFF; + /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */ + clamp_max = 0x3FFC; break; default: clamp_max = 0x3FC0; diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index e281070611480..fc9a34ed58bd1 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) hdmi_dev = pci_get_drvdata(dev); i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL); - if (i2c_dev == NULL) { - DRM_ERROR("Can't allocate interface\n"); - ret = -ENOMEM; - goto exit; - } + if (!i2c_dev) + return -ENOMEM; i2c_dev->adap = 
&oaktrail_hdmi_i2c_adapter; i2c_dev->status = I2C_STAT_INIT; @@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) oaktrail_hdmi_i2c_adapter.name, hdmi_dev); if (ret) { DRM_ERROR("Failed to request IRQ for I2C controller\n"); - goto err; + goto free_dev; } /* Adapter registration */ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter); - return ret; + if (ret) { + DRM_ERROR("Failed to add I2C adapter\n"); + goto free_irq; + } -err: + return 0; + +free_irq: + free_irq(dev->irq, hdmi_dev); +free_dev: kfree(i2c_dev); -exit: + return ret; } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index ac32ab5aa0027..2fa3c7fc4b6d5 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -316,6 +316,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_err; + ret = -ENOMEM; + dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); if (!dev_priv->mmu) goto out_err; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index 1ca6c69516f57..4c037d855b272 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -147,7 +147,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .disable = dsi_20nm_phy_disable, .init = msm_dsi_phy_init_common, }, - .io_start = { 0xfd998300, 0xfd9a0300 }, + .io_start = { 0xfd998500, 0xfd9a0500 }, .num_dsi_phy = 2, }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 05122167d9d85..9b66eb1d42c2d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -93,7 +93,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. */ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) { struct hid_field *field; @@ -104,7 +104,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - values * sizeof(unsigned)), GFP_KERNEL); + usages * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages, parser->global.report_count); + field = hid_register_field(report, usages); if (!field) return 0; @@ -1128,6 +1128,9 @@ EXPORT_SYMBOL_GPL(hid_open_report); static s32 snto32(__u32 value, unsigned n) { + if (!value || !n) + return 0; + switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index da83884b90d25..274a55702784a 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2557,7 +2557,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev, wacom_wac->hid_data.tipswitch = value; break; case HID_DG_CONTACTMAX: - features->touch_max = value; + if (!features->touch_max) { + features->touch_max = value; + } else { + hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max " + "%d -> %d\n", __func__, features->touch_max, value); + } return; } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 7920b0d7e35a7..ac9617671757c 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -1001,8 +1001,7 @@ static void 
vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) vmbus_device_unregister(channel->device_obj); put_device(dev); } - } - if (channel->primary_channel != NULL) { + } else if (channel->primary_channel != NULL) { /* * Sub-channel is being rescinded. Following is the channel * close sequence when initiated from the driveri (refer to diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 826d320499961..2086a96307bf9 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -318,7 +318,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev, goto cmd_out; } - if ((CMD_RD || CMD_WR) && + if ((cmd == CMD_RD || cmd == CMD_WR) && bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) { rc = -EREMOTEIO; dev_dbg(dev->device, "controller received NOACK intr for %s\n", diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index a18f3f8ad77fe..471a824be86c4 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -366,6 +366,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, mutex_lock(&file->mutex); + if (file->agents_dead) { + mutex_unlock(&file->mutex); + return -EIO; + } + while (list_empty(&file->recv_list)) { mutex_unlock(&file->mutex); @@ -379,6 +384,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, mutex_lock(&file->mutex); } + if (file->agents_dead) { + mutex_unlock(&file->mutex); + return -EIO; + } + packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); list_del(&packet->list); @@ -508,7 +518,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, agent = __get_agent(file, packet->mad.hdr.id); if (!agent) { - ret = -EINVAL; + ret = -EIO; goto err_up; } @@ -637,10 +647,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait) /* we will always be able to post a MAD send */ __poll_t mask = EPOLLOUT | EPOLLWRNORM; + mutex_lock(&file->mutex); poll_wait(filp, &file->recv_wait, wait); if (!list_empty(&file->recv_list)) mask |= EPOLLIN | EPOLLRDNORM; + if (file->agents_dead) + mask = EPOLLERR; + mutex_unlock(&file->mutex); return mask; } @@ -1257,6 +1271,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port) list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); file->agents_dead = 1; + wake_up_interruptible(&file->recv_wait); mutex_unlock(&file->mutex); for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 4c90a007e09db..c89aec834972e 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -572,7 +572,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT); break; case MLX5_CMD_OP_CREATE_TIR: - MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR); + *obj_id = MLX5_GET(create_tir_out, out, tirn); + MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, din, tirn, *obj_id); break; case MLX5_CMD_OP_CREATE_TIS: MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS); diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 7903bd5c639ea..04bfc36cc8d76 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -500,6 +500,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff 
*skb) void rxe_loopback(struct sk_buff *skb) { + if (skb->protocol == htons(ETH_P_IP)) + skb_pull(skb, sizeof(struct iphdr)); + else + skb_pull(skb, sizeof(struct ipv6hdr)); + rxe_rcv(skb); } diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index b8f3e65402d1d..d94e2c5bc317d 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -36,21 +36,26 @@ #include "rxe.h" #include "rxe_loc.h" +/* check that QP matches packet opcode type and is in a valid state */ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) { + unsigned int pkt_type; + if (unlikely(!qp->valid)) goto err1; + pkt_type = pkt->opcode & 0xe0; + switch (qp_type(qp)) { case IB_QPT_RC: - if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) { + if (unlikely(pkt_type != IB_OPCODE_RC)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } break; case IB_QPT_UC: - if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) { + if (unlikely(pkt_type != IB_OPCODE_UC)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } @@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: - if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) { + if (unlikely(pkt_type != IB_OPCODE_UD)) { pr_warn_ratelimited("bad qp type\n"); goto err1; } diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 4c1e427dfabb9..7ef6e1c165e35 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -460,7 +460,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, if (IS_ERR(abspam)) return PTR_ERR(abspam); - for (i = 0; i < joydev->nabs; i++) { + for (i = 0; i < len && i < joydev->nabs; i++) { if (abspam[i] > ABS_MAX) { retval = -EINVAL; goto out; @@ -484,6 +484,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, int i; int retval = 0; + if (len % sizeof(*keypam)) + return -EINVAL; + len = min(len, sizeof(joydev->keypam)); /* Validate the map. 
*/ @@ -491,7 +494,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, if (IS_ERR(keypam)) return PTR_ERR(keypam); - for (i = 0; i < joydev->nkey; i++) { + for (i = 0; i < (len / 2) && i < joydev->nkey; i++) { if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) { retval = -EINVAL; goto out; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ef4e8423843f3..eacb8af8b4fc0 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -319,6 +319,7 @@ static const struct xpad_device { { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 844875df8cad7..0463ab79160b7 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -592,6 +592,10 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ }, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ + }, }, { } }; diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index 7f2942f3cec6e..0f3146bcfcd0f 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -345,8 +345,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) switch (elo->id) { case 0: /* 10-byte protocol */ - if (elo_setup_10(elo)) + if (elo_setup_10(elo)) { + err = -EIO; goto fail3; + } break; diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 05c1054330b71..1a8c7403cfe6d 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -419,6 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, enum raydium_bl_ack state) { int error; + static const u8 cmd[] = { 0xFF, 0x39 }; error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len); if (error) { @@ -427,7 +428,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, return error; } - error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0); + error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd)); if (error) { dev_err(&client->dev, "Ack obj command failed: %d\n", error); return error; diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index caa3aca2ea541..d5956854ac983 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -778,6 +778,7 @@ static int sur40_probe(struct usb_interface *interface, dev_err(&interface->dev, "Unable to register video controls."); v4l2_ctrl_handler_free(&sur40->hdl); + error = sur40->hdl.error; goto err_unreg_v4l2; } diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 7e426e4d13528..8cda3f7ddbae8 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -110,6 +110,10 @@ struct mapped_device { /* zero-length flush that will be cloned and submitted to targets */ 
struct bio flush_bio; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + struct dm_stats stats; struct kthread_worker kworker; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 07661c3c1513f..85559f772d0d6 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2852,6 +2852,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) wake_up_process(cc->write_thread); ti->num_flush_bios = 1; + ti->limit_swap_bios = true; return 0; diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 8e48920a3ffa6..c596fc564a589 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -46,6 +46,7 @@ struct writeset { static void writeset_free(struct writeset *ws) { vfree(ws->bits); + ws->bits = NULL; } static int setup_on_disk_bitset(struct dm_disk_bitset *info, @@ -70,8 +71,6 @@ static size_t bitset_size(unsigned nr_bits) */ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) { - ws->md.nr_bits = nr_blocks; - ws->md.root = INVALID_WRITESET_ROOT; ws->bits = vzalloc(bitset_size(nr_blocks)); if (!ws->bits) { DMERR("%s: couldn't allocate in memory bitset", __func__); @@ -84,12 +83,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) /* * Wipes the in-core bitset, and creates a new on disk bitset. */ -static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws, + dm_block_t nr_blocks) { int r; - memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + memset(ws->bits, 0, bitset_size(nr_blocks)); + ws->md.nr_bits = nr_blocks; r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); if (r) { DMERR("%s: setup_on_disk_bitset failed", __func__); @@ -133,7 +134,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info, { int r; - if (!test_and_set_bit(block, ws->bits)) { + if (!test_bit(block, ws->bits)) { r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); if (r) { /* FIXME: fail mode */ @@ -387,7 +388,7 @@ static void ws_dec(void *context, const void *value) static int ws_eq(void *context, const void *value1, const void *value2) { - return !memcmp(value1, value2, sizeof(struct writeset_metadata)); + return !memcmp(value1, value2, sizeof(struct writeset_disk)); } /*----------------------------------------------------------------*/ @@ -563,6 +564,15 @@ static int open_metadata(struct era_metadata *md) } disk = dm_block_data(sblock); + + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk->data_block_size) != md->block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk->data_block_size), md->block_size); + r = -EINVAL; + goto bad; + } + r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, disk->metadata_space_map_root, sizeof(disk->metadata_space_map_root), @@ -574,10 +584,10 @@ static int open_metadata(struct era_metadata *md) setup_infos(md); - md->block_size = le32_to_cpu(disk->data_block_size); md->nr_blocks = le32_to_cpu(disk->nr_blocks); md->current_era = le32_to_cpu(disk->current_era); + ws_unpack(&disk->current_writeset, &md->current_writeset->md); md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); md->era_array_root = le64_to_cpu(disk->era_array_root); md->metadata_snap = le64_to_cpu(disk->metadata_snap); @@ -745,6 +755,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md, ws_unpack(&disk, &d->writeset); d->value = cpu_to_le32(key); + /* + * 
We initialise another bitset info to avoid any caching side effects + * with the previous one. + */ + dm_disk_bitset_init(md->tm, &d->info); + d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); d->current_bit = 0; d->step = metadata_digest_transcribe_writeset; @@ -758,12 +774,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d) return 0; memset(d, 0, sizeof(*d)); - - /* - * We initialise another bitset info to avoid any caching side - * effects with the previous one. - */ - dm_disk_bitset_init(md->tm, &d->info); d->step = metadata_digest_lookup_writeset; return 0; @@ -801,6 +811,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev, static void metadata_close(struct era_metadata *md) { + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); destroy_persistent_data_objects(md); kfree(md); } @@ -838,6 +850,7 @@ static int metadata_resize(struct era_metadata *md, void *arg) r = writeset_alloc(&md->writesets[1], *new_size); if (r) { DMERR("%s: writeset_alloc failed for writeset 1", __func__); + writeset_free(&md->writesets[0]); return r; } @@ -848,6 +861,8 @@ static int metadata_resize(struct era_metadata *md, void *arg) &value, &md->era_array_root); if (r) { DMERR("%s: dm_array_resize failed", __func__); + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); return r; } @@ -869,7 +884,6 @@ static int metadata_era_archive(struct era_metadata *md) } ws_pack(&md->current_writeset->md, &value); - md->current_writeset->md.root = INVALID_WRITESET_ROOT; keys[0] = md->current_era; __dm_bless_for_disk(&value); @@ -881,6 +895,7 @@ static int metadata_era_archive(struct era_metadata *md) return r; } + md->current_writeset->md.root = INVALID_WRITESET_ROOT; md->archived_writesets = true; return 0; @@ -897,7 +912,7 @@ static int metadata_new_era(struct era_metadata *md) int r; struct writeset *new_writeset = next_writeset(md); - r = writeset_init(&md->bitset_info, new_writeset); + r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks); if (r) { DMERR("%s: writeset_init failed", __func__); return r; @@ -950,7 +965,7 @@ static int metadata_commit(struct era_metadata *md) int r; struct dm_block *sblock; - if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { + if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, &md->current_writeset->md.root); if (r) { @@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era) int r; struct bio_list deferred_bios, marked_bios; struct bio *bio; + struct blk_plug plug; bool commit_needed = false; bool failed = false; + struct writeset *ws = era->md->current_writeset; bio_list_init(&deferred_bios); bio_list_init(&marked_bios); @@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era) bio_list_init(&era->deferred_bios); spin_unlock(&era->deferred_lock); + if (bio_list_empty(&deferred_bios)) + return; + while ((bio = bio_list_pop(&deferred_bios))) { - r = writeset_test_and_set(&era->md->bitset_info, - era->md->current_writeset, + r = writeset_test_and_set(&era->md->bitset_info, ws, get_block(era, bio)); if (r < 0) { /* @@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era) * FIXME: finish. 
*/ failed = true; - } else if (r == 0) commit_needed = true; @@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era) if (failed) while ((bio = bio_list_pop(&marked_bios))) bio_io_error(bio); - else - while ((bio = bio_list_pop(&marked_bios))) + else { + blk_start_plug(&plug); + while ((bio = bio_list_pop(&marked_bios))) { + /* + * Only update the in-core writeset if the on-disk one + * was updated too. + */ + if (commit_needed) + set_bit(get_block(era, bio), ws->bits); generic_make_request(bio); + } + blk_finish_plug(&plug); + } } static void process_rpc_calls(struct era *era) @@ -1485,15 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) } era->md = md; - era->nr_blocks = calc_nr_blocks(era); - - r = metadata_resize(era->md, &era->nr_blocks); - if (r) { - ti->error = "couldn't resize metadata"; - era_destroy(era); - return -ENOMEM; - } - era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); if (!era->wq) { ti->error = "could not create workqueue for metadata object"; @@ -1570,16 +1589,24 @@ static int era_preresume(struct dm_target *ti) dm_block_t new_size = calc_nr_blocks(era); if (era->nr_blocks != new_size) { - r = in_worker1(era, metadata_resize, &new_size); - if (r) + r = metadata_resize(era->md, &new_size); + if (r) { + DMERR("%s: metadata_resize failed", __func__); + return r; + } + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); return r; + } era->nr_blocks = new_size; } start_worker(era); - r = in_worker0(era, metadata_new_era); + r = in_worker0(era, metadata_era_rollover); if (r) { DMERR("%s: metadata_era_rollover failed", __func__); return r; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 13237b85cfec5..01bf5bc925d0b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -146,6 +146,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) +static int swap_bios = DEFAULT_SWAP_BIOS; +static int get_swap_bios(void) +{ + int latch = READ_ONCE(swap_bios); + if (unlikely(latch <= 0)) + latch = DEFAULT_SWAP_BIOS; + return latch; +} + /* * For mempools pre-allocation at the table loading time. 
*/ @@ -935,6 +945,11 @@ void disable_write_zeroes(struct mapped_device *md) limits->max_write_zeroes_sectors = 0; } +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) +{ + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); +} + static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; @@ -972,6 +987,11 @@ static void clone_endio(struct bio *bio) } } + if (unlikely(swap_bios_limit(tio->ti, bio))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } + free_tio(tio); dec_pending(io, error); } @@ -1250,6 +1270,22 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) } EXPORT_SYMBOL_GPL(dm_remap_zone_report); +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) +{ + mutex_lock(&md->swap_bios_lock); + while (latch < md->swap_bios) { + cond_resched(); + down(&md->swap_bios_semaphore); + md->swap_bios--; + } + while (latch > md->swap_bios) { + cond_resched(); + up(&md->swap_bios_semaphore); + md->swap_bios++; + } + mutex_unlock(&md->swap_bios_lock); +} + static blk_qc_t __map_bio(struct dm_target_io *tio) { int r; @@ -1270,6 +1306,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) atomic_inc(&io->io_count); sector = clone->bi_iter.bi_sector; + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + int latch = get_swap_bios(); + if (unlikely(latch != md->swap_bios)) + __set_swap_bios_limit(md, latch); + down(&md->swap_bios_semaphore); + } + r = ti->type->map(ti, clone); switch (r) { case DM_MAPIO_SUBMITTED: @@ -1284,10 +1328,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) ret = generic_make_request(clone); break; case DM_MAPIO_KILL: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_IOERR); break; case DM_MAPIO_REQUEUE: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_DM_REQUEUE); break; @@ -1859,6 +1911,7 @@ static void cleanup_mapped_device(struct mapped_device *md) mutex_destroy(&md->suspend_lock); mutex_destroy(&md->type_lock); mutex_destroy(&md->table_devices_lock); + mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); } @@ -1933,6 +1986,10 @@ static struct mapped_device *alloc_dev(int minor) init_completion(&md->kobj_holder.completion); md->kworker_task = NULL; + md->swap_bios = get_swap_bios(); + sema_init(&md->swap_bios_semaphore, md->swap_bios); + mutex_init(&md->swap_bios_lock); + md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; @@ -3228,6 +3285,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); +module_param(swap_bios, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c index 53dd30d96e691..62dfdcb6e0bd6 100644 --- a/drivers/media/i2c/ov5670.c +++ b/drivers/media/i2c/ov5670.c @@ -2081,7 +2081,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670) /* By default, V4L2_CID_PIXEL_RATE is read only */ ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, 
- V4L2_CID_PIXEL_RATE, 0, + V4L2_CID_PIXEL_RATE, + link_freq_configs[0].pixel_rate, link_freq_configs[0].pixel_rate, 1, link_freq_configs[0].pixel_rate); diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index 2f0171134f7e1..e04fe9f17b7aa 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -986,8 +986,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci, __le32 *cpu; dma_addr_t dma = 0; - if (NULL != risc->cpu && risc->size < size) + if (risc->cpu && risc->size < size) { pci_free_consistent(pci, risc->size, risc->cpu, risc->dma); + risc->cpu = NULL; + } if (NULL == risc->cpu) { cpu = pci_zalloc_consistent(pci, size, &dma); if (NULL == cpu) diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 2ad2870c03ae2..cdf4d07343a71 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1290,7 +1290,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, fmt->format.code = formats[0].mbus_code; for (i = 0; i < ARRAY_SIZE(formats); i++) { - if (formats[i].mbus_code == fmt->format.code) { + if (formats[i].mbus_code == mbus_code) { fmt->format.code = mbus_code; break; } diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index 66acfd35ffc60..8680eb08b654d 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -293,8 +293,11 @@ static int empress_init(struct saa7134_dev *dev) q->lock = &dev->lock; q->dev = &dev->pci->dev; err = vb2_queue_init(q); - if (err) + if (err) { + video_device_release(dev->empress_dev); + dev->empress_dev = NULL; return err; + } dev->empress_dev->queue = q; video_set_drvdata(dev->empress_dev, dev); diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 406ac673ad84c..0281b8e53fef2 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -1452,6 +1452,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb) struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue); struct pxa_buffer *buf = vb2_to_pxa_buffer(vb); int ret = 0; +#ifdef DEBUG + int i; +#endif switch (pcdev->channels) { case 1: diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index c9bb0d023db48..e81ebeb0506e4 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -901,6 +901,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, video->nformats = ARRAY_SIZE(formats_rdi_8x96); } } else { + ret = -EINVAL; goto error_video_register; } diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 4e6530ee809af..022f84569d0e5 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev) } done: - if (ret) + if (ret) { pm_runtime_disable(&pdev->dev); + rcar_fcp_put(vsp1->fcp); + } return ret; } diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c index 83ca5dc047ea2..baa9950783b66 100644 --- a/drivers/media/tuners/qm1d1c0042.c +++ b/drivers/media/tuners/qm1d1c0042.c @@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe) if (val == reg_initval[reg_index][0x00]) break; } - if (reg_index >= 
QM1D1C0042_NUM_REG_ROWS) + if (reg_index >= QM1D1C0042_NUM_REG_ROWS) { + ret = -EINVAL; goto failed; + } memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS); usleep_range(2000, 3000); diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 0750a975bcb89..316edb2dd6c4a 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -436,7 +436,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) - lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), + lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index 69445c8e38e28..d0f95a5cb4d23 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -955,14 +955,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk, usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL); if (!usb_bufs->buf[i]) { - em28xx_uninit_usb_xfer(dev, mode); - for (i--; i >= 0; i--) kfree(usb_bufs->buf[i]); - kfree(usb_bufs->buf); - usb_bufs->buf = NULL; - + em28xx_uninit_usb_xfer(dev, mode); return -ENOMEM; } diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 3db2fd7f5d7c4..e4f2160f46ff1 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -149,6 +149,10 @@ static int tm6000_start_stream(struct tm6000_core *dev) if (ret < 0) { printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n", ret, __func__); + + kfree(dvb->bulk_urb->transfer_buffer); + usb_free_urb(dvb->bulk_urb); + dvb->bulk_urb = NULL; return ret; } else printk(KERN_ERR "tm6000: pipe resetted\n"); diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 0921c95a1dca5..06167c51af128 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -253,7 +253,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, goto done; /* After the probe, update fmt with the values returned from - * negotiation with the device. + * negotiation with the device. Some devices return invalid bFormatIndex + * and bFrameIndex values, in which case we can only assume they have + * accepted the requested format as-is. 
*/ for (i = 0; i < stream->nformats; ++i) { if (probe->bFormatIndex == stream->format[i].index) { @@ -262,11 +264,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, } } - if (i == stream->nformats) { - uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n", + if (i == stream->nformats) + uvc_trace(UVC_TRACE_FORMAT, + "Unknown bFormatIndex %u, using default\n", probe->bFormatIndex); - return -EINVAL; - } for (i = 0; i < format->nframes; ++i) { if (probe->bFrameIndex == format->frame[i].bFrameIndex) { @@ -275,11 +276,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, } } - if (i == format->nframes) { - uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n", + if (i == format->nframes) + uvc_trace(UVC_TRACE_FORMAT, + "Unknown bFrameIndex %u, using default\n", probe->bFrameIndex); - return -EINVAL; - } fmt->fmt.pix.width = frame->wWidth; fmt->fmt.pix.height = frame->wHeight; diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 475e5b3790edb..be3d978648f57 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c @@ -381,8 +381,10 @@ static int aemif_probe(struct platform_device *pdev) */ for_each_available_child_of_node(np, child_np) { ret = of_aemif_parse_abus_config(pdev, child_np); - if (ret < 0) + if (ret < 0) { + of_node_put(child_np); goto error; + } } } else if (pdata && pdata->num_abus_data > 0) { for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) { @@ -408,8 +410,10 @@ static int aemif_probe(struct platform_device *pdev) for_each_available_child_of_node(np, child_np) { ret = of_platform_populate(child_np, NULL, dev_lookup, dev); - if (ret < 0) + if (ret < 0) { + of_node_put(child_np); goto error; + } } } else if (pdata) { for (i = 0; i < pdata->num_sub_devices; i++) { diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index fab3cdc27ed64..19d57a45134c6 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c @@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client, return ret; } - ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells, - ARRAY_SIZE(bd9571mwv_cells), NULL, 0, - regmap_irq_get_domain(bd->irq_data)); + ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, + bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells), + NULL, 0, regmap_irq_get_domain(bd->irq_data)); if (ret) { regmap_del_irq_chip(bd->irq, bd->irq_data); return ret; diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c index fd789d2eb0f52..9f7ae1e1ebcd6 100644 --- a/drivers/mfd/wm831x-auxadc.c +++ b/drivers/mfd/wm831x-auxadc.c @@ -98,11 +98,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x, wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); mutex_lock(&wm831x->auxadc_lock); - - list_del(&req->list); ret = req->val; out: + list_del(&req->list); mutex_unlock(&wm831x->auxadc_lock); kfree(req); diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c index c10be21a1663d..b4a776bf44bc5 100644 --- a/drivers/misc/aspeed-lpc-snoop.c +++ b/drivers/misc/aspeed-lpc-snoop.c @@ -15,6 +15,7 @@ */ #include +#include #include #include #include @@ -71,6 +72,7 @@ struct aspeed_lpc_snoop_channel { struct aspeed_lpc_snoop { struct regmap *regmap; int irq; + struct clk *clk; struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS]; }; @@ -286,22 +288,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev) return -ENODEV; } + lpc_snoop->clk = devm_clk_get(dev, NULL); + if (IS_ERR(lpc_snoop->clk)) { + rc = PTR_ERR(lpc_snoop->clk); + if (rc 
!= -EPROBE_DEFER) + dev_err(dev, "couldn't get clock\n"); + return rc; + } + rc = clk_prepare_enable(lpc_snoop->clk); + if (rc) { + dev_err(dev, "couldn't enable clock\n"); + return rc; + } + rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev); if (rc) - return rc; + goto err; rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port); if (rc) - return rc; + goto err; /* Configuration of 2nd snoop channel port is optional */ if (of_property_read_u32_index(dev->of_node, "snoop-ports", 1, &port) == 0) { rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port); - if (rc) + if (rc) { aspeed_lpc_disable_snoop(lpc_snoop, 0); + goto err; + } } + return 0; + +err: + clk_disable_unprepare(lpc_snoop->clk); + return rc; } @@ -313,6 +335,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev) aspeed_lpc_disable_snoop(lpc_snoop, 0); aspeed_lpc_disable_snoop(lpc_snoop, 1); + clk_disable_unprepare(lpc_snoop->clk); + return 0; } diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index f0e845c8e6a76..d3ce9fe08eafb 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -339,6 +339,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr) { rts5227_extra_init_hw(pcr); + /* Power down OCP for power consumption */ + if (!pcr->card_exist) + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, + OC_POWER_DOWN); + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04); diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 38766968bfa20..a3248ebd28c62 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -523,3 +523,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); MODULE_AUTHOR("Anatolij Gustschin "); MODULE_ALIAS("spi:93xx46"); +MODULE_ALIAS("spi:eeprom-93xx46"); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 5e0d1ac67f73f..9bc97d6a651db 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -552,6 +552,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); + if (queue_size + queue_page_size > KMALLOC_MAX_SIZE) + return NULL; + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); if (queue) { queue->q_header = NULL; @@ -645,7 +648,7 @@ static void qp_release_pages(struct page **pages, for (i = 0; i < num_pages; i++) { if (dirty) - set_page_dirty(pages[i]); + set_page_dirty_lock(pages[i]); put_page(pages[i]); pages[i] = NULL; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 74eea8247490d..6e2685c9e9cb5 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -180,8 +180,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, mmc_get_dma_dir(data))) goto force_pio; - /* This DMAC cannot handle if buffer is not 8-bytes alignment */ - if (!IS_ALIGNED(sg_dma_address(sg), 8)) + /* This DMAC cannot handle if buffer is not 128-bytes alignment */ + if (!IS_ALIGNED(sg_dma_address(sg), 128)) goto force_pio_with_unmap; if (data->flags & MMC_DATA_READ) { diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index bd502f4f47045..5099353e6f137 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ 
b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1370,9 +1370,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); - int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + int dead; pm_runtime_get_sync(&pdev->dev); + dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index cdfeb15b6f051..ef3aa8b520787 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1866,10 +1866,12 @@ static int usdhi6_probe(struct platform_device *pdev) ret = mmc_add_host(mmc); if (ret < 0) - goto e_clk_off; + goto e_release_dma; return 0; +e_release_dma: + usdhi6_dma_release(host); e_clk_off: clk_disable_unprepare(host->clk); e_free_mmc: diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 04cedd3a2bf66..a92f531ad23a3 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -473,7 +473,7 @@ static int cqspi_read_setup(struct spi_nor *nor) /* Setup dummy clock cycles */ dummy_clk = nor->read_dummy; if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) - dummy_clk = CQSPI_DUMMY_CLKS_MAX; + return -EOPNOTSUPP; if (dummy_clk / 8) { reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index dea7b0c4b339f..184ba5069ac51 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -408,8 +408,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host) for_each_available_child_of_node(dev->of_node, np) { ret = hisi_spi_nor_register(np, host); - if (ret) + if (ret) { + of_node_put(np); goto fail; + } if (host->num_chip == HIFMC_MAX_CHIP_NUM) { dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b40d4377cc71d..b2cd3bdba9f89 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1279,10 +1279,18 @@ #define MDIO_PMA_10GBR_FECCTRL 0x00ab #endif +#ifndef MDIO_PMA_RX_CTRL1 +#define MDIO_PMA_RX_CTRL1 0x8051 +#endif + #ifndef MDIO_PCS_DIG_CTRL #define MDIO_PCS_DIG_CTRL 0x8000 #endif +#ifndef MDIO_PCS_DIGITAL_STAT +#define MDIO_PCS_DIGITAL_STAT 0x8010 +#endif + #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif @@ -1358,6 +1366,8 @@ #define XGBE_KR_TRAINING_ENABLE BIT(1) #define XGBE_PCS_CL37_BP BIT(12) +#define XGBE_PCS_PSEQ_STATE_MASK 0x1c +#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10 #define XGBE_AN_CL37_INT_CMPLT BIT(0) #define XGBE_AN_CL37_INT_MASK 0x01 @@ -1375,6 +1385,10 @@ #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 #define XGBE_PMA_CDR_TRACK_EN_ON 0x01 +#define XGBE_PMA_RX_RST_0_MASK BIT(4) +#define XGBE_PMA_RX_RST_0_RESET_ON 0x10 +#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 5519eff584417..80cf6af822f72 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1444,6 +1444,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) return; 
netif_tx_stop_all_queues(netdev); + netif_carrier_off(pdata->netdev); xgbe_stop_timers(pdata); flush_workqueue(pdata->dev_workqueue); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 8a3a60bb26888..156a0bc8ab01d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) &an_restart); if (an_restart) { xgbe_phy_config_aneg(pdata); - return; + goto adjust_link; } if (pdata->phy.link) { @@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) pdata->phy_if.phy_impl.stop(pdata); pdata->phy.link = 0; - netif_carrier_off(pdata->netdev); xgbe_phy_adjust_link(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3ceb4f95ca7ca..54753c8a6a9d7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -912,6 +912,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) if ((phy_id & 0xfffffff0) != 0x03625d10) return false; + /* Reset PHY - wait for self-clearing reset bit to clear */ + genphy_soft_reset(phy_data->phydev); + /* Disable RGMII mode */ phy_write(phy_data->phydev, 0x18, 0x7007); reg = phy_read(phy_data->phydev, 0x18); @@ -1942,6 +1945,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata) xgbe_phy_put_comm_ownership(pdata); } +static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata) +{ + int reg; + + reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT, + XGBE_PCS_PSEQ_STATE_MASK); + if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) { + /* Mailbox command timed out, reset of RX block is required. + * This can be done by asserting the reset bit and waiting for + * its completion. 
+ */ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1, + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON); + ndelay(20); + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1, + XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF); + usleep_range(40, 50); + netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n"); + } +} + static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, unsigned int sub_cmd) { @@ -1949,9 +1973,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int wait; /* Log if a previous command did not complete */ - if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) { netif_dbg(pdata, link, pdata->netdev, "firmware mailbox not ready for command\n"); + xgbe_phy_rx_reset(pdata); + } /* Construct the command */ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd); @@ -1973,6 +1999,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, netif_dbg(pdata, link, pdata->netdev, "firmware mailbox command did not complete\n"); + + /* Reset on error */ + xgbe_phy_rx_reset(pdata); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) @@ -2569,6 +2598,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) if (reg & MDIO_STAT1_LSTATUS) return 1; + if (pdata->phy.autoneg == AUTONEG_ENABLE && + phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) { + if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { + netif_carrier_off(pdata->netdev); + *an_restart = 1; + } + } + /* No link, attempt a receiver reset cycle */ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db1a23b8d531d..44ed2f6e2d96c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6298,9 +6298,10 @@ void bnxt_tx_disable(struct bnxt *bp) txr->dev_state = BNXT_DEV_STATE_CLOSING; } } + /* Drop carrier first to prevent TX timeout */ + netif_carrier_off(bp->dev); /* Stop all TX queues */ netif_tx_disable(bp->dev); - netif_carrier_off(bp->dev); } void bnxt_tx_enable(struct bnxt *bp) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index de9ad311dacd8..ea0758de8ac89 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -44,6 +44,9 @@ #define MAX_ULD_QSETS 16 +/* ulp_mem_io + ulptx_idata + payload + padding */ +#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8) + /* CPL message priority levels */ enum { CPL_PRIORITY_DATA = 0, /* data messages */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 7801f2aeeb30e..0a2d10a000ca4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2084,17 +2084,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) * @skb: the packet * * Returns true if a packet can be sent as an offload WR with immediate - * data. We currently use the same limit as for Ethernet packets. + * data. + * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field. + * However, FW_ULPTX_WR commands have a 256 byte immediate only + * payload limit. 
*/ static inline int is_ofld_imm(const struct sk_buff *skb) { struct work_request_hdr *req = (struct work_request_hdr *)skb->data; unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); - if (opcode == FW_CRYPTO_LOOKASIDE_WR) + if (unlikely(opcode == FW_ULPTX_WR)) + return skb->len <= MAX_IMM_ULPTX_WR_LEN; + else if (opcode == FW_CRYPTO_LOOKASIDE_WR) return skb->len <= SGE_MAX_WR_LEN; else - return skb->len <= MAX_IMM_TX_PKT_LEN; + return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; } /** diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 68d5971c200a1..0c7c0206b1be5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -212,8 +212,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, if (!ltb->buff) return; + /* VIOS automatically unmaps the long term buffer at remote + * end for the following resets: + * FAILOVER, MOBILITY, TIMEOUT. + */ if (adapter->reset_reason != VNIC_RESET_FAILOVER && - adapter->reset_reason != VNIC_RESET_MOBILITY) + adapter->reset_reason != VNIC_RESET_MOBILITY && + adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); } @@ -1257,10 +1262,8 @@ static int __ibmvnic_close(struct net_device *netdev) adapter->state = VNIC_CLOSING; rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); - if (rc) - return rc; adapter->state = VNIC_CLOSED; - return 0; + return rc; } static int ibmvnic_close(struct net_device *netdev) @@ -1523,6 +1526,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) skb_copy_from_linear_data(skb, dst, skb->len); } + /* post changes to long_term_buff *dst before VIOS accessing it */ + dma_wmb(); + tx_pool->consumer_index = (tx_pool->consumer_index + 1) % tx_pool->num_buffers; @@ -2188,6 +2194,8 @@ restart_poll: offset = be16_to_cpu(next->rx_comp.off_frame_data); flags = next->rx_comp.flags; skb = rx_buff->skb; + /* load long_term_buff before copying to skb */ + dma_rmb(); skb_copy_to_linear_data(skb, rx_buff->data + offset, length); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a728b6a7872cb..fe9da568ee196 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2577,7 +2577,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + if (test_bit(__I40E_VF_DISABLE, pf->state)) { set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); return; } @@ -2595,7 +2595,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } - clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -7007,6 +7006,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, if (filter->flags >= ARRAY_SIZE(flag_table)) return I40E_ERR_CONFIG; + memset(&cld_filter, 0, sizeof(cld_filter)); + /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter); @@ -7070,10 +7071,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, return -EOPNOTSUPP; /* adding filter using src_port/src_ip is not supported at this stage */ - if (filter->src_port || filter->src_ipv4 || + if (filter->src_port || + (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.src_ip6)) return -EOPNOTSUPP; + memset(&cld_filter, 0, sizeof(cld_filter)); + /* copy element needed to add 
cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter.element); @@ -7097,7 +7101,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); } - } else if (filter->dst_ipv4 || + } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { cld_filter.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); @@ -9399,7 +9403,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; - u8 set_fc_aq_fail = 0; i40e_status ret; u32 val; int v; @@ -9480,13 +9483,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - /* make sure our flow control settings are restored */ - ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); - if (ret) - dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. @@ -11107,6 +11103,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) struct i40e_aqc_configure_partition_bw_data bw_data; i40e_status status; + memset(&bw_data, 0, sizeof(bw_data)); + /* Set the valid bit for this PF */ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; @@ -13623,7 +13621,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err; u32 val; u32 i; - u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) @@ -13903,24 +13900,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); - /* Make sure flow control is set according to current settings */ - err = i40e_set_fc(hw, &set_fc_aq_fail, true); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on get_phy_cap\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on set_phy_config\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) - dev_dbg(&pf->pdev->dev, - "Set fc with err %s aq_err %s on get_link_info\n", - i40e_stat_str(hw, err), - i40e_aq_str(hw, hw->aq.asq_last_status)); - /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b5042d1a63c0b..9ccbcd88bf1e6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3070,13 +3070,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + int ret; + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto, &frag_off); + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (ret < 0) 
+ return -1; } /* define outer transport */ diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 30a16cf796c7a..fda5dd8c71ebd 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3022,7 +3022,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, } /* Setup XPS mapping */ - if (txq_number > 1) + if (pp->neta_armada3700) + cpu = 0; + else if (txq_number > 1) cpu = txq->id % num_present_cpus(); else cpu = pp->rxq_def % num_present_cpus(); @@ -3667,6 +3669,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) node_online); struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts + * are routed to CPU 0, so we don't need all the cpu-hotplug support + */ + if (pp->neta_armada3700) + return 0; spin_lock(&pp->lock); /* diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index a4c1ed65f620c..550e4893253ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4990,6 +4990,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule if (!fs_rule->mirr_mbox) { mlx4_err(dev, "rule mirroring mailbox is null\n"); + mlx4_free_cmd_mailbox(dev, mailbox); return -EINVAL; } memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6b901bf1cd54d..6fd1a639ec533 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4365,7 +4365,7 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, MaxTxPacketSize, 0x24); RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B); @@ -4373,7 +4373,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { - RTL_W8(tp, MaxTxPacketSize, 0x0c); + RTL_W8(tp, MaxTxPacketSize, 0x3f); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index baa3088b475c7..59fc14b58c838 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1353,27 +1353,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (vio_version_after_eq(&port->vio, 1, 3)) localmtu -= VLAN_HLEN; - if (skb->protocol == htons(ETH_P_IP)) { - struct flowi4 fl4; - struct rtable *rt = NULL; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = dev->ifindex; - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); - fl4.daddr = ip_hdr(skb)->daddr; - fl4.saddr = ip_hdr(skb)->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - if (!IS_ERR(rt)) { - skb_dst_set(skb, &rt->dst); - icmp_send(skb, ICMP_DEST_UNREACH, - ICMP_FRAG_NEEDED, - htonl(localmtu)); - } - } + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(localmtu)); #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) - 
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); #endif goto out_dropped; } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index bb9cd1262cc99..e18d06cb2173c 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -549,9 +549,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); goto err_rt; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index bdfe88c754dfe..d2e5f5b7adf18 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1291,6 +1291,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ + {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 66fffbd64a33f..49e8c6d42cda4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3812,7 +3812,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan, *next; struct net_device *dev, *aux; - unsigned int h; for_each_netdev_safe(net, dev, aux) if (dev->rtnl_link_ops == &vxlan_link_ops) @@ -3826,14 +3825,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) unregister_netdevice_queue(vxlan->dev, head); } - for (h = 0; h < PORT_HASH_SIZE; ++h) - WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); } static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) { struct net *net; LIST_HEAD(list); + unsigned int h; rtnl_lock(); list_for_each_entry(net, net_list, exit_list) @@ -3841,6 +3839,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) unregister_netdevice_many(&list); rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + + for (h = 0; h < PORT_HASH_SIZE; ++h) + WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h])); + } } static struct pernet_operations vxlan_net_ops = { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index e2d78f77edb70..241e6f0e1dfe2 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -789,13 +789,14 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar) ret = ath10k_snoc_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); - goto err_wlan_enable; + goto err_free_rri; } napi_enable(&ar->napi); return 0; -err_wlan_enable: +err_free_rri: + ath10k_ce_free_rri(ar); ath10k_snoc_wlan_disable(ar); return ret; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 0a6eb8a8c1ed0..84fe686709496 100644 --- 
a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1236,8 +1236,11 @@ static ssize_t write_file_nf_override(struct file *file, ah->nf_override = val; - if (ah->curchan) + if (ah->curchan) { + ath9k_ps_wakeup(sc); ath9k_hw_loadnf(ah, ah->curchan); + ath9k_ps_restore(sc); + } return count; } diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 44ab080d65182..88446258e7751 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -5320,7 +5320,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) for (i = 0; i < 4; i++) { if (dev->phy.rev >= 3) - table[i] = coef[i]; + coef[i] = table[i]; else coef[i] = 0; } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index c960cb7e3251f..3b5fdb24ef1b9 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; int old; + bool has_rx, has_tx; old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); WARN(old, "Interrupt while EOI pending\n"); - /* Use bitwise or as we need to call both functions. */ - if ((!xenvif_handle_tx_interrupt(queue) | - !xenvif_handle_rx_interrupt(queue))) { + has_tx = xenvif_handle_tx_interrupt(queue); + has_rx = xenvif_handle_rx_interrupt(queue); + + if (!has_rx && !has_tx) { atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); } diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 863cabc352159..f0e0e3b42c91f 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -359,16 +359,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(state); -static ssize_t available_slots_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) { - struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); + struct device *dev; ssize_t rc; u32 nfree; if (!ndd) return -ENXIO; + dev = ndd->dev; nvdimm_bus_lock(dev); nfree = nd_label_nfree(ndd); if (nfree - 1 > nfree) { @@ -380,6 +380,18 @@ static ssize_t available_slots_show(struct device *dev, nvdimm_bus_unlock(dev); return rc; } + +static ssize_t available_slots_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t rc; + + device_lock(dev); + rc = __available_slots_show(dev_get_drvdata(dev), buf); + device_unlock(dev); + + return rc; +} static DEVICE_ATTR_RO(available_slots); static struct attribute *nvdimm_attributes[] = { diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 800ad252cf9c6..21160a08ead4b 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1172,8 +1172,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size) int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool nomap) { - if (nomap) - return memblock_remove(base, size); + if (nomap) { + /* + * If the memory is already reserved (by another region), we + * should not allow it to be marked nomap. 
+ */ + if (memblock_is_region_reserved(base, size)) + return -EBUSY; + + return memblock_mark_nomap(base, size); + } return memblock_reserve(base, size); } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 791d6b671ee0b..33e5103939767 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -371,7 +371,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) /* enable external reference clock */ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val &= ~PHY_REFCLK_USE_PAD; + /* USE_PAD is required only for ipq806x */ + if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) + val &= ~PHY_REFCLK_USE_PAD; val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index d96626c614f56..a7bdd10fccf33 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -19,7 +19,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, u16 word; u32 dword; long err; - long cfg_ret; + int cfg_ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -45,7 +45,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, } err = -EIO; - if (cfg_ret != PCIBIOS_SUCCESSFUL) + if (cfg_ret) goto error; switch (len) { @@ -103,7 +103,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_byte(dev, off, byte); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; @@ -112,7 +112,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_word(dev, off, word); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; @@ -121,7 +121,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_dword(dev, off, dword); - if (err != PCIBIOS_SUCCESSFUL) + if (err) err = -EIO; break; diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c index d9493e893d64e..7cf59e764ef28 100644 --- a/drivers/power/reset/at91-sama5d2_shdwc.c +++ b/drivers/power/reset/at91-sama5d2_shdwc.c @@ -36,7 +36,7 @@ #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */ #define AT91_SHDW_WKUPDBC_SHIFT 24 -#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16) +#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24) #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \ & AT91_SHDW_WKUPDBC_MASK) diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index 4d99d468df09a..48bcc853d57a7 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -370,7 +370,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev) ret = pwmchip_add(&pc->chip); if (ret < 0) { - clk_unprepare(pc->clk); dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); goto err_pclk; } diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 91b8ff8bac157..c9e3677ac734b 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -558,7 +558,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) static int axp20x_regulator_parse_dt(struct platform_device *pdev) { struct device_node *np, *regulators; - int ret; + int ret = 0; u32 dcdcfreq = 0; np = of_node_get(pdev->dev.parent->of_node); @@ -573,13 +573,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) ret = axp20x_set_dcdc_freq(pdev, 
dcdcfreq); if (ret < 0) { dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret); - return ret; } - of_node_put(regulators); } - return 0; + of_node_put(np); + return ret; } static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode) diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 667d16dc83ce1..7ff94695eee72 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -548,14 +548,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, rdata = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rdata), GFP_KERNEL); - if (!rdata) + if (!rdata) { + of_node_put(regulators_np); return -ENOMEM; + } rmode = devm_kcalloc(&pdev->dev, pdata->num_regulators, sizeof(*rmode), GFP_KERNEL); - if (!rmode) + if (!rmode) { + of_node_put(regulators_np); return -ENOMEM; + } pdata->regulators = rdata; pdata->opmode = rmode; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 28a4505a1bc82..b5845f16a3a26 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -639,6 +639,7 @@ config RTC_DRV_S5M tristate "Samsung S2M/S5M series" depends on MFD_SEC_CORE || COMPILE_TEST select REGMAP_IRQ + select REGMAP_I2C help If you say yes here you will get support for the RTC of Samsung S2MPS14 and S5M PMIC series. diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index d401a096dfc7e..2eb2476852b11 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig @@ -4,6 +4,7 @@ config SCSI_BNX2X_FCOE depends on (IPV6 || IPV6=n) depends on LIBFC depends on LIBFCOE + depends on MMU select NETDEVICES select ETHERNET select NET_VENDOR_BROADCOM diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 5a9d7e252a77c..2254e36c7f468 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1605,7 +1605,7 @@ static int atmel_spi_probe(struct platform_device *pdev) if (ret == 0) { as->use_dma = true; } else if (ret == -EPROBE_DEFER) { - return ret; + goto out_unmap_regs; } } else if (as->caps.has_pdc_support) { as->use_pdc = true; diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 869f188b02eb3..1736a48bbccec 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -21,7 +21,8 @@ enum { PORT_BSW1, PORT_BSW2, PORT_CE4100, - PORT_LPT, + PORT_LPT0, + PORT_LPT1, }; struct pxa_spi_info { @@ -55,8 +56,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 }; static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 }; static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 }; -static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 }; -static struct dw_dma_slave lpt_rx_param = { .src_id = 1 }; +static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 }; +static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 }; +static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 }; +static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 }; static bool lpss_dma_filter(struct dma_chan *chan, void *param) { @@ -182,12 +185,19 @@ static struct pxa_spi_info spi_info_configs[] = { .num_chipselect = 1, .max_clk_rate = 50000000, }, - [PORT_LPT] = { + [PORT_LPT0] = { .type = LPSS_LPT_SSP, .port_id = 0, .setup = lpss_spi_setup, - .tx_param = &lpt_tx_param, - .rx_param = &lpt_rx_param, + .tx_param = &lpt0_tx_param, + .rx_param = &lpt0_rx_param, + }, + [PORT_LPT1] = { + .type = LPSS_LPT_SSP, + .port_id = 1, + .setup = lpss_spi_setup, + .tx_param = &lpt1_tx_param, + .rx_param = &lpt1_rx_param, }, }; @@ -281,8 +291,9 @@ static const 
struct pci_device_id pxa2xx_spi_pci_devices[] = { { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 }, { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, - { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT }, - { }, + { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 }, + { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 }, + { } }; MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S index 059f2dc1fda2d..1565c792da079 100644 --- a/drivers/spi/spi-s3c24xx-fiq.S +++ b/drivers/spi/spi-s3c24xx-fiq.S @@ -36,7 +36,6 @@ @ and an offset to the irq acknowledgment word ENTRY(s3c24xx_spi_fiq_rx) -s3c24xx_spi_fix_rx: .word fiq_rx_end - fiq_rx_start .word fiq_rx_irq_ack - fiq_rx_start fiq_rx_start: @@ -50,7 +49,7 @@ fiq_rx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do @@ set IRQ controller so that next op will trigger IRQ mov fiq_rtmp, #0 @@ -62,7 +61,6 @@ fiq_rx_irq_ack: fiq_rx_end: ENTRY(s3c24xx_spi_fiq_txrx) -s3c24xx_spi_fiq_txrx: .word fiq_txrx_end - fiq_txrx_start .word fiq_txrx_irq_ack - fiq_txrx_start fiq_txrx_start: @@ -77,7 +75,7 @@ fiq_txrx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] @@ -89,7 +87,6 @@ fiq_txrx_irq_ack: fiq_txrx_end: ENTRY(s3c24xx_spi_fiq_tx) -s3c24xx_spi_fix_tx: .word fiq_tx_end - fiq_tx_start .word fiq_tx_irq_ack - fiq_tx_start fiq_tx_start: @@ -102,7 +99,7 @@ fiq_tx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subnes pc, lr, #4 @@ return, still have work to do + subsne pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 8eb2644506dd3..8d692f16d90ac 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -992,6 +992,10 @@ static int stm32_spi_transfer_one(struct spi_master *master, struct stm32_spi *spi = spi_master_get_devdata(master); int ret; + /* Don't do anything on 0 bytes transfers */ + if (transfer->len == 0) + return 0; + spi->tx_buf = transfer->tx_buf; spi->rx_buf = transfer->rx_buf; spi->tx_len = spi->tx_buf ? 
transfer->len : 0; diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c index dc4da66c3695b..54bdb64f52e88 100644 --- a/drivers/staging/gdm724x/gdm_usb.c +++ b/drivers/staging/gdm724x/gdm_usb.c @@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev, static int request_mac_address(struct lte_udev *udev) { - u8 buf[16] = {0,}; - struct hci_packet *hci = (struct hci_packet *)buf; + struct hci_packet *hci; struct usb_device *usbdev = udev->usbdev; int actual; int ret = -1; + hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL); + if (!hci) + return -ENOMEM; + hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION); hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1); hci->data[0] = MAC_ADDRESS; - ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5, + ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5, &actual, 1000); udev->request_mac_addr = 1; + kfree(hci); return ret; } diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile index d3152d45cf450..e3ab560ed35ac 100644 --- a/drivers/staging/mt7621-dma/Makefile +++ b/drivers/staging/mt7621-dma/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o -obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o +obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o ccflags-y += -I$(srctree)/drivers/dma diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c new file mode 100644 index 0000000000000..f487cbc91dee6 --- /dev/null +++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c @@ -0,0 +1,771 @@ +/* + * Copyright (C) 2015, Michael Lee + * MTK HSDMA support + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +#define HSDMA_BASE_OFFSET 0x800 + +#define HSDMA_REG_TX_BASE 0x00 +#define HSDMA_REG_TX_CNT 0x04 +#define HSDMA_REG_TX_CTX 0x08 +#define HSDMA_REG_TX_DTX 0x0c +#define HSDMA_REG_RX_BASE 0x100 +#define HSDMA_REG_RX_CNT 0x104 +#define HSDMA_REG_RX_CRX 0x108 +#define HSDMA_REG_RX_DRX 0x10c +#define HSDMA_REG_INFO 0x200 +#define HSDMA_REG_GLO_CFG 0x204 +#define HSDMA_REG_RST_CFG 0x208 +#define HSDMA_REG_DELAY_INT 0x20c +#define HSDMA_REG_FREEQ_THRES 0x210 +#define HSDMA_REG_INT_STATUS 0x220 +#define HSDMA_REG_INT_MASK 0x228 +#define HSDMA_REG_SCH_Q01 0x280 +#define HSDMA_REG_SCH_Q23 0x284 + +#define HSDMA_DESCS_MAX 0xfff +#define HSDMA_DESCS_NUM 8 +#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1) +#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK) + +/* HSDMA_REG_INFO */ +#define HSDMA_INFO_INDEX_MASK 0xf +#define HSDMA_INFO_INDEX_SHIFT 24 +#define HSDMA_INFO_BASE_MASK 0xff +#define HSDMA_INFO_BASE_SHIFT 16 +#define HSDMA_INFO_RX_MASK 0xff +#define HSDMA_INFO_RX_SHIFT 8 +#define HSDMA_INFO_TX_MASK 0xff +#define HSDMA_INFO_TX_SHIFT 0 + +/* HSDMA_REG_GLO_CFG */ +#define HSDMA_GLO_TX_2B_OFFSET BIT(31) +#define HSDMA_GLO_CLK_GATE BIT(30) +#define HSDMA_GLO_BYTE_SWAP BIT(29) +#define HSDMA_GLO_MULTI_DMA BIT(10) +#define HSDMA_GLO_TWO_BUF BIT(9) +#define HSDMA_GLO_32B_DESC BIT(8) +#define HSDMA_GLO_BIG_ENDIAN BIT(7) +#define HSDMA_GLO_TX_DONE BIT(6) +#define HSDMA_GLO_BT_MASK 0x3 +#define HSDMA_GLO_BT_SHIFT 4 +#define HSDMA_GLO_RX_BUSY BIT(3) +#define HSDMA_GLO_RX_DMA BIT(2) +#define HSDMA_GLO_TX_BUSY BIT(1) +#define HSDMA_GLO_TX_DMA BIT(0) + +#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT) +#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT) +#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT) +#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT) + +#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \ + HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES) + +/* HSDMA_REG_RST_CFG */ +#define HSDMA_RST_RX_SHIFT 16 +#define HSDMA_RST_TX_SHIFT 0 + +/* HSDMA_REG_DELAY_INT */ +#define HSDMA_DELAY_INT_EN BIT(15) +#define HSDMA_DELAY_PEND_OFFSET 8 +#define HSDMA_DELAY_TIME_OFFSET 0 +#define HSDMA_DELAY_TX_OFFSET 16 +#define HSDMA_DELAY_RX_OFFSET 0 + +#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \ + ((x) << HSDMA_DELAY_PEND_OFFSET)) +#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \ + HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x)) + +/* HSDMA_REG_INT_STATUS */ +#define HSDMA_INT_DELAY_RX_COH BIT(31) +#define HSDMA_INT_DELAY_RX_INT BIT(30) +#define HSDMA_INT_DELAY_TX_COH BIT(29) +#define HSDMA_INT_DELAY_TX_INT BIT(28) +#define HSDMA_INT_RX_MASK 0x3 +#define HSDMA_INT_RX_SHIFT 16 +#define HSDMA_INT_RX_Q0 BIT(16) +#define HSDMA_INT_TX_MASK 0xf +#define HSDMA_INT_TX_SHIFT 0 +#define HSDMA_INT_TX_Q0 BIT(0) + +/* tx/rx dma desc flags */ +#define HSDMA_PLEN_MASK 0x3fff +#define HSDMA_DESC_DONE BIT(31) +#define HSDMA_DESC_LS0 BIT(30) +#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16) +#define HSDMA_DESC_TAG BIT(15) +#define HSDMA_DESC_LS1 BIT(14) +#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK) + +/* align 4 bytes */ +#define HSDMA_ALIGN_SIZE 3 +/* align size 128bytes */ +#define HSDMA_MAX_PLEN 0x3f80 + +struct hsdma_desc { + u32 addr0; + u32 flags; + u32 addr1; + u32 unused; +}; + +struct mtk_hsdma_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + u32 len; +}; + +struct mtk_hsdma_desc { + 
struct virt_dma_desc vdesc; + unsigned int num_sgs; + struct mtk_hsdma_sg sg[1]; +}; + +struct mtk_hsdma_chan { + struct virt_dma_chan vchan; + unsigned int id; + dma_addr_t desc_addr; + int tx_idx; + int rx_idx; + struct hsdma_desc *tx_ring; + struct hsdma_desc *rx_ring; + struct mtk_hsdma_desc *desc; + unsigned int next_sg; +}; + +struct mtk_hsdam_engine { + struct dma_device ddev; + struct device_dma_parameters dma_parms; + void __iomem *base; + struct tasklet_struct task; + volatile unsigned long chan_issued; + + struct mtk_hsdma_chan chan[1]; +}; + +static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev( + struct mtk_hsdma_chan *chan) +{ + return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine, + ddev); +} + +static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c) +{ + return container_of(c, struct mtk_hsdma_chan, vchan.chan); +} + +static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc( + struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct mtk_hsdma_desc, vdesc); +} + +static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg) +{ + return readl(hsdma->base + reg); +} + +static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma, + unsigned reg, u32 val) +{ + writel(val, hsdma->base + reg); +} + +static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + chan->tx_idx = 0; + chan->rx_idx = HSDMA_DESCS_NUM - 1; + + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); + + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, + 0x1 << (chan->id + HSDMA_RST_TX_SHIFT)); + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, + 0x1 << (chan->id + HSDMA_RST_RX_SHIFT)); +} + +static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma) +{ + dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \ + "tctx %08x, tdtx: %08x, rbase %08x, " \ + "rcnt %08x, rctx %08x, rdtx %08x\n", + mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE), + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT), + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX), + mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX), + mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE), + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT), + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX), + mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX)); + + dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \ + "intr_stat %08x, intr_mask %08x\n", + mtk_hsdma_read(hsdma, HSDMA_REG_INFO), + mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG), + mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT), + mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS), + mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK)); +} + +static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + struct hsdma_desc *tx_desc; + struct hsdma_desc *rx_desc; + int i; + + dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n", + chan->tx_idx, chan->rx_idx); + + for (i = 0; i < HSDMA_DESCS_NUM; i++) { + tx_desc = &chan->tx_ring[i]; + rx_desc = &chan->rx_ring[i]; + + dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \ + "tx addr1: %08x, rx addr0 %08x, flags %08x\n", + i, tx_desc->addr0, tx_desc->flags, \ + tx_desc->addr1, rx_desc->addr0, rx_desc->flags); + } +} + +static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + int i; + + /* disable dma */ + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); + + /* disable intr */ + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); + + /* init desc value */ + for (i = 0; i < HSDMA_DESCS_NUM; i++) { + chan->tx_ring[i].addr0 = 0; + 
chan->tx_ring[i].flags = HSDMA_DESC_LS0 | + HSDMA_DESC_DONE; + } + for (i = 0; i < HSDMA_DESCS_NUM; i++) { + chan->rx_ring[i].addr0 = 0; + chan->rx_ring[i].flags = 0; + } + + /* reset */ + mtk_hsdma_reset_chan(hsdma, chan); + + /* enable intr */ + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); + + /* enable dma */ + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); +} + +static int mtk_hsdma_terminate_all(struct dma_chan *c) +{ + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); + unsigned long timeout; + LIST_HEAD(head); + + spin_lock_bh(&chan->vchan.lock); + chan->desc = NULL; + clear_bit(chan->id, &hsdma->chan_issued); + vchan_get_all_descriptors(&chan->vchan, &head); + spin_unlock_bh(&chan->vchan.lock); + + vchan_dma_desc_free_list(&chan->vchan, &head); + + /* wait dma transfer complete */ + timeout = jiffies + msecs_to_jiffies(2000); + while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) & + (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) { + if (time_after_eq(jiffies, timeout)) { + hsdma_dump_desc(hsdma, chan); + mtk_hsdma_reset(hsdma, chan); + dev_err(hsdma->ddev.dev, "timeout, reset it\n"); + break; + } + cpu_relax(); + } + + return 0; +} + +static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + dma_addr_t src, dst; + size_t len, tlen; + struct hsdma_desc *tx_desc, *rx_desc; + struct mtk_hsdma_sg *sg; + unsigned int i; + int rx_idx; + + sg = &chan->desc->sg[0]; + len = sg->len; + chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN); + + /* tx desc */ + src = sg->src_addr; + for (i = 0; i < chan->desc->num_sgs; i++) { + tx_desc = &chan->tx_ring[chan->tx_idx]; + + if (len > HSDMA_MAX_PLEN) + tlen = HSDMA_MAX_PLEN; + else + tlen = len; + + if (i & 0x1) { + tx_desc->addr1 = src; + tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); + } else { + tx_desc->addr0 = src; + tx_desc->flags = HSDMA_DESC_PLEN0(tlen); + + /* update index */ + chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx); + } + + src += tlen; + len -= tlen; + } + if (i & 0x1) + tx_desc->flags |= HSDMA_DESC_LS0; + else + tx_desc->flags |= HSDMA_DESC_LS1; + + /* rx desc */ + rx_idx = HSDMA_NEXT_DESC(chan->rx_idx); + len = sg->len; + dst = sg->dst_addr; + for (i = 0; i < chan->desc->num_sgs; i++) { + rx_desc = &chan->rx_ring[rx_idx]; + if (len > HSDMA_MAX_PLEN) + tlen = HSDMA_MAX_PLEN; + else + tlen = len; + + rx_desc->addr0 = dst; + rx_desc->flags = HSDMA_DESC_PLEN0(tlen); + + dst += tlen; + len -= tlen; + + /* update index */ + rx_idx = HSDMA_NEXT_DESC(rx_idx); + } + + /* make sure desc and index all up to date */ + wmb(); + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); + + return 0; +} + +static int gdma_next_desc(struct mtk_hsdma_chan *chan) +{ + struct virt_dma_desc *vdesc; + + vdesc = vchan_next_desc(&chan->vchan); + if (!vdesc) { + chan->desc = NULL; + return 0; + } + chan->desc = to_mtk_hsdma_desc(vdesc); + chan->next_sg = 0; + + return 1; +} + +static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + struct mtk_hsdma_desc *desc; + int chan_issued; + + chan_issued = 0; + spin_lock_bh(&chan->vchan.lock); + desc = chan->desc; + if (likely(desc)) { + if (chan->next_sg == desc->num_sgs) { + list_del(&desc->vdesc.node); + vchan_cookie_complete(&desc->vdesc); + chan_issued = gdma_next_desc(chan); + } + } else + dev_dbg(hsdma->ddev.dev, "no desc to complete\n"); + + if (chan_issued) + set_bit(chan->id, &hsdma->chan_issued); + 
spin_unlock_bh(&chan->vchan.lock); +} + +static irqreturn_t mtk_hsdma_irq(int irq, void *devid) +{ + struct mtk_hsdam_engine *hsdma = devid; + u32 status; + + status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS); + if (unlikely(!status)) + return IRQ_NONE; + + if (likely(status & HSDMA_INT_RX_Q0)) + tasklet_schedule(&hsdma->task); + else + dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", + status); + /* clean intr bits */ + mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status); + + return IRQ_HANDLED; +} + +static void mtk_hsdma_issue_pending(struct dma_chan *c) +{ + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); + + spin_lock_bh(&chan->vchan.lock); + if (vchan_issue_pending(&chan->vchan) && !chan->desc) { + if (gdma_next_desc(chan)) { + set_bit(chan->id, &hsdma->chan_issued); + tasklet_schedule(&hsdma->task); + } else + dev_dbg(hsdma->ddev.dev, "no desc to issue\n"); + } + spin_unlock_bh(&chan->vchan.lock); +} + +static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy( + struct dma_chan *c, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); + struct mtk_hsdma_desc *desc; + + if (len <= 0) + return NULL; + + desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC); + if (!desc) { + dev_err(c->device->dev, "alloc memcpy decs error\n"); + return NULL; + } + + desc->sg[0].src_addr = src; + desc->sg[0].dst_addr = dest; + desc->sg[0].len = len; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + return dma_cookie_status(c, cookie, state); +} + +static void mtk_hsdma_free_chan_resources(struct dma_chan *c) +{ + vchan_free_chan_resources(to_virt_chan(c)); +} + +static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc) +{ + kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc)); +} + +static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma) +{ + struct mtk_hsdma_chan *chan; + + if (test_and_clear_bit(0, &hsdma->chan_issued)) { + chan = &hsdma->chan[0]; + if (chan->desc) + mtk_hsdma_start_transfer(hsdma, chan); + else + dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n"); + } +} + +static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma) +{ + struct mtk_hsdma_chan *chan; + int next_idx, drx_idx, cnt; + + chan = &hsdma->chan[0]; + next_idx = HSDMA_NEXT_DESC(chan->rx_idx); + drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX); + + cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK; + if (!cnt) + return; + + chan->next_sg += cnt; + chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK; + + /* update rx crx */ + wmb(); + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); + + mtk_hsdma_chan_done(hsdma, chan); +} + +static void mtk_hsdma_tasklet(unsigned long arg) +{ + struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg; + + mtk_hsdma_rx(hsdma); + mtk_hsdma_tx(hsdma); +} + +static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + int i; + + chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev, + 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), + &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO); + if (!chan->tx_ring) + goto no_mem; + + chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM]; + + /* init tx ring value */ + for (i = 0; i < HSDMA_DESCS_NUM; i++) + chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE; + + return 0; +no_mem: + return -ENOMEM; +} + +static 
void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma, + struct mtk_hsdma_chan *chan) +{ + if (chan->tx_ring) { + dma_free_coherent(hsdma->ddev.dev, + 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), + chan->tx_ring, chan->desc_addr); + chan->tx_ring = NULL; + chan->rx_ring = NULL; + } +} + +static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma) +{ + struct mtk_hsdma_chan *chan; + int ret; + u32 reg; + + /* init desc */ + chan = &hsdma->chan[0]; + ret = mtk_hsdam_alloc_desc(hsdma, chan); + if (ret) + return ret; + + /* tx */ + mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr); + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM); + /* rx */ + mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr + + (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM)); + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM); + /* reset */ + mtk_hsdma_reset_chan(hsdma, chan); + + /* enable rx intr */ + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); + + /* enable dma */ + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); + + /* hardware info */ + reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO); + dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n", + (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK, + (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK); + + hsdma_dump_reg(hsdma); + + return ret; +} + +static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma) +{ + struct mtk_hsdma_chan *chan; + + /* disable dma */ + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); + + /* disable intr */ + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); + + /* free desc */ + chan = &hsdma->chan[0]; + mtk_hsdam_free_desc(hsdma, chan); + + /* tx */ + mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0); + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0); + /* rx */ + mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0); + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0); + /* reset */ + mtk_hsdma_reset_chan(hsdma, chan); +} + +static const struct of_device_id mtk_hsdma_of_match[] = { + { .compatible = "mediatek,mt7621-hsdma" }, + { }, +}; + +static int mtk_hsdma_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct mtk_hsdma_chan *chan; + struct mtk_hsdam_engine *hsdma; + struct dma_device *dd; + struct resource *res; + int ret; + int irq; + void __iomem *base; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + match = of_match_device(mtk_hsdma_of_match, &pdev->dev); + if (!match) + return -EINVAL; + + hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); + if (!hsdma) { + dev_err(&pdev->dev, "alloc dma device failed\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + hsdma->base = base + HSDMA_BASE_OFFSET; + tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -EINVAL; + } + ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq, + 0, dev_name(&pdev->dev), hsdma); + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + return ret; + } + + device_reset(&pdev->dev); + + dd = &hsdma->ddev; + dma_cap_set(DMA_MEMCPY, dd->cap_mask); + dd->copy_align = HSDMA_ALIGN_SIZE; + dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; + dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; + dd->device_terminate_all = mtk_hsdma_terminate_all; + dd->device_tx_status = mtk_hsdma_tx_status; 
+ dd->device_issue_pending = mtk_hsdma_issue_pending; + dd->dev = &pdev->dev; + dd->dev->dma_parms = &hsdma->dma_parms; + dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN); + INIT_LIST_HEAD(&dd->channels); + + chan = &hsdma->chan[0]; + chan->id = 0; + chan->vchan.desc_free = mtk_hsdma_desc_free; + vchan_init(&chan->vchan, dd); + + /* init hardware */ + ret = mtk_hsdma_init(hsdma); + if (ret) { + dev_err(&pdev->dev, "failed to alloc ring descs\n"); + return ret; + } + + ret = dma_async_device_register(dd); + if (ret) { + dev_err(&pdev->dev, "failed to register dma device\n"); + goto err_uninit_hsdma; + } + + ret = of_dma_controller_register(pdev->dev.of_node, + of_dma_xlate_by_chan_id, hsdma); + if (ret) { + dev_err(&pdev->dev, "failed to register of dma controller\n"); + goto err_unregister; + } + + platform_set_drvdata(pdev, hsdma); + + return 0; + +err_unregister: + dma_async_device_unregister(dd); +err_uninit_hsdma: + mtk_hsdma_uninit(hsdma); + return ret; +} + +static int mtk_hsdma_remove(struct platform_device *pdev) +{ + struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev); + + mtk_hsdma_uninit(hsdma); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&hsdma->ddev); + + return 0; +} + +static struct platform_driver mtk_hsdma_driver = { + .probe = mtk_hsdma_probe, + .remove = mtk_hsdma_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mtk_hsdma_of_match, + }, +}; +module_platform_driver(mtk_hsdma_driver); + +MODULE_AUTHOR("Michael Lee "); +MODULE_DESCRIPTION("MTK HSDMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c deleted file mode 100644 index f60302b711813..0000000000000 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c +++ /dev/null @@ -1,771 +0,0 @@ -/* - * Copyright (C) 2015, Michael Lee - * MTK HSDMA support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "virt-dma.h" - -#define HSDMA_BASE_OFFSET 0x800 - -#define HSDMA_REG_TX_BASE 0x00 -#define HSDMA_REG_TX_CNT 0x04 -#define HSDMA_REG_TX_CTX 0x08 -#define HSDMA_REG_TX_DTX 0x0c -#define HSDMA_REG_RX_BASE 0x100 -#define HSDMA_REG_RX_CNT 0x104 -#define HSDMA_REG_RX_CRX 0x108 -#define HSDMA_REG_RX_DRX 0x10c -#define HSDMA_REG_INFO 0x200 -#define HSDMA_REG_GLO_CFG 0x204 -#define HSDMA_REG_RST_CFG 0x208 -#define HSDMA_REG_DELAY_INT 0x20c -#define HSDMA_REG_FREEQ_THRES 0x210 -#define HSDMA_REG_INT_STATUS 0x220 -#define HSDMA_REG_INT_MASK 0x228 -#define HSDMA_REG_SCH_Q01 0x280 -#define HSDMA_REG_SCH_Q23 0x284 - -#define HSDMA_DESCS_MAX 0xfff -#define HSDMA_DESCS_NUM 8 -#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1) -#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK) - -/* HSDMA_REG_INFO */ -#define HSDMA_INFO_INDEX_MASK 0xf -#define HSDMA_INFO_INDEX_SHIFT 24 -#define HSDMA_INFO_BASE_MASK 0xff -#define HSDMA_INFO_BASE_SHIFT 16 -#define HSDMA_INFO_RX_MASK 0xff -#define HSDMA_INFO_RX_SHIFT 8 -#define HSDMA_INFO_TX_MASK 0xff -#define HSDMA_INFO_TX_SHIFT 0 - -/* HSDMA_REG_GLO_CFG */ -#define HSDMA_GLO_TX_2B_OFFSET BIT(31) -#define HSDMA_GLO_CLK_GATE BIT(30) -#define HSDMA_GLO_BYTE_SWAP BIT(29) -#define HSDMA_GLO_MULTI_DMA BIT(10) -#define HSDMA_GLO_TWO_BUF BIT(9) -#define HSDMA_GLO_32B_DESC BIT(8) -#define HSDMA_GLO_BIG_ENDIAN BIT(7) -#define HSDMA_GLO_TX_DONE BIT(6) -#define HSDMA_GLO_BT_MASK 0x3 -#define HSDMA_GLO_BT_SHIFT 4 -#define HSDMA_GLO_RX_BUSY BIT(3) -#define HSDMA_GLO_RX_DMA BIT(2) -#define HSDMA_GLO_TX_BUSY BIT(1) -#define HSDMA_GLO_TX_DMA BIT(0) - -#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT) -#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT) -#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT) -#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT) - -#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \ - HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES) - -/* HSDMA_REG_RST_CFG */ -#define HSDMA_RST_RX_SHIFT 16 -#define HSDMA_RST_TX_SHIFT 0 - -/* HSDMA_REG_DELAY_INT */ -#define HSDMA_DELAY_INT_EN BIT(15) -#define HSDMA_DELAY_PEND_OFFSET 8 -#define HSDMA_DELAY_TIME_OFFSET 0 -#define HSDMA_DELAY_TX_OFFSET 16 -#define HSDMA_DELAY_RX_OFFSET 0 - -#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \ - ((x) << HSDMA_DELAY_PEND_OFFSET)) -#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \ - HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x)) - -/* HSDMA_REG_INT_STATUS */ -#define HSDMA_INT_DELAY_RX_COH BIT(31) -#define HSDMA_INT_DELAY_RX_INT BIT(30) -#define HSDMA_INT_DELAY_TX_COH BIT(29) -#define HSDMA_INT_DELAY_TX_INT BIT(28) -#define HSDMA_INT_RX_MASK 0x3 -#define HSDMA_INT_RX_SHIFT 16 -#define HSDMA_INT_RX_Q0 BIT(16) -#define HSDMA_INT_TX_MASK 0xf -#define HSDMA_INT_TX_SHIFT 0 -#define HSDMA_INT_TX_Q0 BIT(0) - -/* tx/rx dma desc flags */ -#define HSDMA_PLEN_MASK 0x3fff -#define HSDMA_DESC_DONE BIT(31) -#define HSDMA_DESC_LS0 BIT(30) -#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16) -#define HSDMA_DESC_TAG BIT(15) -#define HSDMA_DESC_LS1 BIT(14) -#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK) - -/* align 4 bytes */ -#define HSDMA_ALIGN_SIZE 3 -/* align size 128bytes */ -#define HSDMA_MAX_PLEN 0x3f80 - -struct hsdma_desc { - u32 addr0; - u32 flags; - u32 addr1; - u32 unused; -}; - -struct mtk_hsdma_sg { - dma_addr_t src_addr; - dma_addr_t dst_addr; - u32 len; -}; - -struct mtk_hsdma_desc { - 
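[Editor's note] A side observation on the HSDMA_DESCS_NUM / HSDMA_DESCS_MASK / HSDMA_NEXT_DESC() definitions above: because the ring size is a power of two, the driver advances its descriptor indices with a simple AND mask rather than a modulo. The following self-contained, user-space sketch reproduces only that wrap-around arithmetic; the constant values are copied for illustration, everything else is hypothetical.

#include <stdio.h>

#define DESCS_NUM    8                      /* must be a power of two */
#define DESCS_MASK   (DESCS_NUM - 1)
#define NEXT_DESC(x) (((x) + 1) & DESCS_MASK)

int main(void)
{
	int idx = 0;

	/* Walk the ring twice; the index wraps from 7 back to 0. */
	for (int i = 0; i < 2 * DESCS_NUM; i++) {
		printf("step %2d -> slot %d\n", i, idx);
		idx = NEXT_DESC(idx);
	}
	return 0;
}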
struct virt_dma_desc vdesc; - unsigned int num_sgs; - struct mtk_hsdma_sg sg[1]; -}; - -struct mtk_hsdma_chan { - struct virt_dma_chan vchan; - unsigned int id; - dma_addr_t desc_addr; - int tx_idx; - int rx_idx; - struct hsdma_desc *tx_ring; - struct hsdma_desc *rx_ring; - struct mtk_hsdma_desc *desc; - unsigned int next_sg; -}; - -struct mtk_hsdam_engine { - struct dma_device ddev; - struct device_dma_parameters dma_parms; - void __iomem *base; - struct tasklet_struct task; - volatile unsigned long chan_issued; - - struct mtk_hsdma_chan chan[1]; -}; - -static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev( - struct mtk_hsdma_chan *chan) -{ - return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine, - ddev); -} - -static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c) -{ - return container_of(c, struct mtk_hsdma_chan, vchan.chan); -} - -static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc( - struct virt_dma_desc *vdesc) -{ - return container_of(vdesc, struct mtk_hsdma_desc, vdesc); -} - -static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg) -{ - return readl(hsdma->base + reg); -} - -static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma, - unsigned reg, u32 val) -{ - writel(val, hsdma->base + reg); -} - -static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - chan->tx_idx = 0; - chan->rx_idx = HSDMA_DESCS_NUM - 1; - - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); - - mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, - 0x1 << (chan->id + HSDMA_RST_TX_SHIFT)); - mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, - 0x1 << (chan->id + HSDMA_RST_RX_SHIFT)); -} - -static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma) -{ - dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \ - "tctx %08x, tdtx: %08x, rbase %08x, " \ - "rcnt %08x, rctx %08x, rdtx %08x\n", - mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE), - mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT), - mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX), - mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX), - mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE), - mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT), - mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX), - mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX)); - - dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \ - "intr_stat %08x, intr_mask %08x\n", - mtk_hsdma_read(hsdma, HSDMA_REG_INFO), - mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG), - mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT), - mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS), - mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK)); -} - -static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - struct hsdma_desc *tx_desc; - struct hsdma_desc *rx_desc; - int i; - - dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n", - chan->tx_idx, chan->rx_idx); - - for (i = 0; i < HSDMA_DESCS_NUM; i++) { - tx_desc = &chan->tx_ring[i]; - rx_desc = &chan->rx_ring[i]; - - dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \ - "tx addr1: %08x, rx addr0 %08x, flags %08x\n", - i, tx_desc->addr0, tx_desc->flags, \ - tx_desc->addr1, rx_desc->addr0, rx_desc->flags); - } -} - -static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - int i; - - /* disable dma */ - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); - - /* disable intr */ - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); - - /* init desc value */ - for (i = 0; i < HSDMA_DESCS_NUM; i++) { - chan->tx_ring[i].addr0 = 0; - 
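[Editor's note] For context on the mtk_hsdma_read()/mtk_hsdma_write() helpers above: wrapping readl()/writel() in small inline accessors that take the engine pointer keeps the register-offset arithmetic in one place. A minimal sketch of the pattern, assuming a hypothetical engine structure and register name (not taken from this driver):

#include <linux/io.h>
#include <linux/types.h>

struct example_engine {
	void __iomem *base;
};

#define EXAMPLE_REG_STATUS	0x220	/* illustrative offset only */

static inline u32 example_read(struct example_engine *eng, u32 reg)
{
	return readl(eng->base + reg);
}

static inline void example_write(struct example_engine *eng, u32 reg, u32 val)
{
	writel(val, eng->base + reg);
}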
chan->tx_ring[i].flags = HSDMA_DESC_LS0 | - HSDMA_DESC_DONE; - } - for (i = 0; i < HSDMA_DESCS_NUM; i++) { - chan->rx_ring[i].addr0 = 0; - chan->rx_ring[i].flags = 0; - } - - /* reset */ - mtk_hsdma_reset_chan(hsdma, chan); - - /* enable intr */ - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); - - /* enable dma */ - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); -} - -static int mtk_hsdma_terminate_all(struct dma_chan *c) -{ - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); - struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); - unsigned long timeout; - LIST_HEAD(head); - - spin_lock_bh(&chan->vchan.lock); - chan->desc = NULL; - clear_bit(chan->id, &hsdma->chan_issued); - vchan_get_all_descriptors(&chan->vchan, &head); - spin_unlock_bh(&chan->vchan.lock); - - vchan_dma_desc_free_list(&chan->vchan, &head); - - /* wait dma transfer complete */ - timeout = jiffies + msecs_to_jiffies(2000); - while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) & - (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) { - if (time_after_eq(jiffies, timeout)) { - hsdma_dump_desc(hsdma, chan); - mtk_hsdma_reset(hsdma, chan); - dev_err(hsdma->ddev.dev, "timeout, reset it\n"); - break; - } - cpu_relax(); - } - - return 0; -} - -static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - dma_addr_t src, dst; - size_t len, tlen; - struct hsdma_desc *tx_desc, *rx_desc; - struct mtk_hsdma_sg *sg; - unsigned int i; - int rx_idx; - - sg = &chan->desc->sg[0]; - len = sg->len; - chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN); - - /* tx desc */ - src = sg->src_addr; - for (i = 0; i < chan->desc->num_sgs; i++) { - tx_desc = &chan->tx_ring[chan->tx_idx]; - - if (len > HSDMA_MAX_PLEN) - tlen = HSDMA_MAX_PLEN; - else - tlen = len; - - if (i & 0x1) { - tx_desc->addr1 = src; - tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); - } else { - tx_desc->addr0 = src; - tx_desc->flags = HSDMA_DESC_PLEN0(tlen); - - /* update index */ - chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx); - } - - src += tlen; - len -= tlen; - } - if (i & 0x1) - tx_desc->flags |= HSDMA_DESC_LS0; - else - tx_desc->flags |= HSDMA_DESC_LS1; - - /* rx desc */ - rx_idx = HSDMA_NEXT_DESC(chan->rx_idx); - len = sg->len; - dst = sg->dst_addr; - for (i = 0; i < chan->desc->num_sgs; i++) { - rx_desc = &chan->rx_ring[rx_idx]; - if (len > HSDMA_MAX_PLEN) - tlen = HSDMA_MAX_PLEN; - else - tlen = len; - - rx_desc->addr0 = dst; - rx_desc->flags = HSDMA_DESC_PLEN0(tlen); - - dst += tlen; - len -= tlen; - - /* update index */ - rx_idx = HSDMA_NEXT_DESC(rx_idx); - } - - /* make sure desc and index all up to date */ - wmb(); - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); - - return 0; -} - -static int gdma_next_desc(struct mtk_hsdma_chan *chan) -{ - struct virt_dma_desc *vdesc; - - vdesc = vchan_next_desc(&chan->vchan); - if (!vdesc) { - chan->desc = NULL; - return 0; - } - chan->desc = to_mtk_hsdma_desc(vdesc); - chan->next_sg = 0; - - return 1; -} - -static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - struct mtk_hsdma_desc *desc; - int chan_issued; - - chan_issued = 0; - spin_lock_bh(&chan->vchan.lock); - desc = chan->desc; - if (likely(desc)) { - if (chan->next_sg == desc->num_sgs) { - list_del(&desc->vdesc.node); - vchan_cookie_complete(&desc->vdesc); - chan_issued = gdma_next_desc(chan); - } - } else - dev_dbg(hsdma->ddev.dev, "no desc to complete\n"); - - if (chan_issued) - set_bit(chan->id, &hsdma->chan_issued); - 
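[Editor's note] As an aside on mtk_hsdma_start_transfer() above: each copy is split into pieces no larger than HSDMA_MAX_PLEN, and the number of pieces comes from DIV_ROUND_UP. The stand-alone sketch below isolates just that splitting loop so its structure is easier to follow; the buffer lengths chosen are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define MAX_PLEN		0x3f80	/* largest payload per descriptor slot */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void split_copy(size_t len)
{
	size_t num_sgs = DIV_ROUND_UP(len, MAX_PLEN);
	size_t off = 0;

	printf("len=%zu -> %zu descriptor slots\n", len, num_sgs);
	for (size_t i = 0; i < num_sgs; i++) {
		size_t tlen = (len > MAX_PLEN) ? MAX_PLEN : len;

		printf("  piece %zu: offset %zu, length %zu\n", i, off, tlen);
		off += tlen;
		len -= tlen;
	}
}

int main(void)
{
	split_copy(4096);	/* fits in one piece */
	split_copy(65536);	/* needs several pieces */
	return 0;
}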
spin_unlock_bh(&chan->vchan.lock); -} - -static irqreturn_t mtk_hsdma_irq(int irq, void *devid) -{ - struct mtk_hsdam_engine *hsdma = devid; - u32 status; - - status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS); - if (unlikely(!status)) - return IRQ_NONE; - - if (likely(status & HSDMA_INT_RX_Q0)) - tasklet_schedule(&hsdma->task); - else - dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", - status); - /* clean intr bits */ - mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status); - - return IRQ_HANDLED; -} - -static void mtk_hsdma_issue_pending(struct dma_chan *c) -{ - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); - struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); - - spin_lock_bh(&chan->vchan.lock); - if (vchan_issue_pending(&chan->vchan) && !chan->desc) { - if (gdma_next_desc(chan)) { - set_bit(chan->id, &hsdma->chan_issued); - tasklet_schedule(&hsdma->task); - } else - dev_dbg(hsdma->ddev.dev, "no desc to issue\n"); - } - spin_unlock_bh(&chan->vchan.lock); -} - -static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy( - struct dma_chan *c, dma_addr_t dest, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); - struct mtk_hsdma_desc *desc; - - if (len <= 0) - return NULL; - - desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC); - if (!desc) { - dev_err(c->device->dev, "alloc memcpy decs error\n"); - return NULL; - } - - desc->sg[0].src_addr = src; - desc->sg[0].dst_addr = dest; - desc->sg[0].len = len; - - return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); -} - -static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, - dma_cookie_t cookie, - struct dma_tx_state *state) -{ - return dma_cookie_status(c, cookie, state); -} - -static void mtk_hsdma_free_chan_resources(struct dma_chan *c) -{ - vchan_free_chan_resources(to_virt_chan(c)); -} - -static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc) -{ - kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc)); -} - -static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma) -{ - struct mtk_hsdma_chan *chan; - - if (test_and_clear_bit(0, &hsdma->chan_issued)) { - chan = &hsdma->chan[0]; - if (chan->desc) - mtk_hsdma_start_transfer(hsdma, chan); - else - dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n"); - } -} - -static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma) -{ - struct mtk_hsdma_chan *chan; - int next_idx, drx_idx, cnt; - - chan = &hsdma->chan[0]; - next_idx = HSDMA_NEXT_DESC(chan->rx_idx); - drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX); - - cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK; - if (!cnt) - return; - - chan->next_sg += cnt; - chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK; - - /* update rx crx */ - wmb(); - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); - - mtk_hsdma_chan_done(hsdma, chan); -} - -static void mtk_hsdma_tasklet(unsigned long arg) -{ - struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg; - - mtk_hsdma_rx(hsdma); - mtk_hsdma_tx(hsdma); -} - -static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - int i; - - chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev, - 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), - &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO); - if (!chan->tx_ring) - goto no_mem; - - chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM]; - - /* init tx ring value */ - for (i = 0; i < HSDMA_DESCS_NUM; i++) - chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE; - - return 0; -no_mem: - return -ENOMEM; -} - -static 
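[Editor's note] One detail worth calling out in mtk_hsdam_alloc_desc() above: both rings live in a single coherent allocation, with the RX ring simply aliasing the second half of the buffer, so a single free releases everything. A user-space analogue of that layout (plain calloc standing in for dma_alloc_coherent; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define DESCS_NUM 8

struct desc {
	unsigned int addr0;
	unsigned int flags;
	unsigned int addr1;
	unsigned int unused;
};

int main(void)
{
	/*
	 * One allocation holds DESCS_NUM TX descriptors followed by
	 * DESCS_NUM RX descriptors, mirroring the driver's single
	 * coherent allocation.
	 */
	struct desc *tx_ring = calloc(2 * DESCS_NUM, sizeof(*tx_ring));
	struct desc *rx_ring;

	if (!tx_ring)
		return 1;

	rx_ring = &tx_ring[DESCS_NUM];

	printf("tx ring at %p, rx ring at %p (offset %zu bytes)\n",
	       (void *)tx_ring, (void *)rx_ring,
	       (size_t)((char *)rx_ring - (char *)tx_ring));

	free(tx_ring);		/* a single free releases both rings */
	return 0;
}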
void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma, - struct mtk_hsdma_chan *chan) -{ - if (chan->tx_ring) { - dma_free_coherent(hsdma->ddev.dev, - 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), - chan->tx_ring, chan->desc_addr); - chan->tx_ring = NULL; - chan->rx_ring = NULL; - } -} - -static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma) -{ - struct mtk_hsdma_chan *chan; - int ret; - u32 reg; - - /* init desc */ - chan = &hsdma->chan[0]; - ret = mtk_hsdam_alloc_desc(hsdma, chan); - if (ret) - return ret; - - /* tx */ - mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr); - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM); - /* rx */ - mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr + - (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM)); - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM); - /* reset */ - mtk_hsdma_reset_chan(hsdma, chan); - - /* enable rx intr */ - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); - - /* enable dma */ - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); - - /* hardware info */ - reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO); - dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n", - (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK, - (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK); - - hsdma_dump_reg(hsdma); - - return ret; -} - -static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma) -{ - struct mtk_hsdma_chan *chan; - - /* disable dma */ - mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); - - /* disable intr */ - mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); - - /* free desc */ - chan = &hsdma->chan[0]; - mtk_hsdam_free_desc(hsdma, chan); - - /* tx */ - mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0); - mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0); - /* rx */ - mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0); - mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0); - /* reset */ - mtk_hsdma_reset_chan(hsdma, chan); -} - -static const struct of_device_id mtk_hsdma_of_match[] = { - { .compatible = "mediatek,mt7621-hsdma" }, - { }, -}; - -static int mtk_hsdma_probe(struct platform_device *pdev) -{ - const struct of_device_id *match; - struct mtk_hsdma_chan *chan; - struct mtk_hsdam_engine *hsdma; - struct dma_device *dd; - struct resource *res; - int ret; - int irq; - void __iomem *base; - - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) - return ret; - - match = of_match_device(mtk_hsdma_of_match, &pdev->dev); - if (!match) - return -EINVAL; - - hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); - if (!hsdma) { - dev_err(&pdev->dev, "alloc dma device failed\n"); - return -EINVAL; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - hsdma->base = base + HSDMA_BASE_OFFSET; - tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -EINVAL; - } - ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq, - 0, dev_name(&pdev->dev), hsdma); - if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); - return ret; - } - - device_reset(&pdev->dev); - - dd = &hsdma->ddev; - dma_cap_set(DMA_MEMCPY, dd->cap_mask); - dd->copy_align = HSDMA_ALIGN_SIZE; - dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; - dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; - dd->device_terminate_all = mtk_hsdma_terminate_all; - dd->device_tx_status = mtk_hsdma_tx_status; 
- dd->device_issue_pending = mtk_hsdma_issue_pending; - dd->dev = &pdev->dev; - dd->dev->dma_parms = &hsdma->dma_parms; - dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN); - INIT_LIST_HEAD(&dd->channels); - - chan = &hsdma->chan[0]; - chan->id = 0; - chan->vchan.desc_free = mtk_hsdma_desc_free; - vchan_init(&chan->vchan, dd); - - /* init hardware */ - ret = mtk_hsdma_init(hsdma); - if (ret) { - dev_err(&pdev->dev, "failed to alloc ring descs\n"); - return ret; - } - - ret = dma_async_device_register(dd); - if (ret) { - dev_err(&pdev->dev, "failed to register dma device\n"); - goto err_uninit_hsdma; - } - - ret = of_dma_controller_register(pdev->dev.of_node, - of_dma_xlate_by_chan_id, hsdma); - if (ret) { - dev_err(&pdev->dev, "failed to register of dma controller\n"); - goto err_unregister; - } - - platform_set_drvdata(pdev, hsdma); - - return 0; - -err_unregister: - dma_async_device_unregister(dd); -err_uninit_hsdma: - mtk_hsdma_uninit(hsdma); - return ret; -} - -static int mtk_hsdma_remove(struct platform_device *pdev) -{ - struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev); - - mtk_hsdma_uninit(hsdma); - - of_dma_controller_free(pdev->dev.of_node); - dma_async_device_unregister(&hsdma->ddev); - - return 0; -} - -static struct platform_driver mtk_hsdma_driver = { - .probe = mtk_hsdma_probe, - .remove = mtk_hsdma_remove, - .driver = { - .name = "hsdma-mt7621", - .of_match_table = mtk_hsdma_of_match, - }, -}; -module_platform_driver(mtk_hsdma_driver); - -MODULE_AUTHOR("Michael Lee "); -MODULE_DESCRIPTION("MTK HSDMA driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 276c965afc114..5c6630255ac20 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c index aa2f62acc994d..4dd6f3fb59060 100644 --- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c +++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c @@ -39,7 +39,7 @@ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) static const struct ieee80211_regdomain rtw_regdom_rd = { - .n_reg_rules = 3, + .n_reg_rules = 2, .alpha2 = "99", .reg_rules = { RTW_2GHZ_CH01_11, diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 25eb3891e34b8..56bfb30b0ef58 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -89,8 +89,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb) if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)) length += sizeof(struct cpl_tx_data_iso); -#define MAX_IMM_TX_PKT_LEN 256 - return length <= MAX_IMM_TX_PKT_LEN; + return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN; } /* diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c1592403222f5..239443ce52001 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -391,6 +391,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { 
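[Editor's note] Regarding the new Edimax EW-7811UN V2 entry in rtw_usb_id_tbl above: supporting another dongle that reuses an already-supported chipset typically amounts to one more line in the driver's usb_device_id table. A minimal, purely illustrative sketch of such a table follows; the vendor/product numbers are made up and do not correspond to real devices.

#include <linux/usb.h>
#include <linux/module.h>

/* Illustrative ID table only; 0x1234/0x5678/0x9abc are placeholder values. */
static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* original reference design */
	{ USB_DEVICE(0x1234, 0x9abc) },	/* newly supported clone */
	{ }				/* terminating entry must be last */
};
MODULE_DEVICE_TABLE(usb, example_id_table);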
USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, + /* ELMO L-12F document camera */ + { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG }, + /* Broadcom BCM92035DGROM BT dongle */ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -415,6 +418,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* novation SoundControl XL */ + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, @@ -495,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, - /* novation SoundControl XL */ - { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, - { } /* terminating entry must be last */ }; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index a5c8329fd4625..56a35e0160392 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1512,19 +1512,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, if (num_packets > max_hc_pkt_count) { num_packets = max_hc_pkt_count; chan->xfer_len = num_packets * chan->max_packet; + } else if (chan->ep_is_in) { + /* + * Always program an integral # of max packets + * for IN transfers. + * Note: This assumes that the input buffer is + * aligned and sized accordingly. + */ + chan->xfer_len = num_packets * chan->max_packet; } } else { /* Need 1 packet for transfer length of 0 */ num_packets = 1; } - if (chan->ep_is_in) - /* - * Always program an integral # of max packets for IN - * transfers - */ - chan->xfer_len = num_packets * chan->max_packet; - if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index a052d39b4375e..d5f4ec1b73b15 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, &short_read); if (urb->actual_length + xfer_length > urb->length) { - dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); + dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__); xfer_length = urb->length - urb->actual_length; } @@ -1977,6 +1977,18 @@ error: qtd->error_count++; dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_XACT_ERR); + /* + * We can get here after a completed transaction + * (urb->actual_length >= urb->length) which was not reported + * as completed. If that is the case, and we do not abort + * the transfer, a transfer of size 0 will be enqueued + * subsequently. If urb->actual_length is not DMA-aligned, + * the buffer will then point to an unaligned address, and + * the resulting behavior is undefined. Bail out in that + * situation. 
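[Editor's note] To make the dwc2_hc_start_transfer() change above concrete: for IN transfers the programmed length must be an integral number of max-packet-size packets, so the requested length is effectively rounded up. The sketch below isolates just that rounding (the real code also caps the packet count against the controller limit, which is omitted here; values are illustrative).

#include <stdio.h>

/*
 * Round a requested IN transfer up to a whole number of packets,
 * as the host controller requires.
 */
static unsigned int in_xfer_len(unsigned int requested, unsigned int max_packet)
{
	unsigned int num_packets = (requested + max_packet - 1) / max_packet;

	if (num_packets == 0)
		num_packets = 1;	/* a zero-length transfer still needs one packet */

	return num_packets * max_packet;
}

int main(void)
{
	printf("%u\n", in_xfer_len(100, 64));	/* -> 128 */
	printf("%u\n", in_xfer_len(512, 512));	/* -> 512 */
	printf("%u\n", in_xfer_len(0, 64));	/* -> 64  */
	return 0;
}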
+ */ + if (qtd->urb->actual_length >= qtd->urb->length) + qtd->error_count = 3; dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 6ab5c48f5d873..a0806dca3de9d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -593,8 +593,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); if (desc->bInterval) { - params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); - dep->interval = 1 << (desc->bInterval - 1); + u8 bInterval_m1; + + /* + * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it + * must be set to 0 when the controller operates in full-speed. + */ + bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); + if (dwc->gadget.speed == USB_SPEED_FULL) + bInterval_m1 = 0; + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && + dwc->gadget.speed == USB_SPEED_FULL) + dep->interval = desc->bInterval; + else + dep->interval = 1 << (desc->bInterval - 1); + + params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); } return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index fb5ed97572e5f..0cb0c638fd131 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) struct snd_uac_chip *uac = prm->uac; /* i/f shutting down */ - if (!prm->ep_enabled || req->status == -ESHUTDOWN) + if (!prm->ep_enabled) { + usb_ep_free_request(ep, req); + return; + } + + if (req->status == -ESHUTDOWN) return; /* @@ -351,8 +356,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep) for (i = 0; i < params->req_number; i++) { if (prm->ureq[i].req) { - usb_ep_dequeue(ep, prm->ureq[i].req); - usb_ep_free_request(ep, prm->ureq[i].req); + if (usb_ep_dequeue(ep, prm->ureq[i].req)) + usb_ep_free_request(ep, prm->ureq[i].req); + /* + * If usb_ep_dequeue() cannot successfully dequeue the + * request, the request will be freed by the completion + * callback. 
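[Editor's note] A brief illustration of the dwc3_gadget_set_ep_config() change above: the register field only accepts bInterval_m1 values 0..13 and must be forced to 0 in full-speed operation, while the cached interval is derived differently for full-speed interrupt endpoints. The sketch below isolates that clamping logic under simplified, stand-in speed constants; it is not the driver's actual code.

#include <stdio.h>

enum speed { SPEED_FULL, SPEED_HIGH };

struct ep_cfg {
	unsigned char bInterval_m1;	/* value written to the controller */
	unsigned int interval;		/* cached interval in (micro)frames */
};

static struct ep_cfg config_interval(unsigned char bInterval, enum speed spd,
				     int is_interrupt_ep)
{
	struct ep_cfg cfg;
	unsigned char m1 = bInterval - 1;

	if (m1 > 13)
		m1 = 13;			/* valid register range is 0..13 */
	if (spd == SPEED_FULL)
		m1 = 0;				/* must be 0 at full speed */

	if (is_interrupt_ep && spd == SPEED_FULL)
		cfg.interval = bInterval;	/* full-speed INT: frames, not 2^(n-1) */
	else
		cfg.interval = 1u << (bInterval - 1);

	cfg.bInterval_m1 = m1;
	return cfg;
}

int main(void)
{
	struct ep_cfg c = config_interval(4, SPEED_FULL, 1);

	printf("bInterval_m1=%u interval=%u\n", c.bInterval_m1, c.interval);
	return 0;
}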
+ */ + prm->ureq[i].req = NULL; } } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 28b4cacf7a787..9ce21b8caf35c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2108,32 +2108,35 @@ int musb_queue_resume_work(struct musb *musb, { struct musb_pending_work *w; unsigned long flags; + bool is_suspended; int error; if (WARN_ON(!callback)) return -EINVAL; - if (pm_runtime_active(musb->controller)) - return callback(musb, data); + spin_lock_irqsave(&musb->list_lock, flags); + is_suspended = musb->is_runtime_suspended; + + if (is_suspended) { + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) { + error = -ENOMEM; + goto out_unlock; + } - w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); - if (!w) - return -ENOMEM; + w->callback = callback; + w->data = data; - w->callback = callback; - w->data = data; - spin_lock_irqsave(&musb->list_lock, flags); - if (musb->is_runtime_suspended) { list_add_tail(&w->node, &musb->pending_list); error = 0; - } else { - dev_err(musb->controller, "could not add resume work %p\n", - callback); - devm_kfree(musb->controller, w); - error = -EINPROGRESS; } + +out_unlock: spin_unlock_irqrestore(&musb->list_lock, flags); + if (!is_suspended) + error = callback(musb, data); + return error; } EXPORT_SYMBOL_GPL(musb_queue_resume_work); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b2364e3794295..a5891cb2c72e3 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1370,8 +1370,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) index_value = get_ftdi_divisor(tty, port); value = (u16)index_value; index = (u16)(index_value >> 16); - if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) || - (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) { + if (priv->chip_type == FT2232C || priv->chip_type == FT2232H || + priv->chip_type == FT4232H || priv->chip_type == FT232H || + priv->chip_type == FTX) { /* Probably the BM type needs the MSB of the encoded fractional * divider also moved like for the chips above. Any infos? 
*/ index = (u16)((index << 8) | priv->interface); diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 1f65bee521e69..1e3ee2bfbcd03 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1250,8 +1250,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 4a7bd26841af0..c78dfb7fd3948 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1340,8 +1340,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) + if (!urb->transfer_buffer) { + bytes_sent = -ENOMEM; goto exit; + } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index cdf25a39fca79..aeaa3756f6ee8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1569,7 +1569,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ + .driver_info = RSVD(3) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index f99558d006bf4..97c4319797d5c 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1303,6 +1303,7 @@ config FB_ATY select FB_CFB_IMAGEBLIT select FB_BACKLIGHT if FB_ATY_BACKLIGHT select FB_MACMODES if PPC + select FB_ATY_CT if SPARC64 && PCI help This driver supports graphics boards with the ATI Mach64 chips. Say Y if you have such a graphics board. @@ -1313,7 +1314,6 @@ config FB_ATY config FB_ATY_CT bool "Mach64 CT/VT/GT/LT (incl. 
3D RAGE) support" depends on PCI && FB_ATY - default y if SPARC64 && PCI help Say Y here to support use of ATI's 64-bit Rage boards (or other boards based on the Mach64 CT, VT, GT, and LT chipsets) as a diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c index 8023cf28657ab..c862dd38b6f5d 100644 --- a/drivers/watchdog/mei_wdt.c +++ b/drivers/watchdog/mei_wdt.c @@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt) watchdog_set_drvdata(&wdt->wdd, wdt); watchdog_stop_on_reboot(&wdt->wdd); + watchdog_stop_on_unregister(&wdt->wdd); ret = watchdog_register_device(&wdt->wdd); if (ret) { diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 41c5749f4db78..5400a876d73fb 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry, return -EIO; bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino); - if (!bh_new) + if (!bh_new) { + affs_brelse(bh_old); return -EIO; + } /* Remove old header from its parent directory. */ affs_lock_dir(old_dir); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f36b2a386aae8..49db37b1f6e36 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -267,9 +267,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, ret = btrfs_inc_ref(trans, root, cow, 1); else ret = btrfs_inc_ref(trans, root, cow, 0); - - if (ret) + if (ret) { + btrfs_tree_unlock(cow); + free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); return ret; + } btrfs_mark_buffer_dirty(cow); *cow_ret = cow; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 652b0b16e93e2..6511cb71986c9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -743,8 +743,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, while (num_entries) { e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); - if (!e) + if (!e) { + ret = -ENOMEM; goto free_cache; + } ret = io_ctl_read_entry(&io_ctl, e, &type); if (ret) { @@ -753,6 +755,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } if (!e->bytes) { + ret = -1; kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; } @@ -773,6 +776,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, e->bitmap = kmem_cache_zalloc( btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!e->bitmap) { + ret = -ENOMEM; kmem_cache_free( btrfs_free_space_cachep, e); goto free_cache; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index eedcb7bf50e9e..e6e4e6fb2adde 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1291,9 +1291,7 @@ static void __del_reloc_root(struct btrfs_root *root) RB_CLEAR_NODE(&node->rb_node); } spin_unlock(&rc->reloc_root_tree.lock); - if (!node) - return; - BUG_ON((struct btrfs_root *)node->data != root); + ASSERT(!node || (struct btrfs_root *)node->data == root); } spin_lock(&fs_info->trans_lock); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6285085195c15..632249ce61eba 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3882,6 +3882,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL); if (cifs_sb->prepath == NULL) return -ENOMEM; + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; } return 0; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 8f7e0ad5b5ef1..0dde6385a1258 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2315,11 +2315,10 @@ again: (frame - 1)->bh); if (err) goto 
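[Editor's note] Several of the filesystem fixes above (affs_xrename(), btrfs_copy_root(), __load_free_space_cache()) share one theme: when a second acquisition fails, whatever was acquired first must be released before returning the error. A generic, self-contained sketch of that pattern; the helper name and buffer sizes are arbitrary examples, not code from these filesystems.

#include <stdio.h>
#include <stdlib.h>

/*
 * Acquire two buffers; if the second allocation fails, the first must
 * be released before propagating the error, otherwise it leaks.
 */
static int acquire_pair(char **a, char **b)
{
	*a = malloc(4096);
	if (!*a)
		return -1;

	*b = malloc(4096);
	if (!*b) {
		free(*a);	/* the fix: drop what we already hold */
		*a = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	char *a, *b;

	if (acquire_pair(&a, &b)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	free(b);
	free(a);
	return 0;
}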
journal_error; - if (restart) { - err = ext4_handle_dirty_dx_node(handle, dir, - frame->bh); + err = ext4_handle_dirty_dx_node(handle, dir, + frame->bh); + if (err) goto journal_error; - } } else { struct dx_root *dxroot; memcpy((char *) entries2, (char *) entries, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 59b5c0b032bb5..95330dfdbb1af 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -675,6 +675,10 @@ int f2fs_truncate(struct inode *inode) return -EIO; } + err = dquot_initialize(inode); + if (err) + return err; + /* we should check inline_data size */ if (!f2fs_may_inline_data(inode)) { err = f2fs_convert_inline_inode(inode); @@ -756,7 +760,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + if (!in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(inode, mode); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 2fabeb0bb28fd..299f295fcb6c7 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -193,6 +193,10 @@ int f2fs_convert_inline_inode(struct inode *inode) if (!f2fs_has_inline_data(inode)) return 0; + err = dquot_initialize(inode); + if (err) + return err; + page = f2fs_grab_cache_page(inode->i_mapping, 0, false); if (!page) return -ENOMEM; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 62edf8f5615fa..56dddc1f8dddc 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -283,7 +283,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; - int lvb_needs_unlock = 0; int error; if (gl->gl_lksb.sb_lkid == 0) { @@ -296,13 +295,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); - /* don't want to skip dlm_unlock writing the lvb when lock is ex */ - - if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) - lvb_needs_unlock = 1; + /* don't want to skip dlm_unlock writing the lvb when lock has one */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && - !lvb_needs_unlock) { + !gl->gl_lksb.sb_lvbptr) { gfs2_glock_free(gl); return; } diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index 947ce22f5b3c3..55df4d80793ba 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, inode->i_ino); + brelse(bh); return -EIO; } diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index cac468f04820e..558e7c51ce0d4 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, dir->i_ino); + brelse(bh); return 0; } diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index be7c8a6a57480..4fe64519870f1 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c @@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", je16_to_cpu(temp->u.nodetype)); jffs2_sum_disable_collecting(c->summary); + /* The above call removes the list, nothing more to do */ + goto bail_rwcompat; } else { BUG(); /* unknown node in summary information */ } @@ -794,6 +796,7 @@ static int 
jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock c->summary->sum_num--; } + bail_rwcompat: jffs2_sum_reset_collected(c->summary); diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 49263e220dbcf..687b07b9b4f62 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -1669,7 +1669,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen) } else if (rc == -ENOSPC) { /* search for next smaller log2 block */ l2nb = BLKSTOL2(nblocks) - 1; - nblocks = 1 << l2nb; + nblocks = 1LL << l2nb; } else { /* Trim any already allocated blocks */ jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n"); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 0d4b5b9843b62..a2fb866ff76e8 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -654,6 +654,12 @@ static int ntfs_read_locked_inode(struct inode *vi) } a = ctx->attr; /* Get the standard information attribute value. */ + if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset) + + le32_to_cpu(a->data.resident.value_length) > + (u8 *)ctx->mrec + vol->mft_record_size) { + ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode."); + goto unm_err_out; + } si = (STANDARD_INFORMATION*)((u8*)a + le16_to_cpu(a->data.resident.value_offset)); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9b2ed62dd6385..19b0d358a0d6e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2154,7 +2154,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g o2hb_nego_timeout_handler, reg, NULL, ®->hr_handler_list); if (ret) - goto free; + goto remove_item; ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key, sizeof(struct o2hb_nego_msg), @@ -2173,6 +2173,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g unregister_handler: o2net_unregister_handler_list(®->hr_handler_list); +remove_item: + spin_lock(&o2hb_live_lock); + list_del(®->hr_all_item); + if (o2hb_global_heartbeat_active()) + clear_bit(reg->hr_region_num, o2hb_region_bitmap); + spin_unlock(&o2hb_live_lock); free: kfree(reg); return ERR_PTR(ret); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 2197bf68f2786..904c2a60f5bab 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -250,7 +250,7 @@ static int pstore_compress(const void *in, void *out, { int ret; - if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION)) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS)) return -EINVAL; ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); @@ -650,7 +650,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *decompressed; - if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. 
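[Editor's note] The jfs_dmap.c change above is a classic integer-promotion fix: "1 << l2nb" is evaluated as a 32-bit int, so it overflows (undefined behaviour) once l2nb reaches 31, even though the result is assigned to a 64-bit variable; "1LL << l2nb" performs the shift in 64-bit arithmetic. A small, well-defined demonstration (the shift count is illustrative):

#include <stdio.h>

int main(void)
{
	int l2nb = 40;			/* plausible log2 block count on a large fs */
	long long nblocks;

	/*
	 * Wrong (as in the old code): 1 << l2nb is computed as a 32-bit int,
	 * which overflows long before the assignment widens it:
	 *
	 *	nblocks = 1 << l2nb;	// undefined behaviour for l2nb >= 31
	 *
	 * Right: force the shift itself into 64-bit arithmetic.
	 */
	nblocks = 1LL << l2nb;

	printf("1LL << %d = %lld\n", l2nb, nblocks);
	return 0;
}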
*/ diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index d99710270a373..addfaae8decfd 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c @@ -165,19 +165,24 @@ static int v2_read_file_info(struct super_block *sb, int type) quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).", (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits, i_size_read(sb_dqopt(sb)->files[type])); - goto out; + goto out_free; } if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { quota_error(sb, "Free block number too big (%u >= %u).", qinfo->dqi_free_blk, qinfo->dqi_blocks); - goto out; + goto out_free; } if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { quota_error(sb, "Block with free entry too big (%u >= %u).", qinfo->dqi_free_entry, qinfo->dqi_blocks); - goto out; + goto out_free; } ret = 0; +out_free: + if (ret) { + kfree(info->dqi_priv); + info->dqi_priv = NULL; + } out: up_read(&dqopt->dqio_sem); return ret; diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 856c56ef01431..878b8e26c6c50 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -59,11 +59,11 @@ struct acpi_exception_info { #define AE_OK (acpi_status) 0x0000 -#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL) -#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML) -#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER) -#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES) -#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL) +#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL) +#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML) +#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER) +#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES) +#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL) /* * Environmental exceptions diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e71c97c3c25ef..2d632a74cc5e9 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -701,8 +701,13 @@ /* DWARF 4 */ \ .debug_types 0 : { *(.debug_types) } \ /* DWARF 5 */ \ + .debug_addr 0 : { *(.debug_addr) } \ + .debug_line_str 0 : { *(.debug_line_str) } \ + .debug_loclists 0 : { *(.debug_loclists) } \ .debug_macro 0 : { *(.debug_macro) } \ - .debug_addr 0 : { *(.debug_addr) } + .debug_names 0 : { *(.debug_names) } \ + .debug_rnglists 0 : { *(.debug_rnglists) } \ + .debug_str_offsets 0 : { *(.debug_str_offsets) } /* Stabs debugging sections. 
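[Editor's note] The acexcep.h change above fixes a subtle test: an exception *class* lives in the high bits of the status word, so "status & AE_CODE_AML" is also true for unrelated classes that merely share bits, and is never true for the environmental class, whose code is 0. The correct test masks out the class field and compares for equality. A stand-alone sketch in the same spirit; the numeric encodings below are examples, not copied from the ACPICA headers.

#include <stdio.h>

/* Illustrative class encodings in the high nibble (example values only). */
#define CODE_MASK		0xF000u
#define CODE_ENVIRONMENTAL	0x0000u
#define CODE_PROGRAMMER		0x1000u
#define CODE_AML		0x3000u

#define IS_AML_BROKEN(s)	((s) & CODE_AML)			/* old style */
#define IS_AML_FIXED(s)		(((s) & CODE_MASK) == CODE_AML)		/* new style */

int main(void)
{
	unsigned int programmer_err = CODE_PROGRAMMER | 0x0002;

	/*
	 * The broken macro misclassifies a programmer error as AML,
	 * because 0x1000 & 0x3000 is non-zero.
	 */
	printf("broken: %s\n", IS_AML_BROKEN(programmer_err) ? "AML" : "not AML");
	printf("fixed : %s\n", IS_AML_FIXED(programmer_err)  ? "AML" : "not AML");
	return 0;
}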
*/ #define STABS_DEBUG \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 745b2d0dcf78c..209ba8e7bd317 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -637,6 +637,7 @@ struct request_queue { struct delayed_work requeue_work; struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; int bypass_depth; atomic_t mq_freeze_depth; @@ -743,6 +744,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) +#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ff60ba537cf2c..cde6708644aed 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -315,6 +315,11 @@ struct dm_target { * on max_io_len boundary. */ bool split_discard_bios:1; + + /* + * Set if we need to limit the number of in-flight bios when swapping. + */ + bool limit_swap_bios:1; }; /* Each target can link one of these into the table */ diff --git a/include/linux/filter.h b/include/linux/filter.h index d61dc72ebb960..117f9380069a8 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -746,7 +746,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #define __bpf_call_base_args \ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ - __bpf_call_base) + (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index a8f8889761378..0be0d68fbb009 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -3,6 +3,7 @@ #define _LINUX_ICMPV6_H #include +#include #include static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) @@ -13,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr); + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm); +#if IS_BUILTIN(CONFIG_IPV6) +static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) +{ + icmp6_send(skb, type, code, info, NULL, parm); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + __icmpv6_send(skb, type, code, info, IP6CB(skb)); +} + int 
ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); +#if IS_ENABLED(CONFIG_NF_NAT) +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); +#else +static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + __icmpv6_send(skb_in, type, code, info, &parm); +} +#endif + #else static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { +} +static inline void icmpv6_ndo_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ } #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 8415bf1a97762..0ebd180e7f91a 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -3,6 +3,7 @@ #define _IPV6_H #include +#include #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) @@ -83,7 +84,6 @@ struct ipv6_params { __s32 autoconf; }; extern struct ipv6_params ipv6_defaults; -#include #include #include diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 9e4e638fb5051..fe9f6f2dd811d 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -260,6 +260,11 @@ struct kimage { /* Information for loading purgatory */ struct purgatory_info purgatory_info; #endif + +#ifdef CONFIG_IMA_KEXEC + /* Virtual address of IMA measurement buffer for kexec syscall */ + void *ima_buffer; +#endif }; /* kexec interface functions */ diff --git a/include/linux/key.h b/include/linux/key.h index e58ee10f6e585..3683c6a6fca30 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -249,6 +249,7 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ +#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df7..d7d6d4eb17949 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -214,7 +214,8 @@ struct page_vma_mapped_walk { static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - if (pvmw->pte) + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. 
*/ + if (pvmw->pte && !PageHuge(pvmw->page)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); diff --git a/include/net/icmp.h b/include/net/icmp.h index 8665bf24e3b7a..ffe4a5d2bbe7e 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -47,6 +47,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); } +#if IS_ENABLED(CONFIG_NF_NAT) +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); +#else +static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct ip_options opts = { 0 }; + __icmp_send(skb_in, type, code, info, &opts); +} +#endif + int icmp_rcv(struct sk_buff *skb); void icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); diff --git a/include/net/tcp.h b/include/net/tcp.h index afbe1d3991f21..4fe3ab47b4803 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1380,8 +1380,13 @@ static inline int tcp_full_space(const struct sock *sk) */ static inline bool tcp_rmem_pressure(const struct sock *sk) { - int rcvbuf = READ_ONCE(sk->sk_rcvbuf); - int threshold = rcvbuf - (rcvbuf >> 3); + int rcvbuf, threshold; + + if (tcp_under_memory_pressure(sk)) + return true; + + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + threshold = rcvbuf - (rcvbuf >> 3); return atomic_read(&sk->sk_rmem_alloc) > threshold; } diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index e6ef4401a1380..9b5eeff72fd37 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -505,13 +505,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) static void bpf_common_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) { + u8 node_type = READ_ONCE(node->type); unsigned long flags; - if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) || - WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE)) + if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) || + WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE)) return; - if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) { + if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) { struct bpf_lru_locallist *loc_l; loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 2118d8258b7c9..ad53b19734e9b 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -233,7 +233,7 @@ extern struct task_struct *kdb_curr_task(int); #define kdb_do_each_thread(g, p) do_each_thread(g, p) #define kdb_while_each_thread(g, p) while_each_thread(g, p) -#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) +#define GFP_KDB (in_dbg_master() ? 
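[Editor's note] On the tcp_rmem_pressure() change above: the helper now reports pressure immediately when the socket is under memory pressure, and otherwise compares the receive-queue allocation against 7/8 of sk_rcvbuf ("rcvbuf - (rcvbuf >> 3)"). The arithmetic is easy to check in isolation with a simplified model; the numbers below are illustrative.

#include <stdio.h>

/* Simplified model of the threshold used by tcp_rmem_pressure(). */
static int rmem_pressure(int under_global_pressure, int rcvbuf, int rmem_alloc)
{
	int threshold;

	if (under_global_pressure)
		return 1;			/* new early exit in the fix */

	threshold = rcvbuf - (rcvbuf >> 3);	/* 87.5% of the receive buffer */
	return rmem_alloc > threshold;
}

int main(void)
{
	printf("%d\n", rmem_pressure(0, 65536, 50000));	/* below 57344 -> 0 */
	printf("%d\n", rmem_pressure(0, 65536, 60000));	/* above 57344 -> 1 */
	printf("%d\n", rmem_pressure(1, 65536, 0));		/* pressure    -> 1 */
	return 0;
}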
GFP_ATOMIC : GFP_KERNEL) extern void *debug_kmalloc(size_t size, gfp_t flags); extern void debug_kfree(void *); diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 7c8262635b292..01c6737218949 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -83,6 +83,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key) int v, v1; STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); /* * Careful if we get concurrent static_key_slow_inc() calls; @@ -128,6 +129,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); void static_key_enable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); @@ -158,6 +160,7 @@ EXPORT_SYMBOL_GPL(static_key_enable); void static_key_disable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); @@ -183,6 +186,10 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, unsigned long rate_limit, struct delayed_work *work) { + int val; + + lockdep_assert_cpus_held(); + /* * The negative count check is valid even when a negative * key->enabled is in use by static_key_slow_inc(); a @@ -190,17 +197,20 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, * returns is unbalanced, because all other static_key_slow_inc() * instances block while the update is in progress. */ - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { - WARN(atomic_read(&key->enabled) < 0, - "jump label: negative count!\n"); + val = atomic_fetch_add_unless(&key->enabled, -1, 1); + if (val != 1) { + WARN(val < 0, "jump label: negative count!\n"); return; } - if (rate_limit) { - atomic_inc(&key->enabled); - schedule_delayed_work(work, rate_limit); - } else { - jump_label_update(key); + jump_label_lock(); + if (atomic_dec_and_test(&key->enabled)) { + if (rate_limit) { + atomic_inc(&key->enabled); + schedule_delayed_work(work, rate_limit); + } else { + jump_label_update(key); + } } jump_label_unlock(); } diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index c6a3b6851372c..2fbdb78d66c80 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -168,6 +168,11 @@ void kimage_file_post_load_cleanup(struct kimage *image) vfree(pi->sechdrs); pi->sechdrs = NULL; +#ifdef CONFIG_IMA_KEXEC + vfree(image->ima_buffer); + image->ima_buffer = NULL; +#endif /* CONFIG_IMA_KEXEC */ + /* See if architecture has anything to cleanup post load */ arch_kimage_file_post_load_cleanup(image); diff --git a/kernel/module.c b/kernel/module.c index 429769605871d..d5d01ece720c4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2274,6 +2274,21 @@ static int verify_export_symbols(struct module *mod) return 0; } +static bool ignore_undef_symbol(Elf_Half emachine, const char *name) +{ + /* + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. + * i386 has a similar problem but may not deserve a fix. + * + * If we ever have to ignore many symbols, consider refactoring the code to + * only warn if referenced by a relocation. + */ + if (emachine == EM_386 || emachine == EM_X86_64) + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); + return false; +} + /* Change all symbols so that st_value encodes the pointer directly. 
*/ static int simplify_symbols(struct module *mod, const struct load_info *info) { @@ -2319,8 +2334,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) break; } - /* Ok if weak. */ - if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) + /* Ok if weak or ignored. */ + if (!ksym && + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || + ignore_undef_symbol(info->hdr->e_machine, name))) break; ret = PTR_ERR(ksym) ?: -ENOENT; diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index 82003d0d2e9f7..0dbc060331136 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -55,6 +55,8 @@ struct printk_safe_seq_buf { static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq); static DEFINE_PER_CPU(int, printk_context); +static DEFINE_RAW_SPINLOCK(safe_read_lock); + #ifdef CONFIG_PRINTK_NMI static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); #endif @@ -190,8 +192,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s) */ static void __printk_safe_flush(struct irq_work *work) { - static raw_spinlock_t read_lock = - __RAW_SPIN_LOCK_INITIALIZER(read_lock); struct printk_safe_seq_buf *s = container_of(work, struct printk_safe_seq_buf, work); unsigned long flags; @@ -205,7 +205,7 @@ static void __printk_safe_flush(struct irq_work *work) * different CPUs. This is especially important when printing * a backtrace. */ - raw_spin_lock_irqsave(&read_lock, flags); + raw_spin_lock_irqsave(&safe_read_lock, flags); i = 0; more: @@ -242,7 +242,7 @@ more: out: report_message_lost(s); - raw_spin_unlock_irqrestore(&read_lock, flags); + raw_spin_unlock_irqrestore(&safe_read_lock, flags); } /** @@ -288,6 +288,14 @@ void printk_safe_flush_on_panic(void) raw_spin_lock_init(&logbuf_lock); } + if (raw_spin_is_locked(&safe_read_lock)) { + if (num_online_cpus() > 1) + return; + + debug_locks_off(); + raw_spin_lock_init(&safe_read_lock); + } + printk_safe_flush(); } diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b9bd5a6b51386..a9dd2325bdda6 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -771,6 +771,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, const bool recheck_after_trace) { BUG(); + + return -1; } #endif diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index a3be42304485f..d5ce692319128 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -66,6 +66,12 @@ struct tp_probes { struct tracepoint_func probes[0]; }; +/* Called in removal of a func but failed to allocate a new tp_funcs */ +static void tp_stub_func(void) +{ + return; +} + static inline void *allocate_probes(int count) { struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) @@ -144,6 +150,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, { struct tracepoint_func *old, *new; int nr_probes = 0; + int stub_funcs = 0; int pos = -1; if (WARN_ON(!tp_func->func)) @@ -160,14 +167,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, if (old[nr_probes].func == tp_func->func && old[nr_probes].data == tp_func->data) return ERR_PTR(-EEXIST); + if (old[nr_probes].func == tp_stub_func) + stub_funcs++; } } - /* + 2 : one for new probe, one for NULL func */ - new = allocate_probes(nr_probes + 2); + /* + 2 : one for new probe, one for NULL func - stub functions */ + new = allocate_probes(nr_probes + 2 - stub_funcs); if (new == NULL) return ERR_PTR(-ENOMEM); if (old) { - if (pos < 0) { + if (stub_funcs) { + /* Need to copy one at a time to remove stubs 
*/ + int probes = 0; + + pos = -1; + for (nr_probes = 0; old[nr_probes].func; nr_probes++) { + if (old[nr_probes].func == tp_stub_func) + continue; + if (pos < 0 && old[nr_probes].prio < prio) + pos = probes++; + new[probes++] = old[nr_probes]; + } + nr_probes = probes; + if (pos < 0) + pos = probes; + else + nr_probes--; /* Account for insertion */ + + } else if (pos < 0) { pos = nr_probes; memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); } else { @@ -201,8 +228,9 @@ static void *func_remove(struct tracepoint_func **funcs, /* (N -> M), (N > 1, M >= 0) probes */ if (tp_func->func) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if (old[nr_probes].func == tp_func->func && - old[nr_probes].data == tp_func->data) + if ((old[nr_probes].func == tp_func->func && + old[nr_probes].data == tp_func->data) || + old[nr_probes].func == tp_stub_func) nr_del++; } } @@ -221,14 +249,32 @@ static void *func_remove(struct tracepoint_func **funcs, /* N -> M, (N > 1, M > 0) */ /* + 1 for NULL */ new = allocate_probes(nr_probes - nr_del + 1); - if (new == NULL) - return ERR_PTR(-ENOMEM); - for (i = 0; old[i].func; i++) - if (old[i].func != tp_func->func - || old[i].data != tp_func->data) - new[j++] = old[i]; - new[nr_probes - nr_del].func = NULL; - *funcs = new; + if (new) { + for (i = 0; old[i].func; i++) + if ((old[i].func != tp_func->func + || old[i].data != tp_func->data) + && old[i].func != tp_stub_func) + new[j++] = old[i]; + new[nr_probes - nr_del].func = NULL; + *funcs = new; + } else { + /* + * Failed to allocate, replace the old function + * with calls to tp_stub_func. + */ + for (i = 0; old[i].func; i++) + if (old[i].func == tp_func->func && + old[i].data == tp_func->data) { + old[i].func = tp_stub_func; + /* Set the prio to the next event. 
*/ + if (old[i + 1].func) + old[i].prio = + old[i + 1].prio; + else + old[i].prio = -1; + } + *funcs = old; + } } debug_print_probes(*funcs); return old; @@ -284,10 +330,12 @@ static int tracepoint_remove_func(struct tracepoint *tp, tp_funcs = rcu_dereference_protected(tp->funcs, lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); - if (IS_ERR(old)) { - WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); + if (WARN_ON_ONCE(IS_ERR(old))) return PTR_ERR(old); - } + + if (tp_funcs == old) + /* Failed allocating new tp_funcs, replaced func with stub */ + return 0; if (!tp_funcs) { /* Removed last function */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index df89aed023029..9eb5c25fabc9e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2648,8 +2648,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, return -ENOMEM; retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); - if (retval) + if (retval) { kobject_put(hstate_kobjs[hi]); + hstate_kobjs[hi] = NULL; + } return retval; } diff --git a/mm/memory.c b/mm/memory.c index eeae63bd95027..c1a05c2484b09 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1995,11 +1995,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { - pte_t *pte; + pte_t *pte, *mapped_pte; spinlock_t *ptl; int err = 0; - pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); @@ -2013,7 +2013,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(pte - 1, ptl); + pte_unmap_unlock(mapped_pte, ptl); return err; } @@ -4885,17 +4885,19 @@ long copy_huge_page_from_user(struct page *dst_page, void *page_kaddr; unsigned long i, rc = 0; unsigned long ret_val = pages_per_huge_page * PAGE_SIZE; + struct page *subpage = dst_page; - for (i = 0; i < pages_per_huge_page; i++) { + for (i = 0; i < pages_per_huge_page; + i++, subpage = mem_map_next(subpage, dst_page, i)) { if (allow_pagefault) - page_kaddr = kmap(dst_page + i); + page_kaddr = kmap(subpage); else - page_kaddr = kmap_atomic(dst_page + i); + page_kaddr = kmap_atomic(subpage); rc = copy_from_user(page_kaddr, (const void __user *)(src + i * PAGE_SIZE), PAGE_SIZE); if (allow_pagefault) - kunmap(dst_page + i); + kunmap(subpage); else kunmap_atomic(page_kaddr); diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index be9640e9ca006..e09ea78356c36 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -388,9 +388,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, hdev = hci_dev_get(req->id); if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { struct a2mp_amp_assoc_rsp rsp; - rsp.id = req->id; memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; if (tmp) { rsp.status = A2MP_STATUS_COLLISION_OCCURED; @@ -519,6 +519,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); if (!assoc) { amp_ctrl_put(ctrl); + hci_dev_put(hdev); return -ENOMEM; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e03faca84919e..04d6f50798c98 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1316,8 +1316,10 @@ int hci_inquiry(void __user *arg) * cleared). If it is interrupted by a signal, return -EINTR. 
*/ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, - TASK_INTERRUPTIBLE)) - return -EINTR; + TASK_INTERRUPTIBLE)) { + err = -EINTR; + goto done; + } } /* for unlimited number of responses we will use buffer with diff --git a/net/core/filter.c b/net/core/filter.c index 557bd5cc8f94c..bbf5dbb95644d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4500,6 +4500,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, { struct net *net = dev_net(skb->dev); int rc = -EAFNOSUPPORT; + bool check_mtu = false; if (plen < sizeof(*params)) return -EINVAL; @@ -4507,22 +4508,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) return -EINVAL; + if (params->tot_len) + check_mtu = true; + switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: - rc = bpf_ipv4_fib_lookup(net, params, flags, false); + rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - rc = bpf_ipv6_fib_lookup(net, params, flags, false); + rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu); break; #endif } - if (!rc) { + if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) { struct net_device *dev; + /* When tot_len isn't provided by user, check skb + * against MTU of FIB lookup resulting net_device + */ dev = dev_get_by_index_rcu(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 59c0b1a86e51b..b048125ea0994 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -755,6 +755,40 @@ out:; } EXPORT_SYMBOL(__icmp_send); +#if IS_ENABLED(CONFIG_NF_NAT) +#include <net/netfilter/nf_conntrack.h> +void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + struct sk_buff *cloned_skb = NULL; + struct ip_options opts = { 0 }; + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + __be32 orig_ip; + + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + __icmp_send(skb_in, type, code, info, &opts); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct iphdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct iphdr)))) + goto out; + + orig_ip = ip_hdr(skb_in)->saddr; + ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; + __icmp_send(skb_in, type, code, info, &opts); + ip_hdr(skb_in)->saddr = orig_ip; +out: + consume_skb(cloned_skb); +} +EXPORT_SYMBOL(icmp_ndo_send); +#endif static void icmp_socket_deliver(struct sk_buff *skb, u32 info) { diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 6d14cbe443f82..fbc8746371b6d 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -309,10 +309,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st } #if IS_ENABLED(CONFIG_IPV6_MIP6) -static void mip6_addr_swap(struct sk_buff *skb) +static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) { struct ipv6hdr *iph = ipv6_hdr(skb); - struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; @@ -329,7 +328,7 @@ static void mip6_addr_swap(struct sk_buff *skb) } } #else -static inline void mip6_addr_swap(struct sk_buff *skb) {} +static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {} #endif static struct dst_entry *icmpv6_route_lookup(struct net *net, @@ -418,8
+417,9 @@ static int icmp6_iif(const struct sk_buff *skb) /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr, + const struct inet6_skb_parm *parm) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -512,7 +512,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type)) goto out_bh_enable; - mip6_addr_swap(skb); + mip6_addr_swap(skb, parm); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; @@ -592,12 +592,13 @@ out: out_bh_enable: local_bh_enable(); } +EXPORT_SYMBOL(icmp6_send); /* Slightly more convenient version of icmp6_send. */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb)); kfree_skb(skb); } @@ -654,10 +655,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, } if (type == ICMP_TIME_EXCEEDED) icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); else icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, - info, &temp_saddr); + info, &temp_saddr, IP6CB(skb2)); if (rt) ip6_rt_put(rt); diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index 02045494c24cc..9e3574880cb03 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -9,6 +9,8 @@ #if IS_ENABLED(CONFIG_IPV6) +#if !IS_BUILTIN(CONFIG_IPV6) + static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) @@ -31,18 +33,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) } EXPORT_SYMBOL(inet6_unregister_icmp_sender); -void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct inet6_skb_parm *parm) { ip6_icmp_send_t *send; rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); + if (send) + send(skb, type, code, info, NULL, parm); + rcu_read_unlock(); +} +EXPORT_SYMBOL(__icmpv6_send); +#endif + +#if IS_ENABLED(CONFIG_NF_NAT) +#include <net/netfilter/nf_conntrack.h> +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) +{ + struct inet6_skb_parm parm = { 0 }; + struct sk_buff *cloned_skb = NULL; + enum ip_conntrack_info ctinfo; + struct in6_addr orig_ip; + struct nf_conn *ct; - if (!send) + ct = nf_ct_get(skb_in, &ctinfo); + if (!ct || !(ct->status & IPS_SRC_NAT)) { + __icmpv6_send(skb_in, type, code, info, &parm); + return; + } + + if (skb_shared(skb_in)) + skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); + + if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || + (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > + skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, + skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) goto out; - send(skb, type, code, info, NULL); + + orig_ip = ipv6_hdr(skb_in)->saddr; + ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; + __icmpv6_send(skb_in, type, code, info, &parm); + ipv6_hdr(skb_in)->saddr = orig_ip; out: - rcu_read_unlock(); + consume_skb(cloned_skb); } -EXPORT_SYMBOL(icmpv6_send); +EXPORT_SYMBOL(icmpv6_ndo_send); +#endif #endif diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 6219b6b0c7e17..18158855d98c4 100644 ---
a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -355,7 +355,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); + result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; } diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c index 997af345ce374..cb425e216d461 100644 --- a/net/qrtr/tun.c +++ b/net/qrtr/tun.c @@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb) static int qrtr_tun_open(struct inode *inode, struct file *filp) { struct qrtr_tun *tun; + int ret; tun = kzalloc(sizeof(*tun), GFP_KERNEL); if (!tun) @@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp) filp->private_data = tun; - return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO); + if (ret) + goto out; + + return 0; + +out: + filp->private_data = NULL; + kfree(tun); + return ret; } static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6cc9f6e2dd2b7..eae8b90864978 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -300,10 +300,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); } else { - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(mtu)); + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); } dst_release(dst); diff --git a/scripts/Makefile b/scripts/Makefile index 61affa300d25d..3ce4981bb2b66 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -10,6 +10,9 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include +CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto) +CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null) + hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c hostprogs-$(CONFIG_KALLSYMS) += kallsyms hostprogs-$(CONFIG_LOGO) += pnmtologo @@ -23,8 +26,10 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include -HOSTLDLIBS_sign-file = -lcrypto -HOSTLDLIBS_extract-cert = -lcrypto +HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS) +HOSTLDLIBS_sign-file = $(CRYPTO_LIBS) +HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) +HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS) always := $(hostprogs-y) $(hostprogs-m) diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index f599031260d51..7f6f96256b09f 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -263,7 +263,11 @@ if ($arch eq "x86_64") { # force flags for this arch $ld .= " -m shlelf_linux"; - $objcopy .= " -O elf32-sh-linux"; + if ($endian eq "big") { + $objcopy .= " -O elf32-shbig-linux"; + } else { + $objcopy .= " -O elf32-sh-linux"; + } } elsif ($arch eq "powerpc") { my $ldemulation; diff --git a/security/commoncap.c b/security/commoncap.c index a1dee0ab345a2..1bc40e78fa7ff 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -506,7 +506,8 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) __u32 magic, nsmagic; struct inode *inode = d_backing_inode(dentry); struct user_namespace *task_ns = current_user_ns(), - 
*fs_ns = inode->i_sb->s_user_ns; + *fs_ns = inode->i_sb->s_user_ns, + *ancestor; kuid_t rootid; size_t newsize; @@ -529,6 +530,15 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) if (nsrootid == -1) return -EINVAL; + /* + * Do not allow allow adding a v3 filesystem capability xattr + * if the rootid field is ambiguous. + */ + for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) { + if (from_kuid(ancestor, rootid) == 0) + return -EINVAL; + } + newsize = sizeof(struct vfs_ns_cap_data); nscap = kmalloc(newsize, GFP_ATOMIC); if (!nscap) diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 16bd18747cfa0..6a10d4d8b6e1d 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -124,6 +124,7 @@ void ima_add_kexec_buffer(struct kimage *image) ret = kexec_add_buffer(&kbuf); if (ret) { pr_err("Error passing over kexec measurement buffer.\n"); + vfree(kexec_buffer); return; } @@ -133,6 +134,8 @@ void ima_add_kexec_buffer(struct kimage *image) return; } + image->ima_buffer = kexec_buffer; + pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n", kbuf.mem); } diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c index 073ddc9bce5ba..3e7a1523663b8 100644 --- a/security/integrity/ima/ima_mok.c +++ b/security/integrity/ima/ima_mok.c @@ -43,13 +43,12 @@ __init int ima_mok_init(void) (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE | KEY_USR_SEARCH, - KEY_ALLOC_NOT_IN_QUOTA, + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_SET_KEEP, restriction, NULL); if (IS_ERR(ima_blacklist_keyring)) panic("Can't allocate IMA blacklist keyring."); - - set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags); return 0; } device_initcall(ima_mok_init); diff --git a/security/keys/key.c b/security/keys/key.c index d5fa8c4fc5544..d3ebc0533e3ad 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -305,6 +305,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; + if (flags & KEY_ALLOC_SET_KEEP) + key->flags |= 1 << KEY_FLAG_KEEP; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 09545c42977e8..9179a5bbf998b 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -796,7 +796,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; - else + else if (*args[0].from != '1') return -EINVAL; break; case Opt_pcrlock: diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9f0b05bcbd867..70571092db8d4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1880,6 +1880,7 @@ enum { ALC889_FIXUP_FRONT_HP_NO_PRESENCE, ALC889_FIXUP_VAIO_TT, ALC888_FIXUP_EEE1601, + ALC886_FIXUP_EAPD, ALC882_FIXUP_EAPD, ALC883_FIXUP_EAPD, ALC883_FIXUP_ACER_EAPD, @@ -2213,6 +2214,15 @@ static const struct hda_fixup alc882_fixups[] = { { } } }, + [ALC886_FIXUP_EAPD] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* change to EAPD mode */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 }, + { } + } + }, [ALC882_FIXUP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -2485,6 +2495,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { 
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c index d7f05b384f1fb..1902689c5ea2c 100644 --- a/sound/soc/codecs/cpcap.c +++ b/sound/soc/codecs/cpcap.c @@ -1263,12 +1263,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream, if (direction == SNDRV_PCM_STREAM_CAPTURE) { mask = 0x0000; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1; - mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2; - mask |= CPCAP_BIT_MIC2_TIMESLOT0; - mask |= CPCAP_BIT_MIC2_TIMESLOT1; - mask |= CPCAP_BIT_MIC2_TIMESLOT2; + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0); + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1); + mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1); + mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2); val = 0x0000; if (channels >= 2) val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0); diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index a5c8736fad777..04f89b751304c 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1260,6 +1260,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, dev_err(&i2c_client->dev, "CS42L56 Device ID (%X). Expected %X\n", devid, CS42L56_DEVID); + ret = -EINVAL; goto err_enable; } alpha_rev = reg & CS42L56_AREV_MASK; @@ -1317,7 +1318,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, ret = devm_snd_soc_register_component(&i2c_client->dev, &soc_component_dev_cs42l56, &cs42l56_dai, 1); if (ret < 0) - return ret; + goto err_enable; return 0; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index e4d2fcc89c306..4c9ab611aa3ef 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -1853,7 +1853,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs) { struct snd_pcm *pcm = subs->stream->pcm; struct snd_pcm_substream *s = pcm->streams[subs->direction].substream; - struct device *dev = subs->dev->bus->controller; + struct device *dev = subs->dev->bus->sysdev; if (!snd_usb_use_vmalloc) snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG, diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index 0e2d00d69e6e2..66e46bc8d6f1d 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -173,7 +173,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format) .data = {1, 211, 212, 213}, }; u64 regs[64]; - const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL}; + const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 }; const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL}; struct perf_sample sample = { .ip = 101, diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 9c22729e2ad60..538c935b40b8b 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -1660,6 +1660,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al, } al->sym = map__find_symbol(al->map, al->addr); + } else if (symbol_conf.dso_list) { + al->filtered |= (1 << HIST_FILTER__DSO); } if (symbol_conf.sym_list 
&& diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 6522b6513895c..e2f038f84dbc1 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1596,6 +1596,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) break; case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + case INTEL_PT_VMCS: case INTEL_PT_MNT: case INTEL_PT_PAD: