diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index ef6d6c57105ef..96834d103a09e 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -29,6 +29,8 @@ Description: (RW) Reports the current configuration of the QAT device. services * asym;sym: identical to sym;asym * dc: the device is configured for running compression services + * dcc: identical to dc but enables the dc chaining feature, + hash then compression. If this is not required chose dc * sym: the device is configured for running symmetric crypto services * asym: the device is configured for running asymmetric crypto diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index b6cfb51cb0b46..e715bfc09879a 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -46,12 +46,22 @@ The possible values in this file are: The processor is not vulnerable - * 'Vulnerable: no microcode': +* 'Vulnerable': + + The processor is vulnerable and no mitigations have been applied. + + * 'Vulnerable: No microcode': The processor is vulnerable, no microcode extending IBPB functionality to address the vulnerability has been applied. - * 'Mitigation: microcode': + * 'Vulnerable: Safe RET, no microcode': + + The "Safe RET" mitigation (see below) has been applied to protect the + kernel, but the IBPB-extending microcode has not been applied. User + space tasks may still be vulnerable. + + * 'Vulnerable: Microcode, no safe RET': Extended IBPB functionality microcode patch has been applied. It does not address User->Kernel and Guest->Host transitions protection but it @@ -72,11 +82,11 @@ The possible values in this file are: (spec_rstack_overflow=microcode) - * 'Mitigation: safe RET': + * 'Mitigation: Safe RET': - Software-only mitigation. It complements the extended IBPB microcode - patch functionality by addressing User->Kernel and Guest->Host - transitions protection. + Combined microcode/software mitigation. It complements the + extended IBPB microcode patch functionality by addressing + User->Kernel and Guest->Host transitions protection. Selected by default or by spec_rstack_overflow=safe-ret @@ -129,7 +139,7 @@ an indrect branch prediction barrier after having applied the required microcode patch for one's system. This mitigation comes also at a performance cost. 
-Mitigation: safe RET +Mitigation: Safe RET -------------------- The mitigation works by ensuring all RET instructions speculate to diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt index 294693a8906cf..10540aa7afa1a 100644 --- a/Documentation/devicetree/bindings/mfd/mt6397.txt +++ b/Documentation/devicetree/bindings/mfd/mt6397.txt @@ -22,8 +22,9 @@ compatible: "mediatek,mt6323" for PMIC MT6323 "mediatek,mt6331" for PMIC MT6331 and MT6332 "mediatek,mt6357" for PMIC MT6357 - "mediatek,mt6358" for PMIC MT6358 and MT6366 + "mediatek,mt6358" for PMIC MT6358 "mediatek,mt6359" for PMIC MT6359 + "mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366 "mediatek,mt6397" for PMIC MT6397 Optional subnodes: @@ -40,6 +41,7 @@ Optional subnodes: - compatible: "mediatek,mt6323-regulator" see ../regulator/mt6323-regulator.txt - compatible: "mediatek,mt6358-regulator" + - compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator" see ../regulator/mt6358-regulator.txt - compatible: "mediatek,mt6397-regulator" see ../regulator/mt6397-regulator.txt diff --git a/Makefile b/Makefile index f8c14da7c7bc7..03c52108af626 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 6 -SUBLEVEL = 1 +SUBLEVEL = 2 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth diff --git a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi index 42bcbf10957c4..9f9084269ef58 100644 --- a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi @@ -181,5 +181,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts index e04d2e5ea51aa..72e960c888ac8 100644 --- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts +++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts @@ -85,5 +85,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts index a399800139d9c..750e17482371c 100644 --- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts +++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts @@ -88,5 +88,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts index fad3473810a2e..2bdbc7d18b0eb 100644 --- a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts +++ b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts @@ -122,5 +122,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts index 5b2b7b8b3b123..b226bef3369cf 100644 --- a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts +++ b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts @@ -145,6 +145,14 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git 
a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts index d0a26b643b82f..192b8db5a89c3 100644 --- a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts +++ b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts @@ -145,5 +145,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts index 9f21d6d6d35b7..0198b5f9e4a75 100644 --- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts +++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts @@ -81,5 +81,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts index 2561072917021..73ff1694a4a0b 100644 --- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts +++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts @@ -148,5 +148,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts index 707c561703ed8..55fc9f44cbc7f 100644 --- a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts +++ b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts @@ -227,6 +227,14 @@ label = "wan"; }; + port@5 { + status = "disabled"; + }; + + port@7 { + status = "disabled"; + }; + port@8 { label = "cpu"; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts index c914569ddd5ec..e6d26987865d0 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts @@ -144,6 +144,14 @@ label = "wan"; }; + port@5 { + status = "disabled"; + }; + + port@7 { + status = "disabled"; + }; + port@8 { label = "cpu"; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts index f050acbea0b20..3124dfd01b944 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts @@ -192,6 +192,14 @@ label = "wan"; }; + port@5 { + status = "disabled"; + }; + + port@7 { + status = "disabled"; + }; + port@8 { label = "cpu"; phy-mode = "rgmii"; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts index e8991d4e248ce..e374062eb5b76 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts @@ -107,5 +107,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts index afc635c8cdebb..badafa024d24c 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts @@ -120,5 +120,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts 
b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts index 7cfa4607ef311..cf95af9db1e66 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts @@ -107,5 +107,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts index d55e10095eae7..992c19e1cfa17 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts @@ -75,5 +75,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts index ccf031c0e276d..4d0ba315a2049 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts @@ -147,5 +147,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts index e28f7a3501179..83c429afc2974 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts @@ -158,5 +158,13 @@ port@5 { label = "cpu"; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts index 03ad614e6b721..0bf5106f7012c 100644 --- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts +++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts @@ -124,6 +124,14 @@ full-duplex; }; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts index 26c12bfb0bdd4..25eeacf6a2484 100644 --- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts +++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts @@ -185,6 +185,14 @@ full-duplex; }; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm953012er.dts b/arch/arm/boot/dts/broadcom/bcm953012er.dts index 4fe3b36533767..d939ec9f4a9e7 100644 --- a/arch/arm/boot/dts/broadcom/bcm953012er.dts +++ b/arch/arm/boot/dts/broadcom/bcm953012er.dts @@ -84,6 +84,14 @@ label = "cpu"; ethernet = <&gmac0>; }; + + port@7 { + status = "disabled"; + }; + + port@8 { + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts index 884d99297d4cf..f516e0426bb9e 100644 --- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts +++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts @@ -45,11 +45,11 @@ event-hall-sensor { label = "Hall Effect Sensor"; - gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>; - interrupts = <&tlmm 110 IRQ_TYPE_EDGE_FALLING>; + gpios = <&tlmm 110 GPIO_ACTIVE_LOW>; linux,input-type = ; linux,code = ; debounce-interval = <15>; + linux,can-disable; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi 
index fc4f52f9e9f7d..63e21aa236429 100644 --- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi @@ -47,14 +47,12 @@ }; }; - regulators { - vsdcc_fixed: vsdcc-regulator { - compatible = "regulator-fixed"; - regulator-name = "SDCC Power"; - regulator-min-microvolt = <2700000>; - regulator-max-microvolt = <2700000>; - regulator-always-on; - }; + vsdcc_fixed: vsdcc-regulator { + compatible = "regulator-fixed"; + regulator-name = "SDCC Power"; + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2700000>; + regulator-always-on; }; soc: soc { diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts index c66de9dd12dfc..6a83923aa4612 100644 --- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts +++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts @@ -239,7 +239,7 @@ }; keyboard_pins: keyboard { - pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02"; + pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2"; bias-pull-up; }; diff --git a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi index 65480a9f5cc4e..842f2b17c4a81 100644 --- a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi +++ b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi @@ -376,7 +376,6 @@ }; }; - ltdc_pins_a: ltdc-0 { pins { pinmux = , /* LCD_B0 */ diff --git a/arch/arm/boot/dts/ti/omap/am3517-evm.dts b/arch/arm/boot/dts/ti/omap/am3517-evm.dts index af9df15274bed..866f68c5b504d 100644 --- a/arch/arm/boot/dts/ti/omap/am3517-evm.dts +++ b/arch/arm/boot/dts/ti/omap/am3517-evm.dts @@ -271,13 +271,6 @@ >; }; - leds_pins: leds-pins { - pinctrl-single,pins = < - OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */ - OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */ - >; - }; - mmc1_pins: mmc1-pins { pinctrl-single,pins = < OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */ @@ -355,3 +348,12 @@ >; }; }; + +&omap3_pmx_wkup { + leds_pins: leds-pins { + pinctrl-single,pins = < + OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */ + OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */ + >; + }; +}; diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index 72529f5e2bed9..a41b503b7dcde 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -23,6 +23,8 @@ #define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0) #define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1) #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2) +#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4) +#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5) #define PMMIR __ACCESS_CP15(c9, 0, c14, 6) #define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7) @@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void) return read_sysreg(PMCCNTR); } -static inline void write_pmxevcntr(u32 val) -{ - write_sysreg(val, PMXEVCNTR); -} - -static inline u32 read_pmxevcntr(void) -{ - return read_sysreg(PMXEVCNTR); -} - -static inline void write_pmxevtyper(u32 val) -{ - write_sysreg(val, PMXEVTYPER); -} - static inline void write_pmcntenset(u32 val) { write_sysreg(val, PMCNTENSET); @@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val) write_sysreg(val, PMUSERENR); } -static inline u32 read_pmceid0(void) -{ - return read_sysreg(PMCEID0); -} - -static inline u32 read_pmceid1(void) -{ - return read_sysreg(PMCEID1); -} - static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) 
{} static inline void kvm_clr_pmu_events(u32 clr) {} static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) @@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {} /* PMU Version in DFR Register */ #define ARMV8_PMU_DFR_VER_NI 0 +#define ARMV8_PMU_DFR_VER_V3P1 0x4 #define ARMV8_PMU_DFR_VER_V3P4 0x5 #define ARMV8_PMU_DFR_VER_V3P5 0x6 #define ARMV8_PMU_DFR_VER_IMP_DEF 0xF @@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver) return pmuver >= ARMV8_PMU_DFR_VER_V3P5; } +static inline u64 read_pmceid0(void) +{ + u64 val = read_sysreg(PMCEID0); + + if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1) + val |= (u64)read_sysreg(PMCEID2) << 32; + + return val; +} + +static inline u64 read_pmceid1(void) +{ + u64 val = read_sysreg(PMCEID1); + + if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1) + val |= (u64)read_sysreg(PMCEID3) << 32; + + return val; +} + #endif diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index c6aded1b069cf..e2a1916013e75 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -12,6 +12,9 @@ extern phys_addr_t arm_dma_zone_size; \ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) + +extern phys_addr_t arm_dma_limit; +#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit #endif #ifdef CONFIG_ISA_DMA_API diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index d71ab61430b26..de75ae4d5ab41 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -17,6 +17,7 @@ ENTRY(__memset) ENTRY(mmioset) WEAK(memset) UNWIND( .fnstart ) + and r1, r1, #255 @ cast to unsigned char ands r3, r0, #3 @ 1 unaligned? mov ip, r0 @ preserve r0 as return value bne 6f @ 1 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index c392e18f1e431..9afdc4c4a5dc1 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu) BUG_ON(err); per_cpu(xen_vcpu, cpu) = vcpup; - if (!xen_kernel_unmapped_at_usr()) - xen_setup_runstate_info(cpu); - after_register_vcpu_info: enable_percpu_irq(xen_events_irq, 0); return 0; @@ -523,9 +520,6 @@ static int __init xen_guest_init(void) return -EINVAL; } - if (!xen_kernel_unmapped_at_usr()) - xen_time_setup_guest(); - if (xen_initial_domain()) pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); @@ -535,7 +529,13 @@ static int __init xen_guest_init(void) } early_initcall(xen_guest_init); -static int __init xen_pm_init(void) +static int xen_starting_runstate_cpu(unsigned int cpu) +{ + xen_setup_runstate_info(cpu); + return 0; +} + +static int __init xen_late_init(void) { if (!xen_domain()) return -ENODEV; @@ -548,9 +548,16 @@ static int __init xen_pm_init(void) do_settimeofday64(&ts); } - return 0; + if (xen_kernel_unmapped_at_usr()) + return 0; + + xen_time_setup_guest(); + + return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING, + "arm/xen_runstate:starting", + xen_starting_runstate_cpu, NULL); } -late_initcall(xen_pm_init); +late_initcall(xen_late_init); /* empty stubs */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index 236fe44f779df..738024baaa578 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -399,6 +399,7 @@ "pll8k", "pll11k", "clkext3"; dmas = <&sdma2 24 25 0x80000000>; dma-names = "rx"; + #sound-dai-cells = <0>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi 
b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index aa38dd6dc9ba5..1bb1d0c1bae4d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -371,6 +371,7 @@ "pll8k", "pll11k", "clkext3"; dmas = <&sdma2 24 25 0x80000000>; dma-names = "rx"; + #sound-dai-cells = <0>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts index 28db9349ed62c..267ceffc02d84 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts @@ -284,7 +284,6 @@ usb_hub_2_x: hub@1 { compatible = "usbbda,5411"; reg = <1>; - reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>; vdd-supply = <®_usb_hub>; peer-hub = <&usb_hub_3_x>; }; @@ -293,7 +292,6 @@ usb_hub_3_x: hub@2 { compatible = "usbbda,411"; reg = <2>; - reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>; vdd-supply = <®_usb_hub>; peer-hub = <&usb_hub_2_x>; }; @@ -443,7 +441,6 @@ pinctrl_usb1: usb1grp { fsl,pins = < MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x10 - MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x19 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi index 7764b4146e0ab..2bbdacb1313f9 100644 --- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi @@ -8,5 +8,5 @@ }; &jpegenc { - compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc"; + compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc"; }; diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi index 32cfb3e2efc3a..47d45ff3d6f57 100644 --- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi @@ -120,7 +120,7 @@ "mpp59", "mpp60", "mpp61"; marvell,function = "sdio"; }; - cp0_spi0_pins: cp0-spi-pins-0 { + cp0_spi1_pins: cp0-spi-pins-1 { marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16"; marvell,function = "spi1"; }; @@ -170,7 +170,7 @@ &cp0_spi1 { pinctrl-names = "default"; - pinctrl-0 = <&cp0_spi0_pins>; + pinctrl-0 = <&cp0_spi1_pins>; reg = <0x700680 0x50>, /* control */ <0x2000000 0x1000000>; /* CS0 */ status = "okay"; diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi index c7de1ea0d470a..6eb6a175de38d 100644 --- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi @@ -307,7 +307,7 @@ &cp0_spi1 { status = "disabled"; pinctrl-names = "default"; - pinctrl-0 = <&cp0_spi0_pins>; + pinctrl-0 = <&cp0_spi1_pins>; reg = <0x700680 0x50>; flash@0 { @@ -371,7 +371,7 @@ "mpp59", "mpp60", "mpp61"; marvell,function = "sdio"; }; - cp0_spi0_pins: cp0-spi-pins-0 { + cp0_spi1_pins: cp0-spi-pins-1 { marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16"; marvell,function = "spi1"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi index 5f592f1d81e2e..fe08e131b7b9e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi @@ -28,7 +28,7 @@ flash@0 { compatible = "jedec,spi-nor"; reg = <0>; - spi-max-frequency = <136000000>; + spi-max-frequency = <102000000>; spi-tx-bus-width = <4>; spi-rx-bus-width = <4>; }; @@ -42,7 +42,7 @@ mmc@3400000 { status = "okay"; bus-width = <4>; - cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>; disable-wp; }; diff --git 
a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi index 95524e5bce826..ac69eacf8a6ba 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi @@ -43,12 +43,12 @@ , , , - , - , - , - , - , - ; + , + , + , + , + , + ; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts index 4f5541e9be0e9..dabe9f42a63ad 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts @@ -172,6 +172,9 @@ pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>; avdd-supply = <&pm8916_l6>; + a2vdd-supply = <&pm8916_l6>; + dvdd-supply = <&pm8916_l6>; + pvdd-supply = <&pm8916_l6>; v1p2-supply = <&pm8916_l6>; v3p3-supply = <&pm8916_l17>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 33fb65d731046..3c934363368c3 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1813,7 +1813,7 @@ #size-cells = <1>; #iommu-cells = <1>; compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; - ranges = <0 0x01e20000 0x40000>; + ranges = <0 0x01e20000 0x20000>; reg = <0x01ef0000 0x3000>; clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_APSS_TCU_CLK>; diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi index 6e24f0f2374fe..5a6b1942cfaa5 100644 --- a/arch/arm64/boot/dts/qcom/msm8939.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi @@ -1447,7 +1447,7 @@ apps_iommu: iommu@1ef0000 { compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1"; reg = <0x01ef0000 0x3000>; - ranges = <0 0x01e20000 0x40000>; + ranges = <0 0x01e20000 0x20000>; clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_APSS_TCU_CLK>; clock-names = "iface", "bus"; diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi index f9f5afbcc52bb..4c5be22b47fee 100644 --- a/arch/arm64/boot/dts/qcom/msm8976.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi @@ -379,7 +379,7 @@ smp2p-modem { compatible = "qcom,smp2p"; interrupts = ; - qcom,ipc = <&apcs 8 13>; + qcom,ipc = <&apcs 8 14>; qcom,local-pid = <0>; qcom,remote-pid = <1>; @@ -402,7 +402,7 @@ smp2p-wcnss { compatible = "qcom,smp2p"; interrupts = ; - qcom,ipc = <&apcs 8 17>; + qcom,ipc = <&apcs 8 18>; qcom,local-pid = <0>; qcom,remote-pid = <4>; @@ -428,9 +428,9 @@ #address-cells = <1>; #size-cells = <0>; - qcom,ipc-1 = <&apcs 8 12>; + qcom,ipc-1 = <&apcs 8 13>; qcom,ipc-2 = <&apcs 8 9>; - qcom,ipc-3 = <&apcs 8 18>; + qcom,ipc-3 = <&apcs 8 19>; apps_smsm: apps@0 { reg = <0>; diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts index fcca1ba94da69..5fe5de9ceef99 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts @@ -109,11 +109,6 @@ qcom,client-id = <1>; }; - audio_mem: audio@cb400000 { - reg = <0 0xcb000000 0 0x400000>; - no-mem; - }; - qseecom_mem: qseecom@cb400000 { reg = <0 0xcb400000 0 0x1c00000>; no-mem; diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts index eadba066972e8..0f7c591878962 100644 --- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts +++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts @@ -13,7 +13,7 @@ compatible = "qcom,qrb2210-rb1", "qcom,qrb2210", "qcom,qcm2290"; aliases { - serial0 = &uart0; + serial0 = &uart4; sdhc1 = &sdhc_1; sdhc2 = &sdhc_2; }; @@ -150,15 +150,15 @@ pm2250_s3: s3 { /* 0.4V-1.6625V -> 1.3V (Power tree 
requirements) */ - regulator-min-microvolts = <1350000>; - regulator-max-microvolts = <1350000>; + regulator-min-microvolt = <1352000>; + regulator-max-microvolt = <1352000>; regulator-boot-on; }; pm2250_s4: s4 { /* 1.2V-2.35V -> 2.05V (Power tree requirements) */ - regulator-min-microvolts = <2072000>; - regulator-max-microvolts = <2072000>; + regulator-min-microvolt = <2072000>; + regulator-max-microvolt = <2072000>; regulator-boot-on; }; @@ -166,47 +166,47 @@ pm2250_l2: l2 { /* LPDDR4X VDD2 */ - regulator-min-microvolts = <1136000>; - regulator-max-microvolts = <1136000>; + regulator-min-microvolt = <1136000>; + regulator-max-microvolt = <1136000>; regulator-always-on; regulator-boot-on; }; pm2250_l3: l3 { /* LPDDR4X VDDQ */ - regulator-min-microvolts = <616000>; - regulator-max-microvolts = <616000>; + regulator-min-microvolt = <616000>; + regulator-max-microvolt = <616000>; regulator-always-on; regulator-boot-on; }; pm2250_l4: l4 { - /* max = 3.05V -> max = just below 3V (SDHCI2) */ - regulator-min-microvolts = <1648000>; - regulator-max-microvolts = <2992000>; + /* max = 3.05V -> max = 2.7 to disable 3V signaling (SDHCI2) */ + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2700000>; regulator-allow-set-load; }; pm2250_l5: l5 { /* CSI/DSI */ - regulator-min-microvolts = <1232000>; - regulator-max-microvolts = <1232000>; + regulator-min-microvolt = <1232000>; + regulator-max-microvolt = <1232000>; regulator-allow-set-load; regulator-boot-on; }; pm2250_l6: l6 { /* DRAM PLL */ - regulator-min-microvolts = <928000>; - regulator-max-microvolts = <928000>; + regulator-min-microvolt = <928000>; + regulator-max-microvolt = <928000>; regulator-always-on; regulator-boot-on; }; pm2250_l7: l7 { /* Wi-Fi CX/MX */ - regulator-min-microvolts = <664000>; - regulator-max-microvolts = <664000>; + regulator-min-microvolt = <664000>; + regulator-max-microvolt = <664000>; }; /* @@ -216,37 +216,37 @@ pm2250_l10: l10 { /* Wi-Fi RFA */ - regulator-min-microvolts = <1300000>; - regulator-max-microvolts = <1300000>; + regulator-min-microvolt = <1304000>; + regulator-max-microvolt = <1304000>; }; pm2250_l11: l11 { /* GPS RF1 */ - regulator-min-microvolts = <1000000>; - regulator-max-microvolts = <1000000>; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; regulator-boot-on; }; pm2250_l12: l12 { /* USB PHYs */ - regulator-min-microvolts = <928000>; - regulator-max-microvolts = <928000>; + regulator-min-microvolt = <928000>; + regulator-max-microvolt = <928000>; regulator-allow-set-load; regulator-boot-on; }; pm2250_l13: l13 { /* USB/QFPROM/PLLs */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-allow-set-load; regulator-boot-on; }; pm2250_l14: l14 { /* SDHCI1 VQMMC */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-allow-set-load; /* Broken hardware, never turn it off! 
*/ regulator-always-on; @@ -254,8 +254,8 @@ pm2250_l15: l15 { /* WCD/DSI/BT VDDIO */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-allow-set-load; regulator-always-on; regulator-boot-on; @@ -263,47 +263,47 @@ pm2250_l16: l16 { /* GPS RF2 */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-boot-on; }; pm2250_l17: l17 { - regulator-min-microvolts = <3000000>; - regulator-max-microvolts = <3000000>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; }; pm2250_l18: l18 { /* VDD_PXn */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; }; pm2250_l19: l19 { /* VDD_PXn */ - regulator-min-microvolts = <1800000>; - regulator-max-microvolts = <1800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; }; pm2250_l20: l20 { /* SDHCI1 VMMC */ - regulator-min-microvolts = <2856000>; - regulator-max-microvolts = <2856000>; + regulator-min-microvolt = <2400000>; + regulator-max-microvolt = <3600000>; regulator-allow-set-load; }; pm2250_l21: l21 { /* SDHCI2 VMMC */ - regulator-min-microvolts = <2960000>; - regulator-max-microvolts = <3300000>; + regulator-min-microvolt = <2960000>; + regulator-max-microvolt = <3300000>; regulator-allow-set-load; regulator-boot-on; }; pm2250_l22: l22 { /* Wi-Fi */ - regulator-min-microvolts = <3312000>; - regulator-max-microvolts = <3312000>; + regulator-min-microvolt = <3312000>; + regulator-max-microvolt = <3312000>; }; }; }; @@ -357,7 +357,7 @@ }; /* UART connected to the Micro-USB port via a FTDI chip */ -&uart0 { +&uart4 { compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index 925428a5f6aea..91bb58c6b1a61 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -649,18 +649,6 @@ }; }; - eud_typec: connector { - compatible = "usb-c-connector"; - - ports { - port@0 { - con_eud: endpoint { - remote-endpoint = <&eud_con>; - }; - }; - }; - }; - memory@80000000 { device_type = "memory"; /* We expect the bootloader to fill in the size */ @@ -869,7 +857,8 @@ clocks = <&rpmhcc RPMH_CXO_CLK>, <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>, <0>, <&pcie1_lane>, - <0>, <0>, <0>, <0>; + <0>, <0>, <0>, + <&usb_1_ssphy>; clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk", "pcie_0_pipe_clk", "pcie_1_pipe_clk", "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk", @@ -3624,6 +3613,8 @@ <0 0x88e2000 0 0x1000>; interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + ports { #address-cells = <1>; #size-cells = <0>; @@ -3634,13 +3625,6 @@ remote-endpoint = <&usb2_role_switch>; }; }; - - port@1 { - reg = <1>; - eud_con: endpoint { - remote-endpoint = <&con_eud>; - }; - }; }; }; @@ -5363,6 +5347,14 @@ reg = <0 0x18591000 0 0x1000>, <0 0x18592000 0 0x1000>, <0 0x18593000 0 0x1000>; + + interrupts = , + , + ; + interrupt-names = "dcvsh-irq-0", + "dcvsh-irq-1", + "dcvsh-irq-2"; + clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>; clock-names = "xo", "alternate"; #freq-domain-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 84cd2e39266fe..ba2043d67370a 100644 --- 
a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -1328,7 +1328,8 @@ compatible = "qcom,sdm670-pdc", "qcom,pdc"; reg = <0 0x0b220000 0 0x30000>; qcom,pdc-ranges = <0 480 40>, <41 521 7>, <49 529 4>, - <54 534 24>, <79 559 30>, <115 630 7>; + <54 534 24>, <79 559 15>, <94 609 15>, + <115 630 7>; #interrupt-cells = <2>; interrupt-parent = <&intc>; interrupt-controller; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index f86e7acdfd99f..0ab5e8f53ac9f 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -143,16 +143,20 @@ }; }; +&cpufreq_hw { + /delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */ +}; + &psci { - /delete-node/ cpu0; - /delete-node/ cpu1; - /delete-node/ cpu2; - /delete-node/ cpu3; - /delete-node/ cpu4; - /delete-node/ cpu5; - /delete-node/ cpu6; - /delete-node/ cpu7; - /delete-node/ cpu-cluster0; + /delete-node/ power-domain-cpu0; + /delete-node/ power-domain-cpu1; + /delete-node/ power-domain-cpu2; + /delete-node/ power-domain-cpu3; + /delete-node/ power-domain-cpu4; + /delete-node/ power-domain-cpu5; + /delete-node/ power-domain-cpu6; + /delete-node/ power-domain-cpu7; + /delete-node/ power-domain-cluster; }; &cpus { @@ -275,6 +279,14 @@ &CLUSTER_SLEEP_0>; }; +&lmh_cluster0 { + status = "disabled"; +}; + +&lmh_cluster1 { + status = "disabled"; +}; + /* * Reserved memory changes * @@ -338,6 +350,8 @@ &apps_rsc { + /delete-property/ power-domains; + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index b3c27a5247429..1516113391edc 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts @@ -716,6 +716,8 @@ vdd-1.8-xo-supply = <&vreg_l7a_1p8>; vdd-1.3-rfa-supply = <&vreg_l17a_1p3>; vdd-3.3-ch0-supply = <&vreg_l25a_3p3>; + + qcom,snoc-host-cap-8bit-quirk; }; /* PINCTRL - additions to nodes defined in sdm845.dtsi */ diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts index 10d15871f2c48..a14e0650c4a8a 100644 --- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts +++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts @@ -44,7 +44,7 @@ }; &apps_rsc { - pmx75-rpmh-regulators { + regulators-0 { compatible = "qcom,pmx75-rpmh-regulators"; qcom,pmic-id = "b"; diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index d7c1a40617c64..197f8fed19a29 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -1208,7 +1208,7 @@ apps_smmu: iommu@c600000 { compatible = "qcom,sm6125-smmu-500", "qcom,smmu-500", "arm,mmu-500"; - reg = <0xc600000 0x80000>; + reg = <0x0c600000 0x80000>; interrupts = , , , diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 06c53000bb74d..19c6003dca153 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1893,8 +1893,12 @@ ranges; clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>, <&gcc GCC_PCIE_0_CFG_AHB_CLK>, + <&gcc GCC_PCIE_0_CLKREF_CLK>, <&gcc GCC_PCIE0_PHY_REFGEN_CLK>; - clock-names = "aux", "cfg_ahb", "refgen"; + clock-names = "aux", + "cfg_ahb", + "ref", + "refgen"; resets = <&gcc GCC_PCIE_0_PHY_BCR>; reset-names = "phy"; @@ -1991,8 +1995,12 @@ ranges; clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>, <&gcc GCC_PCIE_1_CFG_AHB_CLK>, + <&gcc GCC_PCIE_1_CLKREF_CLK>, <&gcc 
GCC_PCIE1_PHY_REFGEN_CLK>; - clock-names = "aux", "cfg_ahb", "refgen"; + clock-names = "aux", + "cfg_ahb", + "ref", + "refgen"; resets = <&gcc GCC_PCIE_1_PHY_BCR>; reset-names = "phy"; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 00604bf7724f4..a94e069da83d5 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -2964,7 +2964,7 @@ }; qup_uart18_default: qup-uart18-default-state { - pins = "gpio58", "gpio59"; + pins = "gpio68", "gpio69"; function = "qup18"; drive-strength = <2>; bias-disable; diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile index e7b8e2e7f083d..8bd5acc6d6835 100644 --- a/arch/arm64/boot/dts/ti/Makefile +++ b/arch/arm64/boot/dts/ti/Makefile @@ -9,6 +9,8 @@ # alphabetically. # Boards with AM62x SoC +k3-am625-sk-hdmi-audio-dtbs := k3-am625-sk.dtb k3-am62x-sk-hdmi-audio.dtbo +k3-am62-lp-sk-hdmi-audio-dtbs := k3-am62-lp-sk.dtb k3-am62x-sk-hdmi-audio.dtbo dtb-$(CONFIG_ARCH_K3) += k3-am625-beagleplay.dtb dtb-$(CONFIG_ARCH_K3) += k3-am625-phyboard-lyra-rdk.dtb dtb-$(CONFIG_ARCH_K3) += k3-am625-sk.dtb @@ -19,7 +21,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dahlia.dtb dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dev.dtb dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb -dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo +dtb-$(CONFIG_ARCH_K3) += k3-am625-sk-hdmi-audio.dtb +dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-hdmi-audio.dtb # Boards with AM62Ax SoC dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb @@ -66,6 +69,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-gesi-exp-board.dtbo +k3-j721s2-evm-dtbs := k3-j721s2-common-proc-board.dtb k3-j721s2-evm-gesi-exp-board.dtbo +dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb # Boards with J784s4 SoC dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi index 40992e7e4c308..5db52f2372534 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi @@ -1061,6 +1061,7 @@ vddc-supply = <®_1v2_dsi>; vddmipi-supply = <®_1v2_dsi>; vddio-supply = <®_1v8_dsi>; + status = "disabled"; dsi_bridge_ports: ports { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts index 7cfdf562b53bf..2de74428a8bde 100644 --- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts +++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts @@ -58,7 +58,7 @@ ramoops: ramoops@9ca00000 { compatible = "ramoops"; - reg = <0x00 0x9c700000 0x00 0x00100000>; + reg = <0x00 0x9ca00000 0x00 0x00100000>; record-size = <0x8000>; console-size = <0x8000>; ftrace-size = <0x00>; diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts index cff283c75f8ec..99f2878de4c67 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts @@ -250,7 +250,7 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&main_i2c1_pins_default>; - clock-frequency = <400000>; + clock-frequency = <100000>; exp1: gpio@22 { compatible = "ti,tca6424"; diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h index 18dc2fb3d7b7b..c27404fa4418a 100644 --- a/arch/arm64/include/asm/arm_pmuv3.h +++ 
b/arch/arm64/include/asm/arm_pmuv3.h @@ -46,12 +46,12 @@ static inline u32 read_pmuver(void) ID_AA64DFR0_EL1_PMUVer_SHIFT); } -static inline void write_pmcr(u32 val) +static inline void write_pmcr(u64 val) { write_sysreg(val, pmcr_el0); } -static inline u32 read_pmcr(void) +static inline u64 read_pmcr(void) { return read_sysreg(pmcr_el0); } @@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void) return read_sysreg(pmccntr_el0); } -static inline void write_pmxevcntr(u32 val) -{ - write_sysreg(val, pmxevcntr_el0); -} - -static inline u32 read_pmxevcntr(void) -{ - return read_sysreg(pmxevcntr_el0); -} - -static inline void write_pmxevtyper(u32 val) -{ - write_sysreg(val, pmxevtyper_el0); -} - static inline void write_pmcntenset(u32 val) { write_sysreg(val, pmcntenset_el0); @@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val) write_sysreg(val, pmintenclr_el1); } -static inline void write_pmccfiltr(u32 val) +static inline void write_pmccfiltr(u64 val) { write_sysreg(val, pmccfiltr_el0); } @@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val) write_sysreg(val, pmuserenr_el0); } -static inline u32 read_pmceid0(void) +static inline u64 read_pmceid0(void) { return read_sysreg(pmceid0_el0); } -static inline u32 read_pmceid1(void) +static inline u64 read_pmceid1(void) { return read_sysreg(pmceid1_el0); } diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 74d00feb62f03..7c7493cb571f9 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -86,7 +86,8 @@ #define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define ARM_CPU_PART_CORTEX_A78C 0xD4B -#define APM_CPU_PART_POTENZA 0x000 +#define APM_CPU_PART_XGENE 0x000 +#define APM_CPU_VAR_POTENZA 0x00 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 95f6945c44325..a1710e5fa72b6 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -874,7 +874,7 @@ u32 __attribute_const__ kvm_target_cpu(void) break; case ARM_CPU_IMP_APM: switch (part_number) { - case APM_CPU_PART_POTENZA: + case APM_CPU_PART_XGENE: return KVM_ARM_TARGET_XGENE_POTENZA; } break; diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 6fe46e7545566..0b4e5f8ce3e8a 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -69,9 +69,6 @@ #define _PTE_NONE_MASK 0 -/* Until my rework is finished, 40x still needs atomic PTE updates */ -#define PTE_ATOMIC_UPDATES 1 - #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) #define _PAGE_BASE (_PAGE_BASE_NC) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b68898ac07e19..392404688cec3 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p) return ret; } +static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk) +{ + unsigned long stack_page; + + // A non-empty pt_regs should never have a zero MSR or TRAP value. 
+ if (regs->msr || regs->trap) + return false; + + // Check it sits at the very base of the stack + stack_page = (unsigned long)task_stack_page(tsk); + if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE) + return false; + + return true; +} + static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void __no_sanitize_address show_stack(struct task_struct *tsk, @@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk, lr = regs->link; printk("%s--- interrupt: %lx at %pS\n", loglvl, regs->trap, (void *)regs->nip); - __show_regs(regs); - printk("%s--- interrupt: %lx\n", - loglvl, regs->trap); + + // Detect the case of an empty pt_regs at the very base + // of the stack and suppress showing it in full. + if (!empty_user_regs(regs, tsk)) { + __show_regs(regs); + printk("%s--- interrupt: %lx\n", loglvl, regs->trap); + } firstframe = 1; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 64ff37721fd06..fe3f720c9cd61 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1164,6 +1164,7 @@ void emulate_single_step(struct pt_regs *regs) __single_step_exception(regs); } +#ifdef CONFIG_PPC_FPU_REGS static inline int __parse_fpscr(unsigned long fpscr) { int ret = FPE_FLTUNK; @@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr) return ret; } +#endif static void parse_fpe(struct pt_regs *regs) { diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index de64c79629912..005269ac3244c 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -74,6 +74,9 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_STRUCT_SIZE(mmu_psize_def); VMCOREINFO_OFFSET(mmu_psize_def, shift); #endif + VMCOREINFO_SYMBOL(cur_cpu_spec); + VMCOREINFO_OFFSET(cpu_spec, mmu_features); + vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled()); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); } diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 9d229ef7f86ef..ada817c49b722 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -51,7 +51,7 @@ static int trace_imc_mem_size; * core and trace-imc */ static struct imc_pmu_ref imc_global_refc = { - .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock), + .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock), .id = 0, .refc = 0, }; diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index 77ea9335fd049..f381b177ea06a 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -4,6 +4,8 @@ * Copyright (C) 2019 Haren Myneni, IBM Corp */ +#define pr_fmt(fmt) "vas-api: " fmt + #include #include #include @@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref) task_ref->mm = get_task_mm(current); if (!task_ref->mm) { put_pid(task_ref->pid); - pr_err("VAS: pid(%d): mm_struct is not found\n", + pr_err("pid(%d): mm_struct is not found\n", current->pid); return -EPERM; } @@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb, rc = kill_pid_info(SIGSEGV, &info, pid); rcu_read_unlock(); - pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__, - pid_vnr(pid), rc); + pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc); } void vas_dump_crb(struct coprocessor_request_block *crb) @@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) rc = copy_from_user(&uattr, uptr, sizeof(uattr)); if (rc) { - pr_err("%s(): 
copy_from_user() returns %d\n", __func__, rc); + pr_err("copy_from_user() returns %d\n", rc); return -EFAULT; } @@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags, cp_inst->coproc->cop_type); if (IS_ERR(txwin)) { - pr_err("%s() VAS window open failed, %ld\n", __func__, + pr_err_ratelimited("VAS window open failed rc=%ld\n", PTR_ERR(txwin)); return PTR_ERR(txwin); } @@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) * window is not opened. Shouldn't expect this error. */ if (!cp_inst || !cp_inst->txwin) { - pr_err("%s(): Unexpected fault on paste address with TX window closed\n", - __func__); + pr_err("Unexpected fault on paste address with TX window closed\n"); return VM_FAULT_SIGBUS; } @@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) * issue NX request. */ if (txwin->task_ref.vma != vmf->vma) { - pr_err("%s(): No previous mapping with paste address\n", - __func__); + pr_err("No previous mapping with paste address\n"); return VM_FAULT_SIGBUS; } @@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) txwin = cp_inst->txwin; if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { - pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__, + pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n", (vma->vm_end - vma->vm_start), PAGE_SIZE); return -EINVAL; } /* Ensure instance has an open send window */ if (!txwin) { - pr_err("%s(): No send window open?\n", __func__); + pr_err("No send window open?\n"); return -EINVAL; } if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) { - pr_err("%s(): VAS API is not registered\n", __func__); + pr_err("VAS API is not registered\n"); return -EACCES; } @@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) */ mutex_lock(&txwin->task_ref.mmap_mutex); if (txwin->status != VAS_WIN_ACTIVE) { - pr_err("%s(): Window is not active\n", __func__); + pr_err("Window is not active\n"); rc = -EACCES; goto out; } paste_addr = cp_inst->coproc->vops->paste_addr(txwin); if (!paste_addr) { - pr_err("%s(): Window paste address failed\n", __func__); + pr_err("Window paste address failed\n"); rc = -EINVAL; goto out; } @@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, vma->vm_end - vma->vm_start, prot); - pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__, - paste_addr, vma->vm_start, rc); + pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr, + vma->vm_start, rc); txwin->task_ref.vma = vma; vma->vm_ops = &vas_vm_ops; @@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type, goto err; } - pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno), - MINOR(devno)); + pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno)); return 0; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index f2cb62148f36f..d4d6de0628b05 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -526,8 +526,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, if (cmd) { rc = init_cpu_associativity(); - if (rc) + if (rc) { + destroy_cpu_associativity(); goto out; + } for_each_possible_cpu(cpu) { disp = per_cpu_ptr(&vcpu_disp_data, cpu); diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 
e25ac52acf507..b1f25bac280b4 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -341,7 +341,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, if (atomic_inc_return(&cop_feat_caps->nr_used_credits) > atomic_read(&cop_feat_caps->nr_total_credits)) { - pr_err("Credits are not available to allocate window\n"); + pr_err_ratelimited("Credits are not available to allocate window\n"); rc = -EINVAL; goto out; } @@ -424,7 +424,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, put_vas_user_win_ref(&txwin->vas_win.task_ref); rc = -EBUSY; - pr_err("No credit is available to allocate window\n"); + pr_err_ratelimited("No credit is available to allocate window\n"); out_free: /* diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 9f0af4d795d88..f1c0fa6ece21d 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio, if (out_qpage) *out_qpage = be64_to_cpu(qpage); if (out_qsize) - *out_qsize = be32_to_cpu(qsize); + *out_qsize = be64_to_cpu(qsize); if (out_qeoi_page) *out_qeoi_page = be64_to_cpu(qeoi_page); if (out_escalate_irq) diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 22b13947bd131..8e7fc0edf21d3 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -17,6 +17,7 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS_loader.bin :=-O binary OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.* loader loader.o loader.lds loader.bin diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi index 8275630af977d..b8684312593e5 100644 --- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi +++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi @@ -30,7 +30,6 @@ cpu0_intc: interrupt-controller { compatible = "riscv,cpu-intc"; interrupt-controller; - #address-cells = <0>; #interrupt-cells = <1>; }; }; diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index c17dacb1141cb..157ace8b262c2 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -125,13 +125,14 @@ old_interface: */ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid) { - int rc; - for (; node; node = node->parent) { if (of_device_is_compatible(node, "riscv")) { - rc = riscv_of_processor_hartid(node, hartid); - if (!rc) - return 0; + *hartid = (unsigned long)of_get_cpu_hwid(node, 0); + if (*hartid == ~0UL) { + pr_warn("Found CPU without hart ID\n"); + return -ENODEV; + } + return 0; } } diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index c449e7c1b20ff..8bcd6c1431a95 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -22,6 +22,17 @@ config STACK_DEBUG every function call and will therefore incur a major performance hit. Most users should say N. +config EARLY_PRINTK + bool "Early printk" + depends on SH_STANDARD_BIOS + help + Say Y here to redirect kernel printk messages to the serial port + used by the SH-IPL bootloader, starting very early in the boot + process and ending when the kernel's serial console is initialised. + This option is only useful while porting the kernel to a new machine, + when the kernel may crash or hang before the serial console is + initialised. If unsure, say N. 
+ config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S index b193c0a1d8db3..2eca5f43734fe 100644 --- a/arch/x86/coco/tdx/tdcall.S +++ b/arch/x86/coco/tdx/tdcall.S @@ -195,6 +195,7 @@ SYM_FUNC_END(__tdx_module_call) xor %r10d, %r10d xor %r11d, %r11d xor %rdi, %rdi + xor %rsi, %rsi xor %rdx, %rdx /* Restore callee-saved GPRs as mandated by the x86_64 ABI */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index c55cc243592e9..197ff4f4d1ceb 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -271,7 +271,7 @@ .Lskip_rsb_\@: .endm -#ifdef CONFIG_CPU_UNRET_ENTRY +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO) #define CALL_UNTRAIN_RET "call entry_untrain_ret" #else #define CALL_UNTRAIN_RET "" @@ -312,7 +312,7 @@ .macro UNTRAIN_RET_FROM_CALL #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ - defined(CONFIG_CALL_DEPTH_TRACKING) + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h index 64df897c0ee30..1be13b2dfe8bf 100644 --- a/arch/x86/include/asm/sparsemem.h +++ b/arch/x86/include/asm/sparsemem.h @@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start); #define phys_to_target_node phys_to_target_node extern int memory_add_physaddr_to_nid(u64 start); #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid +extern int numa_fill_memblks(u64 start, u64 end); +#define numa_fill_memblks numa_fill_memblks #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8bae40a662827..5c367c1290c35 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len); #define copy_mc_to_kernel copy_mc_to_kernel unsigned long __must_check -copy_mc_to_user(void *to, const void *from, unsigned len); +copy_mc_to_user(void __user *to, const void *from, unsigned len); #endif /* diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 356de955e78dd..cab4d8b1535d6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -112,6 +112,9 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) }, diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 760adac3d1a82..3cdf48493546d 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -36,6 +36,8 @@ #include #include +#include + #include #include #include @@ -2344,6 +2346,15 @@ static int __init smp_init_primary_thread_mask(void) { unsigned int cpu; + /* + * XEN/PV provides either none or useless 
topology information. + * Pretend that all vCPUs are primary threads. + */ + if (xen_pv_domain()) { + cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask); + return 0; + } + for (cpu = 0; cpu < nr_logical_cpuids; cpu++) cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]); return 0; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 10499bcd4e396..a55a3864df1c9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline); enum srso_mitigation { SRSO_MITIGATION_NONE, + SRSO_MITIGATION_UCODE_NEEDED, + SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, SRSO_MITIGATION_IBPB, @@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd { }; static const char * const srso_strings[] = { - [SRSO_MITIGATION_NONE] = "Vulnerable", - [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", - [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", - [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", - [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" + [SRSO_MITIGATION_NONE] = "Vulnerable", + [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", + [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", + [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", + [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; @@ -2409,10 +2413,7 @@ static void __init srso_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) goto pred_cmd; - if (!has_microcode) { - pr_warn("IBPB-extending microcode not applied!\n"); - pr_warn(SRSO_NOTICE); - } else { + if (has_microcode) { /* * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. @@ -2425,10 +2426,15 @@ static void __init srso_select_mitigation(void) if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { if (has_microcode) { - pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); srso_mitigation = SRSO_MITIGATION_IBPB; - goto pred_cmd; + goto out; } + } else { + pr_warn("IBPB-extending microcode not applied!\n"); + pr_warn(SRSO_NOTICE); + + /* may be overwritten by SRSO_CMD_SAFE_RET below */ + srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; } switch (srso_cmd) { @@ -2458,7 +2464,10 @@ static void __init srso_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_SRSO); x86_return_thunk = srso_return_thunk; } - srso_mitigation = SRSO_MITIGATION_SAFE_RET; + if (has_microcode) + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + else + srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); goto pred_cmd; @@ -2493,10 +2502,11 @@ static void __init srso_select_mitigation(void) break; } - pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? 
"" : ", no microcode")); +out: + pr_info("%s\n", srso_strings[srso_mitigation]); pred_cmd: - if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && + if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) && boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; } @@ -2704,9 +2714,7 @@ static ssize_t srso_show_state(char *buf) if (boot_cpu_has(X86_FEATURE_SRSO_NO)) return sysfs_emit(buf, "Mitigation: SMT disabled\n"); - return sysfs_emit(buf, "%s%s\n", - srso_strings[srso_mitigation], - boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode"); + return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); } static ssize_t gds_show_state(char *buf) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 49f7629b17f73..bbc21798df10e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = { * while the kernel still uses a direct mapping. */ static struct desc_ptr startup_gdt_descr = { - .size = sizeof(startup_gdt), + .size = sizeof(startup_gdt)-1, .address = 0, }; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index a0c551846b35f..4766b6bed4439 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -507,12 +507,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi) } this_cpu_write(nmi_state, NMI_EXECUTING); this_cpu_write(nmi_cr2, read_cr2()); + +nmi_restart: if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1); WARN_ON_ONCE(!(nsp->idt_seq & 0x1)); WRITE_ONCE(nsp->recv_jiffies, jiffies); } -nmi_restart: /* * Needs to happen before DR7 is accessed, because the hypervisor can @@ -548,16 +549,16 @@ nmi_restart: if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) write_cr2(this_cpu_read(nmi_cr2)); - if (this_cpu_dec_return(nmi_state)) - goto nmi_restart; - - if (user_mode(regs)) - mds_user_clear_cpu_buffers(); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1); WARN_ON_ONCE(nsp->idt_seq & 0x1); WRITE_ONCE(nsp->recv_jiffies, jiffies); } + if (this_cpu_dec_return(nmi_state)) + goto nmi_restart; + + if (user_mode(regs)) + mds_user_clear_cpu_buffers(); } #if IS_ENABLED(CONFIG_KVM_INTEL) diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c index 80efd45a77617..6e8b7e600def5 100644 --- a/arch/x86/lib/copy_mc.c +++ b/arch/x86/lib/copy_mc.c @@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne } EXPORT_SYMBOL_GPL(copy_mc_to_kernel); -unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len) +unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len) { unsigned long ret; if (copy_mc_fragile_enabled) { __uaccess_begin(); - ret = copy_mc_fragile(dst, src, len); + ret = copy_mc_fragile((__force void *)dst, src, len); __uaccess_end(); return ret; } if (static_cpu_has(X86_FEATURE_ERMS)) { __uaccess_begin(); - ret = copy_mc_enhanced_fast_string(dst, src, len); + ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len); __uaccess_end(); return ret; } - return copy_user_generic(dst, src, len); + return copy_user_generic((__force void *)dst, src, len); } diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c index 5a53c2cc169cc..6993f026adec9 100644 --- a/arch/x86/mm/maccess.c +++ b/arch/x86/mm/maccess.c @@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) unsigned long vaddr = (unsigned long)unsafe_src; /* - * Range covering the highest possible 
canonical userspace address - * as well as non-canonical address range. For the canonical range - * we also need to include the userspace guard page. + * Do not allow userspace addresses. This disallows + * normal userspace and the userspace guard page: */ - return vaddr >= TASK_SIZE_MAX + PAGE_SIZE && - __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits); + if (vaddr < TASK_SIZE_MAX + PAGE_SIZE) + return false; + + /* + * Allow everything during early boot before 'x86_virt_bits' + * is initialized. Needed for instruction decoding in early + * exception handlers. + */ + if (!boot_cpu_data.x86_virt_bits) + return true; + + return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits); } #else bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 2aadb2019b4f2..c01c5506fd4ae 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -961,4 +962,83 @@ int memory_add_physaddr_to_nid(u64 start) return nid; } EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); + +static int __init cmp_memblk(const void *a, const void *b) +{ + const struct numa_memblk *ma = *(const struct numa_memblk **)a; + const struct numa_memblk *mb = *(const struct numa_memblk **)b; + + return ma->start - mb->start; +} + +static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata; + +/** + * numa_fill_memblks - Fill gaps in numa_meminfo memblks + * @start: address to begin fill + * @end: address to end fill + * + * Find and extend numa_meminfo memblks to cover the @start-@end + * physical address range, such that the first memblk includes + * @start, the last memblk includes @end, and any gaps in between + * are filled. + * + * RETURNS: + * 0 : Success + * NUMA_NO_MEMBLK : No memblk exists in @start-@end range + */ + +int __init numa_fill_memblks(u64 start, u64 end) +{ + struct numa_memblk **blk = &numa_memblk_list[0]; + struct numa_meminfo *mi = &numa_meminfo; + int count = 0; + u64 prev_end; + + /* + * Create a list of pointers to numa_meminfo memblks that + * overlap start, end. Exclude (start == bi->end) since + * end addresses in both a CFMWS range and a memblk range + * are exclusive. + * + * This list of pointers is used to make in-place changes + * that fill out the numa_meminfo memblks. + */ + for (int i = 0; i < mi->nr_blks; i++) { + struct numa_memblk *bi = &mi->blk[i]; + + if (start < bi->end && end >= bi->start) { + blk[count] = &mi->blk[i]; + count++; + } + } + if (!count) + return NUMA_NO_MEMBLK; + + /* Sort the list of pointers in memblk->start order */ + sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL); + + /* Make sure the first/last memblks include start/end */ + blk[0]->start = min(blk[0]->start, start); + blk[count - 1]->end = max(blk[count - 1]->end, end); + + /* + * Fill any gaps by tracking the previous memblks + * end address and backfilling to it if needed. 
+ */ + prev_end = blk[0]->end; + for (int i = 1; i < count; i++) { + struct numa_memblk *curr = blk[i]; + + if (prev_end >= curr->start) { + if (prev_end < curr->end) + prev_end = curr->end; + } else { + curr->start = prev_end; + prev_end = curr->end; + } + } + return 0; +} + #endif diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a5930042139d3..52f36c48c1b9e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1018,6 +1018,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ +#define RESTORE_TAIL_CALL_CNT(stack) \ + EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) { @@ -1623,9 +1627,7 @@ st: if (is_imm8(insn->off)) func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { - /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ - EMIT3_off32(0x48, 0x8B, 0x85, - -round_up(bpf_prog->aux->stack_depth, 8) - 8); + RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); if (!imm32) return -EINVAL; offs = 7 + x86_call_depth_emit_accounting(&prog, func); @@ -2400,6 +2402,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i * [ ... ] * [ stack_arg2 ] * RBP - arg_stack_off [ stack_arg1 ] + * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX */ /* room for return value of orig_call or fentry prog */ @@ -2464,6 +2467,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i else /* sub rsp, stack_size */ EMIT4(0x48, 0x83, 0xEC, stack_size); + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + EMIT1(0x50); /* push rax */ /* mov QWORD PTR [rbp - rbx_off], rbx */ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); @@ -2516,9 +2521,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i restore_regs(m, &prog, regs_off); save_args(m, &prog, arg_stack_off, true); + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + /* Before calling the original function, restore the + * tail_call_cnt from stack to rax. + */ + RESTORE_TAIL_CALL_CNT(stack_size); + if (flags & BPF_TRAMP_F_ORIG_STACK) { - emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); - EMIT2(0xff, 0xd0); /* call *rax */ + emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); + EMIT2(0xff, 0xd3); /* call *rbx */ } else { /* call original function */ if (emit_rsb_call(&prog, orig_call, prog)) { @@ -2569,7 +2580,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ret = -EINVAL; goto cleanup; } - } + } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + /* Before running the original function, restore the + * tail_call_cnt from stack to rax. 
+ */ + RESTORE_TAIL_CALL_CNT(stack_size); + /* restore return value of orig_call or fentry prog back into RAX */ if (save_ret) emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); diff --git a/block/blk-core.c b/block/blk-core.c index 9d51e9894ece7..fdf25b8d6e784 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio) if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return; - pr_warn("Trying to write to read-only block-device %pg\n", - bio->bi_bdev); + pr_warn_ratelimited("Trying to write to read-only block-device %pg\n", + bio->bi_bdev); /* Older lvm-tools actually trigger this */ } } diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 1ef3b46d6f6e5..59ec726b7c770 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION signed PE binary. config FIPS_SIGNATURE_SELFTEST - bool "Run FIPS selftests on the X.509+PKCS7 signature verification" + tristate "Run FIPS selftests on the X.509+PKCS7 signature verification" help This option causes some selftests to be run on the signature verification code, using some built in data. This is required @@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST depends on KEYS depends on ASYMMETRIC_KEY_TYPE depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER + depends on X509_CERTIFICATE_PARSER endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 0d1fa1b692c6b..1a273d6df3ebf 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -22,7 +22,8 @@ x509_key_parser-y := \ x509_cert_parser.o \ x509_loader.o \ x509_public_key.o -x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o +obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o +x509_selftest-y += selftest.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c index fa0bf7f242849..c50da7ef90ae9 100644 --- a/crypto/asymmetric_keys/selftest.c +++ b/crypto/asymmetric_keys/selftest.c @@ -4,10 +4,11 @@ * Written by David Howells (dhowells@redhat.com) */ -#include +#include #include +#include #include -#include +#include #include "x509_parser.h" struct certs_test { @@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = { TEST(certs_selftest_1_data, certs_selftest_1_pkcs7), }; -int __init fips_signature_selftest(void) +static int __init fips_signature_selftest(void) { struct key *keyring; int ret, i; @@ -222,3 +223,9 @@ int __init fips_signature_selftest(void) key_put(keyring); return 0; } + +late_initcall(fips_signature_selftest); + +MODULE_DESCRIPTION("X.509 self tests"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index a299c9c56f409..97a886cbe01c3 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -40,15 +40,6 @@ struct x509_certificate { bool blacklisted; }; -/* - * selftest.c - */ -#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST -extern int __init fips_signature_selftest(void); -#else -static inline int fips_signature_selftest(void) { return 0; } -#endif - /* * x509_cert_parser.c */ diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 7c71db3ac23d4..6a4f00be22fc1 100644 --- 
a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = { /* * Module stuff */ -extern int __init certs_selftest(void); static int __init x509_key_init(void) { - int ret; - - ret = register_asymmetric_key_parser(&x509_key_parser); - if (ret < 0) - return ret; - return fips_signature_selftest(); + return register_asymmetric_key_parser(&x509_key_parser); } static void __exit x509_key_exit(void) diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c index 20c4583f12b0d..31c74ca70a2e5 100644 --- a/drivers/accel/habanalabs/gaudi2/gaudi2.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c @@ -8149,11 +8149,11 @@ static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u eng_id[num_of_eng] = razwi_info[i].eng_id; base[num_of_eng] = razwi_info[i].rtr_ctrl; if (!num_of_eng) - str_size += snprintf(eng_name + str_size, + str_size += scnprintf(eng_name + str_size, PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s", razwi_info[i].eng_name); else - str_size += snprintf(eng_name + str_size, + str_size += scnprintf(eng_name + str_size, PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s", razwi_info[i].eng_name); num_of_eng++; diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index b9bbf07461992..a34d8578b3da6 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -158,8 +158,8 @@ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalia return 0; len = snprintf(modalias, size, "acpi:"); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; size -= len; @@ -212,8 +212,10 @@ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); - if (len <= 0) - return len; + if (len >= size) + return -ENOMEM; + + size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 1f4fc5f8a819d..12f330b0eac01 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, start = cfmws->base_hpa; end = cfmws->base_hpa + cfmws->window_size; - /* Skip if the SRAT already described the NUMA details for this HPA */ - node = phys_to_target_node(start); - if (node != NUMA_NO_NODE) + /* + * The SRAT may have already described NUMA details for all, + * or a portion of, this CFMWS HPA range. Extend the memblks + * found for any portion of the window to cover the entire + * window. + */ + if (!numa_fill_memblks(start, end)) return 0; + /* No SRAT description. Create a new node. */ node = acpi_map_pxm_to_node(*fake_pxm); if (node == NUMA_NO_NODE) { diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 413e4fcadcaf7..99b4e33554355 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1102,25 +1102,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, switch (proptype) { case DEV_PROP_STRING: break; - case DEV_PROP_U8 ... 
DEV_PROP_U64: + default: if (obj->type == ACPI_TYPE_BUFFER) { if (nval > obj->buffer.length) return -EOVERFLOW; - break; + } else { + if (nval > obj->package.count) + return -EOVERFLOW; } - fallthrough; - default: - if (nval > obj->package.count) - return -EOVERFLOW; break; } if (nval == 0) return -EINVAL; - if (obj->type != ACPI_TYPE_BUFFER) - items = obj->package.elements; - else + if (obj->type == ACPI_TYPE_BUFFER) { + if (proptype != DEV_PROP_U8) + return -EPROTO; items = obj; + } else { + items = obj->package.elements; + } switch (proptype) { case DEV_PROP_U8: diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 442396f6ed1f9..31205fee59d4a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d) return 0; } +static int video_detect_portege_r100(const struct dmi_system_id *d) +{ + struct pci_dev *dev; + /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */ + dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL); + if (dev) + acpi_backlight_dmi = acpi_backlight_vendor; + return 0; +} + static const struct dmi_system_id video_detect_dmi_table[] = { /* * Models which should use the vendor backlight interface, @@ -270,6 +280,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, + /* + * Toshiba Portégé R100 has working both acpi_video and toshiba_acpi + * vendor driver. But none of them gets activated as it has a VGA with + * no kernel driver (Trident CyberBlade XP4m32). + * The DMI strings are generic so check for the VGA chip in callback. + */ + { + .callback = video_detect_portege_r100, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), + DMI_MATCH(DMI_BOARD_NAME, "Portable PC") + }, + }, + /* * Models which need acpi_video backlight control where the GPU drivers * do not call acpi_video_register_backlight() because no internal panel diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f36027591e1a8..bdd80b73c3e6c 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file, name = map->dev->driver->name; ret = snprintf(buf, PAGE_SIZE, "%s\n", name); - if (ret < 0) { + if (ret >= PAGE_SIZE) { kfree(buf); return ret; } diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 234a84ecde8b1..ea61577471994 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, } if (!map->cache_bypass && map->format.parse_val) { - unsigned int ival; + unsigned int ival, offset; int val_bytes = map->format.val_bytes; - for (i = 0; i < val_len / val_bytes; i++) { - ival = map->format.parse_val(val + (i * val_bytes)); - ret = regcache_write(map, - reg + regmap_get_offset(map, i), - ival); + + /* Cache the last written value for noinc writes */ + i = noinc ? val_len - val_bytes : 0; + for (; i < val_len; i += val_bytes) { + ival = map->format.parse_val(val + i); + offset = noinc ? 
0 : regmap_get_offset(map, i / val_bytes); + ret = regcache_write(map, reg + offset, ival); if (ret) { dev_err(map->dev, "Error in caching of register: %x ret: %d\n", - reg + regmap_get_offset(map, i), ret); + reg + offset, ret); return ret; } } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 800f131222fc8..855fdf5c3b4ea 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd) struct gendisk *disk = nbd->disk; del_gendisk(disk); - put_disk(disk); blk_mq_free_tag_set(&nbd->tag_set); /* @@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd) idr_remove(&nbd_index_idr, nbd->index); mutex_unlock(&nbd_index_mutex); destroy_workqueue(nbd->recv_workq); - kfree(nbd); + put_disk(disk); } static void nbd_dev_remove_work(struct work_struct *work) @@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk) nbd_put(nbd); } +static void nbd_free_disk(struct gendisk *disk) +{ + struct nbd_device *nbd = disk->private_data; + + kfree(nbd); +} + static const struct block_device_operations nbd_fops = { .owner = THIS_MODULE, @@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops = .release = nbd_release, .ioctl = nbd_ioctl, .compat_ioctl = nbd_ioctl, + .free_disk = nbd_free_disk, }; #if IS_ENABLED(CONFIG_DEBUG_FS) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e19b0f9f48b97..4c08efe7f3753 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - hwrng_msleep(rng, 1000); + hwrng_yield(rng); } num_words = rng_readl(priv, RNG_STATUS) >> 24; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index e3598ec9cfca8..420f155d251fb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -678,6 +678,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs) } EXPORT_SYMBOL_GPL(hwrng_msleep); +long hwrng_yield(struct hwrng *rng) +{ + return wait_for_completion_interruptible_timeout(&rng->dying, 1); +} +EXPORT_SYMBOL_GPL(hwrng_yield); + static int __init hwrng_modinit(void) { int ret; diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 12fbe80918319..159baf00a8675 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -58,7 +58,8 @@ struct amd_geode_priv { static int geode_rng_data_read(struct hwrng *rng, u32 *data) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; *data = readl(mem + GEODE_RNG_DATA_REG); @@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data) static int geode_rng_data_present(struct hwrng *rng, int wait) { - void __iomem *mem = (void __iomem *)rng->priv; + struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv; + void __iomem *mem = priv->membase; int data, i; for (i = 0; i < 20; i++) { diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index e319cfa51a8a3..030186def9c69 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c @@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) return; npcm7xx_init_fail: - kfree(npcm7xx_clk_data->hws); + kfree(npcm7xx_clk_data); npcm7xx_init_np_err: iounmap(clk_base); 
npcm7xx_init_error: diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 2c7a830ce3080..fdec715c9ba9b 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); + devm_kfree(dev, sclk); continue; } diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index f6b82e0b9703a..db3bca5f4ec9c 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -96,6 +96,7 @@ config CLK_IMX8QXP depends on (ARCH_MXC && ARM64) || COMPILE_TEST depends on IMX_SCU && HAVE_ARM_SMCCC select MXC_CLK_SCU + select MXC_CLK help Build the driver for IMX8QXP SCU based clocks. diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c index 1e82f72b75c67..1c95ae905eec8 100644 --- a/drivers/clk/imx/clk-imx8-acm.c +++ b/drivers/clk/imx/clk-imx8-acm.c @@ -279,8 +279,10 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev, for (i = 0; i < dev_pm->num_domains; i++) { dev_pm->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); - if (IS_ERR(dev_pm->pd_dev[i])) - return PTR_ERR(dev_pm->pd_dev[i]); + if (IS_ERR(dev_pm->pd_dev[i])) { + ret = PTR_ERR(dev_pm->pd_dev[i]); + goto detach_pm; + } dev_pm->pd_dev_link[i] = device_link_add(dev, dev_pm->pd_dev[i], @@ -371,7 +373,7 @@ static int imx8_acm_clk_probe(struct platform_device *pdev) sels[i].shift, sels[i].width, 0, NULL, NULL); if (IS_ERR(hws[sels[i].clkid])) { - pm_runtime_disable(&pdev->dev); + ret = PTR_ERR(hws[sels[i].clkid]); goto err_clk_register; } } @@ -381,12 +383,16 @@ static int imx8_acm_clk_probe(struct platform_device *pdev) ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data); if (ret < 0) { dev_err(dev, "failed to register hws for ACM\n"); - pm_runtime_disable(&pdev->dev); + goto err_clk_register; } -err_clk_register: + pm_runtime_put_sync(&pdev->dev); + return 0; +err_clk_register: pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + clk_imx_acm_detach_pm_domains(&pdev->dev, &priv->dev_pm); return ret; } diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 4bd65879fcd34..f70ed231b92d6 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) void __iomem *base; int err; - clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, - IMX8MQ_CLK_END), GFP_KERNEL); + clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL); if (WARN_ON(!clk_hw_data)) return -ENOMEM; @@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) hws[IMX8MQ_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4"); np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop"); - base = of_iomap(np, 0); + base = devm_of_iomap(dev, np, 0, NULL); of_node_put(np); - if (WARN_ON(!base)) - return -ENOMEM; + if (WARN_ON(IS_ERR(base))) { + err = PTR_ERR(base); + goto unregister_hws; + } hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); @@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) np = dev->of_node; base = devm_platform_ioremap_resource(pdev, 0); - if (WARN_ON(IS_ERR(base))) - return PTR_ERR(base); + if 
(WARN_ON(IS_ERR(base))) { + err = PTR_ERR(base); + goto unregister_hws; + } /* CORE */ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000); diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index cadcbb318f5cf..4020aa4b79bf2 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c @@ -147,10 +147,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER); imx_clk_scu("adc1_clk", IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER); imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); + imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); - imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); /* Audio SS */ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c index ee5c72369334f..6bbdd4705d71f 100644 --- a/drivers/clk/keystone/pll.c +++ b/drivers/clk/keystone/pll.c @@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node) clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift, mask, 0, NULL); - if (clk) { - of_clk_add_provider(node, of_clk_src_simple_get, clk); - } else { + if (IS_ERR(clk)) { pr_err("%s: error registering divider %s\n", __func__, clk_name); iounmap(reg); + return; } + + of_clk_add_provider(node, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init); @@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node) clk = clk_register_mux(NULL, clk_name, (const char **)&parents, ARRAY_SIZE(parents) , 0, reg, shift, mask, 0, NULL); - if (clk) - of_clk_add_provider(node, of_clk_src_simple_get, clk); - else + if (IS_ERR(clk)) { pr_err("%s: error registering mux %s\n", __func__, clk_name); + return; + } + + of_clk_add_provider(node, of_clk_src_simple_get, clk); } CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init); diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index c81f3e33ce568..12d9560eb4ba2 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -667,6 +667,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); @@ -747,6 +749,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); @@ -774,6 +778,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER)) @@ -890,6 +896,8 @@ static int mtk_pericfg_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_PERI_NR); + if 
(!clk_data) + return -ENOMEM; mtk_clk_register_gates(&pdev->dev, node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c index 1f4c8d0c041ab..9c7f7407d7980 100644 --- a/drivers/clk/mediatek/clk-mt6765.c +++ b/drivers/clk/mediatek/clk-mt6765.c @@ -737,6 +737,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); @@ -769,6 +771,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data); @@ -807,6 +811,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_gates(&pdev->dev, node, ifr_clks, ARRAY_SIZE(ifr_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c index 3ee2f5a2319a0..ffedb1fe3c672 100644 --- a/drivers/clk/mediatek/clk-mt6779.c +++ b/drivers/clk/mediatek/clk-mt6779.c @@ -1217,6 +1217,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); @@ -1237,6 +1239,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c index 2ebd25f0ce71d..f12d4e9ff0bba 100644 --- a/drivers/clk/mediatek/clk-mt6797.c +++ b/drivers/clk/mediatek/clk-mt6797.c @@ -390,6 +390,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR); + if (!clk_data) + return -ENOMEM; mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs), clk_data); @@ -545,6 +547,8 @@ static void mtk_infrasys_init_early(struct device_node *node) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return; for (i = 0; i < CLK_INFRA_NR; i++) infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); @@ -570,6 +574,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) if (!infra_clk_data) { infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); + if (!infra_clk_data) + return -ENOMEM; } else { for (i = 0; i < CLK_INFRA_NR; i++) { if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER)) diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c index fe714debdc9ec..1bfedc988cfe8 100644 --- a/drivers/clk/mediatek/clk-mt7629-eth.c +++ b/drivers/clk/mediatek/clk-mt7629-eth.c @@ -77,6 +77,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev) int r; clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_gates(&pdev->dev, node, eth_clks, CLK_ETH_NR_CLK, clk_data); @@ -100,6 +102,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev) int r; clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK); + if (!clk_data) + return -ENOMEM; 
mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++], CLK_SGMII_NR_CLK, clk_data); diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c index 2882107d0f240..b8a1f01bc974d 100644 --- a/drivers/clk/mediatek/clk-mt7629.c +++ b/drivers/clk/mediatek/clk-mt7629.c @@ -555,6 +555,8 @@ static int mtk_topckgen_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); @@ -579,6 +581,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) struct clk_hw_onecell_data *clk_data; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_gates(&pdev->dev, node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); @@ -602,6 +606,8 @@ static int mtk_pericfg_init(struct platform_device *pdev) return PTR_ERR(base); clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK); + if (!clk_data) + return -ENOMEM; mtk_clk_register_gates(&pdev->dev, node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index a4eca5fd539c8..513ab6b1b3229 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -321,10 +321,8 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll, ret = clk_hw_register(NULL, &pll->hw); - if (ret) { - kfree(pll); + if (ret) return ERR_PTR(ret); - } return &pll->hw; } @@ -340,6 +338,8 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data, return ERR_PTR(-ENOMEM); hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops); + if (IS_ERR(hw)) + kfree(pll); return hw; } diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 865db5202e4cf..a79b837583894 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -131,6 +131,7 @@ config IPQ_APSS_6018 tristate "IPQ APSS Clock Controller" select IPQ_APSS_PLL depends on QCOM_APCS_IPC || COMPILE_TEST + depends on QCOM_SMEM help Support for APSS clock controller on IPQ platforms. 
The APSS clock controller manages the Mux and enable block that feeds the diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c index e170331858cc1..41279e5437a62 100644 --- a/drivers/clk/qcom/apss-ipq-pll.c +++ b/drivers/clk/qcom/apss-ipq-pll.c @@ -68,13 +68,13 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = { .fw_name = "xo", }, .num_parents = 1, - .ops = &clk_alpha_pll_stromer_ops, + .ops = &clk_alpha_pll_stromer_plus_ops, }, }, }; static const struct alpha_pll_config ipq5332_pll_config = { - .l = 0x3e, + .l = 0x2d, .config_ctl_val = 0x4001075b, .config_ctl_hi_val = 0x304, .main_output_mask = BIT(0), diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index e4ef645f65d1f..892f2efc1c32c 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -2479,3 +2479,66 @@ const struct clk_ops clk_alpha_pll_stromer_ops = { .set_rate = clk_alpha_pll_stromer_set_rate, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops); + +static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long prate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 l, alpha_width = pll_alpha_width(pll); + int ret, pll_mode; + u64 a; + + rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); + + ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &pll_mode); + if (ret) + return ret; + + regmap_write(pll->clkr.regmap, PLL_MODE(pll), 0); + + /* Delay of 2 output clock ticks required until output is disabled */ + udelay(1); + + regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); + + if (alpha_width > ALPHA_BITWIDTH) + a <<= alpha_width - ALPHA_BITWIDTH; + + regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); + regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), + a >> ALPHA_BITWIDTH); + + regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL); + + /* Wait five micro seconds or more */ + udelay(5); + regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, + PLL_RESET_N); + + /* The lock time should be less than 50 micro seconds worst case */ + usleep_range(50, 60); + + ret = wait_for_pll_enable_lock(pll); + if (ret) { + pr_err("Wait for PLL enable lock failed [%s] %d\n", + clk_hw_get_name(hw), ret); + return ret; + } + + if (pll_mode & PLL_OUTCTRL) + regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL, + PLL_OUTCTRL); + + return 0; +} + +const struct clk_ops clk_alpha_pll_stromer_plus_ops = { + .prepare = clk_alpha_pll_enable, + .unprepare = clk_alpha_pll_disable, + .is_enabled = clk_alpha_pll_is_enabled, + .recalc_rate = clk_alpha_pll_recalc_rate, + .determine_rate = clk_alpha_pll_stromer_determine_rate, + .set_rate = clk_alpha_pll_stromer_plus_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index e4bd863027ab6..903fbab9b58e9 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -152,6 +152,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops; extern const struct clk_ops clk_alpha_pll_huayra_ops; extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops; extern const struct clk_ops clk_alpha_pll_stromer_ops; +extern const struct clk_ops clk_alpha_pll_stromer_plus_ops; extern const struct clk_ops clk_alpha_pll_fabia_ops; extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index e22baf3a7112a..5183c74b074f8 100644 --- a/drivers/clk/qcom/clk-rcg2.c 
+++ b/drivers/clk/qcom/clk-rcg2.c @@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) static unsigned long calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div) { - if (hid_div) { - rate *= 2; - rate /= hid_div + 1; - } + if (hid_div) + rate = mult_frac(rate, 2, hid_div + 1); - if (mode) { - u64 tmp = rate; - tmp *= m; - do_div(tmp, n); - rate = tmp; - } + if (mode) + rate = mult_frac(rate, m, n); return rate; } diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c index 19dc2b71cacf0..2a3c0659b7008 100644 --- a/drivers/clk/qcom/gcc-ipq5018.c +++ b/drivers/clk/qcom/gcc-ipq5018.c @@ -128,7 +128,6 @@ static struct clk_alpha_pll_postdiv gpll0 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -143,7 +142,6 @@ static struct clk_alpha_pll_postdiv gpll2 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -158,7 +156,6 @@ static struct clk_alpha_pll_postdiv gpll4 = { }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c index b02026f8549b2..f98591148a976 100644 --- a/drivers/clk/qcom/gcc-ipq5332.c +++ b/drivers/clk/qcom/gcc-ipq5332.c @@ -71,7 +71,6 @@ static struct clk_fixed_factor gpll0_div2 = { &gpll0_main.clkr.hw }, .num_parents = 1, .ops = &clk_fixed_factor_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -85,7 +84,6 @@ static struct clk_alpha_pll_postdiv gpll0 = { &gpll0_main.clkr.hw }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -114,7 +112,6 @@ static struct clk_alpha_pll_postdiv gpll2 = { &gpll2_main.clkr.hw }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; @@ -154,7 +151,6 @@ static struct clk_alpha_pll_postdiv gpll4 = { &gpll4_main.clkr.hw }, .num_parents = 1, .ops = &clk_alpha_pll_postdiv_ro_ops, - .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c index 8f430367299e6..e8190108e1aef 100644 --- a/drivers/clk/qcom/gcc-ipq9574.c +++ b/drivers/clk/qcom/gcc-ipq9574.c @@ -87,7 +87,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = { &gpll0_main.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_fixed_factor_ops, }, }; @@ -102,7 +101,6 @@ static struct clk_alpha_pll_postdiv gpll0 = { &gpll0_main.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_alpha_pll_postdiv_ro_ops, }, }; @@ -132,7 +130,6 @@ static struct clk_alpha_pll_postdiv gpll4 = { &gpll4_main.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_alpha_pll_postdiv_ro_ops, }, }; @@ -162,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll2 = { &gpll2_main.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_alpha_pll_postdiv_ro_ops, }, }; diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 14dcc3f036683..e7b03a17514a5 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -244,71 +244,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = { { .hw = &gpll0_early_div.hw } }; -static const struct freq_tbl ftbl_system_noc_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0), - F(100000000, P_GPLL0, 6, 0, 0), - F(150000000, P_GPLL0, 4, 0, 0), - F(200000000, P_GPLL0, 3, 0, 0), - F(240000000, 
P_GPLL0, 2.5, 0, 0), - { } -}; - -static struct clk_rcg2 system_noc_clk_src = { - .cmd_rcgr = 0x0401c, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_gpll0_early_div_map, - .freq_tbl = ftbl_system_noc_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "system_noc_clk_src", - .parent_data = gcc_xo_gpll0_gpll0_early_div, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div), - .ops = &clk_rcg2_ops, - }, -}; - -static const struct freq_tbl ftbl_config_noc_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(37500000, P_GPLL0, 16, 0, 0), - F(75000000, P_GPLL0, 8, 0, 0), - { } -}; - -static struct clk_rcg2 config_noc_clk_src = { - .cmd_rcgr = 0x0500c, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .freq_tbl = ftbl_config_noc_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "config_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - -static const struct freq_tbl ftbl_periph_noc_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(37500000, P_GPLL0, 16, 0, 0), - F(50000000, P_GPLL0, 12, 0, 0), - F(75000000, P_GPLL0, 8, 0, 0), - F(100000000, P_GPLL0, 6, 0, 0), - { } -}; - -static struct clk_rcg2 periph_noc_clk_src = { - .cmd_rcgr = 0x06014, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_map, - .freq_tbl = ftbl_periph_noc_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "periph_noc_clk_src", - .parent_data = gcc_xo_gpll0, - .num_parents = ARRAY_SIZE(gcc_xo_gpll0), - .ops = &clk_rcg2_ops, - }, -}; - static const struct freq_tbl ftbl_usb30_master_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(120000000, P_GPLL0, 5, 0, 0), @@ -1297,11 +1232,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mmss_noc_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + .flags = CLK_IGNORE_UNUSED, .ops = &clk_branch2_ops, }, }, @@ -1464,11 +1395,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb_phy_cfg_ahb2phy_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1498,11 +1424,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc1_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1549,11 +1470,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc2_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1583,11 +1499,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc3_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1617,11 +1528,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_sdcc4_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - 
}, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1635,11 +1541,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = { .enable_mask = BIT(17), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp1_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -1977,11 +1878,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = { .enable_mask = BIT(15), .hw.init = &(struct clk_init_data){ .name = "gcc_blsp2_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2318,11 +2214,6 @@ static struct clk_branch gcc_pdm_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pdm_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2353,11 +2244,6 @@ static struct clk_branch gcc_prng_ahb_clk = { .enable_mask = BIT(13), .hw.init = &(struct clk_init_data){ .name = "gcc_prng_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2370,11 +2256,6 @@ static struct clk_branch gcc_tsif_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_tsif_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2422,11 +2303,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { .enable_mask = BIT(10), .hw.init = &(struct clk_init_data){ .name = "gcc_boot_rom_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2520,11 +2396,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_slv_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2537,11 +2408,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_mstr_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2554,11 +2420,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_0_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2606,11 +2467,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_slv_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2623,11 +2479,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_mstr_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - 
&system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2640,11 +2491,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_1_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2692,11 +2538,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_2_slv_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2709,11 +2550,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_2_mstr_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2726,11 +2562,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_2_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2778,11 +2609,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_pcie_phy_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -2829,11 +2655,6 @@ static struct clk_branch gcc_ufs_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_ufs_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -3060,11 +2881,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_aggre0_snoc_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -3077,11 +2894,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_aggre0_cnoc_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -3094,11 +2907,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_smmu_aggre0_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -3111,11 +2920,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_smmu_aggre0_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = 
CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -3162,10 +2967,6 @@ static struct clk_branch gcc_dcc_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_dcc_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3178,10 +2979,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3194,11 +2991,6 @@ static struct clk_branch gcc_qspi_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_qspi_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &periph_noc_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -3347,10 +3139,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_cfg_ahb_clk", - .parent_hws = (const struct clk_hw*[]){ - &config_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3363,10 +3151,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_mnoc_bimc_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3379,10 +3163,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_snoc_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3395,10 +3175,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_mss_q6_bimc_axi_clk", - .parent_hws = (const struct clk_hw*[]){ - &system_noc_clk_src.clkr.hw, - }, - .num_parents = 1, .ops = &clk_branch2_ops, }, }, @@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GPLL0] = &gpll0.clkr, [GPLL4_EARLY] = &gpll4_early.clkr, [GPLL4] = &gpll4.clkr, - [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr, - [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr, - [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr, [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr, [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 41ab210875fb2..05d115c52dfeb 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -774,7 +774,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { .name = "gcc_sdcc2_apps_clk_src", .parent_data = gcc_parents_6, .num_parents = ARRAY_SIZE(gcc_parents_6), - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_OPS_PARENT_ENABLE, .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c index a023c4374be96..1180e48c687ac 100644 --- a/drivers/clk/qcom/mmcc-msm8998.c +++ b/drivers/clk/qcom/mmcc-msm8998.c @@ -2439,6 +2439,7 @@ static struct clk_branch fd_ahb_clk = { static struct clk_branch mnoc_ahb_clk = { .halt_reg = 0x5024, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x5024, .enable_mask = BIT(0), @@ -2454,6 +2455,7 @@ static struct clk_branch mnoc_ahb_clk 
= { static struct clk_branch bimc_smmu_ahb_clk = { .halt_reg = 0xe004, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xe004, .hwcg_bit = 1, .clkr = { @@ -2471,6 +2473,7 @@ static struct clk_branch bimc_smmu_ahb_clk = { static struct clk_branch bimc_smmu_axi_clk = { .halt_reg = 0xe008, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xe008, .hwcg_bit = 1, .clkr = { @@ -2607,11 +2610,13 @@ static struct gdsc camss_cpp_gdsc = { static struct gdsc bimc_smmu_gdsc = { .gdscr = 0xe020, .gds_hw_ctrl = 0xe024, + .cxcs = (unsigned int []){ 0xe008 }, + .cxc_count = 1, .pd = { .name = "bimc_smmu", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | ALWAYS_ON, + .flags = VOTABLE, }; static struct clk_regmap *mmcc_msm8998_clocks[] = { diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c index 1e7991439527a..50a443bf79ecd 100644 --- a/drivers/clk/ralink/clk-mtmips.c +++ b/drivers/clk/ralink/clk-mtmips.c @@ -821,6 +821,10 @@ static const struct mtmips_clk_data mt76x8_clk_data = { }; static const struct of_device_id mtmips_of_match[] = { + { + .compatible = "ralink,rt2880-reset", + .data = NULL, + }, { .compatible = "ralink,rt2880-sysc", .data = &rt2880_clk_data, @@ -1088,25 +1092,11 @@ static int mtmips_clk_probe(struct platform_device *pdev) return 0; } -static const struct of_device_id mtmips_clk_of_match[] = { - { .compatible = "ralink,rt2880-reset" }, - { .compatible = "ralink,rt2880-sysc" }, - { .compatible = "ralink,rt3050-sysc" }, - { .compatible = "ralink,rt3052-sysc" }, - { .compatible = "ralink,rt3352-sysc" }, - { .compatible = "ralink,rt3883-sysc" }, - { .compatible = "ralink,rt5350-sysc" }, - { .compatible = "ralink,mt7620-sysc" }, - { .compatible = "ralink,mt7628-sysc" }, - { .compatible = "ralink,mt7688-sysc" }, - {} -}; - static struct platform_driver mtmips_clk_driver = { .probe = mtmips_clk_probe, .driver = { .name = "mtmips-clk", - .of_match_table = mtmips_clk_of_match, + .of_match_table = mtmips_of_match, }, }; diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c index e2e0447de1901..5a15f8788b922 100644 --- a/drivers/clk/renesas/rcar-cpg-lib.c +++ b/drivers/clk/renesas/rcar-cpg-lib.c @@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers, #define STPnHCK BIT(9 - SDnSRCFC_SHIFT) static const struct clk_div_table cpg_sdh_div_table[] = { + /* + * These values are recommended by the datasheet. Because they come + * first, Linux will only use these. + */ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 }, - { STPnHCK | 4, 16 }, { 0, 0 }, + { STPnHCK | 4, 16 }, + /* + * These values are not recommended because STPnHCK is wrong. But they + * have been seen because of broken firmware. So, we support reading + * them but Linux will sanitize them when initializing through + * recalc_rate. + */ + { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 }, + /* Sentinel */ + { 0, 0 } }; struct clk * __init cpg_sdh_clk_register(const char *name, diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 47f488387f33a..3f01620e292b6 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -11,6 +11,7 @@ * Copyright (C) 2015 Renesas Electronics Corp. 
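The ordering comment in cpg_sdh_div_table above leans on first-match-wins behaviour: when a rate is set, the first table entry with the requested divisor supplies the register value, so the recommended encodings are always written, while the firmware-only encodings are still understood when reading back. A generic sketch of that direction of the lookup (not the CCF implementation, just an illustration; the function name is invented):

static unsigned int div_to_val_first_match(const struct clk_div_table *table,
					   unsigned int div)
{
	const struct clk_div_table *t;

	for (t = table; t->div; t++)	/* table order, stop at the sentinel */
		if (t->div == div)
			return t->val;	/* e.g. div 4 -> STPnHCK | 2, never plain 2 */

	return table->val;		/* fallback, illustration only */
}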
*/ +#include #include #include #include @@ -38,14 +39,13 @@ #define WARN_DEBUG(x) do { } while (0) #endif -#define DIV_RSMASK(v, s, m) ((v >> s) & m) #define GET_SHIFT(val) ((val >> 12) & 0xff) #define GET_WIDTH(val) ((val >> 8) & 0xf) -#define KDIV(val) DIV_RSMASK(val, 16, 0xffff) -#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff) -#define PDIV(val) DIV_RSMASK(val, 0, 0x3f) -#define SDIV(val) DIV_RSMASK(val, 0, 0x7) +#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val)) +#define MDIV(val) FIELD_GET(GENMASK(15, 6), val) +#define PDIV(val) FIELD_GET(GENMASK(5, 0), val) +#define SDIV(val) FIELD_GET(GENMASK(2, 0), val) #define CLK_ON_R(reg) (reg) #define CLK_MON_R(reg) (0x180 + (reg)) @@ -188,7 +188,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) u32 off = GET_REG_OFFSET(hwdata->conf); u32 shift = GET_SHIFT(hwdata->conf); const u32 clk_src_266 = 2; - u32 bitmask; + u32 msk, val, bitmask; + unsigned long flags; + int ret; /* * As per the HW manual, we should not directly switch from 533 MHz to @@ -202,26 +204,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) * the index to value mapping is done by adding 1 to the index. */ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16; + msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; + spin_lock_irqsave(&priv->rmw_lock, flags); if (index != clk_src_266) { - u32 msk, val; - int ret; - writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off); - msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; - - ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val, - !(val & msk), 100, - CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); - if (ret) { - dev_err(priv->dev, "failed to switch clk source\n"); - return ret; - } + ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, + !(val & msk), 10, + CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); + if (ret) + goto unlock; } writel(bitmask | ((index + 1) << shift), priv->base + off); - return 0; + ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, + !(val & msk), 10, + CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); +unlock: + spin_unlock_irqrestore(&priv->rmw_lock, flags); + + if (ret) + dev_err(priv->dev, "failed to switch clk source\n"); + + return ret; } static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) @@ -232,14 +238,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) val >>= GET_SHIFT(hwdata->conf); val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0); - if (val) { - val--; - } else { - /* Prohibited clk source, change it to 533 MHz(reset value) */ - rzg2l_cpg_sd_clk_mux_set_parent(hw, 0); - } - return val; + return val ? 
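A note on the poll helper used in rzg2l_cpg_sd_clk_mux_set_parent() above: readl_poll_timeout() can sleep between reads, which is no longer allowed once the sequence runs under spin_lock_irqsave(), hence the switch to readl_poll_timeout_atomic(), which busy-waits. A generic sketch of the pattern with placeholder register/bit arguments (the 10/200 values mirror the new delay and timeout but are otherwise illustrative):

#include <linux/iopoll.h>

static int poll_bit_clear_atomic(void __iomem *reg, u32 bit, spinlock_t *lock)
{
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(lock, flags);		/* no sleeping past this point */
	ret = readl_poll_timeout_atomic(reg, val, !(val & bit),
					10,	/* delay between reads, us */
					200);	/* total timeout, us */
	spin_unlock_irqrestore(lock, flags);

	return ret;	/* 0 on success, -ETIMEDOUT if the bit never cleared */
}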
val - 1 : 0; } static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { @@ -695,18 +695,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw, struct pll_clk *pll_clk = to_pll(hw); struct rzg2l_cpg_priv *priv = pll_clk->priv; unsigned int val1, val2; - unsigned int mult = 1; - unsigned int div = 1; + u64 rate; if (pll_clk->type != CLK_TYPE_SAM_PLL) return parent_rate; val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf)); val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf)); - mult = MDIV(val1) + KDIV(val1) / 65536; - div = PDIV(val1) << SDIV(val2); - return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div); + rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1), + 16 + SDIV(val2)); + + return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1)); } static const struct clk_ops rzg2l_cpg_pll_ops = { diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index 6cee9e56acc72..91e9c2569f801 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -43,7 +43,7 @@ #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) -#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000 +#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200 /* n = 0/1/2 for PLL1/4/6 */ #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n)) diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 768a1f3398b47..5d5bb123ba949 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node, u32 flags, struct clk_omap_divider *div) { - struct clk *clk; struct clk_init_data init; const char *parent_name; const char *name; @@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node, div->hw.init = &init; /* register the clock */ - clk = of_ti_clk_register(node, &div->hw, name); - - if (IS_ERR(clk)) - kfree(div); - - return clk; + return of_ti_clk_register(node, &div->hw, name); } int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 7dd2c615bce23..071b04f1ee730 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -836,8 +836,9 @@ static u64 __arch_timer_check_delta(void) * Note that TVAL is signed, thus has only 31 of its * 32 bits to express magnitude. 
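The reworked rzg2l_cpg_pll_clk_recalc_rate() above computes rate = parent * (M + K / 65536) / (P * 2^S) in one 64-bit multiply, with K treated as a signed 16-bit correction. A worked example with purely illustrative register values (not taken from a real RZ/G2L configuration):

#include <linux/math.h>
#include <linux/math64.h>

static u64 pll_rate_example(void)
{
	u64 parent = 24000000;		/* 24 MHz reference, illustrative */
	u32 m = 100, p = 1, s = 1;
	s16 k = -32768;			/* signed fraction: -0.5 */
	u64 rate;

	/* 24 MHz * (100 - 0.5) / 2^1 = 1194 MHz ... */
	rate = mul_u64_u32_shr(parent, (m << 16) + k, 16 + s);

	/* ... then divide by P (1 here), rounding to nearest */
	return DIV_ROUND_CLOSEST_ULL(rate, p);
}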
*/ - MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM, - APM_CPU_PART_POTENZA)), + MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM, + APM_CPU_PART_XGENE), + APM_CPU_VAR_POTENZA, 0x0, 0xf), {}, }; diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 09ab29cb7f641..5f60f6bd33866 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -140,6 +140,8 @@ struct dmtimer { struct platform_device *pdev; struct list_head node; struct notifier_block nb; + struct notifier_block fclk_nb; + unsigned long fclk_rate; }; static u32 omap_reserved_systimers; @@ -253,8 +255,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer) timer->posted = OMAP_TIMER_POSTED; } -static inline void __omap_dm_timer_stop(struct dmtimer *timer, - unsigned long rate) +static inline void __omap_dm_timer_stop(struct dmtimer *timer) { u32 l; @@ -269,7 +270,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer, * Wait for functional clock period x 3.5 to make sure that * timer is stopped */ - udelay(3500000 / rate + 1); + udelay(3500000 / timer->fclk_rate + 1); #endif } @@ -348,6 +349,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static int omap_timer_fclk_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *clk_data = data; + struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb); + + switch (event) { + case POST_RATE_CHANGE: + timer->fclk_rate = clk_data->new_rate; + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + static int omap_dm_timer_reset(struct dmtimer *timer) { u32 l, timeout = 100000; @@ -754,7 +770,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie) { struct dmtimer *timer; struct device *dev; - unsigned long rate = 0; timer = to_dmtimer(cookie); if (unlikely(!timer)) @@ -762,10 +777,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie) dev = &timer->pdev->dev; - if (!timer->omap1) - rate = clk_get_rate(timer->fclk); - - __omap_dm_timer_stop(timer, rate); + __omap_dm_timer_stop(timer); pm_runtime_put_sync(dev); @@ -1124,6 +1136,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->fclk = devm_clk_get(dev, "fck"); if (IS_ERR(timer->fclk)) return PTR_ERR(timer->fclk); + + timer->fclk_nb.notifier_call = omap_timer_fclk_notifier; + ret = devm_clk_notifier_register(dev, timer->fclk, + &timer->fclk_nb); + if (ret) + return ret; + + timer->fclk_rate = clk_get_rate(timer->fclk); } else { timer->fclk = ERR_PTR(-ENODEV); } diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 88ef5e57ccd05..386aed3637b4e 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -450,6 +450,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy, if (IS_ERR(opp)) continue; + dev_pm_opp_put(opp); + ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ); if (ret < 0) return ret; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index eba2d750c3b07..066f08a3a040d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -575,7 +575,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - ctx->cdata.key_virt = key; + memcpy(ctx->key, key, keylen); + ctx->cdata.key_virt = ctx->key; ctx->cdata.keylen = keylen - saltlen; return chachapoly_set_sh_desc(aead); diff --git 
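The timer-ti-dm change above is an instance of a general pattern worth naming: cache a clock's rate from a rate-change notifier so the stop path never needs clk_get_rate(), which takes the clk framework's prepare lock and may sleep. A minimal, generic sketch with made-up structure and field names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/notifier.h>

struct my_dev {
	struct clk *fclk;
	struct notifier_block fclk_nb;
	unsigned long fclk_rate;	/* cached, safe to use in atomic paths */
};

static int my_fclk_notifier(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct clk_notifier_data *cnd = data;
	struct my_dev *d = container_of(nb, struct my_dev, fclk_nb);

	if (event != POST_RATE_CHANGE)
		return NOTIFY_DONE;

	d->fclk_rate = cnd->new_rate;	/* refresh the cache after each change */
	return NOTIFY_OK;
}

static int my_probe_fragment(struct device *dev, struct my_dev *d)
{
	int ret;

	d->fclk_nb.notifier_call = my_fclk_notifier;
	ret = devm_clk_notifier_register(dev, d->fclk, &d->fclk_nb);
	if (ret)
		return ret;

	d->fclk_rate = clk_get_rate(d->fclk);	/* initial value */
	return 0;
}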
a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 9156bbe038b7b..a148ff1f0872c 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -641,7 +641,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - ctx->cdata.key_virt = key; + memcpy(ctx->key, key, keylen); + ctx->cdata.key_virt = ctx->key; ctx->cdata.keylen = keylen - saltlen; return chachapoly_set_sh_desc(aead); diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c index 839ea14b9a853..6f33149ef80df 100644 --- a/drivers/crypto/ccp/dbc.c +++ b/drivers/crypto/ccp/dbc.c @@ -205,7 +205,7 @@ int dbc_dev_init(struct psp_device *psp) return -ENOMEM; BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE); - dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); + dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 0); if (!dbc_dev->mbox) { ret = -ENOMEM; goto cleanup_dev; diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 39297ce70f441..3dce35debf637 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -433,8 +433,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); +static bool pf_q_num_flag; static int pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF); } @@ -1033,7 +1036,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm) for (i = 0; i < clusters_num; i++) { ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); - if (ret < 0) + if (ret >= HPRE_DBGFS_VAL_MAX_LEN) return -EINVAL; tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); @@ -1157,6 +1160,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } ret = hisi_qm_init(qm); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a99fd589445ce..1638c0a7df310 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -206,8 +206,6 @@ #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 -#define QM_DRIVER_REMOVING 0 -#define QM_RST_SCHED 1 #define QM_QOS_PARAM_NUM 2 #define QM_QOS_MAX_VAL 1000 #define QM_QOS_RATE 100 @@ -2824,7 +2822,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) mutex_init(&qm->mailbox_lock); init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; - qm->misc_ctl = false; if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); @@ -5093,6 +5090,7 @@ free_eq_irq: static int qm_get_qp_num(struct hisi_qm *qm) { + struct device *dev = &qm->pdev->dev; bool is_db_isolation; /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. 
*/ @@ -5109,13 +5107,21 @@ static int qm_get_qp_num(struct hisi_qm *qm) qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_FUNC_MAX_QP_CAP, is_db_isolation); - /* check if qp number is valid */ - if (qm->qp_num > qm->max_qp_num) { - dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", + if (qm->qp_num <= qm->max_qp_num) + return 0; + + if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { + /* Check whether the set qp number is valid */ + dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", qm->qp_num, qm->max_qp_num); return -EINVAL; } + dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", + qm->qp_num, qm->max_qp_num); + qm->qp_num = qm->max_qp_num; + qm->debug.curr_qm_qp_num = qm->qp_num; + return 0; } diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h index 1406a422d4551..8e36aa9c681be 100644 --- a/drivers/crypto/hisilicon/qm_common.h +++ b/drivers/crypto/hisilicon/qm_common.h @@ -4,7 +4,6 @@ #define QM_COMMON_H #define QM_DBG_READ_LEN 256 -#define QM_RESETTING 2 struct qm_cqe { __le32 rsvd0; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 77f9f131b8503..62bd8936a9154 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -311,8 +311,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(sec_diff_regs); +static bool pf_q_num_flag; static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); } @@ -1120,6 +1123,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index f3ce34198775d..84dbaeb07ea83 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -364,8 +364,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); +static bool pf_q_num_flag; static int pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF); } @@ -1139,6 +1142,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index dd4464b7e00b1..a5691ba0b7244 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -11,8 +11,13 @@ #include #include #include "adf_4xxx_hw_data.h" +#include "adf_cfg_services.h" #include "icp_qat_hw.h" +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 BIT(8) + enum adf_fw_objs { ADF_FW_SYM_OBJ, ADF_FW_ASYM_OBJ, @@ -40,39 
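The hpre/sec/zip changes above all distinguish "the administrator explicitly set pf_q_num=" (QM_MODULE_PARAM set, invalid values still rejected) from "this is only the driver default" (silently clamped to the function's hardware maximum). Condensed into one hypothetical helper to make the resulting policy explicit:

static int validate_or_clamp_qp_num(struct hisi_qm *qm, bool user_set)
{
	if (qm->qp_num <= qm->max_qp_num)
		return 0;

	if (user_set)			/* explicit pf_q_num= module parameter */
		return -EINVAL;		/* reject, as before */

	/* default value only: fall back to the hardware maximum */
	qm->qp_num = qm->max_qp_num;
	qm->debug.curr_qm_qp_num = qm->qp_num;
	return 0;
}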
+45,45 @@ struct adf_fw_config { }; static const struct adf_fw_config adf_fw_cy_config[] = { - {0xF0, ADF_FW_SYM_OBJ}, - {0xF, ADF_FW_ASYM_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static const struct adf_fw_config adf_fw_dc_config[] = { - {0xF0, ADF_FW_DC_OBJ}, - {0xF, ADF_FW_DC_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static const struct adf_fw_config adf_fw_sym_config[] = { - {0xF0, ADF_FW_SYM_OBJ}, - {0xF, ADF_FW_SYM_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static const struct adf_fw_config adf_fw_asym_config[] = { - {0xF0, ADF_FW_ASYM_OBJ}, - {0xF, ADF_FW_ASYM_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static const struct adf_fw_config adf_fw_asym_dc_config[] = { - {0xF0, ADF_FW_ASYM_OBJ}, - {0xF, ADF_FW_DC_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static const struct adf_fw_config adf_fw_sym_dc_config[] = { - {0xF0, ADF_FW_SYM_OBJ}, - {0xF, ADF_FW_DC_OBJ}, - {0x100, ADF_FW_ADMIN_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dcc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ}, }; static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config)); @@ -80,6 +91,7 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); +static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); /* Worker thread to service arbiter mappings */ static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { @@ -94,36 +106,18 @@ static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { 0x0 }; +static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0 +}; + static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, .instances = 0, }; -enum dev_services { - SVC_CY = 0, - SVC_CY2, - SVC_DC, - SVC_SYM, - SVC_ASYM, - SVC_DC_ASYM, - SVC_ASYM_DC, - SVC_DC_SYM, - SVC_SYM_DC, -}; - -static const char *const dev_cfg_services[] = { - [SVC_CY] = ADF_CFG_CY, - [SVC_CY2] = ADF_CFG_ASYM_SYM, - [SVC_DC] = ADF_CFG_DC, - [SVC_SYM] = ADF_CFG_SYM, - [SVC_ASYM] = ADF_CFG_ASYM, - [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, - [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, - [SVC_DC_SYM] = ADF_CFG_DC_SYM, - [SVC_SYM_DC] = ADF_CFG_SYM_DC, -}; - static int get_service_enabled(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; @@ -137,7 +131,7 @@ static int get_service_enabled(struct adf_accel_dev *accel_dev) return ret; } - ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services), + ret = match_string(adf_cfg_services, 
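The ADF_AE_GROUP_* names above are a mechanical replacement for the old magic engine masks; a local check of the equivalence (not part of the patch) could read:

#include <linux/build_bug.h>

static_assert(ADF_AE_GROUP_0 == 0x00F);	/* service engines 0-3 */
static_assert(ADF_AE_GROUP_1 == 0x0F0);	/* service engines 4-7 */
static_assert(ADF_AE_GROUP_2 == 0x100);	/* admin engine 8 */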
ARRAY_SIZE(adf_cfg_services), services); if (ret < 0) dev_err(&GET_DEV(accel_dev), @@ -212,6 +206,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; u32 capabilities_sym, capabilities_asym, capabilities_dc; + u32 capabilities_dcc; u32 fusectl1; /* Read accelerator capabilities mask */ @@ -284,6 +279,14 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; + case SVC_DCC: + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + capabilities_dcc = capabilities_dc | capabilities_sym; + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + return capabilities_dcc; case SVC_SYM: return capabilities_sym; case SVC_ASYM: @@ -309,6 +312,8 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) switch (get_service_enabled(accel_dev)) { case SVC_DC: return thrd_to_arb_map_dc; + case SVC_DCC: + return thrd_to_arb_map_dcc; default: return default_thrd_to_arb_map; } @@ -393,38 +398,96 @@ static u32 uof_get_num_objs(void) return ARRAY_SIZE(adf_fw_cy_config); } -static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, - const char * const fw_objs[], int num_objs) +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { - int id; - switch (get_service_enabled(accel_dev)) { case SVC_CY: case SVC_CY2: - id = adf_fw_cy_config[obj_num].obj; - break; + return adf_fw_cy_config; case SVC_DC: - id = adf_fw_dc_config[obj_num].obj; - break; + return adf_fw_dc_config; + case SVC_DCC: + return adf_fw_dcc_config; case SVC_SYM: - id = adf_fw_sym_config[obj_num].obj; - break; + return adf_fw_sym_config; case SVC_ASYM: - id = adf_fw_asym_config[obj_num].obj; - break; + return adf_fw_asym_config; case SVC_ASYM_DC: case SVC_DC_ASYM: - id = adf_fw_asym_dc_config[obj_num].obj; - break; + return adf_fw_asym_dc_config; case SVC_SYM_DC: case SVC_DC_SYM: - id = adf_fw_sym_dc_config[obj_num].obj; - break; + return adf_fw_sym_dc_config; default: - id = -EINVAL; - break; + return NULL; + } +} + +enum adf_rp_groups { + RP_GROUP_0 = 0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + +static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + enum adf_cfg_service_type rps[RP_GROUP_COUNT]; + const struct adf_fw_config *fw_config; + u16 ring_to_svc_map; + int i, j; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + for (i = 0; i < RP_GROUP_COUNT; i++) { + switch (fw_config[i].ae_mask) { + case ADF_AE_GROUP_0: + j = RP_GROUP_0; + break; + case ADF_AE_GROUP_1: + j = RP_GROUP_1; + break; + default: + return 0; + } + + switch (fw_config[i].obj) { + case ADF_FW_SYM_OBJ: + rps[j] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[j] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[j] = COMP; + break; + default: + rps[j] = 0; + break; + } } + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, + const char * const fw_objs[], int num_objs) +{ + const struct adf_fw_config *fw_config; + int id; + + fw_config = get_fw_config(accel_dev); + if (fw_config) + id = fw_config[obj_num].obj; + else + id = -EINVAL; + if (id < 0 || id > num_objs) return NULL; 
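To make get_ring_to_svc_map() above concrete: the services of the two AE groups alternate across the four ring pairs. For the crypto ("sym;asym") firmware layout, where group 1 runs SYM and group 0 runs ASYM, the map expands to (sketch reusing the shift macros and service types from the patch):

u16 map = ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT |	/* group 0 */
	  SYM  << ADF_CFG_SERV_RING_PAIR_1_SHIFT |	/* group 1 */
	  ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |	/* group 0 again */
	  SYM  << ADF_CFG_SERV_RING_PAIR_3_SHIFT;	/* group 1 again */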
@@ -447,26 +510,13 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { - switch (get_service_enabled(accel_dev)) { - case SVC_CY: - return adf_fw_cy_config[obj_num].ae_mask; - case SVC_DC: - return adf_fw_dc_config[obj_num].ae_mask; - case SVC_CY2: - return adf_fw_cy_config[obj_num].ae_mask; - case SVC_SYM: - return adf_fw_sym_config[obj_num].ae_mask; - case SVC_ASYM: - return adf_fw_asym_config[obj_num].ae_mask; - case SVC_ASYM_DC: - case SVC_DC_ASYM: - return adf_fw_asym_dc_config[obj_num].ae_mask; - case SVC_SYM_DC: - case SVC_DC_SYM: - return adf_fw_sym_dc_config[obj_num].ae_mask; - default: + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) return 0; - } + + return fw_config[obj_num].ae_mask; } void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) @@ -522,6 +572,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->set_msix_rttable = set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 6d4e2e139ffa2..90f5c1ca7b8d8 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -11,6 +11,7 @@ #include #include "adf_4xxx_hw_data.h" +#include "adf_cfg_services.h" #include "qat_compression.h" #include "qat_crypto.h" #include "adf_transport_access_macros.h" @@ -23,30 +24,6 @@ static const struct pci_device_id adf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); -enum configs { - DEV_CFG_CY = 0, - DEV_CFG_DC, - DEV_CFG_SYM, - DEV_CFG_ASYM, - DEV_CFG_ASYM_SYM, - DEV_CFG_ASYM_DC, - DEV_CFG_DC_ASYM, - DEV_CFG_SYM_DC, - DEV_CFG_DC_SYM, -}; - -static const char * const services_operations[] = { - ADF_CFG_CY, - ADF_CFG_DC, - ADF_CFG_SYM, - ADF_CFG_ASYM, - ADF_CFG_ASYM_SYM, - ADF_CFG_ASYM_DC, - ADF_CFG_DC_ASYM, - ADF_CFG_SYM_DC, - ADF_CFG_DC_SYM, -}; - static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) { if (accel_dev->hw_device) { @@ -292,16 +269,17 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - ret = sysfs_match_string(services_operations, services); + ret = sysfs_match_string(adf_cfg_services, services); if (ret < 0) goto err; switch (ret) { - case DEV_CFG_CY: - case DEV_CFG_ASYM_SYM: + case SVC_CY: + case SVC_CY2: ret = adf_crypto_dev_config(accel_dev); break; - case DEV_CFG_DC: + case SVC_DC: + case SVC_DCC: ret = adf_comp_dev_config(accel_dev); break; default: diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index e57abde66f4fb..79d5a1535eda3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -29,7 +29,7 @@ #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 -#define ADF_MAX_MSIX_VECTOR_NAME 16 +#define ADF_MAX_MSIX_VECTOR_NAME 48 #define ADF_DEVICE_NAME_PREFIX "qat_" enum adf_accel_capabilities { @@ -182,6 +182,7 @@ struct adf_hw_device_data { void (*get_arb_info)(struct arb_info *arb_csrs_info); void 
(*get_admin_info)(struct admin_info *admin_csrs_info); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); + u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev); int (*alloc_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev); void (*enable_error_correction)(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index ff790823b8686..194d64d4b99a1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -8,6 +8,7 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_cfg.h" #include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" @@ -212,6 +213,17 @@ int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp) return 0; } +static int adf_set_chaining(struct adf_accel_dev *accel_dev) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + + req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) { @@ -284,6 +296,19 @@ int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + return false; + + return !strcmp(services, "dcc"); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. 
@@ -297,6 +322,16 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) u32 dc_capabilities = 0; int ret; + ret = adf_set_fw_constants(accel_dev); + if (ret) + return ret; + + if (is_dcc_enabled(accel_dev)) { + ret = adf_set_chaining(accel_dev); + if (ret) + return ret; + } + ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities); if (ret) { dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n"); @@ -304,10 +339,6 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; - ret = adf_set_fw_constants(accel_dev); - if (ret) - return ret; - return adf_init_ae(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h new file mode 100644 index 0000000000000..b353d40c5c6d0 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef _ADF_CFG_SERVICES_H_ +#define _ADF_CFG_SERVICES_H_ + +#include "adf_cfg_strings.h" + +enum adf_services { + SVC_CY = 0, + SVC_CY2, + SVC_DC, + SVC_DCC, + SVC_SYM, + SVC_ASYM, + SVC_DC_ASYM, + SVC_ASYM_DC, + SVC_DC_SYM, + SVC_SYM_DC, +}; + +static const char *const adf_cfg_services[] = { + [SVC_CY] = ADF_CFG_CY, + [SVC_CY2] = ADF_CFG_ASYM_SYM, + [SVC_DC] = ADF_CFG_DC, + [SVC_DCC] = ADF_CFG_DCC, + [SVC_SYM] = ADF_CFG_SYM, + [SVC_ASYM] = ADF_CFG_ASYM, + [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, + [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, + [SVC_DC_SYM] = ADF_CFG_DC_SYM, + [SVC_SYM_DC] = ADF_CFG_SYM_DC, +}; + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 6066dc637352c..322b76903a737 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -32,6 +32,7 @@ #define ADF_CFG_DC_ASYM "dc;asym" #define ADF_CFG_SYM_DC "sym;dc" #define ADF_CFG_DC_SYM "dc;sym" +#define ADF_CFG_DCC "dcc" #define ADF_SERVICES_ENABLED "ServicesEnabled" #define ADF_PM_IDLE_SUPPORT "PmIdleSupport" #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 673b5044c62a5..79ff7982378d9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -25,6 +25,8 @@ #define ADF_STATUS_AE_STARTED 6 #define ADF_STATUS_PF_RUNNING 7 #define ADF_STATUS_IRQ_ALLOCATED 8 +#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9 +#define ADF_STATUS_COMP_ALGS_REGISTERED 10 enum adf_dev_reset_mode { ADF_DEV_RESET_ASYNC = 0, diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 89001fe92e762..0f9e2d59ce385 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -97,6 +97,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) return -EFAULT; } + if (hw_data->get_ring_to_svc_map) + hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev); + if (adf_ae_init(accel_dev)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise Acceleration Engine\n"); @@ -231,6 +234,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTED, &accel_dev->status); return -EFAULT; } + 
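Because adf_send_admin_init() is modified in two hunks, its net ordering is easy to miss; condensed (error logging and the extended_dc_capabilities assignment elided), it now proceeds as sketched below, with the firmware constants set before the dc-chaining init:

static int admin_init_order_sketch(struct adf_accel_dev *accel_dev)
{
	u32 dc_capabilities = 0;
	int ret;

	ret = adf_set_fw_constants(accel_dev);		/* 1: constants first */
	if (ret)
		return ret;

	if (is_dcc_enabled(accel_dev)) {		/* 2: "dcc" only */
		ret = adf_set_chaining(accel_dev);	/*    ICP_QAT_FW_DC_CHAIN_INIT */
		if (ret)
			return ret;
	}

	ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);	/* 3 */
	if (ret)
		return ret;

	return adf_init_ae(accel_dev);			/* 4 */
}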
set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status); if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) { dev_err(&GET_DEV(accel_dev), @@ -239,6 +243,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTED, &accel_dev->status); return -EFAULT; } + set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); adf_dbgfs_add(accel_dev); @@ -272,13 +277,17 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); - if (!list_empty(&accel_dev->crypto_list)) { + if (!list_empty(&accel_dev->crypto_list) && + test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) { qat_algs_unregister(); qat_asym_algs_unregister(); } + clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status); - if (!list_empty(&accel_dev->compression_list)) + if (!list_empty(&accel_dev->compression_list) && + test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status)) qat_comp_algs_unregister(); + clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); @@ -440,13 +449,6 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig) mutex_lock(&accel_dev->state_lock); - if (!adf_dev_started(accel_dev)) { - dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n", - accel_dev->accel_id); - ret = -EINVAL; - goto out; - } - if (reconfig) { ret = adf_dev_shutdown_cache_cfg(accel_dev); goto out; diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index a74d2f9303670..8f04b0d3c5ac8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -5,6 +5,7 @@ #include #include "adf_accel_devices.h" #include "adf_cfg.h" +#include "adf_cfg_services.h" #include "adf_common_drv.h" static const char * const state_operations[] = { @@ -52,6 +53,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, case DEV_DOWN: dev_info(dev, "Stopping device qat_dev%d\n", accel_id); + if (!adf_dev_started(accel_dev)) { + dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n", + accel_id); + + break; + } + ret = adf_dev_down(accel_dev, true); if (ret < 0) return -EINVAL; @@ -61,7 +69,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, dev_info(dev, "Starting device qat_dev%d\n", accel_id); ret = adf_dev_up(accel_dev, true); - if (ret < 0) { + if (ret == -EALREADY) { + break; + } else if (ret) { dev_err(dev, "Failed to start device qat_dev%d\n", accel_id); adf_dev_down(accel_dev, true); @@ -75,18 +85,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, return count; } -static const char * const services_operations[] = { - ADF_CFG_CY, - ADF_CFG_DC, - ADF_CFG_SYM, - ADF_CFG_ASYM, - ADF_CFG_ASYM_SYM, - ADF_CFG_ASYM_DC, - ADF_CFG_DC_ASYM, - ADF_CFG_SYM_DC, - ADF_CFG_DC_SYM, -}; - static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -121,7 +119,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a struct adf_accel_dev *accel_dev; int ret; - ret = sysfs_match_string(services_operations, buf); + ret = sysfs_match_string(adf_cfg_services, buf); if (ret < 0) return ret; @@ -135,7 +133,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a 
return -EINVAL; } - ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]); + ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]); if (ret < 0) return ret; diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c index 08bca1c506c0e..e2dd568b87b51 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c @@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug); int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) { struct adf_etr_ring_debug_entry *ring_debug; - char entry_name[8]; + char entry_name[16]; ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL); if (!ring_debug) @@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) { struct adf_accel_dev *accel_dev = bank->accel_dev; struct dentry *parent = accel_dev->transport->debug; - char name[8]; + char name[16]; snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); bank->bank_debug_dir = debugfs_create_dir(name, parent); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 3e968a4bcc9cd..019a6443834e0 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_PM_STATE_CONFIG = 128, diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c index bb80455b3e81e..b97b678823a97 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c +++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c @@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog) spin_unlock_bh(&backlog->lock); } -static void qat_alg_backlog_req(struct qat_alg_req *req, - struct qat_instance_backlog *backlog) -{ - INIT_LIST_HEAD(&req->list); - - spin_lock_bh(&backlog->lock); - list_add_tail(&req->list, &backlog->list); - spin_unlock_bh(&backlog->lock); -} - -static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) +static bool qat_alg_try_enqueue(struct qat_alg_req *req) { struct qat_instance_backlog *backlog = req->backlog; struct adf_etr_ring_data *tx_ring = req->tx_ring; u32 *fw_req = req->fw_req; - /* If any request is already backlogged, then add to backlog list */ + /* Check if any request is already backlogged */ if (!list_empty(&backlog->list)) - goto enqueue; + return false; - /* If ring is nearly full, then add to backlog list */ + /* Check if ring is nearly full */ if (adf_ring_nearly_full(tx_ring)) - goto enqueue; + return false; - /* If adding request to HW ring fails, then add to backlog list */ + /* Try to enqueue to HW ring */ if (adf_send_message(tx_ring, fw_req)) - goto enqueue; + return false; - return -EINPROGRESS; + return true; +} -enqueue: - qat_alg_backlog_req(req, backlog); - return -EBUSY; +static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) +{ + struct qat_instance_backlog *backlog = req->backlog; + int ret = -EINPROGRESS; + + if (qat_alg_try_enqueue(req)) + return ret; + + spin_lock_bh(&backlog->lock); + if (!qat_alg_try_enqueue(req)) { + list_add_tail(&req->list, &backlog->list); 
+ ret = -EBUSY; + } + spin_unlock_bh(&backlog->lock); + + return ret; } int qat_alg_send_message(struct qat_alg_req *req) diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 45e7e044cf4a0..8e5f3d84311e5 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -75,6 +75,7 @@ resource_size_t __rcrb_to_component(struct device *dev, enum cxl_rcrb which); extern struct rw_semaphore cxl_dpa_rwsem; +extern struct rw_semaphore cxl_region_rwsem; int cxl_memdev_init(void); void cxl_memdev_exit(void); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 4449b34a80cc9..64e86b786db52 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -85,7 +85,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb, struct cxl_component_regs *regs) { struct cxl_register_map map = { - .dev = &port->dev, + .host = &port->dev, .resource = port->component_reg_phys, .base = crb, .max_size = CXL_COMPONENT_REG_BLOCK_SIZE, @@ -575,17 +575,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl) CXL_HDM_DECODER0_CTRL_HOSTONLY); } -static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) +static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) { struct cxl_dport **t = &cxlsd->target[0]; int ways = cxlsd->cxld.interleave_ways; - if (dev_WARN_ONCE(&cxlsd->cxld.dev, - ways > 8 || ways > cxlsd->nr_targets, - "ways: %d overflows targets: %d\n", ways, - cxlsd->nr_targets)) - return -ENXIO; - *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); if (ways > 1) *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); @@ -601,8 +595,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); if (ways > 7) *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); - - return 0; } /* @@ -650,6 +642,25 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) return -EBUSY; } + /* + * For endpoint decoders hosted on CXL memory devices that + * support the sanitize operation, make sure sanitize is not in-flight. 
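The qat_alg_send_message_maybacklog() rework above follows the optimistic-try-then-recheck-under-the-lock idiom. Presumably it closes an interleaving like the one below (illustrative, not taken from a bug report), where a request could otherwise be parked on the backlog just as the backlog drains:

/*
 *   CPU0 (submit)                        CPU1 (response path)
 *   qat_alg_try_enqueue() fails
 *                                        qat_alg_send_backlog() drains the
 *                                        list; the ring now has room
 *   spin_lock_bh(&backlog->lock)
 *   qat_alg_try_enqueue() succeeds  ->   request is sent, not stranded
 *   spin_unlock_bh(&backlog->lock)
 *
 * Without the second attempt under the lock, the request would sit on an
 * otherwise-empty backlog with no completion left to flush it.
 */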
+ */ + if (is_endpoint_decoder(&cxld->dev)) { + struct cxl_endpoint_decoder *cxled = + to_cxl_endpoint_decoder(&cxld->dev); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_memdev_state *mds = + to_cxl_memdev_state(cxlmd->cxlds); + + if (mds && mds->security.sanitize_active) { + dev_dbg(&cxlmd->dev, + "attempted to commit %s during sanitize\n", + dev_name(&cxld->dev)); + return -EBUSY; + } + } + down_read(&cxl_dpa_rwsem); /* common decoder settings */ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); @@ -670,13 +681,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id); u64 targets; - rc = cxlsd_set_targets(cxlsd, &targets); - if (rc) { - dev_dbg(&port->dev, "%s: target configuration error\n", - dev_name(&cxld->dev)); - goto err; - } - + cxlsd_set_targets(cxlsd, &targets); writel(upper_32_bits(targets), tl_hi); writel(lower_32_bits(targets), tl_lo); } else { @@ -694,7 +699,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) port->commit_end++; rc = cxld_await_commit(hdm, cxld->id); -err: if (rc) { dev_dbg(&port->dev, "%s: error %d committing decoder\n", dev_name(&cxld->dev), rc); diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 4df4f614f490e..b91bb98869917 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds) } EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); -/** - * cxl_mem_sanitize() - Send a sanitization command to the device. - * @mds: The device data for the operation - * @cmd: The specific sanitization command opcode - * - * Return: 0 if the command was executed successfully, regardless of - * whether or not the actual security operation is done in the background, - * such as for the Sanitize case. - * Error return values can be the result of the mailbox command, -EINVAL - * when security requirements are not met or invalid contexts. - * - * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase. - */ -int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) +static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) { int rc; u32 sec_out = 0; @@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL); + + +/** + * cxl_mem_sanitize() - Send a sanitization command to the device. + * @cxlmd: The device for the operation + * @cmd: The specific sanitization command opcode + * + * Return: 0 if the command was executed successfully, regardless of + * whether or not the actual security operation is done in the background, + * such as for the Sanitize case. + * Error return values can be the result of the mailbox command, -EINVAL + * when security requirements are not met or invalid contexts, or -EBUSY + * if the sanitize operation is already in flight. + * + * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase. + */ +int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) +{ + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_port *endpoint; + int rc; + + /* synchronize with cxl_mem_probe() and decoder write operations */ + device_lock(&cxlmd->dev); + endpoint = cxlmd->endpoint; + down_read(&cxl_region_rwsem); + /* + * Require an endpoint to be safe otherwise the driver can not + * be sure that the device is unmapped. 
+ */ + if (endpoint && endpoint->commit_end == -1) + rc = __cxl_mem_sanitize(mds, cmd); + else + rc = -EBUSY; + up_read(&cxl_region_rwsem); + device_unlock(&cxlmd->dev); + + return rc; +} static int add_dpa_res(struct device *dev, struct resource *parent, struct resource *res, resource_size_t start, diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 14b547c07f547..fed9573cf355e 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev, struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); - u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg); - u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); unsigned long state = mds->security.state; + int rc = 0; - if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100) - return sysfs_emit(buf, "sanitize\n"); + /* sync with latest submission state */ + mutex_lock(&mds->mbox_mutex); + if (mds->security.sanitize_active) + rc = sysfs_emit(buf, "sanitize\n"); + mutex_unlock(&mds->mbox_mutex); + if (rc) + return rc; if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) return sysfs_emit(buf, "disabled\n"); @@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev, const char *buf, size_t len) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - struct cxl_port *port = cxlmd->endpoint; bool sanitize; ssize_t rc; if (kstrtobool(buf, &sanitize) || !sanitize) return -EINVAL; - if (!port || !is_cxl_endpoint(port)) - return -EINVAL; - - /* ensure no regions are mapped to this memdev */ - if (port->commit_end != -1) - return -EBUSY; - - rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE); + rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE); + if (rc) + return rc; - return rc ? rc : len; + return len; } static struct device_attribute dev_attr_security_sanitize = __ATTR(sanitize, 0200, NULL, security_sanitize_store); @@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev, const char *buf, size_t len) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - struct cxl_port *port = cxlmd->endpoint; ssize_t rc; bool erase; if (kstrtobool(buf, &erase) || !erase) return -EINVAL; - if (!port || !is_cxl_endpoint(port)) - return -EINVAL; - - /* ensure no regions are mapped to this memdev */ - if (port->commit_end != -1) - return -EBUSY; - - rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE); + rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE); + if (rc) + return rc; - return rc ? 
rc : len; + return len; } static struct device_attribute dev_attr_security_erase = __ATTR(erase, 0200, NULL, security_erase_store); @@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, } EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL); -static void cxl_memdev_security_shutdown(struct device *dev) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - - if (mds->security.poll) - cancel_delayed_work_sync(&mds->security.poll_dwork); -} - static void cxl_memdev_shutdown(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); down_write(&cxl_memdev_rwsem); - cxl_memdev_security_shutdown(dev); cxlmd->cxlds = NULL; up_write(&cxl_memdev_rwsem); } @@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd) struct cxl_memdev *cxlmd = _cxlmd; struct device *dev = &cxlmd->dev; - cxl_memdev_shutdown(dev); cdev_device_del(&cxlmd->cdev, dev); + cxl_memdev_shutdown(dev); put_device(dev); } @@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = { .cleanup = cxl_fw_cleanup, }; -static void devm_cxl_remove_fw_upload(void *fwl) +static void cxl_remove_fw_upload(void *fwl) { firmware_upload_unregister(fwl); } -int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds) +int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds) { struct cxl_dev_state *cxlds = &mds->cxlds; struct device *dev = &cxlds->cxlmd->dev; struct fw_upload *fwl; - int rc; if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds)) return 0; @@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds) fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), &cxl_memdev_fw_ops, mds); if (IS_ERR(fwl)) - return dev_err_probe(dev, PTR_ERR(fwl), - "Failed to register firmware loader\n"); - - rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload, - fwl); - if (rc) - dev_err(dev, - "Failed to add firmware loader remove action: %d\n", - rc); - - return rc; + return PTR_ERR(fwl); + return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl); } -EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL); static const struct file_operations cxl_memdev_fops = { .owner = THIS_MODULE, @@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = { .llseek = noop_llseek, }; -static void put_sanitize(void *data) -{ - struct cxl_memdev_state *mds = data; - - sysfs_put(mds->security.sanitize_node); -} - -static int cxl_memdev_security_init(struct cxl_memdev *cxlmd) -{ - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - struct device *dev = &cxlmd->dev; - struct kernfs_node *sec; - - sec = sysfs_get_dirent(dev->kobj.sd, "security"); - if (!sec) { - dev_err(dev, "sysfs_get_dirent 'security' failed\n"); - return -ENODEV; - } - mds->security.sanitize_node = sysfs_get_dirent(sec, "state"); - sysfs_put(sec); - if (!mds->security.sanitize_node) { - dev_err(dev, "sysfs_get_dirent 'state' failed\n"); - return -ENODEV; - } - - return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds); - } - -struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds) +struct cxl_memdev *devm_cxl_add_memdev(struct device *host, + struct cxl_dev_state *cxlds) { struct cxl_memdev *cxlmd; struct device *dev; @@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds) if (rc) goto err; - rc = 
cxl_memdev_security_init(cxlmd); - if (rc) - goto err; - - rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd); + rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd); if (rc) return ERR_PTR(rc); return cxlmd; @@ -1079,6 +1016,50 @@ err: } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL); +static void sanitize_teardown_notifier(void *data) +{ + struct cxl_memdev_state *mds = data; + struct kernfs_node *state; + + /* + * Prevent new irq triggered invocations of the workqueue and + * flush inflight invocations. + */ + mutex_lock(&mds->mbox_mutex); + state = mds->security.sanitize_node; + mds->security.sanitize_node = NULL; + mutex_unlock(&mds->mbox_mutex); + + cancel_delayed_work_sync(&mds->security.poll_dwork); + sysfs_put(state); +} + +int devm_cxl_sanitize_setup_notifier(struct device *host, + struct cxl_memdev *cxlmd) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + struct kernfs_node *sec; + + if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds)) + return 0; + + /* + * Note, the expectation is that @cxlmd would have failed to be + * created if these sysfs_get_dirent calls fail. + */ + sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security"); + if (!sec) + return -ENOENT; + mds->security.sanitize_node = sysfs_get_dirent(sec, "state"); + sysfs_put(sec); + if (!mds->security.sanitize_node) + return -ENOENT; + + return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL); + __init int cxl_memdev_init(void) { dev_t devt; diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 7ca01a834e188..2c6001592fe20 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -28,6 +28,12 @@ * instantiated by the core. */ +/* + * All changes to the interleave configuration occur with this lock held + * for write. + */ +DECLARE_RWSEM(cxl_region_rwsem); + static DEFINE_IDA(cxl_port_ida); static DEFINE_XARRAY(cxl_root_buses); @@ -691,14 +697,14 @@ err: return ERR_PTR(rc); } -static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map, +static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, resource_size_t component_reg_phys) { if (component_reg_phys == CXL_RESOURCE_NONE) return 0; *map = (struct cxl_register_map) { - .dev = dev, + .host = host, .reg_type = CXL_REGLOC_RBI_COMPONENT, .resource = component_reg_phys, .max_size = CXL_COMPONENT_REG_BLOCK_SIZE, @@ -716,13 +722,23 @@ static int cxl_port_setup_regs(struct cxl_port *port, component_reg_phys); } -static int cxl_dport_setup_regs(struct cxl_dport *dport, +static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, resource_size_t component_reg_phys) { + int rc; + if (dev_is_platform(dport->dport_dev)) return 0; - return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map, - component_reg_phys); + + /* + * use @dport->dport_dev for the context for error messages during + * register probing, and fixup @host after the fact, since @host may be + * NULL. 
+ */ + rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map, + component_reg_phys); + dport->comp_map.host = host; + return rc; } static struct cxl_port *__devm_cxl_add_port(struct device *host, @@ -983,7 +999,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, if (!dport) return ERR_PTR(-ENOMEM); - if (rcrb != CXL_RESOURCE_NONE) { + dport->dport_dev = dport_dev; + dport->port_id = port_id; + dport->port = port; + + if (rcrb == CXL_RESOURCE_NONE) { + rc = cxl_dport_setup_regs(&port->dev, dport, + component_reg_phys); + if (rc) + return ERR_PTR(rc); + } else { dport->rcrb.base = rcrb; component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb, CXL_RCRB_DOWNSTREAM); @@ -992,6 +1017,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, return ERR_PTR(-ENXIO); } + /* + * RCH @dport is not ready to map until associated with its + * memdev + */ + rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys); + if (rc) + return ERR_PTR(rc); + dport->rch = true; } @@ -999,14 +1032,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, dev_dbg(dport_dev, "Component Registers found for dport: %pa\n", &component_reg_phys); - dport->dport_dev = dport_dev; - dport->port_id = port_id; - dport->port = port; - - rc = cxl_dport_setup_regs(dport, component_reg_phys); - if (rc) - return ERR_PTR(rc); - cond_cxl_root_lock(port); rc = add_dport(port, dport); cond_cxl_root_unlock(port); diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 6d63b8798c299..85c0881fba442 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -28,12 +28,6 @@ * 3. Decoder targets */ -/* - * All changes to the interleave configuration occur with this lock held - * for write. - */ -static DECLARE_RWSEM(cxl_region_rwsem); - static struct cxl_region *to_cxl_region(struct device *dev); static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, @@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr, */ rc = cxl_region_invalidate_memregion(cxlr); if (rc) - return rc; + goto out; if (commit) { rc = cxl_region_decode_commit(cxlr); @@ -1195,6 +1189,14 @@ static int cxl_port_setup_targets(struct cxl_port *port, return rc; } + if (iw > 8 || iw > cxlsd->nr_targets) { + dev_dbg(&cxlr->dev, + "%s:%s:%s: ways: %d overflows targets: %d\n", + dev_name(port->uport_dev), dev_name(&port->dev), + dev_name(&cxld->dev), iw, cxlsd->nr_targets); + return -ENXIO; + } + if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { if (cxld->interleave_ways != iw || cxld->interleave_granularity != ig || @@ -1480,6 +1482,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr, return 0; } +static int cmp_interleave_pos(const void *a, const void *b) +{ + struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a; + struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b; + + return cxled_a->pos - cxled_b->pos; +} + static struct cxl_port *next_port(struct cxl_port *port) { if (!port->parent_dport) @@ -1487,119 +1497,127 @@ static struct cxl_port *next_port(struct cxl_port *port) return port->parent_dport->port; } -static int decoder_match_range(struct device *dev, void *data) +static int match_switch_decoder_by_range(struct device *dev, void *data) { - struct cxl_endpoint_decoder *cxled = data; struct cxl_switch_decoder *cxlsd; + struct range *r1, *r2 = data; if (!is_switch_decoder(dev)) return 0; cxlsd = to_cxl_switch_decoder(dev); - return range_contains(&cxlsd->cxld.hpa_range, 
&cxled->cxld.hpa_range); -} + r1 = &cxlsd->cxld.hpa_range; -static void find_positions(const struct cxl_switch_decoder *cxlsd, - const struct cxl_port *iter_a, - const struct cxl_port *iter_b, int *a_pos, - int *b_pos) -{ - int i; - - for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) { - if (cxlsd->target[i] == iter_a->parent_dport) - *a_pos = i; - else if (cxlsd->target[i] == iter_b->parent_dport) - *b_pos = i; - if (*a_pos >= 0 && *b_pos >= 0) - break; - } + if (is_root_decoder(dev)) + return range_contains(r1, r2); + return (r1->start == r2->start && r1->end == r2->end); } -static int cmp_decode_pos(const void *a, const void *b) +static int find_pos_and_ways(struct cxl_port *port, struct range *range, + int *pos, int *ways) { - struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a; - struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b; - struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a); - struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b); - struct cxl_port *port_a = cxled_to_port(cxled_a); - struct cxl_port *port_b = cxled_to_port(cxled_b); - struct cxl_port *iter_a, *iter_b, *port = NULL; struct cxl_switch_decoder *cxlsd; + struct cxl_port *parent; struct device *dev; - int a_pos, b_pos; - unsigned int seq; - - /* Exit early if any prior sorting failed */ - if (cxled_a->pos < 0 || cxled_b->pos < 0) - return 0; + int rc = -ENXIO; - /* - * Walk up the hierarchy to find a shared port, find the decoder that - * maps the range, compare the relative position of those dport - * mappings. - */ - for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) { - struct cxl_port *next_a, *next_b; + parent = next_port(port); + if (!parent) + return rc; - next_a = next_port(iter_a); - if (!next_a) - break; + dev = device_find_child(&parent->dev, range, + match_switch_decoder_by_range); + if (!dev) { + dev_err(port->uport_dev, + "failed to find decoder mapping %#llx-%#llx\n", + range->start, range->end); + return rc; + } + cxlsd = to_cxl_switch_decoder(dev); + *ways = cxlsd->cxld.interleave_ways; - for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) { - next_b = next_port(iter_b); - if (next_a != next_b) - continue; - port = next_a; + for (int i = 0; i < *ways; i++) { + if (cxlsd->target[i] == port->parent_dport) { + *pos = i; + rc = 0; break; } - - if (port) - break; } + put_device(dev); - if (!port) { - dev_err(cxlmd_a->dev.parent, - "failed to find shared port with %s\n", - dev_name(cxlmd_b->dev.parent)); - goto err; - } + return rc; +} - dev = device_find_child(&port->dev, cxled_a, decoder_match_range); - if (!dev) { - struct range *range = &cxled_a->cxld.hpa_range; +/** + * cxl_calc_interleave_pos() - calculate an endpoint position in a region + * @cxled: endpoint decoder member of given region + * + * The endpoint position is calculated by traversing the topology from + * the endpoint to the root decoder and iteratively applying this + * calculation: + * + * position = position * parent_ways + parent_pos; + * + * ...where @position is inferred from switch and root decoder target lists. 
+ * + * Return: position >= 0 on success + * -ENXIO on failure + */ +static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_port *iter, *port = cxled_to_port(cxled); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct range *range = &cxled->cxld.hpa_range; + int parent_ways = 0, parent_pos = 0, pos = 0; + int rc; - dev_err(port->uport_dev, - "failed to find decoder that maps %#llx-%#llx\n", - range->start, range->end); - goto err; - } + /* + * Example: the expected interleave order of the 4-way region shown + * below is: mem0, mem2, mem1, mem3 + * + * root_port + * / \ + * host_bridge_0 host_bridge_1 + * | | | | + * mem0 mem1 mem2 mem3 + * + * In the example the calculator will iterate twice. The first iteration + * uses the mem position in the host-bridge and the ways of the host- + * bridge to generate the first, or local, position. The second + * iteration uses the host-bridge position in the root_port and the ways + * of the root_port to refine the position. + * + * A trace of the calculation per endpoint looks like this: + * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0 + * pos = 0 * 2 + 0 pos = 0 * 2 + 1 + * pos: 0 pos: 1 + * + * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1 + * pos = 1 * 2 + 0 pos = 1 * 2 + 1 + * pos: 2 pos = 3 + * + * Note that while this example is simple, the method applies to more + * complex topologies, including those with switches. + */ - cxlsd = to_cxl_switch_decoder(dev); - do { - seq = read_seqbegin(&cxlsd->target_lock); - find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos); - } while (read_seqretry(&cxlsd->target_lock, seq)); + /* Iterate from endpoint to root_port refining the position */ + for (iter = port; iter; iter = next_port(iter)) { + if (is_cxl_root(iter)) + break; - put_device(dev); + rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways); + if (rc) + return rc; - if (a_pos < 0 || b_pos < 0) { - dev_err(port->uport_dev, - "failed to find shared decoder for %s and %s\n", - dev_name(cxlmd_a->dev.parent), - dev_name(cxlmd_b->dev.parent)); - goto err; + pos = pos * parent_ways + parent_pos; } - dev_dbg(port->uport_dev, "%s comes %s %s\n", - dev_name(cxlmd_a->dev.parent), - a_pos - b_pos < 0 ? "before" : "after", - dev_name(cxlmd_b->dev.parent)); + dev_dbg(&cxlmd->dev, + "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n", + dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent), + dev_name(&port->dev), range->start, range->end, pos); - return a_pos - b_pos; -err: - cxled_a->pos = -1; - return 0; + return pos; } static int cxl_region_sort_targets(struct cxl_region *cxlr) @@ -1607,22 +1625,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr) struct cxl_region_params *p = &cxlr->params; int i, rc = 0; - sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos, - NULL); - for (i = 0; i < p->nr_targets; i++) { struct cxl_endpoint_decoder *cxled = p->targets[i]; + cxled->pos = cxl_calc_interleave_pos(cxled); /* - * Record that sorting failed, but still continue to restore - * cxled->pos with its ->targets[] position so that follow-on - * code paths can reliably do p->targets[cxled->pos] to - * self-reference their entry. + * Record that sorting failed, but still continue to calc + * cxled->pos so that follow-on code paths can reliably + * do p->targets[cxled->pos] to self-reference their entry. 
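The cxl_calc_interleave_pos() kernel-doc above spells out the per-hop rule position = position * parent_ways + parent_pos. A minimal standalone sketch of that arithmetic, hard-coded to the two-host-bridge example in the comment (plain C, not the CXL API; the struct and values are illustrative only):

/*
 * Illustrative only: reproduces the position arithmetic from the
 * cxl_calc_interleave_pos() kernel-doc for the 4-way example topology
 * (two host bridges, two endpoints each). Not kernel code.
 */
#include <stdio.h>

struct level { int pos; int ways; };	/* dport position and ways at one hop */

static int calc_pos(const struct level *levels, int nr)
{
	int pos = 0;

	/* iterate from the endpoint toward the root, refining the position */
	for (int i = 0; i < nr; i++)
		pos = pos * levels[i].ways + levels[i].pos;
	return pos;
}

int main(void)
{
	/* each entry: { position within host bridge, host-bridge ways },
	 * then { host-bridge position within root port, root-port ways } */
	struct level mem0[] = { { 0, 2 }, { 0, 2 } };
	struct level mem1[] = { { 1, 2 }, { 0, 2 } };
	struct level mem2[] = { { 0, 2 }, { 1, 2 } };
	struct level mem3[] = { { 1, 2 }, { 1, 2 } };

	printf("mem0:%d mem1:%d mem2:%d mem3:%d\n",
	       calc_pos(mem0, 2), calc_pos(mem1, 2),
	       calc_pos(mem2, 2), calc_pos(mem3, 2));
	/* prints 0, 2, 1, 3: sorted, that is mem0, mem2, mem1, mem3 */
	return 0;
}

Sorting the four results (0, 2, 1, 3) yields the interleave order mem0, mem2, mem1, mem3 given in the comment, which is the ordering cmp_interleave_pos() relies on.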
*/ if (cxled->pos < 0) rc = -ENXIO; - cxled->pos = i; } + /* Keep the cxlr target list in interleave position order */ + sort(p->targets, p->nr_targets, sizeof(p->targets[0]), + cmp_interleave_pos, NULL); dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful"); return rc; @@ -1761,6 +1778,26 @@ static int cxl_region_attach(struct cxl_region *cxlr, .end = p->res->end, }; + if (p->nr_targets != p->interleave_ways) + return 0; + + /* + * Test the auto-discovery position calculator function + * against this successfully created user-defined region. + * A fail message here means that this interleave config + * will fail when presented as CXL_REGION_F_AUTO. + */ + for (int i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + int test_pos; + + test_pos = cxl_calc_interleave_pos(cxled); + dev_dbg(&cxled->cxld.dev, + "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n", + (test_pos == cxled->pos) ? "success" : "fail", + test_pos, cxled->pos); + } + return 0; err_decrement: @@ -2696,7 +2733,7 @@ err: return rc; } -static int match_decoder_by_range(struct device *dev, void *data) +static int match_root_decoder_by_range(struct device *dev, void *data) { struct range *r1, *r2 = data; struct cxl_root_decoder *cxlrd; @@ -2827,7 +2864,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled) int rc; cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, - match_decoder_by_range); + match_root_decoder_by_range); if (!cxlrd_dev) { dev_err(cxlmd->dev.parent, "%s:%s no CXL window for range %#llx:%#llx\n", diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 6281127b3e9d9..e0fbe964f6f0a 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map, struct cxl_component_regs *regs, unsigned long map_mask) { - struct device *dev = map->dev; + struct device *host = map->host; struct mapinfo { const struct cxl_reg_map *rmap; void __iomem **addr; @@ -225,7 +225,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map, continue; phys_addr = map->resource + mi->rmap->offset; length = mi->rmap->size; - *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length); + *(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length); if (!*(mi->addr)) return -ENOMEM; } @@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL); int cxl_map_device_regs(const struct cxl_register_map *map, struct cxl_device_regs *regs) { - struct device *dev = map->dev; + struct device *host = map->host; resource_size_t phys_addr = map->resource; struct mapinfo { const struct cxl_reg_map *rmap; @@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map, addr = phys_addr + mi->rmap->offset; length = mi->rmap->size; - *(mi->addr) = devm_cxl_iomap_block(dev, addr, length); + *(mi->addr) = devm_cxl_iomap_block(host, addr, length); if (!*(mi->addr)) return -ENOMEM; } @@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, int regloc, i; *map = (struct cxl_register_map) { - .dev = &pdev->dev, + .host = &pdev->dev, .resource = CXL_RESOURCE_NONE, }; @@ -403,15 +403,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL); static int cxl_map_regblock(struct cxl_register_map *map) { - struct device *dev = map->dev; + struct device *host = map->host; map->base = ioremap(map->resource, map->max_size); if (!map->base) { - dev_err(dev, "failed to map registers\n"); + dev_err(host, "failed to 
map registers\n"); return -ENOMEM; } - dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource); + dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource); return 0; } @@ -425,28 +425,28 @@ static int cxl_probe_regs(struct cxl_register_map *map) { struct cxl_component_reg_map *comp_map; struct cxl_device_reg_map *dev_map; - struct device *dev = map->dev; + struct device *host = map->host; void __iomem *base = map->base; switch (map->reg_type) { case CXL_REGLOC_RBI_COMPONENT: comp_map = &map->component_map; - cxl_probe_component_regs(dev, base, comp_map); - dev_dbg(dev, "Set up component registers\n"); + cxl_probe_component_regs(host, base, comp_map); + dev_dbg(host, "Set up component registers\n"); break; case CXL_REGLOC_RBI_MEMDEV: dev_map = &map->device_map; - cxl_probe_device_regs(dev, base, dev_map); + cxl_probe_device_regs(host, base, dev_map); if (!dev_map->status.valid || !dev_map->mbox.valid || !dev_map->memdev.valid) { - dev_err(dev, "registers not found: %s%s%s\n", + dev_err(host, "registers not found: %s%s%s\n", !dev_map->status.valid ? "status " : "", !dev_map->mbox.valid ? "mbox " : "", !dev_map->memdev.valid ? "memdev " : ""); return -ENXIO; } - dev_dbg(dev, "Probing device registers...\n"); + dev_dbg(host, "Probing device registers...\n"); break; default: break; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 76d92561af294..b5b015b661eae 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -247,7 +247,7 @@ struct cxl_pmu_reg_map { /** * struct cxl_register_map - DVSEC harvested register block mapping parameters - * @dev: device for devm operations and logging + * @host: device for devm operations and logging * @base: virtual base of the register-block-BAR + @block_offset * @resource: physical resource base of the register block * @max_size: maximum mapping size to perform register search @@ -257,7 +257,7 @@ struct cxl_pmu_reg_map { * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units */ struct cxl_register_map { - struct device *dev; + struct device *host; void __iomem *base; resource_size_t resource; resource_size_t max_size; diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 706f8a6d1ef43..6933bc20e76b6 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port) return is_cxl_memdev(port->uport_dev); } -struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds); +struct cxl_memdev *devm_cxl_add_memdev(struct device *host, + struct cxl_dev_state *cxlds); +int devm_cxl_sanitize_setup_notifier(struct device *host, + struct cxl_memdev *cxlmd); struct cxl_memdev_state; -int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds); +int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds); int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, resource_size_t base, resource_size_t len, resource_size_t skipped); @@ -360,16 +363,16 @@ struct cxl_fw_state { * * @state: state of last security operation * @enabled_cmds: All security commands enabled in the CEL - * @poll: polling for sanitization is enabled, device has no mbox irq support * @poll_tmo_secs: polling timeout + * @sanitize_active: sanitize completion pending * @poll_dwork: polling work item * @sanitize_node: sanitation sysfs file to notify */ struct cxl_security_state { unsigned long state; DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX); - bool poll; int poll_tmo_secs; + bool sanitize_active; struct delayed_work poll_dwork; struct 
kernfs_node *sanitize_node; }; @@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void) } #endif -int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd); +int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd); struct cxl_hdm { struct cxl_component_regs regs; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 44a21ab7add51..8bece1e2e2491 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id) reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); if (opcode == CXL_MBOX_OP_SANITIZE) { + mutex_lock(&mds->mbox_mutex); if (mds->security.sanitize_node) - sysfs_notify_dirent(mds->security.sanitize_node); - - dev_dbg(cxlds->dev, "Sanitization operation ended\n"); + mod_delayed_work(system_wq, &mds->security.poll_dwork, 0); + mutex_unlock(&mds->mbox_mutex); } else { /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */ rcuwait_wake_up(&mds->mbox_wait); @@ -152,18 +152,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work) mutex_lock(&mds->mbox_mutex); if (cxl_mbox_background_complete(cxlds)) { mds->security.poll_tmo_secs = 0; - put_device(cxlds->dev); - if (mds->security.sanitize_node) sysfs_notify_dirent(mds->security.sanitize_node); + mds->security.sanitize_active = false; dev_dbg(cxlds->dev, "Sanitization operation ended\n"); } else { int timeout = mds->security.poll_tmo_secs + 10; mds->security.poll_tmo_secs = min(15 * 60, timeout); - queue_delayed_work(system_wq, &mds->security.poll_dwork, - timeout * HZ); + schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ); } mutex_unlock(&mds->mbox_mutex); } @@ -295,18 +293,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, * and allow userspace to poll(2) for completion. 
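The reworked sanitize polling in the surrounding hunks re-arms poll_dwork with a delay that grows by 10 seconds per retry and is clamped near 15 minutes, starting from the 1 second armed when the command is submitted. A minimal sketch of that back-off, modelling only the delay values (no workqueue involved; plain userspace C):

/*
 * Illustrative only: models the sanitize polling back-off from
 * cxl_mbox_sanitize_work(): first poll after 1 second, then +10 seconds
 * per retry, saturating near 15 minutes.
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int poll_tmo_secs = 1;		/* set when the sanitize command is sent */
	int delay = poll_tmo_secs;	/* first work item queued at 1 s */

	for (int poll = 1; poll <= 8; poll++) {
		printf("poll %d after %d s\n", poll, delay);
		/* background command still running: re-arm with a longer delay */
		delay = poll_tmo_secs + 10;
		poll_tmo_secs = MIN(15 * 60, delay);
	}
	return 0;	/* prints 1 s, 11 s, 21 s, 31 s, ... */
}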
*/ if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) { - if (mds->security.poll) { - /* hold the device throughout */ - get_device(cxlds->dev); - - /* give first timeout a second */ - timeout = 1; - mds->security.poll_tmo_secs = timeout; - queue_delayed_work(system_wq, - &mds->security.poll_dwork, - timeout * HZ); - } - + if (mds->security.sanitize_active) + return -EBUSY; + + /* give first timeout a second */ + timeout = 1; + mds->security.poll_tmo_secs = timeout; + mds->security.sanitize_active = true; + schedule_delayed_work(&mds->security.poll_dwork, + timeout * HZ); dev_dbg(dev, "Sanitization operation started\n"); goto success; } @@ -389,7 +384,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds) const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); struct device *dev = cxlds->dev; unsigned long timeout; + int irq, msgnum; u64 md_status; + u32 ctrl; timeout = jiffies + mbox_ready_timeout * HZ; do { @@ -437,33 +434,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds) dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size); rcuwait_init(&mds->mbox_wait); + INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work); - if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) { - u32 ctrl; - int irq, msgnum; - struct pci_dev *pdev = to_pci_dev(cxlds->dev); - - msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap); - irq = pci_irq_vector(pdev, msgnum); - if (irq < 0) - goto mbox_poll; - - if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL)) - goto mbox_poll; + /* background command interrupts are optional */ + if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ)) + return 0; - /* enable background command mbox irq support */ - ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); - ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ; - writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); + msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap); + irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum); + if (irq < 0) + return 0; + if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq)) return 0; - } -mbox_poll: - mds->security.poll = true; - INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work); + dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n"); + /* enable background command mbox irq support */ + ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); + ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ; + writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); - dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported"); return 0; } @@ -484,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, resource_size_t component_reg_phys; *map = (struct cxl_register_map) { - .dev = &pdev->dev, + .host = &pdev->dev, .resource = CXL_RESOURCE_NONE, }; @@ -882,11 +872,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlmd = devm_cxl_add_memdev(cxlds); + cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); - rc = cxl_memdev_setup_fw_upload(mds); + rc = devm_cxl_setup_fw_upload(&pdev->dev, mds); + if (rc) + return rc; + + rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd); if (rc) return rc; diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 39ac069cabc75..74893c06aa087 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(data->clk), "Cannot get the clk 
pclk_ddr_mon\n"); - /* try to find the optional reference to the pmu syscon */ node = of_parse_phandle(np, "rockchip,pmu", 0); - if (node) { - data->regmap_pmu = syscon_node_to_regmap(node); - of_node_put(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); - } + if (!node) + return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n"); + + data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) + return PTR_ERR(data->regmap_pmu); + data->dev = dev; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile index dc096839ac637..c5e679070e463 100644 --- a/drivers/dma/idxd/Makefile +++ b/drivers/dma/idxd/Makefile @@ -1,12 +1,12 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD +obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o +idxd_bus-y := bus.o + obj-$(CONFIG_INTEL_IDXD) += idxd.o idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o -obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o -idxd_bus-y := bus.o - obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o idxd_compat-y := compat.o diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 1b046d9a3a269..16d342654da2b 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd) dma_addr_t dma; struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd); - BUG_ON(sw_desc->nb_desc == 0); for (i = sw_desc->nb_desc - 1; i >= 0; i--) { if (i > 0) dma = sw_desc->hw_desc[i - 1]->ddadr; diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index aa8e2e8ac2609..33d6d931b33bb 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2401,7 +2401,7 @@ static int edma_probe(struct platform_device *pdev) if (irq < 0 && node) irq = irq_of_parse_and_map(node, 0); - if (irq >= 0) { + if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, @@ -2417,7 +2417,7 @@ static int edma_probe(struct platform_device *pdev) if (irq < 0 && node) irq = irq_of_parse_and_map(node, 2); - if (irq >= 0) { + if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 2b8bfcd010f5f..7865438b36960 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, dev->release = ffa_release_device; dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); + ffa_dev->id = id; ffa_dev->vm_id = vm_id; ffa_dev->ops = ops; uuid_copy(&ffa_dev->uuid, uuid); diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 121f4fc903cd5..7cd6b1564e801 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -587,17 +587,9 @@ static int ffa_partition_info_get(const char *uuid_str, return 0; } -static void _ffa_mode_32bit_set(struct ffa_device *dev) -{ - dev->mode_32bit = true; -} - static void ffa_mode_32bit_set(struct ffa_device *dev) { - if (drv_info->version > FFA_VERSION_1_0) - return; - - _ffa_mode_32bit_set(dev); + dev->mode_32bit = true; } static int ffa_sync_send_receive(struct ffa_device *dev, @@ -706,7 +698,7 @@ static void ffa_setup_partitions(void) if (drv_info->version > FFA_VERSION_1_0 && 
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) - _ffa_mode_32bit_set(ffa_dev); + ffa_mode_32bit_set(ffa_dev); } kfree(pbuf); } diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 51d062e0c3f12..c1590d3aa9cb7 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -313,6 +313,8 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, return __tegra_bpmp_channel_write(channel, mrq, flags, data, size); } +static int __maybe_unused tegra_bpmp_resume(struct device *dev); + int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, struct tegra_bpmp_message *msg) { @@ -325,6 +327,14 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, if (!tegra_bpmp_message_valid(msg)) return -EINVAL; + if (bpmp->suspended) { + /* Reset BPMP IPC channels during resume based on flags passed */ + if (msg->flags & TEGRA_BPMP_MESSAGE_RESET) + tegra_bpmp_resume(bpmp->dev); + else + return -EAGAIN; + } + channel = bpmp->tx_channel; spin_lock(&bpmp->atomic_tx_lock); @@ -364,6 +374,14 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp, if (!tegra_bpmp_message_valid(msg)) return -EINVAL; + if (bpmp->suspended) { + /* Reset BPMP IPC channels during resume based on flags passed */ + if (msg->flags & TEGRA_BPMP_MESSAGE_RESET) + tegra_bpmp_resume(bpmp->dev); + else + return -EAGAIN; + } + channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data, msg->tx.size); if (IS_ERR(channel)) @@ -796,10 +814,21 @@ deinit: return err; } +static int __maybe_unused tegra_bpmp_suspend(struct device *dev) +{ + struct tegra_bpmp *bpmp = dev_get_drvdata(dev); + + bpmp->suspended = true; + + return 0; +} + static int __maybe_unused tegra_bpmp_resume(struct device *dev) { struct tegra_bpmp *bpmp = dev_get_drvdata(dev); + bpmp->suspended = false; + if (bpmp->soc->ops->resume) return bpmp->soc->ops->resume(bpmp); else @@ -807,6 +836,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev) } static const struct dev_pm_ops tegra_bpmp_pm_ops = { + .suspend_noirq = tegra_bpmp_suspend, .resume_noirq = tegra_bpmp_resume, }; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 26a37f47f4ca5..66c3846c91476 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, return 0; } -/** - * ti_sci_debugfs_destroy() - clean up log debug file - * @pdev: platform device pointer - * @info: Pointer to SCI entity information - */ -static void ti_sci_debugfs_destroy(struct platform_device *pdev, - struct ti_sci_info *info) -{ - if (IS_ERR(info->debug_region)) - return; - - debugfs_remove(info->d); -} #else /* CONFIG_DEBUG_FS */ static inline int ti_sci_debugfs_create(struct platform_device *dev, struct ti_sci_info *info) @@ -3449,43 +3436,12 @@ out: return ret; } -static int ti_sci_remove(struct platform_device *pdev) -{ - struct ti_sci_info *info; - struct device *dev = &pdev->dev; - int ret = 0; - - of_platform_depopulate(dev); - - info = platform_get_drvdata(pdev); - - if (info->nb.notifier_call) - unregister_restart_handler(&info->nb); - - mutex_lock(&ti_sci_list_mutex); - if (info->users) - ret = -EBUSY; - else - list_del(&info->node); - mutex_unlock(&ti_sci_list_mutex); - - if (!ret) { - ti_sci_debugfs_destroy(pdev, info); - - /* Safe to free channels since no more users */ - mbox_free_channel(info->chan_tx); - mbox_free_channel(info->chan_rx); - } - - return ret; -} - static struct platform_driver ti_sci_driver = { .probe = ti_sci_probe, - 
.remove = ti_sci_remove, .driver = { .name = "ti-sci", .of_match_table = of_match_ptr(ti_sci_of_match), + .suppress_bind_attrs = true, }, }; module_platform_driver(ti_sci_driver); diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 44bf1709a6488..a8e5ac95cf170 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -1438,10 +1438,10 @@ static const struct config_item_type gpio_sim_device_config_group_type = { static struct config_group * gpio_sim_config_make_device_group(struct config_group *group, const char *name) { - struct gpio_sim_device *dev __free(kfree) = NULL; int id; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); + struct gpio_sim_device *dev __free(kfree) = kzalloc(sizeof(*dev), + GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2382921710ece..ef4cb921781d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -384,9 +384,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, struct amdgpu_ring *ring = &kiq->ring; u32 domain = AMDGPU_GEM_DOMAIN_GTT; +#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0)) domain |= AMDGPU_GEM_DOMAIN_VRAM; +#endif /* create MQD for KIQ */ if (!adev->enable_mes_kiq && !ring->mqd_obj) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 6c6184f0dbc17..508f02eb0cf8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -28,7 +28,7 @@ #define AMDGPU_IH_MAX_NUM_IVS 32 #define IH_RING_SIZE (256 * 1024) -#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */ +#define IH_SW_RING_SIZE (16 * 1024) /* enough for 512 CAM entries */ struct amdgpu_device; struct amdgpu_iv_entry; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 9032d7a24d7cd..306252cd67fd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6457,11 +6457,11 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; *ring->wptr_cpu_addr = 0; @@ -6735,7 +6735,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.kiq[0].mqd_backup) - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -6758,7 +6758,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[0].mqd_backup) - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -6779,11 +6779,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) 
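The gpio-sim hunk above moves the __free(kfree) allocation onto the declaration itself. The kernel's __free() is built on the compiler cleanup attribute, and the idiom keeps the managed pointer initialized from the moment it exists. A minimal userspace sketch of the same attribute; the free_ptr()/__free_on_exit names are hypothetical stand-ins for the kernel macros, not kernel API:

/*
 * Illustrative only: scope-based cleanup via __attribute__((cleanup)),
 * the mechanism behind the kernel's __free() used in the gpio-sim hunk.
 */
#include <stdio.h>
#include <stdlib.h>

static void free_ptr(void *p)
{
	free(*(void **)p);	/* called automatically when the variable goes out of scope */
}

#define __free_on_exit __attribute__((cleanup(free_ptr)))

int main(void)
{
	/* initialized at its declaration, so cleanup never sees an unset pointer */
	char *buf __free_on_exit = malloc(32);

	if (!buf)
		return 1;
	snprintf(buf, 32, "hello");
	printf("%s\n", buf);
	return 0;	/* buf freed automatically on return */
}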
mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 762d7a19f1be1..43d066bc5245b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -3684,11 +3684,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; *ring->wptr_cpu_addr = 0; @@ -3977,7 +3977,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.kiq[0].mqd_backup) - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -4000,7 +4000,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.kiq[0].mqd_backup) - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -4021,11 +4021,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else { /* restore MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index bb16b795d1bc2..2a42fbddcb7ae 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -495,11 +495,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange) /* We need a new svm_bo. Spin-loop to wait for concurrent * svm_range_bo_release to finish removing this range from - * its range list. After this, it is safe to reuse the - * svm_bo pointer and svm_bo_list head. + * its range list and set prange->svm_bo to null. After this, + * it is safe to reuse the svm_bo pointer and svm_bo_list head. 
*/ - while (!list_empty_careful(&prange->svm_bo_list)) - ; + while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo) + cond_resched(); return false; } @@ -820,7 +820,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, } } - return !prange->is_error_flag; + return true; } /** @@ -1662,73 +1662,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; - for (addr = start; addr < end && !r; ) { + for (addr = start; !r && addr < end; ) { struct hmm_range *hmm_range; struct vm_area_struct *vma; - unsigned long next; + unsigned long next = 0; unsigned long offset; unsigned long npages; bool readonly; vma = vma_lookup(mm, addr); - if (!vma) { + if (vma) { + readonly = !(vma->vm_flags & VM_WRITE); + + next = min(vma->vm_end, end); + npages = (next - addr) >> PAGE_SHIFT; + WRITE_ONCE(p->svms.faulting_task, current); + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, NULL, + &hmm_range); + WRITE_ONCE(p->svms.faulting_task, NULL); + if (r) { + pr_debug("failed %d to get svm range pages\n", r); + if (r == -EBUSY) + r = -EAGAIN; + } + } else { r = -EFAULT; - goto unreserve_out; - } - readonly = !(vma->vm_flags & VM_WRITE); - - next = min(vma->vm_end, end); - npages = (next - addr) >> PAGE_SHIFT; - WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, - readonly, owner, NULL, - &hmm_range); - WRITE_ONCE(p->svms.faulting_task, NULL); - if (r) { - pr_debug("failed %d to get svm range pages\n", r); - if (r == -EBUSY) - r = -EAGAIN; - goto unreserve_out; } - offset = (addr - start) >> PAGE_SHIFT; - r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns); - if (r) { - pr_debug("failed %d to dma map range\n", r); - goto unreserve_out; + if (!r) { + offset = (addr - start) >> PAGE_SHIFT; + r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, + hmm_range->hmm_pfns); + if (r) + pr_debug("failed %d to dma map range\n", r); } svm_range_lock(prange); - if (amdgpu_hmm_range_get_pages_done(hmm_range)) { + if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; - goto unlock_out; } - if (!list_empty(&prange->child_list)) { + + if (!r && !list_empty(&prange->child_list)) { pr_debug("range split by unmap in parallel, validate again\n"); r = -EAGAIN; - goto unlock_out; } - r = svm_range_map_to_gpus(prange, offset, npages, readonly, - ctx->bitmap, wait, flush_tlb); + if (!r) + r = svm_range_map_to_gpus(prange, offset, npages, readonly, + ctx->bitmap, wait, flush_tlb); + + if (!r && next == end) + prange->mapped_to_gpu = true; -unlock_out: svm_range_unlock(prange); addr = next; } - if (addr == end) { - prange->validated_once = true; - prange->mapped_to_gpu = true; - } - -unreserve_out: svm_range_unreserve_bos(ctx); - - prange->is_error_flag = !!r; if (!r) prange->validate_timestamp = ktime_get_boottime(); @@ -2097,7 +2090,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, next = interval_tree_iter_next(node, start, last); next_start = min(node->last, last) + 1; - if (svm_range_is_same_attrs(p, prange, nattr, attrs)) { + if (svm_range_is_same_attrs(p, prange, nattr, attrs) && + prange->mapped_to_gpu) { /* nothing to do */ } else if (node->start < start || node->last > last) { /* node intersects the update range and its attributes @@ -3507,7 +3501,7 @@ svm_range_set_attr(struct kfd_process *p, 
struct mm_struct *mm, struct svm_range *next; bool update_mapping = false; bool flush_tlb; - int r = 0; + int r, ret = 0; pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n", p->pasid, &p->svms, start, start + size - 1, size); @@ -3595,7 +3589,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, out_unlock_range: mutex_unlock(&prange->migrate_mutex); if (r) - break; + ret = r; } dynamic_svm_range_dump(svms); @@ -3608,7 +3602,7 @@ out: pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); - return r; + return ret ? ret : r; } static int diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 9e668eeefb32d..25f7119057386 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -132,9 +132,7 @@ struct svm_range { struct list_head child_list; DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); - bool validated_once; bool mapped_to_gpu; - bool is_error_flag; }; static inline void svm_range_lock(struct svm_range *prange) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 868946dd7ef12..34f011cedd065 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1692,8 +1692,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { - DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER, - dce_version_to_string(adev->dm.dc->ctx->dce_version)); + DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); goto error; } @@ -9892,16 +9891,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, } } +static void +dm_get_plane_scale(struct drm_plane_state *plane_state, + int *out_plane_scale_w, int *out_plane_scale_h) +{ + int plane_src_w, plane_src_h; + + dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); + *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w; + *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h; +} + static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { - struct drm_plane *cursor = crtc->cursor, *underlying; + struct drm_plane *cursor = crtc->cursor, *plane, *underlying; + struct drm_plane_state *old_plane_state, *new_plane_state; struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; - int cursor_src_w, cursor_src_h; - int underlying_src_w, underlying_src_h; + bool any_relevant_change = false; /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and @@ -9909,13 +9919,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, * blending properties match the underlying planes'. 
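dm_check_crtc_cursor() now compares the cursor and the underlying planes through the shared dm_get_plane_scale() helper above, which expresses scaling in thousandths (crtc size * 1000 / source size). A minimal sketch of that comparison with made-up plane sizes (plain arithmetic, not DRM atomic state):

/*
 * Illustrative only: mirrors the per-mille scale computation of
 * dm_get_plane_scale() and the cursor-vs-plane check in
 * dm_check_crtc_cursor(). The plane sizes are invented examples.
 */
#include <stdio.h>

struct plane_cfg {
	int src_w, src_h;	/* framebuffer source size, pixels */
	int crtc_w, crtc_h;	/* size on the CRTC, pixels */
};

static void plane_scale(const struct plane_cfg *p, int *scale_w, int *scale_h)
{
	*scale_w = p->crtc_w * 1000 / p->src_w;	/* 1000 means 1:1 */
	*scale_h = p->crtc_h * 1000 / p->src_h;
}

int main(void)
{
	struct plane_cfg cursor = { 64, 64, 64, 64 };		/* unscaled cursor */
	struct plane_cfg primary = { 1920, 1080, 3840, 2160 };	/* 2x upscaled plane */
	int cw, ch, pw, ph;

	plane_scale(&cursor, &cw, &ch);
	plane_scale(&primary, &pw, &ph);
	printf("cursor %d/%d, primary %d/%d -> %s\n", cw, ch, pw, ph,
	       (cw == pw && ch == ph) ? "allowed" : "rejected");
	return 0;	/* 1000/1000 vs 2000/2000 -> rejected */
}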
*/ - new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); - if (!new_cursor_state || !new_cursor_state->fb) + /* If no plane was enabled or changed scaling, no need to check again */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + int new_scale_w, new_scale_h, old_scale_w, old_scale_h; + + if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc) + continue; + + if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) { + any_relevant_change = true; + break; + } + + if (new_plane_state->fb == old_plane_state->fb && + new_plane_state->crtc_w == old_plane_state->crtc_w && + new_plane_state->crtc_h == old_plane_state->crtc_h) + continue; + + dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h); + dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); + + if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { + any_relevant_change = true; + break; + } + } + + if (!any_relevant_change) + return 0; + + new_cursor_state = drm_atomic_get_plane_state(state, cursor); + if (IS_ERR(new_cursor_state)) + return PTR_ERR(new_cursor_state); + + if (!new_cursor_state->fb) return 0; - dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); - cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; - cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; + dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h); + + /* Need to check all enabled planes, even if this commit doesn't change + * their state + */ + i = drm_atomic_add_affected_planes(state, crtc); + if (i) + return i; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ @@ -9926,10 +9973,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (!new_underlying_state->fb) continue; - dm_get_oriented_plane_size(new_underlying_state, - &underlying_src_w, &underlying_src_h); - underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; - underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; + dm_get_plane_scale(new_underlying_state, + &underlying_scale_w, &underlying_scale_h); if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index de80e191a92c4..24d6811438c5c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1968,8 +1968,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); ret = smu_v13_0_6_get_metrics_table(smu, metrics, true); - if (ret) + if (ret) { + kfree(metrics); return ret; + } smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index d207b03f8357c..78122b35a0cbb 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev) sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); drm_dev_unregister(drm); aspeed_gfx_unload(drm); + drm_atomic_helper_shutdown(drm); +} + +static void aspeed_gfx_shutdown(struct platform_device *pdev) +{ + 
drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); } static struct platform_driver aspeed_gfx_platform_driver = { .probe = aspeed_gfx_probe, .remove_new = aspeed_gfx_remove, + .shutdown = aspeed_gfx_shutdown, .driver = { .name = "aspeed_gfx", .of_match_table = aspeed_gfx_match, diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 44a660a4bdbfc..ba82a1142adf7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -181,6 +181,7 @@ config DRM_NWL_MIPI_DSI select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE + select GENERIC_PHY select GENERIC_PHY_MIPI_DPHY select MFD_SYSCON select MULTIPLEXER @@ -227,6 +228,7 @@ config DRM_SAMSUNG_DSIM select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE + select GENERIC_PHY select GENERIC_PHY_MIPI_DPHY help The Samsung MIPI DSIM bridge controller driver. diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig index ec35215a20034..cced81633ddcd 100644 --- a/drivers/gpu/drm/bridge/cadence/Kconfig +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -4,6 +4,7 @@ config DRM_CDNS_DSI select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL_BRIDGE + select GENERIC_PHY select GENERIC_PHY_MIPI_DPHY depends on OF help diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 466641c77fe91..fc7f5ec5fb381 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -1447,10 +1447,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data, struct it66121_ctx *ctx = dev_get_drvdata(dev); mutex_lock(&ctx->lock); - - memcpy(buf, ctx->connector->eld, - min(sizeof(ctx->connector->eld), len)); - + if (!ctx->connector) { + /* Pass en empty ELD if connector not available */ + dev_dbg(dev, "No connector present, passing empty EDID data"); + memset(buf, 0, len); + } else { + memcpy(buf, ctx->connector->eld, + min(sizeof(ctx->connector->eld), len)); + } mutex_unlock(&ctx->lock); return 0; diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 4eaea67fb71c2..03532efb893bb 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -45,7 +45,6 @@ struct lt8912 { u8 data_lanes; bool is_power_on; - bool is_attached; }; static int lt8912_write_init_config(struct lt8912 *lt) @@ -559,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge, struct lt8912 *lt = bridge_to_lt8912(bridge); int ret; + ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) { + dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret); + return ret; + } + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { ret = lt8912_bridge_connector_init(bridge); if (ret) { @@ -575,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge, if (ret) goto error; - lt->is_attached = true; - return 0; error: @@ -588,15 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) { struct lt8912 *lt = bridge_to_lt8912(bridge); - if (lt->is_attached) { - lt8912_hard_power_off(lt); - - if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) - drm_bridge_hpd_disable(lt->hdmi_port); + lt8912_hard_power_off(lt); - drm_connector_unregister(<->connector); - drm_connector_cleanup(<->connector); - } + if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) + drm_bridge_hpd_disable(lt->hdmi_port); } static enum drm_connector_status @@ -750,7 +749,6 @@ 
static void lt8912_remove(struct i2c_client *client) { struct lt8912 *lt = i2c_get_clientdata(client); - lt8912_bridge_detach(<->bridge); drm_bridge_remove(<->bridge); lt8912_free_i2c(lt); lt8912_put_dt(lt); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 22c84d29c2bc5..6f33bb0dd32aa 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -929,9 +929,9 @@ retry: init_waitqueue_head(<9611uxc->wq); INIT_WORK(<9611uxc->work, lt9611uxc_hpd_work); - ret = devm_request_threaded_irq(dev, client->irq, NULL, - lt9611uxc_irq_thread_handler, - IRQF_ONESHOT, "lt9611uxc", lt9611uxc); + ret = request_threaded_irq(client->irq, NULL, + lt9611uxc_irq_thread_handler, + IRQF_ONESHOT, "lt9611uxc", lt9611uxc); if (ret) { dev_err(dev, "failed to request irq\n"); goto err_disable_regulators; @@ -967,6 +967,8 @@ retry: return lt9611uxc_audio_init(dev, lt9611uxc); err_remove_bridge: + free_irq(client->irq, lt9611uxc); + cancel_work_sync(<9611uxc->work); drm_bridge_remove(<9611uxc->bridge); err_disable_regulators: @@ -983,7 +985,7 @@ static void lt9611uxc_remove(struct i2c_client *client) { struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); - disable_irq(client->irq); + free_irq(client->irq, lt9611uxc); cancel_work_sync(<9611uxc->work); lt9611uxc_audio_exit(lt9611uxc); drm_bridge_remove(<9611uxc->bridge); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index cf777bdb25d2a..19bdb32dbc9aa 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -385,7 +385,7 @@ static const unsigned int imx8mm_dsim_reg_values[] = { [RESET_TYPE] = DSIM_SWRST, [PLL_TIMER] = 500, [STOP_STATE_CNT] = 0xf, - [PHYCTRL_ULPS_EXIT] = 0, + [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf), [PHYCTRL_VREG_LP] = 0, [PHYCTRL_SLEW_UP] = 0, [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06), @@ -413,6 +413,7 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = { .m_min = 41, .m_max = 125, .min_freq = 500, + .has_broken_fifoctrl_emptyhdr = 1, }; static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { @@ -429,6 +430,7 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { .m_min = 41, .m_max = 125, .min_freq = 500, + .has_broken_fifoctrl_emptyhdr = 1, }; static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = { @@ -1010,8 +1012,20 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi) do { u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG); - if (reg & DSIM_SFR_HEADER_EMPTY) - return 0; + if (!dsi->driver_data->has_broken_fifoctrl_emptyhdr) { + if (reg & DSIM_SFR_HEADER_EMPTY) + return 0; + } else { + if (!(reg & DSIM_SFR_HEADER_FULL)) { + /* + * Wait a little bit, so the pending data can + * actually leave the FIFO to avoid overflow. 
+ */ + if (!cond_resched()) + usleep_range(950, 1050); + return 0; + } + } if (!cond_resched()) usleep_range(950, 1050); diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 819a4b6ec2a07..6eed5c4232956 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -216,6 +217,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask, u32 tmp, orig; tc358768_read(priv, reg, &orig); + + if (priv->error) + return; + tmp = orig & ~mask; tmp |= val & mask; if (tmp != orig) @@ -600,7 +605,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv, dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n", clk_get_rate(priv->refclk), fbd, prd, frs); - dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n", + dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n", priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4); dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n", tc358768_pll_to_pclk(priv, priv->dsiclk * 2), @@ -623,15 +628,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv, return tc358768_clear_error(priv); } -#define TC358768_PRECISION 1000 -static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk) +static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps) { - return (ns * TC358768_PRECISION + period_nsk) / period_nsk; + return DIV_ROUND_UP(ns * 1000, period_ps); } -static u32 tc358768_to_ns(u32 nsk) +static u32 tc358768_ps_to_ns(u32 ps) { - return (nsk / TC358768_PRECISION); + return ps / 1000; } static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) @@ -642,13 +646,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) u32 val, val2, lptxcnt, hact, data_type; s32 raw_val; const struct drm_display_mode *mode; - u32 dsibclk_nsk, dsiclk_nsk, ui_nsk; - u32 dsiclk, dsibclk, video_start; + u32 hsbyteclk_ps, dsiclk_ps, ui_ps; + u32 dsiclk, hsbyteclk, video_start; const u32 internal_delay = 40; int ret, i; + struct videomode vm; + struct device *dev = priv->dev; if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { - dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n"); + dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n"); mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS; } @@ -656,7 +662,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) ret = tc358768_sw_reset(priv); if (ret) { - dev_err(priv->dev, "Software reset failed: %d\n", ret); + dev_err(dev, "Software reset failed: %d\n", ret); tc358768_hw_disable(priv); return; } @@ -664,45 +670,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) mode = &bridge->encoder->crtc->state->adjusted_mode; ret = tc358768_setup_pll(priv, mode); if (ret) { - dev_err(priv->dev, "PLL setup failed: %d\n", ret); + dev_err(dev, "PLL setup failed: %d\n", ret); tc358768_hw_disable(priv); return; } + drm_display_mode_to_videomode(mode, &vm); + dsiclk = priv->dsiclk; - dsibclk = dsiclk / 4; + hsbyteclk = dsiclk / 4; /* Data Format Control Register */ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */ switch (dsi_dev->format) { case MIPI_DSI_FMT_RGB888: val |= (0x3 << 4); - hact = mode->hdisplay * 3; - video_start = (mode->htotal - mode->hsync_start) * 3; + hact = vm.hactive * 3; + video_start = (vm.hsync_len + vm.hback_porch) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; break; case MIPI_DSI_FMT_RGB666: val |= (0x4 << 4); - hact = 
mode->hdisplay * 3; - video_start = (mode->htotal - mode->hsync_start) * 3; + hact = vm.hactive * 3; + video_start = (vm.hsync_len + vm.hback_porch) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; break; case MIPI_DSI_FMT_RGB666_PACKED: val |= (0x4 << 4) | BIT(3); - hact = mode->hdisplay * 18 / 8; - video_start = (mode->htotal - mode->hsync_start) * 18 / 8; + hact = vm.hactive * 18 / 8; + video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8; data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; break; case MIPI_DSI_FMT_RGB565: val |= (0x5 << 4); - hact = mode->hdisplay * 2; - video_start = (mode->htotal - mode->hsync_start) * 2; + hact = vm.hactive * 2; + video_start = (vm.hsync_len + vm.hback_porch) * 2; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16; break; default: - dev_err(priv->dev, "Invalid data format (%u)\n", + dev_err(dev, "Invalid data format (%u)\n", dsi_dev->format); tc358768_hw_disable(priv); return; @@ -722,67 +730,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000); /* DSI Timings */ - dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, - dsibclk); - dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk); - ui_nsk = dsiclk_nsk / 2; - dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk); - dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk); - dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk); + hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk); + dsiclk_ps = (u32)div_u64(PICO, dsiclk); + ui_ps = dsiclk_ps / 2; + dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps, + ui_ps, hsbyteclk_ps); /* LP11 > 100us for D-PHY Rx Init */ - val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1; - dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val); + val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1; + dev_dbg(dev, "LINEINITCNT: %u\n", val); tc358768_write(priv, TC358768_LINEINITCNT, val); /* LPTimeCnt > 50ns */ - val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1; + val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1; lptxcnt = val; - dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val); + dev_dbg(dev, "LPTXTIMECNT: %u\n", val); tc358768_write(priv, TC358768_LPTXTIMECNT, val); /* 38ns < TCLK_PREPARE < 95ns */ - val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1; + val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1; + dev_dbg(dev, "TCLK_PREPARECNT %u\n", val); /* TCLK_PREPARE + TCLK_ZERO > 300ns */ - val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk), - dsibclk_nsk) - 2; + val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps), + hsbyteclk_ps) - 2; + dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2); val |= val2 << 8; - dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val); tc358768_write(priv, TC358768_TCLK_HEADERCNT, val); /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */ - raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5; + raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5; val = clamp(raw_val, 0, 127); - dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val); + dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val); tc358768_write(priv, TC358768_TCLK_TRAILCNT, val); /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */ - val = 50 + tc358768_to_ns(4 * ui_nsk); - val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1; + val = 50 + tc358768_ps_to_ns(4 * ui_ps); + val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1; + dev_dbg(dev, "THS_PREPARECNT %u\n", val); /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */ - raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10; + 
raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10; val2 = clamp(raw_val, 0, 127); + dev_dbg(dev, "THS_ZEROCNT %u\n", val2); val |= val2 << 8; - dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val); tc358768_write(priv, TC358768_THS_HEADERCNT, val); /* TWAKEUP > 1ms in lptxcnt steps */ - val = tc358768_ns_to_cnt(1020000, dsibclk_nsk); + val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps); val = val / (lptxcnt + 1) - 1; - dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val); + dev_dbg(dev, "TWAKEUP: %u\n", val); tc358768_write(priv, TC358768_TWAKEUP, val); /* TCLK_POSTCNT > 60ns + 52*UI */ - val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk), - dsibclk_nsk) - 3; - dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val); + val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps), + hsbyteclk_ps) - 3; + dev_dbg(dev, "TCLK_POSTCNT: %u\n", val); tc358768_write(priv, TC358768_TCLK_POSTCNT, val); /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */ - raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk), - dsibclk_nsk) - 4; + raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps), + hsbyteclk_ps) - 4; val = clamp(raw_val, 0, 15); - dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val); + dev_dbg(dev, "THS_TRAILCNT: %u\n", val); tc358768_write(priv, TC358768_THS_TRAILCNT, val); val = BIT(0); @@ -790,16 +798,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) val |= BIT(i + 1); tc358768_write(priv, TC358768_HSTXVREGEN, val); - if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) - tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1); + tc358768_write(priv, TC358768_TXOPTIONCNTRL, + (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0)); /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */ - val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4); - val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1; - val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk), - dsibclk_nsk) - 2; + val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4); + val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1; + dev_dbg(dev, "TXTAGOCNT: %u\n", val); + val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps), + hsbyteclk_ps) - 2; + dev_dbg(dev, "RXTASURECNT: %u\n", val2); val = val << 16 | val2; - dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val); tc358768_write(priv, TC358768_BTACNTRL1, val); /* START[0] */ @@ -810,43 +819,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) tc358768_write(priv, TC358768_DSI_EVENT, 0); /* vact */ - tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + tc358768_write(priv, TC358768_DSI_VACT, vm.vactive); /* vsw */ - tc358768_write(priv, TC358768_DSI_VSW, - mode->vsync_end - mode->vsync_start); + tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len); + /* vbp */ - tc358768_write(priv, TC358768_DSI_VBPR, - mode->vtotal - mode->vsync_end); + tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch); /* hsw * byteclk * ndl / pclk */ - val = (u32)div_u64((mode->hsync_end - mode->hsync_start) * - ((u64)priv->dsiclk / 4) * priv->dsi_lanes, - mode->clock * 1000); + val = (u32)div_u64(vm.hsync_len * + (u64)hsbyteclk * priv->dsi_lanes, + vm.pixelclock); tc358768_write(priv, TC358768_DSI_HSW, val); /* hbp * byteclk * ndl / pclk */ - val = (u32)div_u64((mode->htotal - mode->hsync_end) * - ((u64)priv->dsiclk / 4) * priv->dsi_lanes, - mode->clock * 1000); + val = (u32)div_u64(vm.hback_porch * + (u64)hsbyteclk * priv->dsi_lanes, + vm.pixelclock); tc358768_write(priv, TC358768_DSI_HBPR, val); } else { /* Set event 
mode */ tc358768_write(priv, TC358768_DSI_EVENT, 1); /* vact */ - tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + tc358768_write(priv, TC358768_DSI_VACT, vm.vactive); /* vsw (+ vbp) */ tc358768_write(priv, TC358768_DSI_VSW, - mode->vtotal - mode->vsync_start); + vm.vsync_len + vm.vback_porch); + /* vbp (not used in event mode) */ tc358768_write(priv, TC358768_DSI_VBPR, 0); /* (hsw + hbp) * byteclk * ndl / pclk */ - val = (u32)div_u64((mode->htotal - mode->hsync_start) * - ((u64)priv->dsiclk / 4) * priv->dsi_lanes, - mode->clock * 1000); + val = (u32)div_u64((vm.hsync_len + vm.hback_porch) * + (u64)hsbyteclk * priv->dsi_lanes, + vm.pixelclock); tc358768_write(priv, TC358768_DSI_HSW, val); /* hbp (not used in event mode) */ @@ -857,11 +866,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) tc358768_write(priv, TC358768_DSI_HACT, hact); /* VSYNC polarity */ - if (!(mode->flags & DRM_MODE_FLAG_NVSYNC)) - tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5)); + tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), + (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0); + /* HSYNC polarity */ - if (mode->flags & DRM_MODE_FLAG_PHSYNC) - tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0)); + tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), + (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0); /* Start DSI Tx */ tc358768_write(priv, TC358768_DSI_START, 0x1); @@ -891,7 +901,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) ret = tc358768_clear_error(priv); if (ret) { - dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret); + dev_err(dev, "Bridge pre_enable failed: %d\n", ret); tc358768_bridge_disable(bridge); tc358768_bridge_post_disable(bridge); } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index f7003d1ec5ef1..01da6789d0440 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1069,7 +1069,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, fence = drm_syncobj_fence_get(syncobjs[i]); if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) { dma_fence_put(fence); - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { continue; } else { timeout = -EINVAL; diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c index 04c15b4697e21..2609a2256da4b 100644 --- a/drivers/gpu/drm/loongson/lsdc_pixpll.c +++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c @@ -120,12 +120,14 @@ static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this) struct lsdc_pixpll_parms *pparms; this->mmio = ioremap(this->reg_base, this->reg_size); - if (IS_ERR_OR_NULL(this->mmio)) + if (!this->mmio) return -ENOMEM; pparms = kzalloc(sizeof(*pparms), GFP_KERNEL); - if (IS_ERR_OR_NULL(pparms)) + if (!pparms) { + iounmap(this->mmio); return -ENOMEM; + } pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index b6fa4ad2f94dc..0a511d7688a3a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -408,6 +408,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) unsigned int local_layer; plane_state = to_mtk_plane_state(plane->state); + + /* should not enable layer before crtc enabled */ + plane_state->pending.enable = false; comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); if (comp) 
mtk_ddp_comp_layer_config(comp, local_layer, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 93552d76b6e77..2d6a979afe8f9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -288,6 +288,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = { static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { .main_path = mt8188_mtk_ddp_main, .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main), + .mmsys_dev_num = 1, }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 0e0a41b2f57f0..4f2e3feabc0f8 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -121,7 +121,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, int ret; args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - args->size = args->pitch * args->height; + + /* + * Multiply 2 variables of different types, + * for example: args->size = args->spacing * args->height; + * may cause coverity issue with unintentional overflow. + */ + args->size = args->pitch; + args->size *= args->height; mtk_gem = mtk_drm_gem_create(dev, args->size, false); if (IS_ERR(mtk_gem)) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index db2f70ae060d6..ddc9355b06d51 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -141,6 +141,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state, dma_addr_t addr; dma_addr_t hdr_addr = 0; unsigned int hdr_pitch = 0; + int offset; gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); @@ -150,8 +151,15 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state, modifier = fb->modifier; if (modifier == DRM_FORMAT_MOD_LINEAR) { - addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; - addr += (new_state->src.y1 >> 16) * pitch; + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + * may cause coverity issue with unintentional overflow. + */ + offset = (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += offset; + offset = (new_state->src.y1 >> 16) * pitch; + addr += offset; } else { int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH) / AFBC_DATA_BLOCK_WIDTH; @@ -159,21 +167,34 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state, / AFBC_DATA_BLOCK_HEIGHT; int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH; int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT; - int hdr_size; + int hdr_size, hdr_offset; hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE; pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0]; hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT); + hdr_offset = hdr_pitch * y_offset_in_blocks + + AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks; + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += hdr_pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. + */ + hdr_addr = addr + hdr_offset; - hdr_addr = addr + hdr_pitch * y_offset_in_blocks + - AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks; /* The data plane is offset by 1 additional block. 
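
The MediaTek hunks above split mixed-width arithmetic into an explicit intermediate step so that a product of two 32-bit values is not silently wrapped before it is added to a 64-bit dma_addr_t. As a standalone illustration of the hazard, and of one way to make the intermediate width explicit (uint64_t stands in for dma_addr_t; both function names are invented for the sketch):

#include <stdint.h>

typedef uint64_t fake_dma_addr_t;       /* stand-in for the kernel's dma_addr_t */

/* The multiplication is done in 32 bits and can wrap before it is widened. */
static fake_dma_addr_t offset_wrapping(fake_dma_addr_t base, uint32_t x, uint32_t cpp)
{
        return base + x * cpp;
}

/* Widening one operand (or keeping the product in its own variable) makes
 * the intended width visible to both the compiler and static checkers. */
static fake_dma_addr_t offset_explicit(fake_dma_addr_t base, uint32_t x, uint32_t cpp)
{
        uint64_t off = (uint64_t)x * cpp;
        return base + off;
}
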
*/ - addr = addr + hdr_size + - pitch * y_offset_in_blocks + - AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * - fb->format->cpp[0] * (x_offset_in_blocks + 1); + offset = pitch * y_offset_in_blocks + + AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT * + fb->format->cpp[0] * (x_offset_in_blocks + 1); + + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += pitch * y_offset_in_blocks; + * may cause coverity issue with unintentional overflow. + */ + addr = addr + hdr_size + offset; } mtk_plane_state->pending.enable = true; @@ -206,9 +227,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_y = new_state->src_y; plane->state->src_h = new_state->src_h; plane->state->src_w = new_state->src_w; - swap(plane->state->fb, new_state->fb); mtk_plane_update_new_state(new_state, new_plane_state); + swap(plane->state->fb, new_state->fb); wmb(); /* Make sure the above parameters are set before update */ new_plane_state->pending.async_dirty = true; mtk_drm_crtc_async_update(new_state->crtc, plane, state); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index d8bfc2cce54dc..290f328c6a421 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -407,7 +407,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi) if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) tmp_reg |= HSTX_CKLP_EN; - if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)) + if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) tmp_reg |= DIS_EOT; writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL); @@ -484,7 +484,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) timing->da_hs_zero + timing->da_hs_exit + 3; delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12; - delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0; + delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2; horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index abddf37f0ea11..2fb18b782b053 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); drm_dev_unregister(dev); + drm_atomic_helper_shutdown(dev); +} + +static void mgag200_pci_shutdown(struct pci_dev *pdev) +{ + drm_atomic_helper_shutdown(pci_get_drvdata(pdev)); } static struct pci_driver mgag200_pci_driver = { @@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = { .id_table = mgag200_pciidlist, .probe = mgag200_pci_probe, .remove = mgag200_pci_remove, + .shutdown = mgag200_pci_shutdown, }; drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index d4e85e24002fb..522ca7fe67625 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -2237,7 +2237,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i DRM_DEV_ERROR(dev, "missing support for speed-bin: %u. 
Some OPPs may not be supported by hardware\n", speedbin); - return UINT_MAX; + supp_hw = BIT(0); /* Default */ } ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 575e7c56219ff..f2d9d34ed50f9 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -331,7 +331,7 @@ static const struct adreno_info gpulist[] = { ), }, { .machine = "qcom,sm6375", - .chip_ids = ADRENO_CHIP_IDS(0x06010900), + .chip_ids = ADRENO_CHIP_IDS(0x06010901), .family = ADRENO_6XX_GEN1, .revn = 619, .fw = { diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index baab79ab6e745..32f965bacdc30 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -126,6 +126,7 @@ static void dsi_unbind(struct device *dev, struct device *master, struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + msm_dsi_tx_buf_free(msm_dsi->host); priv->dsi[msm_dsi->id] = NULL; } diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index bd3763a5d7234..3b46617a59f20 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -125,6 +125,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size); void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host); void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host); void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host); +void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host); int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova); int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); int dsi_clk_init_v2(struct msm_dsi_host *msm_host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 3d6fb708dc223..470866896b9b8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -147,6 +147,7 @@ struct msm_dsi_host { /* DSI 6G TX buffer*/ struct drm_gem_object *tx_gem_obj; + struct msm_gem_address_space *aspace; /* DSI v2 TX buffer */ void *tx_buf; @@ -1111,8 +1112,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size) uint64_t iova; u8 *data; + msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace); + data = msm_gem_kernel_new(dev, size, MSM_BO_WC, - priv->kms->aspace, + msm_host->aspace, &msm_host->tx_gem_obj, &iova); if (IS_ERR(data)) { @@ -1141,10 +1144,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size) return 0; } -static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) +void msm_dsi_tx_buf_free(struct mipi_dsi_host *host) { + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); struct drm_device *dev = msm_host->dev; - struct msm_drm_private *priv; /* * This is possible if we're tearing down before we've had a chance to @@ -1155,11 +1158,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) if (!dev) return; - priv = dev->dev_private; if (msm_host->tx_gem_obj) { - msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace); - drm_gem_object_put(msm_host->tx_gem_obj); + msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace); + msm_gem_address_space_put(msm_host->aspace); msm_host->tx_gem_obj = NULL; + msm_host->aspace = NULL; } if (msm_host->tx_buf) @@ -1945,7 +1948,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host) struct msm_dsi_host *msm_host = to_msm_dsi_host(host); DBG(""); - dsi_tx_buf_free(msm_host); if 
(msm_host->workqueue) { destroy_workqueue(msm_host->workqueue); msm_host->workqueue = NULL; diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index ba3b5b5f0cdfe..02e6b74d50166 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -323,12 +323,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev) struct pl111_drm_dev_private *priv = drm->dev_private; drm_dev_unregister(drm); + drm_atomic_helper_shutdown(drm); if (priv->panel) drm_panel_bridge_remove(priv->bridge); drm_dev_put(drm); of_reserved_mem_device_release(dev); } +static void pl111_amba_shutdown(struct amba_device *amba_dev) +{ + drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev)); +} + /* * This early variant lacks the 565 and 444 pixel formats. */ @@ -431,6 +437,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = { }, .probe = pl111_amba_probe, .remove = pl111_amba_remove, + .shutdown = pl111_amba_shutdown, .id_table = pl111_id_table, }; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4f06356d9ce2e..f0ae087be914e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4821,14 +4821,15 @@ restart_ih: break; case 44: /* hdmi */ afmt_idx = src_data; - if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - if (afmt_idx > 5) { DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); break; } + + if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG; queue_hdmi = true; DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8afb03bbce298..3d3d2109dfebc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2215,10 +2215,6 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index fa531493b1113..7bf08164140ef 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -555,8 +555,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 358d19242f4ba..3fec3acdaf284 
100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -311,22 +311,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, return 0; } -int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) -{ - /* TODO: implement */ - DRM_ERROR("unimplemented %s\n", __func__); - return -EOPNOTSUPP; -} - -int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) -{ - /* TODO: implement */ - DRM_ERROR("unimplemented %s\n", __func__); - return -EOPNOTSUPP; -} - int radeon_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index a29fbafce3936..3793863c210eb 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev) struct cdn_dp_device *dp; struct extcon_dev *extcon; struct phy *phy; + int ret; int i; dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); @@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev) mutex_init(&dp->lock); dev_set_drvdata(dev, dp); - cdn_dp_audio_codec_init(dp, dev); + ret = cdn_dp_audio_codec_init(dp, dev); + if (ret) + return ret; + + ret = component_add(dev, &cdn_dp_component_ops); + if (ret) + goto err_audio_deinit; - return component_add(dev, &cdn_dp_component_ops); + return 0; + +err_audio_deinit: + platform_device_unregister(dp->audio_pdev); + return ret; } static void cdn_dp_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b8f8b45ebf594..93ed841f5dcea 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -40,7 +40,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, prot); - if (ret < rk_obj->base.size) { + if (ret < (ssize_t)rk_obj->base.size) { DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size); ret = -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 14320bc73e5bf..41cd12d5f2fa2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1614,7 +1614,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) if (WARN_ON(!crtc->state)) return NULL; - rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL); + rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state), + sizeof(*rockchip_state), GFP_KERNEL); if (!rockchip_state) return NULL; @@ -1639,7 +1640,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc) if (crtc->state) vop_crtc_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base); + if (crtc_state) + __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } #ifdef CONFIG_DRM_ANALOGIX_DP diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 583df4d22f7e9..c306806aa3dea 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2079,30 +2079,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = { .atomic_disable = vop2_crtc_atomic_disable, }; -static void 
vop2_crtc_reset(struct drm_crtc *crtc) -{ - struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state); - - if (crtc->state) { - __drm_atomic_helper_crtc_destroy_state(crtc->state); - kfree(vcstate); - } - - vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL); - if (!vcstate) - return; - - crtc->state = &vcstate->base; - crtc->state->crtc = crtc; -} - static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc) { - struct rockchip_crtc_state *vcstate, *old_vcstate; + struct rockchip_crtc_state *vcstate; - old_vcstate = to_rockchip_crtc_state(crtc->state); + if (WARN_ON(!crtc->state)) + return NULL; - vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL); + vcstate = kmemdup(to_rockchip_crtc_state(crtc->state), + sizeof(*vcstate), GFP_KERNEL); if (!vcstate) return NULL; @@ -2120,6 +2105,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc, kfree(vcstate); } +static void vop2_crtc_reset(struct drm_crtc *crtc) +{ + struct rockchip_crtc_state *vcstate = + kzalloc(sizeof(*vcstate), GFP_KERNEL); + + if (crtc->state) + vop2_crtc_destroy_state(crtc, crtc->state); + + if (vcstate) + __drm_atomic_helper_crtc_reset(crtc, &vcstate->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + static const struct drm_crtc_funcs vop2_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 5a80b228d18ca..78272b1f9d5b1 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -553,14 +553,45 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, struct ssd130x_plane_state *ssd130x_state) { - struct drm_rect fullscreen = { - .x1 = 0, - .x2 = ssd130x->width, - .y1 = 0, - .y2 = ssd130x->height, - }; - - ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen); + unsigned int page_height = ssd130x->device_info->page_height; + unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height); + u8 *data_array = ssd130x_state->data_array; + unsigned int width = ssd130x->width; + int ret, i; + + if (!ssd130x->page_address_mode) { + memset(data_array, 0, width * pages); + + /* Set address range for horizontal addressing mode */ + ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset, width); + if (ret < 0) + return; + + ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset, pages); + if (ret < 0) + return; + + /* Write out update in one go if we aren't using page addressing mode */ + ssd130x_write_data(ssd130x, data_array, width * pages); + } else { + /* + * In page addressing mode, the start address needs to be reset, + * and each page then needs to be written out separately. 
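
The comment above spells out the two SSD130x addressing modes: horizontal mode takes one bulk write after the column and page ranges are set, while page addressing mode needs the start position reset and a separate write for every page. A rough sketch of the per-page path (clear_pages(), set_page_pos() and write_data() are invented placeholders, not the driver's helpers):

#include <string.h>

#define WIDTH 128
#define PAGES   8                       /* height / 8 for a 128x64 panel */

static int set_page_pos(int page, int col) { (void)page; (void)col; return 0; } /* stub */
static int write_data(const unsigned char *buf, int len) { (void)buf; (void)len; return 0; } /* stub */

static int clear_pages(void)
{
        unsigned char line[WIDTH];
        int i, ret;

        memset(line, 0, sizeof(line));          /* one page worth of zeroes */

        for (i = 0; i < PAGES; i++) {
                ret = set_page_pos(i, 0);       /* reset the start address */
                if (ret < 0)
                        return ret;
                ret = write_data(line, WIDTH);  /* write this page out */
                if (ret < 0)
                        return ret;
        }
        return 0;
}
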
+ */ + memset(data_array, 0, width); + + for (i = 0; i < pages; i++) { + ret = ssd130x_set_page_pos(ssd130x, + ssd130x->page_offset + i, + ssd130x->col_offset); + if (ret < 0) + return; + + ret = ssd130x_write_data(ssd130x, data_array, width); + if (ret < 0) + return; + } + } } static int ssd130x_fb_blit_rect(struct drm_plane_state *state, diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index c68c831136c9b..e8523abef27a5 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -114,6 +114,7 @@ static void drv_unload(struct drm_device *ddev) DRM_DEBUG("%s\n", __func__); drm_kms_helper_poll_fini(ddev); + drm_atomic_helper_shutdown(ddev); ltdc_unload(ddev); } @@ -225,6 +226,11 @@ static void stm_drm_platform_remove(struct platform_device *pdev) drm_dev_put(ddev); } +static void stm_drm_platform_shutdown(struct platform_device *pdev) +{ + drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); +} + static const struct of_device_id drv_dt_ids[] = { { .compatible = "st,stm32-ltdc"}, { /* end node */ }, @@ -234,6 +240,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids); static struct platform_driver stm_drm_platform_driver = { .probe = stm_drm_platform_probe, .remove_new = stm_drm_platform_remove, + .shutdown = stm_drm_platform_shutdown, .driver = { .name = "stm32-display", .of_match_table = drv_dt_ids, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index fe56beea3e93f..8ebd7134ee21b 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev) drm_dev_unregister(dev); drm_kms_helper_poll_fini(dev); + drm_atomic_helper_shutdown(dev); tilcdc_irq_uninstall(dev); drm_mode_config_cleanup(dev); @@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) init_failed: tilcdc_fini(ddev); + platform_set_drvdata(pdev, NULL); return ret; } @@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev) if (!ddev->dev_private) return; - tilcdc_fini(dev_get_drvdata(dev)); + tilcdc_fini(ddev); + dev_set_drvdata(dev, NULL); } static const struct component_master_ops tilcdc_comp_ops = { @@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev) return 0; } +static void tilcdc_pdev_shutdown(struct platform_device *pdev) +{ + drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); +} + static const struct of_device_id tilcdc_of_match[] = { { .compatible = "ti,am33xx-tilcdc", }, { .compatible = "ti,da850-tilcdc", }, @@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match); static struct platform_driver tilcdc_platform_driver = { .probe = tilcdc_pdev_probe, .remove = tilcdc_pdev_remove, + .shutdown = tilcdc_pdev_shutdown, .driver = { .name = "tilcdc", .pm = pm_sleep_ptr(&tilcdc_pm_ops), diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 0bb56d0635366..acce210e25547 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -242,6 +242,7 @@ static void tve200_remove(struct platform_device *pdev) struct tve200_drm_dev_private *priv = drm->dev_private; drm_dev_unregister(drm); + drm_atomic_helper_shutdown(drm); if (priv->panel) drm_panel_bridge_remove(priv->bridge); drm_mode_config_cleanup(drm); @@ -249,6 +250,11 @@ static void tve200_remove(struct platform_device *pdev) drm_dev_put(drm); } +static void tve200_shutdown(struct platform_device *pdev) +{ + drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); +} + static 
const struct of_device_id tve200_of_match[] = { { .compatible = "faraday,tve200", @@ -263,6 +269,7 @@ static struct platform_driver tve200_driver = { }, .probe = tve200_probe, .remove_new = tve200_remove, + .shutdown = tve200_shutdown, }; drm_module_platform_driver(tve200_driver); diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 4fee15c97c341..047b958123341 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev) struct vbox_private *vbox = pci_get_drvdata(pdev); drm_dev_unregister(&vbox->ddev); + drm_atomic_helper_shutdown(&vbox->ddev); vbox_irq_fini(vbox); vbox_mode_fini(vbox); vbox_hw_fini(vbox); } +static void vbox_pci_shutdown(struct pci_dev *pdev) +{ + struct vbox_private *vbox = pci_get_drvdata(pdev); + + drm_atomic_helper_shutdown(&vbox->ddev); +} + static int vbox_pm_suspend(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); @@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = { .id_table = pciidlist, .probe = vbox_pci_probe, .remove = vbox_pci_remove, + .shutdown = vbox_pci_shutdown, .driver.pm = pm_sleep_ptr(&vbox_pm_ops), }; diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c index 5d12d7beef0eb..ade3309ae042f 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c @@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test, struct vc4_crtc *vc4_crtc; int ret; - dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL); + dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, dummy_crtc); vc4_crtc = &dummy_crtc->crtc; diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c index 6e11fcc9ef45e..e70d7c3076acf 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c @@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test, struct drm_encoder *enc; int ret; - dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL); + dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output); dummy_output->encoder.type = vc4_encoder_type; diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c index a3f336edd991b..955c971c528d4 100644 --- a/drivers/gpu/host1x/context.c +++ b/drivers/gpu/host1x/context.c @@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x) if (err < 0) return 0; - cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL); + cdl->len = err / 4; + cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL); if (!cdl->devs) return -ENOMEM; - cdl->len = err / 4; for (i = 0; i < cdl->len; i++) { ctx = &cdl->devs[i]; diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 54c33a24f8442..20a0d1315d90f 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1151,8 +1151,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); - INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1168,7 +1166,11 
@@ static void cp2112_gpio_irq_shutdown(struct irq_data *d) struct cp2112_device *dev = gpiochip_get_data(gc); cp2112_gpio_irq_mask(d); - cancel_delayed_work_sync(&dev->gpio_poll_worker); + + if (!dev->irq_mask) { + dev->gpio_poll = false; + cancel_delayed_work_sync(&dev->gpio_poll_worker); + } } static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) @@ -1307,6 +1309,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) girq->handler = handle_simple_irq; girq->threaded = true; + INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); + ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index a209d51bd2476..7bf12ca0eb4a9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1835,15 +1835,14 @@ static int hidpp_battery_get_property(struct power_supply *psy, /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b -static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp) +static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index) { u8 feature_type; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_WIRELESS_DEVICE_STATUS, - &hidpp->wireless_feature_index, - &feature_type); + feature_index, &feature_type); return ret; } @@ -4249,6 +4248,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) } } + if (hidpp->protocol_major >= 2) { + u8 feature_index; + + if (!hidpp_get_wireless_feature_index(hidpp, &feature_index)) + hidpp->wireless_feature_index = feature_index; + } + if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) { name = hidpp_get_device_name(hidpp); if (name) { @@ -4394,7 +4400,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) bool connected; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct hidpp_ff_private_data data; - bool will_restart = false; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); @@ -4445,10 +4450,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } - if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT || - hidpp->quirks & HIDPP_QUIRK_UNIFYING) - will_restart = true; - INIT_WORK(&hidpp->work, delayed_work_cb); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); @@ -4460,10 +4461,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hdev->name); /* - * Plain USB connections need to actually call start and open - * on the transport driver to allow incoming data. + * First call hid_hw_start(hdev, 0) to allow IO without connecting any + * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's + * name and serial number and store these in hdev->name and hdev->uniq, + * before the hid-input and hidraw drivers expose these to userspace. */ - ret = hid_hw_start(hdev, will_restart ? 
0 : connect_mask); + ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "hw start failed\n"); goto hid_hw_start_fail; @@ -4496,15 +4499,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp_overwrite_name(hdev); } - if (connected && hidpp->protocol_major >= 2) { - ret = hidpp_set_wireless_feature_index(hidpp); - if (ret == -ENOENT) - hidpp->wireless_feature_index = 0; - else if (ret) - goto hid_hw_init_fail; - ret = 0; - } - if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { ret = wtp_get_config(hidpp); if (ret) @@ -4518,21 +4512,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) schedule_work(&hidpp->work); flush_work(&hidpp->work); - if (will_restart) { - /* Reset the HID node state */ - hid_device_io_stop(hdev); - hid_hw_close(hdev); - hid_hw_stop(hdev); - - if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) - connect_mask &= ~HID_CONNECT_HIDINPUT; + if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) + connect_mask &= ~HID_CONNECT_HIDINPUT; - /* Now export the actual inputs and hidraw nodes to the world */ - ret = hid_hw_start(hdev, connect_mask); - if (ret) { - hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); - goto hid_hw_start_fail; - } + /* Now export the actual inputs and hidraw nodes to the world */ + ret = hid_connect(hdev, connect_mask); + if (ret) { + hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret); + goto hid_hw_init_fail; } if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { @@ -4543,6 +4530,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) ret); } + /* + * This relies on logi_dj_ll_close() being a no-op so that DJ connection + * events will still be received. + */ + hid_hw_close(hdev); return ret; hid_hw_init_fail: diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c index 2bb916226a389..cb274cde3ad23 100644 --- a/drivers/hid/hid-uclogic-core-test.c +++ b/drivers/hid/hid-uclogic-core-test.c @@ -56,6 +56,11 @@ static struct uclogic_raw_event_hook_test test_events[] = { }, }; +static void fake_work(struct work_struct *work) +{ + +} + static void hid_test_uclogic_exec_event_hook_test(struct kunit *test) { struct uclogic_params p = {0, }; @@ -77,6 +82,8 @@ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event); memcpy(filter->event, &hook_events[n].event[0], filter->size); + INIT_WORK(&filter->work, fake_work); + list_add_tail(&filter->list, &p.event_hooks->list); } diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c index 678f50cbb160b..a30121419a292 100644 --- a/drivers/hid/hid-uclogic-params-test.c +++ b/drivers/hid/hid-uclogic-params-test.c @@ -174,12 +174,26 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test) KUNIT_EXPECT_EQ(test, params->frame_type, frame_type); } +struct fake_device { + unsigned long quirks; +}; + static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test) { int res, n; + struct hid_device *hdev; + struct fake_device *fake_dev; struct uclogic_params p = {0, }; - res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p); + hdev = kunit_kzalloc(test, sizeof(struct hid_device), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev); + + fake_dev = kunit_kzalloc(test, sizeof(struct fake_device), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_dev); + + hid_set_drvdata(hdev, fake_dev); + + res = uclogic_params_ugee_v2_init_event_hooks(hdev, &p); 
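
The KUnit fix just above boils down to handing the function under test a minimal hid_device whose drvdata is already populated, allocated with the test-managed kunit_kzalloc() so it is released automatically when the test ends. A self-contained sketch of that pattern (the suite, case and fake_drvdata names are invented):

#include <kunit/test.h>
#include <linux/hid.h>
#include <linux/module.h>

struct fake_drvdata {
        unsigned long quirks;
};

static void example_fake_hid_device_test(struct kunit *test)
{
        struct hid_device *hdev;
        struct fake_drvdata *priv;

        /* Test-managed allocations: freed automatically on test exit. */
        hdev = kunit_kzalloc(test, sizeof(*hdev), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev);

        priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

        hid_set_drvdata(hdev, priv);

        /* ...call the code under test with hdev here... */
        KUNIT_EXPECT_PTR_EQ(test, hid_get_drvdata(hdev), (void *)priv);
}

static struct kunit_case example_test_cases[] = {
        KUNIT_CASE(example_fake_hid_device_test),
        {}
};

static struct kunit_suite example_test_suite = {
        .name = "example-fake-hid",
        .test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);

MODULE_LICENSE("GPL");
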
KUNIT_ASSERT_EQ(test, res, 0); /* Check that the function can be called repeatedly */ diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c index ba37a5efbf820..ab2edff018eb6 100644 --- a/drivers/hte/hte-tegra194-test.c +++ b/drivers/hte/hte-tegra194-test.c @@ -153,8 +153,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev) } cnt = of_hte_req_count(hte.pdev); - if (cnt < 0) + if (cnt < 0) { + ret = cnt; goto free_irq; + } dev_info(&pdev->dev, "Total requested lines:%d\n", cnt); diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c index 5fd136baf1cd3..19b9bf3d75ef9 100644 --- a/drivers/hwmon/axi-fan-control.c +++ b/drivers/hwmon/axi-fan-control.c @@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev) return -ENODEV; } + ret = axi_fan_control_init(ctl, pdev->dev.of_node); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize device\n"); + return ret; + } + + ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev, + name, + ctl, + &axi_chip_info, + axi_fan_control_groups); + + if (IS_ERR(ctl->hdev)) + return PTR_ERR(ctl->hdev); + ctl->irq = platform_get_irq(pdev, 0); if (ctl->irq < 0) return ctl->irq; @@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev) return ret; } - ret = axi_fan_control_init(ctl, pdev->dev.of_node); - if (ret) { - dev_err(&pdev->dev, "Failed to initialize device\n"); - return ret; - } - - ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev, - name, - ctl, - &axi_chip_info, - axi_fan_control_groups); - - return PTR_ERR_OR_ZERO(ctl->hdev); + return 0; } static struct platform_driver axi_fan_control_driver = { diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index eba94f68585a8..ba82d1e79c131 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -42,7 +42,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ -#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ +#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index b5b81bd83bb15..d928eb8ae5a37 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -1614,17 +1614,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev) data->fan_div[i]); if (data->has_fan_min & BIT(i)) { - err = nct6775_read_value(data, data->REG_FAN_MIN[i], ®); + u16 tmp; + + err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp); if (err) goto out; - data->fan_min[i] = reg; + data->fan_min[i] = tmp; } if (data->REG_FAN_PULSES[i]) { - err = nct6775_read_value(data, data->REG_FAN_PULSES[i], ®); + u16 tmp; + + err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp); if (err) goto out; - data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03; + data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03; } err = nct6775_select_fan_div(dev, data, i, reg); diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c index 26ba506331007..b9bb469e2d8fe 100644 --- a/drivers/hwmon/pmbus/mp2975.c +++ b/drivers/hwmon/pmbus/mp2975.c @@ -297,6 +297,11 @@ static int 
mp2973_read_word_data(struct i2c_client *client, int page, int ret; switch (reg) { + case PMBUS_STATUS_WORD: + /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */ + ret = pmbus_read_word_data(client, page, phase, reg); + ret ^= PB_STATUS_POWER_GOOD_N; + break; case PMBUS_OT_FAULT_LIMIT: ret = mp2975_read_word_helper(client, page, phase, reg, GENMASK(7, 0)); @@ -380,11 +385,6 @@ static int mp2975_read_word_data(struct i2c_client *client, int page, int ret; switch (reg) { - case PMBUS_STATUS_WORD: - /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */ - ret = pmbus_read_word_data(client, page, phase, reg); - ret ^= PB_STATUS_POWER_GOOD_N; - break; case PMBUS_OT_FAULT_LIMIT: ret = mp2975_read_word_helper(client, page, phase, reg, GENMASK(7, 0)); diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c index 1bbda3b05532e..bf408e35e2c32 100644 --- a/drivers/hwmon/sch5627.c +++ b/drivers/hwmon/sch5627.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -32,6 +33,10 @@ #define SCH5627_REG_PRIMARY_ID 0x3f #define SCH5627_REG_CTRL 0x40 +#define SCH5627_CTRL_START BIT(0) +#define SCH5627_CTRL_LOCK BIT(1) +#define SCH5627_CTRL_VBAT BIT(4) + #define SCH5627_NO_TEMPS 8 #define SCH5627_NO_FANS 4 #define SCH5627_NO_IN 5 @@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data) /* Trigger a Vbat voltage measurement every 5 minutes */ if (time_after(jiffies, data->last_battery + 300 * HZ)) { - sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10); + sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, + data->control | SCH5627_CTRL_VBAT); data->last_battery = jiffies; } @@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg) static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) { + const struct sch5627_data *data = drvdata; + + /* Once the lock bit is set, the virtual registers become read-only + * until the next power cycle. 
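
For context on the permission change above: the hwmon core queries the driver once per attribute through the is_visible() callback and uses the returned mode bits directly, so returning 0444 is how a driver exposes everything read-only once the hardware is locked. A bare-bones callback of that shape (struct demo_data and DEMO_CTRL_LOCK are invented for the sketch):

#include <linux/bits.h>
#include <linux/hwmon.h>

#define DEMO_CTRL_LOCK  BIT(1)          /* example lock bit */

struct demo_data {
        u8 control;                     /* cached control register */
};

static umode_t demo_is_visible(const void *drvdata,
                               enum hwmon_sensor_types type,
                               u32 attr, int channel)
{
        const struct demo_data *data = drvdata;

        /* Locked hardware: everything becomes read-only. */
        if (data->control & DEMO_CTRL_LOCK)
                return 0444;

        /* Otherwise, writable controls get 0644, plain sensors 0444. */
        if (type == hwmon_pwm)
                return 0644;

        return 0444;
}
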
+ */ + if (data->control & SCH5627_CTRL_LOCK) + return 0444; + if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp) return 0644; @@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev) return val; data->control = val; - if (!(data->control & 0x01)) { + if (!(data->control & SCH5627_CTRL_START)) { pr_err("hardware monitoring not enabled\n"); return -ENODEV; } /* Trigger a Vbat voltage measurement, so that we get a valid reading the first time we read Vbat */ - sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, - data->control | 0x10); + sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT); data->last_battery = jiffies; /* diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c index de3a0886c2f72..ac1f725807155 100644 --- a/drivers/hwmon/sch56xx-common.c +++ b/drivers/hwmon/sch56xx-common.c @@ -7,10 +7,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include -#include #include #include #include @@ -21,10 +19,7 @@ #include #include "sch56xx-common.h" -static bool ignore_dmi; -module_param(ignore_dmi, bool, 0); -MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)"); - +/* Insmod parameters */ static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" @@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name) return PTR_ERR_OR_ZERO(sch56xx_pdev); } -static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = { - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"), - }, - }, - { } -}; - -/* For autoloading only */ -static const struct dmi_system_id sch56xx_dmi_table[] __initconst = { - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - }, - }, - { } -}; -MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table); - static int __init sch56xx_init(void) { - const char *name = NULL; int address; + const char *name = NULL; - if (!ignore_dmi) { - if (!dmi_check_system(sch56xx_dmi_table)) - return -ENODEV; - - if (!dmi_check_system(sch56xx_dmi_override_table)) { - /* - * Some machines like the Esprimo P720 and Esprimo C700 have - * onboard devices named " Antiope"/" Theseus" instead of - * "Antiope"/"Theseus", so we need to check for both. 
- */ - if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) && - !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL)) - return -ENODEV; - } - } - - /* - * Some devices like the Esprimo C700 have both onboard devices, - * so we still have to check manually - */ address = sch56xx_find(0x4e, &name); if (address < 0) address = sch56xx_find(0x2e, &name); diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 51aab662050b1..e905734c26a04 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init( iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); } -static void bcm_iproc_i2c_check_slave_status( - struct bcm_iproc_i2c_dev *iproc_i2c) +static bool bcm_iproc_i2c_check_slave_status + (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status) { u32 val; + bool recover = false; - val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET); - /* status is valid only when START_BUSY is cleared after it was set */ - if (val & BIT(S_CMD_START_BUSY_SHIFT)) - return; + /* check slave transmit status only if slave is transmitting */ + if (!iproc_i2c->slave_rx_only) { + val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET); + /* status is valid only when START_BUSY is cleared */ + if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) { + val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK; + if (val == S_CMD_STATUS_TIMEOUT || + val == S_CMD_STATUS_MASTER_ABORT) { + dev_warn(iproc_i2c->device, + (val == S_CMD_STATUS_TIMEOUT) ? + "slave random stretch time timeout\n" : + "Master aborted read transaction\n"); + recover = true; + } + } + } + + /* RX_EVENT is not valid when START_BUSY is set */ + if ((status & BIT(IS_S_RX_EVENT_SHIFT)) && + (status & BIT(IS_S_START_BUSY_SHIFT))) { + dev_warn(iproc_i2c->device, "Slave aborted read transaction\n"); + recover = true; + } - val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK; - if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) { - dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ? - "slave random stretch time timeout\n" : - "Master aborted read transaction\n"); + if (recover) { /* re-initialize i2c for recovery */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); bcm_iproc_i2c_slave_init(iproc_i2c, true); bcm_iproc_i2c_enable_disable(iproc_i2c, true); } + + return recover; } static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c) @@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, u32 val; u8 value; - /* - * Slave events in case of master-write, master-write-read and, - * master-read - * - * Master-write : only IS_S_RX_EVENT_SHIFT event - * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT - * events - * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT - * events or only IS_S_RD_EVENT_SHIFT - * - * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt - * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes - * full. This can happen if Master issues write requests of more than - * 64 bytes. 
- */ - if (status & BIT(IS_S_RX_EVENT_SHIFT) || - status & BIT(IS_S_RD_EVENT_SHIFT) || - status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) { - /* disable slave interrupts */ - val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); - val &= ~iproc_i2c->slave_int_mask; - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); - - if (status & BIT(IS_S_RD_EVENT_SHIFT)) - /* Master-write-read request */ - iproc_i2c->slave_rx_only = false; - else - /* Master-write request only */ - iproc_i2c->slave_rx_only = true; - - /* schedule tasklet to read data later */ - tasklet_schedule(&iproc_i2c->slave_rx_tasklet); - - /* - * clear only IS_S_RX_EVENT_SHIFT and - * IS_S_RX_FIFO_FULL_SHIFT interrupt. - */ - val = BIT(IS_S_RX_EVENT_SHIFT); - if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) - val |= BIT(IS_S_RX_FIFO_FULL_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val); - } if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { iproc_i2c->tx_underrun++; @@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, * less than PKT_LENGTH bytes were output on the SMBUS */ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, - iproc_i2c->slave_int_mask); + val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); + val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); /* End of SMBUS for Master Read */ val = BIT(S_TX_WR_STATUS_SHIFT); @@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, BIT(IS_S_START_BUSY_SHIFT)); } - /* check slave transmit status only if slave is transmitting */ - if (!iproc_i2c->slave_rx_only) - bcm_iproc_i2c_check_slave_status(iproc_i2c); + /* if the controller has been reset, immediately return from the ISR */ + if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status)) + return true; + + /* + * Slave events in case of master-write, master-write-read and, + * master-read + * + * Master-write : only IS_S_RX_EVENT_SHIFT event + * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT + * events + * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT + * events or only IS_S_RD_EVENT_SHIFT + * + * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt + * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes + * full. This can happen if Master issues write requests of more than + * 64 bytes. 
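
The reshuffled bcm-iproc ISR above follows a common ordering rule for slave-mode handlers: run the error and recovery check before scheduling any deferred RX processing, and return from the handler as soon as the controller has been re-initialised, because the latched status no longer describes valid state after a reset. A compact sketch of that structure (all names and bit definitions here are invented stand-ins, not the iproc register layout):

#include <stdbool.h>
#include <stdint.h>

#define EVT_RX  (1u << 0)
#define EVT_RD  (1u << 1)

struct slave_ctrl { bool rx_only; };

/* Illustrative stubs standing in for real hardware accessors. */
static bool needs_recovery(struct slave_ctrl *c, uint32_t status) { (void)c; (void)status; return false; }
static void reinit_controller(struct slave_ctrl *c) { (void)c; }
static void schedule_rx_work(struct slave_ctrl *c) { (void)c; }

static bool slave_isr(struct slave_ctrl *c, uint32_t status)
{
        /* 1. Error path first: after a reset nothing below is valid. */
        if (needs_recovery(c, status)) {
                reinit_controller(c);
                return true;
        }

        /* 2. Only then classify the transfer and defer RX handling. */
        if (status & (EVT_RX | EVT_RD)) {
                c->rx_only = !(status & EVT_RD);
                schedule_rx_work(c);
        }

        return true;
}
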
+ */ + if (status & BIT(IS_S_RX_EVENT_SHIFT) || + status & BIT(IS_S_RD_EVENT_SHIFT) || + status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) { + /* disable slave interrupts */ + val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); + val &= ~iproc_i2c->slave_int_mask; + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); + + if (status & BIT(IS_S_RD_EVENT_SHIFT)) + /* Master-write-read request */ + iproc_i2c->slave_rx_only = false; + else + /* Master-write request only */ + iproc_i2c->slave_rx_only = true; + + /* schedule tasklet to read data later */ + tasklet_schedule(&iproc_i2c->slave_rx_tasklet); + + /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */ + if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) { + val = BIT(IS_S_RX_FIFO_FULL_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val); + } + } return true; } diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 87283e4a46076..0e9ff5500a777 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1525,9 +1525,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master) desc->dev->dev.of_node = desc->boardinfo->of_node; ret = device_register(&desc->dev->dev); - if (ret) + if (ret) { dev_err(&master->dev, "Failed to add I3C device (err = %d)\n", ret); + put_device(&desc->dev->dev); + } } } diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 85e289700c3c5..4abf80f75ef5d 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -33,7 +33,6 @@ enum { struct adf4350_state { struct spi_device *spi; - struct regulator *reg; struct gpio_desc *lock_detect_gpiod; struct adf4350_platform_data *pdata; struct clk *clk; @@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev) return pdata; } +static void adf4350_power_down(void *data) +{ + struct iio_dev *indio_dev = data; + struct adf4350_state *st = iio_priv(indio_dev); + + st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN; + adf4350_sync_config(st); +} + static int adf4350_probe(struct spi_device *spi) { struct adf4350_platform_data *pdata; @@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi) } if (!pdata->clkin) { - clk = devm_clk_get(&spi->dev, "clkin"); + clk = devm_clk_get_enabled(&spi->dev, "clkin"); if (IS_ERR(clk)) - return -EPROBE_DEFER; - - ret = clk_prepare_enable(clk); - if (ret < 0) - return ret; + return PTR_ERR(clk); } indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); - if (indio_dev == NULL) { - ret = -ENOMEM; - goto error_disable_clk; - } + if (indio_dev == NULL) + return -ENOMEM; st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - goto error_disable_clk; - } + ret = devm_regulator_get_enable(&spi->dev, "vcc"); + if (ret) + return ret; - spi_set_drvdata(spi, indio_dev); st->spi = spi; st->pdata = pdata; @@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi) st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL, GPIOD_IN); - if (IS_ERR(st->lock_detect_gpiod)) { - ret = PTR_ERR(st->lock_detect_gpiod); - goto error_disable_reg; - } + if (IS_ERR(st->lock_detect_gpiod)) + return PTR_ERR(st->lock_detect_gpiod); if (pdata->power_up_frequency) { ret = adf4350_set_freq(st, pdata->power_up_frequency); if (ret) - goto error_disable_reg; + return ret; } - ret = iio_device_register(indio_dev); + ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev); if (ret) - goto error_disable_reg; - - return 0; - -error_disable_reg: - if 
(!IS_ERR(st->reg)) - regulator_disable(st->reg); -error_disable_clk: - clk_disable_unprepare(clk); - - return ret; -} - -static void adf4350_remove(struct spi_device *spi) -{ - struct iio_dev *indio_dev = spi_get_drvdata(spi); - struct adf4350_state *st = iio_priv(indio_dev); - struct regulator *reg = st->reg; - - st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN; - adf4350_sync_config(st); - - iio_device_unregister(indio_dev); - - clk_disable_unprepare(st->clk); + return dev_err_probe(&spi->dev, ret, + "Failed to add action to managed power down\n"); - if (!IS_ERR(reg)) - regulator_disable(reg); + return devm_iio_device_register(&spi->dev, indio_dev); } static const struct of_device_id adf4350_of_match[] = { @@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = { .of_match_table = adf4350_of_match, }, .probe = adf4350_probe, - .remove = adf4350_remove, .id_table = adf4350_id, }; module_spi_driver(adf4350_driver); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index a666847bd7143..010718738d04c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device) * empty slots at the beginning. */ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata, - rdma_end_port(device) + 1), + size_add(rdma_end_port(device), 1)), GFP_KERNEL); if (!pdata_rcu) return -ENOMEM; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 59179cfc20ef9..8175dde60b0a8 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device) s = rdma_start_port(device); e = rdma_end_port(device); - sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL); + sa_dev = kzalloc(struct_size(sa_dev, port, + size_add(size_sub(e, s), 1)), + GFP_KERNEL); if (!sa_dev) return -ENOMEM; diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index ee59d73915689..ec5efdc166601 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev) * Two extra attribue elements here, one for the lifespan entry and * one to NULL terminate the list for the sysfs core code */ - data = kzalloc(struct_size(data, attrs, stats->num_counters + 1), + data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)), GFP_KERNEL); if (!data) goto err_free_stats; @@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group) * Two extra attribue elements here, one for the lifespan entry and * one to NULL terminate the list for the sysfs core code */ - data = kzalloc(struct_size(data, attrs, stats->num_counters + 1), + data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)), GFP_KERNEL); if (!data) goto err_free_stats; @@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port, int ret; gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list, - attr->gid_tbl_len * 2), + size_mul(attr->gid_tbl_len, 2)), GFP_KERNEL); if (!gid_attr_group) return -ENOMEM; @@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, int ret; p = kvzalloc(struct_size(p, attrs_list, - attr->gid_tbl_len + attr->pkey_tbl_len), - GFP_KERNEL); + size_add(attr->gid_tbl_len, attr->pkey_tbl_len)), + GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); p->ibdev = device; diff --git 
a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 7e5c33aad1619..f5feca7fa9b9c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1378,7 +1378,9 @@ static int ib_umad_add_one(struct ib_device *device) s = rdma_start_port(device); e = rdma_end_port(device); - umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL); + umad_dev = kzalloc(struct_size(umad_dev, ports, + size_add(size_sub(e, s), 1)), + GFP_KERNEL); if (!umad_dev) return -ENOMEM; diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index 7741a1d69097c..2b5d264f41e51 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, unsigned long *size, void **return_data) { char prefix_name[64]; - char name[64]; + char name[128]; int result; /* create a common prefix */ diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index e77fcc74f15c4..3df032ddda189 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -33,7 +33,9 @@ #include #include #include +#include "hnae3.h" #include "hns_roce_device.h" +#include "hns_roce_hw_v2.h" static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr) { @@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device); struct hns_roce_ah *ah = to_hr_ah(ibah); int ret = 0; + u32 max_sl; if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) return -EOPNOTSUPP; @@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, ah->av.hop_limit = grh->hop_limit; ah->av.flowlabel = grh->flow_label; ah->av.udp_sport = get_ah_udp_sport(ah_attr); - ah->av.sl = rdma_ah_get_sl(ah_attr); ah->av.tclass = get_tclass(grh); + ah->av.sl = rdma_ah_get_sl(ah_attr); + max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1); + if (unlikely(ah->av.sl > max_sl)) { + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to set sl, sl (%u) shouldn't be larger than %u.\n", + ah->av.sl, max_sl); + return -EINVAL; + } + memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d82daff2d9bd5..58d14f1562b9a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len) struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); int mtu = ib_mtu_enum_to_int(qp->path_mtu); - if (len > qp->max_inline_data || len > mtu) { + if (mtu < 0 || len > qp->max_inline_data || len > mtu) { ibdev_err(&hr_dev->ib_dev, "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n", len, qp->max_inline_data, mtu); @@ -4725,6 +4725,9 @@ static int check_cong_type(struct ib_qp *ibqp, { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + if (ibqp->qp_type == IB_QPT_UD) + hr_dev->caps.cong_type = CONG_TYPE_DCQCN; + /* different congestion types match different configurations */ switch (hr_dev->caps.cong_type) { case CONG_TYPE_DCQCN: @@ -4821,22 +4824,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct ib_device *ibdev = 
&hr_dev->ib_dev; const struct ib_gid_attr *gid_attr = NULL; + u8 sl = rdma_ah_get_sl(&attr->ah_attr); int is_roce_protocol; u16 vlan_id = 0xffff; bool is_udp = false; + u32 max_sl; u8 ib_port; u8 hr_port; int ret; + max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1); + if (unlikely(sl > max_sl)) { + ibdev_err_ratelimited(ibdev, + "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n", + sl, max_sl); + return -EINVAL; + } + /* * If free_mr_en of qp is set, it means that this qp comes from * free mr. This qp will perform the loopback operation. * In the loopback scenario, only sl needs to be set. */ if (hr_qp->free_mr_en) { - hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr)); + hr_reg_write(context, QPC_SL, sl); hr_reg_clear(qpc_mask, QPC_SL); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); + hr_qp->sl = sl; return 0; } @@ -4903,14 +4916,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { - ibdev_err(ibdev, - "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", - hr_qp->sl, MAX_SERVICE_LEVEL); - return -EINVAL; - } - + hr_qp->sl = sl; hr_reg_write(context, QPC_SL, hr_qp->sl); hr_reg_clear(qpc_mask, QPC_SL); @@ -5804,7 +5810,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work) case HNS_ROCE_EVENT_TYPE_COMM_EST: break; case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - ibdev_warn(ibdev, "send queue drained.\n"); + ibdev_dbg(ibdev, "send queue drained.\n"); break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n", @@ -5819,10 +5825,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work) irq_work->queue_num, irq_work->sub_type); break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - ibdev_warn(ibdev, "SRQ limit reach.\n"); + ibdev_dbg(ibdev, "SRQ limit reach.\n"); break; case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: - ibdev_warn(ibdev, "SRQ last wqe reach.\n"); + ibdev_dbg(ibdev, "SRQ last wqe reach.\n"); break; case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: ibdev_err(ibdev, "SRQ catas error.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index d9d546cdef525..4a9cd4d21bc99 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -547,17 +547,12 @@ static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats( struct ib_device *device, u32 port_num) { struct hns_roce_dev *hr_dev = to_hr_dev(device); - u32 port = port_num - 1; - if (port > hr_dev->caps.num_ports) { + if (port_num > hr_dev->caps.num_ports) { ibdev_err(device, "invalid port num.\n"); return NULL; } - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 || - hr_dev->is_vf) - return NULL; - return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs, ARRAY_SIZE(hns_roce_port_stats_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); @@ -577,10 +572,6 @@ static int hns_roce_get_hw_stats(struct ib_device *device, if (port > hr_dev->caps.num_ports) return -EINVAL; - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 || - hr_dev->is_vf) - return -EOPNOTSUPP; - ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port, &num_counters); if (ret) { @@ -634,8 +625,6 @@ static const struct ib_device_ops hns_roce_dev_ops = { .query_pkey = hns_roce_query_pkey, .query_port = hns_roce_query_port, .reg_user_mr = 
hns_roce_reg_user_mr, - .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats, - .get_hw_stats = hns_roce_get_hw_stats, INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq), @@ -644,6 +633,11 @@ static const struct ib_device_ops hns_roce_dev_ops = { INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext), }; +static const struct ib_device_ops hns_roce_dev_hw_stats_ops = { + .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats, + .get_hw_stats = hns_roce_get_hw_stats, +}; + static const struct ib_device_ops hns_roce_dev_mr_ops = { .rereg_user_mr = hns_roce_rereg_user_mr, }; @@ -720,6 +714,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops); + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 && + !hr_dev->is_vf) + ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops); + ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops); ib_set_device_ops(ib_dev, &hns_roce_dev_ops); ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index cdc1c6de43a17..828b58534aa97 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1064,7 +1064,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, { struct hns_roce_ib_create_qp_resp resp = {}; struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_ib_create_qp ucmd; + struct hns_roce_ib_create_qp ucmd = {}; int ret; mutex_init(&hr_qp->mutex); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 555629b798b95..5d963abb7e609 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4071,10 +4071,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) return ret; ret = mlx5_mkey_cache_init(dev); - if (ret) { + if (ret) mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); - mlx5r_umr_resource_cleanup(dev); - } return ret; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 78b96bfb4e6ac..2340baaba8e67 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4045,6 +4045,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp, return tx_affinity; } +static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id, + struct mlx5_core_dev *mdev) +{ + struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; + struct mlx5_ib_rq *rq = &raw_packet_qp->rq; + u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {}; + void *rqc; + + if (!qp->rq.wqe_cnt) + return 0; + + MLX5_SET(modify_rq_in, in, rq_state, rq->state); + MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid); + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); + + MLX5_SET64(modify_rq_in, in, modify_bitmask, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); + MLX5_SET(rqc, rqc, counter_set_id, set_id); + + return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in); +} + static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) { @@ -4060,6 +4084,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, else set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1); + if (mqp->type == IB_QPT_RAW_PACKET) + return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev); + base = &mqp->trans_qp.base; MLX5_SET(rts2rts_qp_in, in, opcode, 
MLX5_CMD_OP_RTS2RTS_QP); MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn); diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index f2e093b0b9982..1b45b1d3077de 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn) device_del(&fn->dev); of_node_put(fn->dev.of_node); - put_device(&fn->dev); for (i = 0; i < fn->num_of_irqs; i++) irq_dispose_mapping(fn->irq[i]); + put_device(&fn->dev); } /** diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index 2c16917ba1fda..e76356f91125f 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -497,7 +497,7 @@ regmap_done: ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks); if (ret) - return ret; + goto err_disable_unprepare_clk; provider = &qp->provider; provider->dev = dev; @@ -512,13 +512,15 @@ regmap_done: /* If this fails, bus accesses will crash the platform! */ ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks); if (ret) - return ret; + goto err_disable_unprepare_clk; for (i = 0; i < num_nodes; i++) { size_t j; node = icc_node_create(qnodes[i]->id); if (IS_ERR(node)) { + clk_bulk_disable_unprepare(qp->num_intf_clks, + qp->intf_clks); ret = PTR_ERR(node); goto err_remove_nodes; } @@ -534,8 +536,11 @@ regmap_done: if (qnodes[i]->qos.ap_owned && qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) { ret = qcom_icc_qos_set(node); - if (ret) - return ret; + if (ret) { + clk_bulk_disable_unprepare(qp->num_intf_clks, + qp->intf_clks); + goto err_remove_nodes; + } } data->nodes[i] = node; @@ -563,6 +568,7 @@ err_deregister_provider: icc_provider_deregister(provider); err_remove_nodes: icc_nodes_remove(provider); +err_disable_unprepare_clk: clk_disable_unprepare(qp->bus_clk); return ret; diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index dc321bb86d0be..e97478bbc2825 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -3,6 +3,7 @@ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
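
Note on the interconnect/qcom icc-rpm hunk above: the fix routes every failure after clk_prepare_enable(qp->bus_clk) through a label that unprepares it, and drops the interface clocks in whichever branch enabled them. A minimal sketch of that unwind ordering, with illustrative names only (my_icc, my_icc_probe and register_nodes are not the driver's symbols, and the real probe does more work between these steps):

#include <linux/clk.h>

struct my_icc {
	struct clk *bus_clk;
	int num_intf_clks;
	struct clk_bulk_data *intf_clks;
};

int register_nodes(struct my_icc *qp);	/* hypothetical stand-in for node setup */

static int my_icc_probe(struct my_icc *qp)
{
	int ret;

	ret = clk_prepare_enable(qp->bus_clk);
	if (ret)
		return ret;

	/* interface clocks must be up while the NoC registers are touched */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	ret = register_nodes(qp);
	if (ret) {
		clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
		goto err_disable_unprepare_clk;
	}

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
	return 0;

err_disable_unprepare_clk:
	clk_disable_unprepare(qp->bus_clk);
	return ret;
}
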
*/ +#include #include #include #include @@ -78,7 +79,7 @@ enum { .name = #_name, \ .id = _id, \ .buswidth = _buswidth, \ - .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \ + .num_links = COUNT_ARGS(__VA_ARGS__), \ .links = { __VA_ARGS__ }, \ } diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c index bf800dd7d4ba1..a7392eb73d4a9 100644 --- a/drivers/interconnect/qcom/qdu1000.c +++ b/drivers/interconnect/qcom/qdu1000.c @@ -769,6 +769,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index d94ab9b39f3db..af2be15438403 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -1238,6 +1238,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c index 6592839b4d94b..a626dbc719995 100644 --- a/drivers/interconnect/qcom/sc7280.c +++ b/drivers/interconnect/qcom/sc7280.c @@ -1285,6 +1285,7 @@ static struct qcom_icc_node srvc_snoc = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index 0fb4898dabcfe..bdd3471d4ac89 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -1345,6 +1345,7 @@ static struct qcom_icc_node slv_qup_core_2 = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &slv_ebi } }; diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c index b82c5493cbb56..0270f6c64481a 100644 --- a/drivers/interconnect/qcom/sc8280xp.c +++ b/drivers/interconnect/qcom/sc8280xp.c @@ -1712,6 +1712,7 @@ static struct qcom_icc_node srvc_snoc = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .num_nodes = 1, .nodes = { &ebi }, }; diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c index 540a2108b77c1..907e1ff4ff817 100644 --- a/drivers/interconnect/qcom/sdm670.c +++ b/drivers/interconnect/qcom/sdm670.c @@ -1047,6 +1047,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index b9243c0aa626c..855802be93fea 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -1265,6 +1265,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c index 49aed492e9b80..f41d7e19ba269 100644 --- a/drivers/interconnect/qcom/sm6350.c +++ b/drivers/interconnect/qcom/sm6350.c @@ -1164,6 +1164,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sm8150.c 
b/drivers/interconnect/qcom/sm8150.c index c7c9cf7f746b0..edfe824cad353 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -1282,6 +1282,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index d4a4ecef11f01..661dc18d99dba 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -1397,6 +1397,7 @@ static struct qcom_icc_node qup2_core_slave = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c index bdf75839e6d17..562322d4fc3c4 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -1356,6 +1356,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = { static struct qcom_icc_bcm bcm_acv = { .name = "ACV", + .enable_mask = BIT(3), .keepalive = false, .num_nodes = 1, .nodes = { &ebi }, diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 3a598182b7619..a4da1817e19dd 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -221,6 +221,18 @@ static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area, return 0; } +static struct iopt_area *iopt_area_alloc(void) +{ + struct iopt_area *area; + + area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); + if (!area) + return NULL; + RB_CLEAR_NODE(&area->node.rb); + RB_CLEAR_NODE(&area->pages_node.rb); + return area; +} + static int iopt_alloc_area_pages(struct io_pagetable *iopt, struct list_head *pages_list, unsigned long length, unsigned long *dst_iova, @@ -231,7 +243,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt, int rc = 0; list_for_each_entry(elm, pages_list, next) { - elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT); + elm->area = iopt_area_alloc(); if (!elm->area) return -ENOMEM; } @@ -1005,11 +1017,11 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova) iopt_area_start_byte(area, new_start) & (alignment - 1)) return -EINVAL; - lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); + lhs = iopt_area_alloc(); if (!lhs) return -ENOMEM; - rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); + rhs = iopt_area_alloc(); if (!rhs) { rc = -ENOMEM; goto err_free_lhs; diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 8d9aa297c117e..528f356238b34 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -1507,6 +1507,8 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) area, domain, iopt_area_index(area), iopt_area_last_index(area)); + if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) + WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb)); interval_tree_remove(&area->pages_node, &pages->domains_itree); iopt_area_unfill_domain(area, pages, area->storage_domain); area->storage_domain = NULL; diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index e1484905b7bdb..5b7bc4fd9517c 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -532,17 +532,18 @@ done: } /* - * We can have multiple PLIC instances so setup cpuhp state only - * when context handler for current/boot CPU is present. 
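
Note on the iommufd io_pagetable hunk above: a kzalloc()ed rb_node is all-zero, which RB_EMPTY_NODE() does not treat as "never inserted", so the new iopt_area_alloc() helper clears the nodes explicitly and the pages.c hunk can then warn when an area was never linked into the domains_itree. A hedged sketch of that pairing with illustrative names (my_area, my_area_alloc, my_area_remove are not the iommufd symbols):

#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_area {			/* stand-in for struct iopt_area */
	struct rb_node node;
	struct rb_node pages_node;
};

static struct my_area *my_area_alloc(void)
{
	struct my_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
	if (!area)
		return NULL;
	/* zeroed rb_nodes do not read as "empty"; mark them explicitly */
	RB_CLEAR_NODE(&area->node);
	RB_CLEAR_NODE(&area->pages_node);
	return area;
}

static void my_area_remove(struct my_area *area, struct rb_root *root)
{
	/* catches areas that were allocated but never inserted */
	WARN_ON(RB_EMPTY_NODE(&area->pages_node));
	rb_erase(&area->pages_node, root);
}
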
+ * We can have multiple PLIC instances so setup cpuhp state + * and register syscore operations only when context handler + * for current/boot CPU is present. */ handler = this_cpu_ptr(&plic_handlers); if (handler->present && !plic_cpuhp_setup_done) { cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, "irqchip/sifive/plic:starting", plic_starting_cpu, plic_dying_cpu); + register_syscore_ops(&plic_irq_syscore_ops); plic_cpuhp_setup_done = true; } - register_syscore_ops(&plic_irq_syscore_ops); pr_info("%pOFP: mapped %d interrupts with %d handlers for" " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts); diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 419b710984ab6..2b3bf1353b707 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev, duty = led_dat->pwmstate.period - duty; led_dat->pwmstate.duty_cycle = duty; - led_dat->pwmstate.enabled = duty > 0; + led_dat->pwmstate.enabled = true; return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate); } diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c index b8a95a917cfa4..b13a547e72c49 100644 --- a/drivers/leds/leds-turris-omnia.c +++ b/drivers/leds/leds-turris-omnia.c @@ -2,7 +2,7 @@ /* * CZ.NIC's Turris Omnia LEDs driver * - * 2020 by Marek Behún + * 2020, 2023 by Marek Behún */ #include @@ -41,6 +41,37 @@ struct omnia_leds { struct omnia_led leds[]; }; +static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val) +{ + u8 buf[2] = { cmd, val }; + + return i2c_master_send(client, buf, sizeof(buf)); +} + +static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd) +{ + struct i2c_msg msgs[2]; + u8 reply; + int ret; + + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &cmd; + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = 1; + msgs[1].buf = &reply; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (likely(ret == ARRAY_SIZE(msgs))) + return reply; + else if (ret < 0) + return ret; + else + return -EIO; +} + static int omnia_led_brightness_set_blocking(struct led_classdev *cdev, enum led_brightness brightness) { @@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev, if (buf[2] || buf[3] || buf[4]) state |= CMD_LED_STATE_ON; - ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state); + ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state); if (ret >= 0 && (state & CMD_LED_STATE_ON)) ret = i2c_master_send(leds->client, buf, 5); @@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led, cdev->brightness_set_blocking = omnia_led_brightness_set_blocking; /* put the LED into software mode */ - ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE, - CMD_LED_MODE_LED(led->reg) | - CMD_LED_MODE_USER); + ret = omnia_cmd_write_u8(client, CMD_LED_MODE, + CMD_LED_MODE_LED(led->reg) | + CMD_LED_MODE_USER); if (ret < 0) { dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np, ret); @@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led, } /* disable the LED */ - ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE, - CMD_LED_STATE_LED(led->reg)); + ret = omnia_cmd_write_u8(client, CMD_LED_STATE, + CMD_LED_STATE_LED(led->reg)); if (ret < 0) { dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret); return ret; @@ -158,7 +189,7 @@ static ssize_t brightness_show(struct 
device *dev, struct device_attribute *a, struct i2c_client *client = to_i2c_client(dev); int ret; - ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS); + ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS); if (ret < 0) return ret; @@ -179,8 +210,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a, if (brightness > 100) return -EINVAL; - ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS, - (u8)brightness); + ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness); return ret < 0 ? ret : count; } @@ -237,8 +267,8 @@ static void omnia_leds_remove(struct i2c_client *client) u8 buf[5]; /* put all LEDs into default (HW triggered) mode */ - i2c_smbus_write_byte_data(client, CMD_LED_MODE, - CMD_LED_MODE_LED(OMNIA_BOARD_LEDS)); + omnia_cmd_write_u8(client, CMD_LED_MODE, + CMD_LED_MODE_LED(OMNIA_BOARD_LEDS)); /* set all LEDs color to [255, 255, 255] */ buf[0] = CMD_LED_COLOR; diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 8af4f9bb9cde8..05848a2fecff6 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu) static int __init ledtrig_cpu_init(void) { - int cpu; + unsigned int cpu; int ret; /* Supports up to 9999 cpu cores */ @@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void) if (cpu >= 8) continue; - snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); + snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu); led_trigger_register_simple(trig->name, &trig->_trig); } diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile index 26d2bc7783944..a51e98ab4958d 100644 --- a/drivers/media/cec/platform/Makefile +++ b/drivers/media/cec/platform/Makefile @@ -6,7 +6,7 @@ # Please keep it in alphabetic order obj-$(CONFIG_CEC_CROS_EC) += cros-ec/ obj-$(CONFIG_CEC_GPIO) += cec-gpio/ -obj-$(CONFIG_CEC_MESON_AO) += meson/ +obj-y += meson/ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/ obj-$(CONFIG_CEC_SECO) += seco/ obj-$(CONFIG_CEC_STI) += sti/ diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 74ff833ff48ca..53b443be5a59e 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -99,6 +99,7 @@ config VIDEO_IMX214 config VIDEO_IMX219 tristate "Sony IMX219 sensor support" + select V4L2_CCI_I2C help This is a Video4Linux2 sensor driver for the Sony IMX219 camera. 
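
The imx219.c diff below replaces the driver's hand-rolled 8/16-bit register accessors with the V4L2 CCI helpers pulled in by the new V4L2_CCI_I2C select above. A minimal sketch of how that API is used; the register addresses and probe function here are illustrative only (my_sensor_probe and MY_REG_* are not real driver symbols beyond what the diff itself shows):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <media/v4l2-cci.h>

#define MY_REG_CHIP_ID		CCI_REG16(0x0000)	/* illustrative address */
#define MY_REG_MODE_SELECT	CCI_REG8(0x0100)	/* illustrative address */

static int my_sensor_probe(struct i2c_client *client)
{
	struct regmap *cci;
	u64 chip_id;
	int ret = 0;

	/* 16-bit register addresses; data width is encoded per register by CCI_REG8/16() */
	cci = devm_cci_regmap_init_i2c(client, 16);
	if (IS_ERR(cci))
		return PTR_ERR(cci);

	/* cci_read()/cci_write() accumulate errors through the last argument */
	cci_read(cci, MY_REG_CHIP_ID, &chip_id, &ret);
	cci_write(cci, MY_REG_MODE_SELECT, 0x00, &ret);
	if (ret) {
		dev_err(&client->dev, "CCI access failed: %d\n", ret);
		return ret;
	}

	dev_info(&client->dev, "chip id 0x%llx\n", chip_id);
	return 0;
}
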
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index ec53abe2e84e5..3afa3f79c8a26 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -21,40 +21,56 @@ #include #include #include + +#include #include #include #include #include #include -#include -#define IMX219_REG_VALUE_08BIT 1 -#define IMX219_REG_VALUE_16BIT 2 +/* Chip ID */ +#define IMX219_REG_CHIP_ID CCI_REG16(0x0000) +#define IMX219_CHIP_ID 0x0219 -#define IMX219_REG_MODE_SELECT 0x0100 +#define IMX219_REG_MODE_SELECT CCI_REG8(0x0100) #define IMX219_MODE_STANDBY 0x00 #define IMX219_MODE_STREAMING 0x01 -/* Chip ID */ -#define IMX219_REG_CHIP_ID 0x0000 -#define IMX219_CHIP_ID 0x0219 +#define IMX219_REG_CSI_LANE_MODE CCI_REG8(0x0114) +#define IMX219_CSI_2_LANE_MODE 0x01 +#define IMX219_CSI_4_LANE_MODE 0x03 -/* External clock frequency is 24.0M */ -#define IMX219_XCLK_FREQ 24000000 +#define IMX219_REG_DPHY_CTRL CCI_REG8(0x0128) +#define IMX219_DPHY_CTRL_TIMING_AUTO 0 +#define IMX219_DPHY_CTRL_TIMING_MANUAL 1 -/* Pixel rate is fixed for all the modes */ -#define IMX219_PIXEL_RATE 182400000 -#define IMX219_PIXEL_RATE_4LANE 280800000 +#define IMX219_REG_EXCK_FREQ CCI_REG16(0x012a) +#define IMX219_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */ -#define IMX219_DEFAULT_LINK_FREQ 456000000 -#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000 +/* Analog gain control */ +#define IMX219_REG_ANALOG_GAIN CCI_REG8(0x0157) +#define IMX219_ANA_GAIN_MIN 0 +#define IMX219_ANA_GAIN_MAX 232 +#define IMX219_ANA_GAIN_STEP 1 +#define IMX219_ANA_GAIN_DEFAULT 0x0 -#define IMX219_REG_CSI_LANE_MODE 0x0114 -#define IMX219_CSI_2_LANE_MODE 0x01 -#define IMX219_CSI_4_LANE_MODE 0x03 +/* Digital gain control */ +#define IMX219_REG_DIGITAL_GAIN CCI_REG16(0x0158) +#define IMX219_DGTL_GAIN_MIN 0x0100 +#define IMX219_DGTL_GAIN_MAX 0x0fff +#define IMX219_DGTL_GAIN_DEFAULT 0x0100 +#define IMX219_DGTL_GAIN_STEP 1 + +/* Exposure control */ +#define IMX219_REG_EXPOSURE CCI_REG16(0x015a) +#define IMX219_EXPOSURE_MIN 4 +#define IMX219_EXPOSURE_STEP 1 +#define IMX219_EXPOSURE_DEFAULT 0x640 +#define IMX219_EXPOSURE_MAX 65535 /* V_TIMING internal */ -#define IMX219_REG_VTS 0x0160 +#define IMX219_REG_VTS CCI_REG16(0x0160) #define IMX219_VTS_15FPS 0x0dc6 #define IMX219_VTS_30FPS_1080P 0x06e3 #define IMX219_VTS_30FPS_BINNED 0x06e3 @@ -72,37 +88,37 @@ /* HBLANK control - read only */ #define IMX219_PPL_DEFAULT 3448 -/* Exposure control */ -#define IMX219_REG_EXPOSURE 0x015a -#define IMX219_EXPOSURE_MIN 4 -#define IMX219_EXPOSURE_STEP 1 -#define IMX219_EXPOSURE_DEFAULT 0x640 -#define IMX219_EXPOSURE_MAX 65535 - -/* Analog gain control */ -#define IMX219_REG_ANALOG_GAIN 0x0157 -#define IMX219_ANA_GAIN_MIN 0 -#define IMX219_ANA_GAIN_MAX 232 -#define IMX219_ANA_GAIN_STEP 1 -#define IMX219_ANA_GAIN_DEFAULT 0x0 - -/* Digital gain control */ -#define IMX219_REG_DIGITAL_GAIN 0x0158 -#define IMX219_DGTL_GAIN_MIN 0x0100 -#define IMX219_DGTL_GAIN_MAX 0x0fff -#define IMX219_DGTL_GAIN_DEFAULT 0x0100 -#define IMX219_DGTL_GAIN_STEP 1 - -#define IMX219_REG_ORIENTATION 0x0172 +#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162) +#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164) +#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166) +#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168) +#define IMX219_REG_Y_ADD_END_A CCI_REG16(0x016a) +#define IMX219_REG_X_OUTPUT_SIZE CCI_REG16(0x016c) +#define IMX219_REG_Y_OUTPUT_SIZE CCI_REG16(0x016e) +#define IMX219_REG_X_ODD_INC_A CCI_REG8(0x0170) +#define IMX219_REG_Y_ODD_INC_A CCI_REG8(0x0171) +#define IMX219_REG_ORIENTATION 
CCI_REG8(0x0172) /* Binning Mode */ -#define IMX219_REG_BINNING_MODE 0x0174 +#define IMX219_REG_BINNING_MODE CCI_REG16(0x0174) #define IMX219_BINNING_NONE 0x0000 #define IMX219_BINNING_2X2 0x0101 #define IMX219_BINNING_2X2_ANALOG 0x0303 +#define IMX219_REG_CSI_DATA_FORMAT_A CCI_REG16(0x018c) + +/* PLL Settings */ +#define IMX219_REG_VTPXCK_DIV CCI_REG8(0x0301) +#define IMX219_REG_VTSYCK_DIV CCI_REG8(0x0303) +#define IMX219_REG_PREPLLCK_VT_DIV CCI_REG8(0x0304) +#define IMX219_REG_PREPLLCK_OP_DIV CCI_REG8(0x0305) +#define IMX219_REG_PLL_VT_MPY CCI_REG16(0x0306) +#define IMX219_REG_OPPXCK_DIV CCI_REG8(0x0309) +#define IMX219_REG_OPSYCK_DIV CCI_REG8(0x030b) +#define IMX219_REG_PLL_OP_MPY CCI_REG16(0x030c) + /* Test Pattern Control */ -#define IMX219_REG_TEST_PATTERN 0x0600 +#define IMX219_REG_TEST_PATTERN CCI_REG16(0x0600) #define IMX219_TEST_PATTERN_DISABLE 0 #define IMX219_TEST_PATTERN_SOLID_COLOR 1 #define IMX219_TEST_PATTERN_COLOR_BARS 2 @@ -110,10 +126,10 @@ #define IMX219_TEST_PATTERN_PN9 4 /* Test pattern colour components */ -#define IMX219_REG_TESTP_RED 0x0602 -#define IMX219_REG_TESTP_GREENR 0x0604 -#define IMX219_REG_TESTP_BLUE 0x0606 -#define IMX219_REG_TESTP_GREENB 0x0608 +#define IMX219_REG_TESTP_RED CCI_REG16(0x0602) +#define IMX219_REG_TESTP_GREENR CCI_REG16(0x0604) +#define IMX219_REG_TESTP_BLUE CCI_REG16(0x0606) +#define IMX219_REG_TESTP_GREENB CCI_REG16(0x0608) #define IMX219_TESTP_COLOUR_MIN 0 #define IMX219_TESTP_COLOUR_MAX 0x03ff #define IMX219_TESTP_COLOUR_STEP 1 @@ -122,6 +138,19 @@ #define IMX219_TESTP_BLUE_DEFAULT 0 #define IMX219_TESTP_GREENB_DEFAULT 0 +#define IMX219_REG_TP_WINDOW_WIDTH CCI_REG16(0x0624) +#define IMX219_REG_TP_WINDOW_HEIGHT CCI_REG16(0x0626) + +/* External clock frequency is 24.0M */ +#define IMX219_XCLK_FREQ 24000000 + +/* Pixel rate is fixed for all the modes */ +#define IMX219_PIXEL_RATE 182400000 +#define IMX219_PIXEL_RATE_4LANE 280800000 + +#define IMX219_DEFAULT_LINK_FREQ 456000000 +#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000 + /* IMX219 native and active pixel array size. 
*/ #define IMX219_NATIVE_WIDTH 3296U #define IMX219_NATIVE_HEIGHT 2480U @@ -130,14 +159,9 @@ #define IMX219_PIXEL_ARRAY_WIDTH 3280U #define IMX219_PIXEL_ARRAY_HEIGHT 2464U -struct imx219_reg { - u16 address; - u8 val; -}; - struct imx219_reg_list { unsigned int num_of_regs; - const struct imx219_reg *regs; + const struct cci_reg_sequence *regs; }; /* Mode : resolution and related config&values */ @@ -160,53 +184,48 @@ struct imx219_mode { bool binning; }; -static const struct imx219_reg imx219_common_regs[] = { - {0x0100, 0x00}, /* Mode Select */ +static const struct cci_reg_sequence imx219_common_regs[] = { + { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */ /* To Access Addresses 3000-5fff, send the following commands */ - {0x30eb, 0x0c}, - {0x30eb, 0x05}, - {0x300a, 0xff}, - {0x300b, 0xff}, - {0x30eb, 0x05}, - {0x30eb, 0x09}, + { CCI_REG8(0x30eb), 0x0c }, + { CCI_REG8(0x30eb), 0x05 }, + { CCI_REG8(0x300a), 0xff }, + { CCI_REG8(0x300b), 0xff }, + { CCI_REG8(0x30eb), 0x05 }, + { CCI_REG8(0x30eb), 0x09 }, /* PLL Clock Table */ - {0x0301, 0x05}, /* VTPXCK_DIV */ - {0x0303, 0x01}, /* VTSYSCK_DIV */ - {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */ - {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */ - {0x0306, 0x00}, /* PLL_VT_MPY */ - {0x0307, 0x39}, - {0x030b, 0x01}, /* OP_SYS_CLK_DIV */ - {0x030c, 0x00}, /* PLL_OP_MPY */ - {0x030d, 0x72}, + { IMX219_REG_VTPXCK_DIV, 5 }, + { IMX219_REG_VTSYCK_DIV, 1 }, + { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */ + { IMX219_REG_PLL_VT_MPY, 57 }, + { IMX219_REG_OPSYCK_DIV, 1 }, + { IMX219_REG_PLL_OP_MPY, 114 }, /* Undocumented registers */ - {0x455e, 0x00}, - {0x471e, 0x4b}, - {0x4767, 0x0f}, - {0x4750, 0x14}, - {0x4540, 0x00}, - {0x47b4, 0x14}, - {0x4713, 0x30}, - {0x478b, 0x10}, - {0x478f, 0x10}, - {0x4793, 0x10}, - {0x4797, 0x0e}, - {0x479b, 0x0e}, + { CCI_REG8(0x455e), 0x00 }, + { CCI_REG8(0x471e), 0x4b }, + { CCI_REG8(0x4767), 0x0f }, + { CCI_REG8(0x4750), 0x14 }, + { CCI_REG8(0x4540), 0x00 }, + { CCI_REG8(0x47b4), 0x14 }, + { CCI_REG8(0x4713), 0x30 }, + { CCI_REG8(0x478b), 0x10 }, + { CCI_REG8(0x478f), 0x10 }, + { CCI_REG8(0x4793), 0x10 }, + { CCI_REG8(0x4797), 0x0e }, + { CCI_REG8(0x479b), 0x0e }, /* Frame Bank Register Group "A" */ - {0x0162, 0x0d}, /* Line_Length_A */ - {0x0163, 0x78}, - {0x0170, 0x01}, /* X_ODD_INC_A */ - {0x0171, 0x01}, /* Y_ODD_INC_A */ + { IMX219_REG_LINE_LENGTH_A, 3448 }, + { IMX219_REG_X_ODD_INC_A, 1 }, + { IMX219_REG_Y_ODD_INC_A, 1 }, /* Output setup registers */ - {0x0114, 0x01}, /* CSI 2-Lane Mode */ - {0x0128, 0x00}, /* DPHY Auto Mode */ - {0x012a, 0x18}, /* EXCK_Freq */ - {0x012b, 0x00}, + { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO }, + { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) }, }; /* @@ -214,92 +233,58 @@ static const struct imx219_reg imx219_common_regs[] = { * driver. * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7. 
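
Reading aid for the converted mode tables below: each pair of byte writes in the old tables collapses into one 16-bit CCI register write, so the values become plain decimals. A worked check of the 1920x1080 crop start/end values taken from the hunks below (MY_X_ADD_STA and the static_assert form are illustrative only):

#include <linux/build_bug.h>

#define MY_X_ADD_STA	((0x02 << 8) | 0xa8)	/* old pair {0x0164, 0x02}, {0x0165, 0xa8} */

static_assert(MY_X_ADD_STA == 680);			/* { IMX219_REG_X_ADD_STA_A, 680 } */
static_assert(MY_X_ADD_STA + 1920 - 1 == 2599);		/* { IMX219_REG_X_ADD_END_A, 2599 } */
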
*/ -static const struct imx219_reg mode_3280x2464_regs[] = { - {0x0164, 0x00}, - {0x0165, 0x00}, - {0x0166, 0x0c}, - {0x0167, 0xcf}, - {0x0168, 0x00}, - {0x0169, 0x00}, - {0x016a, 0x09}, - {0x016b, 0x9f}, - {0x016c, 0x0c}, - {0x016d, 0xd0}, - {0x016e, 0x09}, - {0x016f, 0xa0}, - {0x0624, 0x0c}, - {0x0625, 0xd0}, - {0x0626, 0x09}, - {0x0627, 0xa0}, +static const struct cci_reg_sequence mode_3280x2464_regs[] = { + { IMX219_REG_X_ADD_STA_A, 0 }, + { IMX219_REG_X_ADD_END_A, 3279 }, + { IMX219_REG_Y_ADD_STA_A, 0 }, + { IMX219_REG_Y_ADD_END_A, 2463 }, + { IMX219_REG_X_OUTPUT_SIZE, 3280 }, + { IMX219_REG_Y_OUTPUT_SIZE, 2464 }, + { IMX219_REG_TP_WINDOW_WIDTH, 3280 }, + { IMX219_REG_TP_WINDOW_HEIGHT, 2464 }, }; -static const struct imx219_reg mode_1920_1080_regs[] = { - {0x0164, 0x02}, - {0x0165, 0xa8}, - {0x0166, 0x0a}, - {0x0167, 0x27}, - {0x0168, 0x02}, - {0x0169, 0xb4}, - {0x016a, 0x06}, - {0x016b, 0xeb}, - {0x016c, 0x07}, - {0x016d, 0x80}, - {0x016e, 0x04}, - {0x016f, 0x38}, - {0x0624, 0x07}, - {0x0625, 0x80}, - {0x0626, 0x04}, - {0x0627, 0x38}, +static const struct cci_reg_sequence mode_1920_1080_regs[] = { + { IMX219_REG_X_ADD_STA_A, 680 }, + { IMX219_REG_X_ADD_END_A, 2599 }, + { IMX219_REG_Y_ADD_STA_A, 692 }, + { IMX219_REG_Y_ADD_END_A, 1771 }, + { IMX219_REG_X_OUTPUT_SIZE, 1920 }, + { IMX219_REG_Y_OUTPUT_SIZE, 1080 }, + { IMX219_REG_TP_WINDOW_WIDTH, 1920 }, + { IMX219_REG_TP_WINDOW_HEIGHT, 1080 }, }; -static const struct imx219_reg mode_1640_1232_regs[] = { - {0x0164, 0x00}, - {0x0165, 0x00}, - {0x0166, 0x0c}, - {0x0167, 0xcf}, - {0x0168, 0x00}, - {0x0169, 0x00}, - {0x016a, 0x09}, - {0x016b, 0x9f}, - {0x016c, 0x06}, - {0x016d, 0x68}, - {0x016e, 0x04}, - {0x016f, 0xd0}, - {0x0624, 0x06}, - {0x0625, 0x68}, - {0x0626, 0x04}, - {0x0627, 0xd0}, +static const struct cci_reg_sequence mode_1640_1232_regs[] = { + { IMX219_REG_X_ADD_STA_A, 0 }, + { IMX219_REG_X_ADD_END_A, 3279 }, + { IMX219_REG_Y_ADD_STA_A, 0 }, + { IMX219_REG_Y_ADD_END_A, 2463 }, + { IMX219_REG_X_OUTPUT_SIZE, 1640 }, + { IMX219_REG_Y_OUTPUT_SIZE, 1232 }, + { IMX219_REG_TP_WINDOW_WIDTH, 1640 }, + { IMX219_REG_TP_WINDOW_HEIGHT, 1232 }, }; -static const struct imx219_reg mode_640_480_regs[] = { - {0x0164, 0x03}, - {0x0165, 0xe8}, - {0x0166, 0x08}, - {0x0167, 0xe7}, - {0x0168, 0x02}, - {0x0169, 0xf0}, - {0x016a, 0x06}, - {0x016b, 0xaf}, - {0x016c, 0x02}, - {0x016d, 0x80}, - {0x016e, 0x01}, - {0x016f, 0xe0}, - {0x0624, 0x06}, - {0x0625, 0x68}, - {0x0626, 0x04}, - {0x0627, 0xd0}, +static const struct cci_reg_sequence mode_640_480_regs[] = { + { IMX219_REG_X_ADD_STA_A, 1000 }, + { IMX219_REG_X_ADD_END_A, 2279 }, + { IMX219_REG_Y_ADD_STA_A, 752 }, + { IMX219_REG_Y_ADD_END_A, 1711 }, + { IMX219_REG_X_OUTPUT_SIZE, 640 }, + { IMX219_REG_Y_OUTPUT_SIZE, 480 }, + { IMX219_REG_TP_WINDOW_WIDTH, 1640 }, + { IMX219_REG_TP_WINDOW_HEIGHT, 1232 }, }; -static const struct imx219_reg raw8_framefmt_regs[] = { - {0x018c, 0x08}, - {0x018d, 0x08}, - {0x0309, 0x08}, +static const struct cci_reg_sequence raw8_framefmt_regs[] = { + { IMX219_REG_CSI_DATA_FORMAT_A, 0x0808 }, + { IMX219_REG_OPPXCK_DIV, 8 }, }; -static const struct imx219_reg raw10_framefmt_regs[] = { - {0x018c, 0x0a}, - {0x018d, 0x0a}, - {0x0309, 0x0a}, +static const struct cci_reg_sequence raw10_framefmt_regs[] = { + { IMX219_REG_CSI_DATA_FORMAT_A, 0x0a0a }, + { IMX219_REG_OPPXCK_DIV, 10 }, }; static const s64 imx219_link_freq_menu[] = { @@ -460,6 +445,7 @@ struct imx219 { struct v4l2_subdev sd; struct media_pad pad; + struct regmap *regmap; struct clk *xclk; /* system clock to IMX219 */ u32 
xclk_freq; @@ -491,78 +477,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd) return container_of(_sd, struct imx219, sd); } -/* Read registers up to 2 at a time */ -static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val) -{ - struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); - struct i2c_msg msgs[2]; - u8 addr_buf[2] = { reg >> 8, reg & 0xff }; - u8 data_buf[4] = { 0, }; - int ret; - - if (len > 4) - return -EINVAL; - - /* Write register address */ - msgs[0].addr = client->addr; - msgs[0].flags = 0; - msgs[0].len = ARRAY_SIZE(addr_buf); - msgs[0].buf = addr_buf; - - /* Read data from register */ - msgs[1].addr = client->addr; - msgs[1].flags = I2C_M_RD; - msgs[1].len = len; - msgs[1].buf = &data_buf[4 - len]; - - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret != ARRAY_SIZE(msgs)) - return -EIO; - - *val = get_unaligned_be32(data_buf); - - return 0; -} - -/* Write registers up to 2 at a time */ -static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val) -{ - struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); - u8 buf[6]; - - if (len > 4) - return -EINVAL; - - put_unaligned_be16(reg, buf); - put_unaligned_be32(val << (8 * (4 - len)), buf + 2); - if (i2c_master_send(client, buf, len + 2) != len + 2) - return -EIO; - - return 0; -} - -/* Write a list of registers */ -static int imx219_write_regs(struct imx219 *imx219, - const struct imx219_reg *regs, u32 len) -{ - struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); - unsigned int i; - int ret; - - for (i = 0; i < len; i++) { - ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val); - if (ret) { - dev_err_ratelimited(&client->dev, - "Failed to write reg 0x%4.4x. error = %d\n", - regs[i].address, ret); - - return ret; - } - } - - return 0; -} - /* Get bayer order based on flip setting. 
*/ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code) { @@ -586,7 +500,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl) struct imx219 *imx219 = container_of(ctrl->handler, struct imx219, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); - int ret; + int ret = 0; if (ctrl->id == V4L2_CID_VBLANK) { int exposure_max, exposure_def; @@ -610,48 +524,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl) switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: - ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN, - IMX219_REG_VALUE_08BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN, + ctrl->val, &ret); break; case V4L2_CID_EXPOSURE: - ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_EXPOSURE, + ctrl->val, &ret); break; case V4L2_CID_DIGITAL_GAIN: - ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN, + ctrl->val, &ret); break; case V4L2_CID_TEST_PATTERN: - ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN, - IMX219_REG_VALUE_16BIT, - imx219_test_pattern_val[ctrl->val]); + cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN, + imx219_test_pattern_val[ctrl->val], &ret); break; case V4L2_CID_HFLIP: case V4L2_CID_VFLIP: - ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1, - imx219->hflip->val | - imx219->vflip->val << 1); + cci_write(imx219->regmap, IMX219_REG_ORIENTATION, + imx219->hflip->val | imx219->vflip->val << 1, &ret); break; case V4L2_CID_VBLANK: - ret = imx219_write_reg(imx219, IMX219_REG_VTS, - IMX219_REG_VALUE_16BIT, - imx219->mode->height + ctrl->val); + cci_write(imx219->regmap, IMX219_REG_VTS, + imx219->mode->height + ctrl->val, &ret); break; case V4L2_CID_TEST_PATTERN_RED: - ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_TESTP_RED, + ctrl->val, &ret); break; case V4L2_CID_TEST_PATTERN_GREENR: - ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR, + ctrl->val, &ret); break; case V4L2_CID_TEST_PATTERN_BLUE: - ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE, + ctrl->val, &ret); break; case V4L2_CID_TEST_PATTERN_GREENB: - ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB, - IMX219_REG_VALUE_16BIT, ctrl->val); + cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB, + ctrl->val, &ret); break; default: dev_info(&client->dev, @@ -802,15 +713,15 @@ static int imx219_set_framefmt(struct imx219 *imx219, case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SBGGR8_1X8: - return imx219_write_regs(imx219, raw8_framefmt_regs, - ARRAY_SIZE(raw8_framefmt_regs)); + return cci_multi_reg_write(imx219->regmap, raw8_framefmt_regs, + ARRAY_SIZE(raw8_framefmt_regs), NULL); case MEDIA_BUS_FMT_SRGGB10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SBGGR10_1X10: - return imx219_write_regs(imx219, raw10_framefmt_regs, - ARRAY_SIZE(raw10_framefmt_regs)); + return cci_multi_reg_write(imx219->regmap, raw10_framefmt_regs, + ARRAY_SIZE(raw10_framefmt_regs), NULL); } return -EINVAL; @@ -819,28 +730,24 @@ static int imx219_set_framefmt(struct imx219 *imx219, static int imx219_set_binning(struct imx219 *imx219, const struct 
v4l2_mbus_framefmt *format) { - if (!imx219->mode->binning) { - return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, - IMX219_REG_VALUE_16BIT, - IMX219_BINNING_NONE); - } + if (!imx219->mode->binning) + return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE, + IMX219_BINNING_NONE, NULL); switch (format->code) { case MEDIA_BUS_FMT_SRGGB8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SBGGR8_1X8: - return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, - IMX219_REG_VALUE_16BIT, - IMX219_BINNING_2X2_ANALOG); + return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE, + IMX219_BINNING_2X2_ANALOG, NULL); case MEDIA_BUS_FMT_SRGGB10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SBGGR10_1X10: - return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE, - IMX219_REG_VALUE_16BIT, - IMX219_BINNING_2X2); + return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE, + IMX219_BINNING_2X2, NULL); } return -EINVAL; @@ -879,9 +786,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd, static int imx219_configure_lanes(struct imx219 *imx219) { - return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE, - IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ? - IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE); + return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE, + imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE : + IMX219_CSI_4_LANE_MODE, NULL); }; static int imx219_start_streaming(struct imx219 *imx219, @@ -897,7 +804,8 @@ static int imx219_start_streaming(struct imx219 *imx219, return ret; /* Send all registers that are common to all modes */ - ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs)); + ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs, + ARRAY_SIZE(imx219_common_regs), NULL); if (ret) { dev_err(&client->dev, "%s failed to send mfg header\n", __func__); goto err_rpm_put; @@ -912,7 +820,8 @@ static int imx219_start_streaming(struct imx219 *imx219, /* Apply default values of current mode */ reg_list = &imx219->mode->reg_list; - ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs); + ret = cci_multi_reg_write(imx219->regmap, reg_list->regs, + reg_list->num_of_regs, NULL); if (ret) { dev_err(&client->dev, "%s failed to set mode\n", __func__); goto err_rpm_put; @@ -939,8 +848,8 @@ static int imx219_start_streaming(struct imx219 *imx219, goto err_rpm_put; /* set stream on register */ - ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, - IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING); + ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT, + IMX219_MODE_STREAMING, NULL); if (ret) goto err_rpm_put; @@ -961,8 +870,8 @@ static void imx219_stop_streaming(struct imx219 *imx219) int ret; /* set stream off register */ - ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, - IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY); + ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT, + IMX219_MODE_STANDBY, NULL); if (ret) dev_err(&client->dev, "%s failed to set stream\n", __func__); @@ -1101,10 +1010,9 @@ static int imx219_identify_module(struct imx219 *imx219) { struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd); int ret; - u32 val; + u64 val; - ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID, - IMX219_REG_VALUE_16BIT, &val); + ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL); if (ret) { dev_err(&client->dev, "failed to read chip id %x\n", IMX219_CHIP_ID); @@ -1112,7 +1020,7 @@ static int imx219_identify_module(struct imx219 *imx219) 
} if (val != IMX219_CHIP_ID) { - dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + dev_err(&client->dev, "chip id mismatch: %x!=%llx\n", IMX219_CHIP_ID, val); return -EIO; } @@ -1336,6 +1244,13 @@ static int imx219_probe(struct i2c_client *client) if (imx219_check_hwcfg(dev, imx219)) return -EINVAL; + imx219->regmap = devm_cci_regmap_init_i2c(client, 16); + if (IS_ERR(imx219->regmap)) { + ret = PTR_ERR(imx219->regmap); + dev_err(dev, "failed to initialize CCI: %d\n", ret); + return ret; + } + /* Get system clock (xclk) */ imx219->xclk = devm_clk_get(dev, NULL); if (IS_ERR(imx219->xclk)) { @@ -1379,17 +1294,19 @@ static int imx219_probe(struct i2c_client *client) * streaming is started, so upon power up switch the modes to: * streaming -> standby */ - ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, - IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING); + ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT, + IMX219_MODE_STREAMING, NULL); if (ret < 0) goto error_power_off; + usleep_range(100, 110); /* put sensor back to standby mode */ - ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT, - IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY); + ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT, + IMX219_MODE_STANDBY, NULL); if (ret < 0) goto error_power_off; + usleep_range(100, 110); ret = imx219_init_controls(imx219); diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c index be84ff1e2b170..fc1cf196ef015 100644 --- a/drivers/media/i2c/max9286.c +++ b/drivers/media/i2c/max9286.c @@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv) i2c_mux_mask |= BIT(id); } - of_node_put(node); of_node_put(i2c_mux); /* Parse the endpoints */ @@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv) priv->source_mask |= BIT(ep.port); priv->nsources++; } - of_node_put(node); of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width); switch (priv->bus_width) { diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c index dbc642c5995b6..8ebdb32dd3dbc 100644 --- a/drivers/media/i2c/ov13b10.c +++ b/drivers/media/i2c/ov13b10.c @@ -1501,7 +1501,7 @@ static int ov13b10_probe(struct i2c_client *client) full_power = acpi_dev_state_d0(&client->dev); if (full_power) { - ov13b10_power_on(&client->dev); + ret = ov13b10_power_on(&client->dev); if (ret) { dev_err(&client->dev, "failed to power on\n"); return ret; diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 5fe85aa2d2ec4..40532f7bcabea 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd, return 0; } +static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank) +{ + const struct ov5640_mode_info *mode = sensor->current_mode; + + __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK, + OV5640_MAX_VTS - mode->height, 1, vblank); + + __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank); +} + static int ov5640_update_pixel_rate(struct ov5640_dev *sensor) { const struct ov5640_mode_info *mode = sensor->current_mode; enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate; struct v4l2_mbus_framefmt *fmt = &sensor->fmt; - const struct ov5640_timings *timings; + const struct ov5640_timings *timings = ov5640_timings(sensor, mode); s32 exposure_val, exposure_max; unsigned int hblank; unsigned int i = 0; @@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor) 
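
Note on the ov5640 hunk above: __v4l2_ctrl_vblank_update() captures the rule that a mode-dependent control must have its range adjusted before its value is set, so the new value is never clamped against stale limits. A hedged sketch of the same pattern with illustrative names and limits (my_sensor, MY_MIN_VBLANK and MY_MAX_VTS are not the ov5640 values); the __-prefixed control helpers are the unlocked variants, typically used where the control handler lock is already held:

#include <media/v4l2-ctrls.h>

#define MY_MIN_VBLANK	24		/* illustrative limits */
#define MY_MAX_VTS	0xffff

struct my_mode {
	u32 height;
};

struct my_sensor {
	struct {
		struct v4l2_ctrl *vblank;
	} ctrls;
	const struct my_mode *current_mode;
};

static void my_update_vblank(struct my_sensor *sensor, u32 vblank)
{
	u32 max = MY_MAX_VTS - sensor->current_mode->height;

	/* widen/narrow the range first, then program the value */
	__v4l2_ctrl_modify_range(sensor->ctrls.vblank, MY_MIN_VBLANK,
				 max, 1, vblank);
	__v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
}
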
__v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, ov5640_calc_pixel_rate(sensor)); + __v4l2_ctrl_vblank_update(sensor, timings->vblank_def); + return 0; } @@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor) __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate); __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i); - timings = ov5640_timings(sensor, mode); hblank = timings->htot - mode->width; __v4l2_ctrl_modify_range(sensor->ctrls.hblank, hblank, hblank, 1, hblank); vblank = timings->vblank_def; - __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK, - OV5640_MAX_VTS - mode->height, 1, vblank); - __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank); + __v4l2_ctrl_vblank_update(sensor, vblank); exposure_max = timings->crop.height + vblank - 4; exposure_val = clamp_t(s32, sensor->ctrls.exposure->val, @@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client) ret = ov5640_sensor_resume(dev); if (ret) { dev_err(dev, "failed to power on\n"); - goto entity_cleanup; + goto free_ctrls; } pm_runtime_set_active(dev); @@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client) err_pm_runtime: pm_runtime_put_noidle(dev); pm_runtime_disable(dev); - v4l2_ctrl_handler_free(&sensor->ctrls.handler); ov5640_sensor_suspend(dev); +free_ctrls: + v4l2_ctrl_handler_free(&sensor->ctrls.handler); entity_cleanup: media_entity_cleanup(&sensor->sd.entity); mutex_destroy(&sensor->lock); diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index aa708a0e5eac6..09a193bb87df3 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -3474,6 +3474,7 @@ static void bttv_remove(struct pci_dev *pci_dev) /* free resources */ free_irq(btv->c.pci->irq,btv); + del_timer_sync(&btv->timeout); iounmap(btv->bt848_mmio); release_mem_region(pci_resource_start(btv->c.pci,0), pci_resource_len(btv->c.pci,0)); diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h index 667637eedb5d4..7320852668d64 100644 --- a/drivers/media/platform/amphion/vpu_defs.h +++ b/drivers/media/platform/amphion/vpu_defs.h @@ -71,6 +71,7 @@ enum { VPU_MSG_ID_TIMESTAMP_INFO, VPU_MSG_ID_FIRMWARE_XCPT, VPU_MSG_ID_PIC_SKIPPED, + VPU_MSG_ID_DBG_MSG, }; enum VPU_ENC_MEMORY_RESOURSE { diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c index af3b336e5dc32..d12310af9ebce 100644 --- a/drivers/media/platform/amphion/vpu_helpers.c +++ b/drivers/media/platform/amphion/vpu_helpers.c @@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id) case VPU_MSG_ID_UNSUPPORTED: return "unsupported"; case VPU_MSG_ID_FIRMWARE_XCPT: return "exception"; case VPU_MSG_ID_PIC_SKIPPED: return "skipped"; + case VPU_MSG_ID_DBG_MSG: return "debug msg"; } return ""; } diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c index f771661980c01..d3425de7bccd3 100644 --- a/drivers/media/platform/amphion/vpu_malone.c +++ b/drivers/media/platform/amphion/vpu_malone.c @@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = { {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM}, {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT}, {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED}, + {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC}, }; static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt, diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c index 
d0ead051f7d18..b74a407a19f22 100644 --- a/drivers/media/platform/amphion/vpu_msgs.c +++ b/drivers/media/platform/amphion/vpu_msgs.c @@ -23,6 +23,7 @@ struct vpu_msg_handler { u32 id; void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt); + u32 is_str; }; static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt) @@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event { char *str = (char *)pkt->data; - if (strlen(str)) + if (*str) dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str); else dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id); @@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc vpu_inst_unlock(inst); } +static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt) +{ + char *str = (char *)pkt->data; + + if (*str) + dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str); +} + +static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt) +{ + if (pkt->hdr.num == ARRAY_SIZE(pkt->data)) + pkt->hdr.num--; + pkt->data[pkt->hdr.num] = 0; +} + static struct vpu_msg_handler handlers[] = { {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done}, {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done}, @@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = { {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded}, {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done}, {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos}, - {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error}, - {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt}, + {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true}, + {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true}, {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped}, + {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true}, }; static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg) @@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m } } - if (handler && handler->done) - handler->done(inst, msg); + if (handler) { + if (handler->is_str) + vpu_terminate_string_msg(msg); + if (handler->done) + handler->done(inst, msg); + } vpu_response_cmd(inst, msg_id, 1); diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c index 0d879d71d8185..9231ee7e9b3a9 100644 --- a/drivers/media/platform/cadence/cdns-csi2rx.c +++ b/drivers/media/platform/cadence/cdns-csi2rx.c @@ -479,8 +479,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx) asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh, struct v4l2_async_connection); of_node_put(ep); - if (IS_ERR(asd)) + if (IS_ERR(asd)) { + v4l2_async_nf_cleanup(&csi2rx->notifier); return PTR_ERR(asd); + } csi2rx->notifier.ops = &csi2rx_notifier_ops; @@ -543,6 +545,7 @@ static int csi2rx_probe(struct platform_device *pdev) return 0; err_cleanup: + v4l2_async_nf_unregister(&csi2rx->notifier); v4l2_async_nf_cleanup(&csi2rx->notifier); err_free_priv: kfree(csi2rx); @@ -553,6 +556,8 @@ static void csi2rx_remove(struct platform_device *pdev) { struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev); + v4l2_async_nf_unregister(&csi2rx->notifier); + v4l2_async_nf_cleanup(&csi2rx->notifier); v4l2_async_unregister_subdev(&csi2rx->subdev); kfree(csi2rx); } diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c 
b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c index 2bbc48c7402ca..f8fa3b841ccfb 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c @@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base) u32 img_stride; u32 mem_stride; u32 i, enc_quality; + u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality); value = width << 16 | height; writel(value, base + JPEG_ENC_IMG_SIZE); @@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base) writel(img_stride, base + JPEG_ENC_IMG_STRIDE); writel(mem_stride, base + JPEG_ENC_STRIDE); - enc_quality = mtk_jpeg_enc_quality[0].hardware_value; - for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) { + enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value; + for (i = 0; i < nr_enc_quality; i++) { if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) { enc_quality = mtk_jpeg_enc_quality[i].hardware_value; break; diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c index 3177592490bee..6adac857a4779 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c @@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd, const struct v4l2_rect *compose; u32 out = 0; + ctx = &path->comps[index]; if (CFG_CHECK(MT8183, p_id)) out = CFG_COMP(MT8183, ctx->param, outputs[0]); compose = path->composes[out]; - ctx = &path->comps[index]; ret = call_op(ctx, config_frame, cmd, compose); if (ret) return ret; diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c index 908602031fd0e..9ce34a3b5ee67 100644 --- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c +++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c @@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys); int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem) { + enum mtk_instance_type inst_type = *((unsigned int *)priv); + struct platform_device *plat_dev; unsigned long size = mem->size; - struct mtk_vcodec_dec_ctx *ctx = priv; - struct device *dev = &ctx->dev->plat_dev->dev; + int id; - mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); + if (inst_type == MTK_INST_ENCODER) { + struct mtk_vcodec_enc_ctx *enc_ctx = priv; + + plat_dev = enc_ctx->dev->plat_dev; + id = enc_ctx->id; + } else { + struct mtk_vcodec_dec_ctx *dec_ctx = priv; + + plat_dev = dec_ctx->dev->plat_dev; + id = dec_ctx->id; + } + + mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL); if (!mem->va) { - mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size); + mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!", + dev_name(&plat_dev->dev), size); return -ENOMEM; } - mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va); - mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id, - (unsigned long)mem->dma_addr); - mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size); + mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va, + (unsigned long)mem->dma_addr, size); return 0; } @@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc); void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem) { + enum mtk_instance_type inst_type = *((unsigned int *)priv); + 
struct platform_device *plat_dev; unsigned long size = mem->size; - struct mtk_vcodec_dec_ctx *ctx = priv; - struct device *dev = &ctx->dev->plat_dev->dev; + int id; + + if (inst_type == MTK_INST_ENCODER) { + struct mtk_vcodec_enc_ctx *enc_ctx = priv; + + plat_dev = enc_ctx->dev->plat_dev; + id = enc_ctx->id; + } else { + struct mtk_vcodec_dec_ctx *dec_ctx = priv; + + plat_dev = dec_ctx->dev->plat_dev; + id = dec_ctx->id; + } if (!mem->va) { - mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size); + mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!", + dev_name(&plat_dev->dev), size); return; } - mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va); - mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id, - (unsigned long)mem->dma_addr); - mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size); + mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va, + (unsigned long)mem->dma_addr, size); - dma_free_coherent(dev, size, mem->va, mem->dma_addr); + dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr); mem->va = NULL; mem->dma_addr = 0; mem->size = 0; diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c index ae6290d28f8e9..84ad1cc6ad171 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c @@ -154,6 +154,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu) return -EINVAL; } + if (IS_ERR_OR_NULL(vpu->vsi)) { + mtk_venc_err(vpu->ctx, "invalid venc vsi"); + return -EINVAL; + } + return 0; } diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c index b7a720198ce57..0c8b204535ffc 100644 --- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c +++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c @@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1, return false; } +static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx) +{ + struct vb2_v4l2_buffer *next_dst_buf; + + next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + if (!next_dst_buf) { + ctx->fh.m2m_ctx->is_draining = true; + ctx->fh.m2m_ctx->next_buf_last = true; + return; + } + + v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf); +} + static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx, struct mxc_jpeg_src_buf *jpeg_src_buf) { @@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx, q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt)) jpeg_src_buf->fmt = q_data_cap->fmt; - if (q_data_cap->fmt != jpeg_src_buf->fmt || + if (ctx->need_initial_source_change_evt || + q_data_cap->fmt != jpeg_src_buf->fmt || q_data_cap->w != jpeg_src_buf->w || q_data_cap->h != jpeg_src_buf->h) { dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n", @@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx, mxc_jpeg_sizeimage(q_data_cap); notify_src_chg(ctx); ctx->source_change = 1; + ctx->need_initial_source_change_evt = false; + if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) + mxc_jpeg_set_last_buffer(ctx); } return ctx->source_change ? 
true : false; @@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q, for (i = 0; i < *nplanes; i++) sizes[i] = mxc_jpeg_get_plane_size(q_data, i); + if (V4L2_TYPE_IS_OUTPUT(q->type)) + ctx->need_initial_source_change_evt = true; + return 0; } @@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q) v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); } - if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change) - v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q); + v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q); + /* if V4L2_DEC_CMD_STOP is sent before the source change triggered, + * restore the is_draining flag + */ + if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf) + ctx->fh.m2m_ctx->is_draining = true; + if (V4L2_TYPE_IS_OUTPUT(q->type) && v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) { notify_eos(ctx); @@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb) return -EINVAL; for (i = 0; i < q_data->fmt->mem_planes; i++) { sizeimage = mxc_jpeg_get_plane_size(q_data, i); - if (vb2_plane_size(vb, i) < sizeimage) { + if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) { dev_err(dev, "plane %d too small (%lu < %lu)", i, vb2_plane_size(vb, i), sizeimage); return -EINVAL; diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h index d80e94cc9d992..dc4afeeff5b65 100644 --- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h +++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h @@ -99,6 +99,7 @@ struct mxc_jpeg_ctx { enum mxc_jpeg_enc_state enc_state; int slot; unsigned int source_change; + bool need_initial_source_change_evt; bool header_parsed; struct v4l2_ctrl_handler ctrl_handler; u8 jpeg_quality; diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c index 76634d242b103..0f5b3845d7b94 100644 --- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c +++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c @@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx) ret = vb2_queue_init(q); if (ret) - goto err_vd_rel; + return ret; vp->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad); if (ret) - goto err_vd_rel; + return ret; video_set_drvdata(vfd, vp); @@ -1171,8 +1171,6 @@ err_ctrlh_free: v4l2_ctrl_handler_free(&vp->ctrl_handler); err_me_cleanup: media_entity_cleanup(&vfd->entity); -err_vd_rel: - video_device_release(vfd); return ret; } diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 423fc85d79ee3..50ec24c753e9e 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work) ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev); if (ctx) { vpu_err("frame processing timed out!\n"); - ctx->codec_ops->reset(ctx); + if (ctx->codec_ops->reset) + ctx->codec_ops->reset(ctx); hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR); } } diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c index 0224ff68ab3fc..64d6fb852ae9b 100644 --- a/drivers/media/platform/verisilicon/hantro_postproc.c +++ b/drivers/media/platform/verisilicon/hantro_postproc.c @@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx) static int 
down_scale_factor(struct hantro_ctx *ctx) { - if (ctx->src_fmt.width == ctx->dst_fmt.width) + if (ctx->src_fmt.width <= ctx->dst_fmt.width) return 0; return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width); diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c index 816ffa905a4bb..f975276707835 100644 --- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c +++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c @@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = { }; static const char * const rk3588_vpu981_vpu_clk_names[] = { - "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root" + "aclk", "hclk", }; /* VDPU1/VEPU1 */ diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c index b51e6a3b8cbeb..f99878eff7ace 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_mux.c +++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c @@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe, m->priv = args->priv; m->network_id = args->network_id; m->network_name = kstrdup(args->network_name, GFP_KERNEL); + if (!m->network_name) + goto free_mux_buf; + m->timing.current_jiffies = get_jiffies_64(); if (args->channels) m->channels = args->channels; else if (vidtv_channels_init(m) < 0) - goto free_mux_buf; + goto free_mux_network_name; /* will alloc data for pmt_sections after initializing pat */ if (vidtv_channel_si_init(m) < 0) @@ -527,6 +530,8 @@ free_channel_si: vidtv_channel_si_destroy(m); free_channels: vidtv_channels_destroy(m); +free_mux_network_name: + kfree(m->network_name); free_mux_buf: vfree(m->mux_buf); free_mux: diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c index ce0b7a6e92dc3..2a51c898c11eb 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_psi.c +++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c @@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc desc->service_name_len = service_name_len; - if (service_name && service_name_len) + if (service_name && service_name_len) { desc->service_name = kstrdup(service_name, GFP_KERNEL); + if (!desc->service_name) + goto free_desc; + } desc->provider_name_len = provider_name_len; - if (provider_name && provider_name_len) + if (provider_name && provider_name_len) { desc->provider_name = kstrdup(provider_name, GFP_KERNEL); + if (!desc->provider_name) + goto free_desc_service_name; + } vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc); return desc; + +free_desc_service_name: + if (service_name && service_name_len) + kfree(desc->service_name); +free_desc: + kfree(desc); + return NULL; } struct vidtv_psi_desc_registration @@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name desc->length = network_name_len; - if (network_name && network_name_len) + if (network_name && network_name_len) { desc->network_name = kstrdup(network_name, GFP_KERNEL); + if (!desc->network_name) { + kfree(desc); + return NULL; + } + } vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc); return desc; @@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event iso_language_code = "eng"; desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL); + if (!desc->iso_language_code) + goto free_desc; - if (event_name && event_name_len) + if (event_name && event_name_len) { desc->event_name = kstrdup(event_name, GFP_KERNEL); + if (!desc->event_name) + goto free_desc_language_code; + } - if (text && 
text_len) + if (text && text_len) { desc->text = kstrdup(text, GFP_KERNEL); + if (!desc->text) + goto free_desc_event_name; + } vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc); return desc; + +free_desc_event_name: + if (event_name && event_name_len) + kfree(desc->event_name); +free_desc_language_code: + kfree(desc->iso_language_code); +free_desc: + kfree(desc); + return NULL; } struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc) diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 33a2aa8907e65..4eb7dd4599b7e 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { - if (msg[0].len < 3 || msg[1].len < 1) - return -EOPNOTSUPP; + if (msg[0].len < 3 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; @@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { - if (msg[0].len < 3) - return -EOPNOTSUPP; + if (msg[0].len < 3) { + ret = -EOPNOTSUPP; + goto unlock; + } /* demod access via firmware interface */ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; @@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } +unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c index 9e5b5dbd9c8df..2845041f32d69 100644 --- a/drivers/memory/tegra/tegra234.c +++ b/drivers/memory/tegra/tegra234.c @@ -986,6 +986,10 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst) msg.rx.data = &bwmgr_resp; msg.rx.size = sizeof(bwmgr_resp); + if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 && + pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2) + msg.flags = TEGRA_BPMP_MESSAGE_RESET; + ret = tegra_bpmp_transfer(mc->bpmp, &msg); if (ret < 0) { dev_err(mc->dev, "BPMP transfer failed: %d\n", ret); diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c index 02cf4f3e91d76..de5d894ac04af 100644 --- a/drivers/mfd/arizona-spi.c +++ b/drivers/mfd/arizona-spi.c @@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona) arizona->pdata.micd_ranges = arizona_micd_aosp_ranges; arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges); + /* Use left headphone speaker for HP vs line-out detection */ + arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL; + return 0; } diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index c7510434380a4..fbbe82c6e75b5 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -826,7 +826,6 @@ out_stop_rx: dln2_stop_rx_urbs(dln2); out_free: - usb_put_dev(dln2->usb_dev); dln2_free(dln2); return ret; diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 0ed7c0d7784e1..2b85509a90fc2 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -146,6 +146,7 @@ static int mfd_add_device(struct device *parent, int id, struct platform_device *pdev; struct device_node *np = NULL; struct mfd_of_node_entry *of_entry, *tmp; + bool disabled = false; int ret = -ENOMEM; int platform_id; int r; @@ -183,11 +184,10 @@ static 
int mfd_add_device(struct device *parent, int id, if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { - /* Ignore 'disabled' devices error free */ + /* Skip 'disabled' devices */ if (!of_device_is_available(np)) { - of_node_put(np); - ret = 0; - goto fail_alias; + disabled = true; + continue; } ret = mfd_match_of_node_to_dev(pdev, np, cell); @@ -197,10 +197,17 @@ static int mfd_add_device(struct device *parent, int id, if (ret) goto fail_alias; - break; + goto match; } } + if (disabled) { + /* Ignore 'disabled' devices error free */ + ret = 0; + goto fail_alias; + } + +match: if (!pdev->dev.of_node) pr_warn("%s: Failed to locate of_node [id: %d]\n", cell->name, platform_id); diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index c1a134bd8ba7b..b878431553abc 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -15,6 +15,7 @@ #include #include +#include /* * function pointer pointing to either, @@ -429,7 +430,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) case ST_LL_AWAKE_TO_ASLEEP: pr_err("ST LL is illegal state(%ld)," "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; case ST_LL_ASLEEP: skb_queue_tail(&st_gdata->tx_waitq, skb); @@ -438,7 +439,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb) default: pr_err("ST LL is illegal state(%ld)," "purging received skb.", st_ll_getstate(st_gdata)); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } @@ -492,7 +493,7 @@ void st_tx_wakeup(struct st_data_s *st_data) spin_unlock_irqrestore(&st_data->lock, flags); break; } - kfree_skb(skb); + dev_kfree_skb_irq(skb); spin_unlock_irqrestore(&st_data->lock, flags); } /* if wake-up is set in another context- restart sending */ diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4a4bab9aa7263..89cd48fcec79f 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card) case 3: /* MMC v3.1 - v3.3 */ case 4: /* MMC v4 */ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); - card->cid.oemid = UNSTUFF_BITS(resp, 104, 8); + card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 7f9334a8af500..735d5de3caa0e 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev) struct can_frame *cf; int err; - BUG_ON(netif_carrier_ok(dev)); + if (netif_carrier_ok(dev)) + netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); /* No synchronization needed because the device is bus-off and * no messages can come in or go out. 
@@ -153,11 +154,12 @@ restart: priv->can_stats.restarts++; /* Now restart the device */ - err = priv->do_set_mode(dev, CAN_MODE_START); - netif_carrier_on(dev); - if (err) + err = priv->do_set_mode(dev, CAN_MODE_START); + if (err) { netdev_err(dev, "Error %d during restart", err); + netif_carrier_off(dev); + } } static void can_restart_work(struct work_struct *work) diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index f6d05b3ef59ab..3ebd4f779b9bd 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, { struct can_priv *priv = netdev_priv(dev); - BUG_ON(idx >= priv->echo_skb_max); + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return -EINVAL; + } /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 0c7f7505632cd..5e3a72b7c4691 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -2230,6 +2230,7 @@ static int es58x_probe(struct usb_interface *intf, for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { int ret = es58x_init_netdev(es58x_dev, ch_idx); + if (ret) { es58x_free_netdevs(es58x_dev); return ret; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h index c1ba1a4e8857b..2e183bdeedd72 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -378,13 +378,13 @@ struct es58x_sw_version { /** * struct es58x_hw_revision - Hardware revision number. - * @letter: Revision letter. + * @letter: Revision letter, an alphanumeric character. * @major: Version major number, represented on three digits. * @minor: Version minor number, represented on three digits. * * The hardware revision uses its own format: "axxx/xxx" where 'a' is - * a letter and 'x' a digit. It can be retrieved from the product - * information string. + * an alphanumeric character and 'x' a digit. It can be retrieved from + * the product information string. */ struct es58x_hw_revision { char letter; diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c index 9fba29e2f57c6..635edeb8f68cd 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c +++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c @@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev, * firmware version, the bootloader version and the hardware * revision. * - * If the function fails, simply emit a log message and continue - * because product information is not critical for the driver to - * operate. + * If the function fails, set the version or revision to an invalid + * value and emit an informal message. Continue probing because the + * product information is not critical for the driver to operate. 
*/ void es58x_parse_product_info(struct es58x_device *es58x_dev) { + static const struct es58x_sw_version sw_version_not_set = { + .major = -1, + .minor = -1, + .revision = -1, + }; + static const struct es58x_hw_revision hw_revision_not_set = { + .letter = '\0', + .major = -1, + .minor = -1, + }; char *prod_info; + es58x_dev->firmware_version = sw_version_not_set; + es58x_dev->bootloader_version = sw_version_not_set; + es58x_dev->hardware_revision = hw_revision_not_set; + prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX); if (!prod_info) { dev_warn(es58x_dev->dev, @@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev) } /** - * es58x_sw_version_is_set() - Check if the version is a valid number. + * es58x_sw_version_is_valid() - Check if the version is a valid number. * @sw_ver: Version number of either the firmware or the bootloader. * - * If &es58x_sw_version.major, &es58x_sw_version.minor and - * &es58x_sw_version.revision are all zero, the product string could - * not be parsed and the version number is invalid. + * If any of the software version sub-numbers do not fit on two + * digits, the version is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the software version is valid, @false otherwise. */ -static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver) +static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver) { - return sw_ver->major || sw_ver->minor || sw_ver->revision; + return sw_ver->major < 100 && sw_ver->minor < 100 && + sw_ver->revision < 100; } /** - * es58x_hw_revision_is_set() - Check if the revision is a valid number. + * es58x_hw_revision_is_valid() - Check if the revision is a valid number. * @hw_rev: Revision number of the hardware. * - * If &es58x_hw_revision.letter is the null character, the product - * string could not be parsed and the hardware revision number is - * invalid. + * If &es58x_hw_revision.letter is not a alphanumeric character or if + * any of the hardware revision sub-numbers do not fit on three + * digits, the revision is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the hardware revision is valid, @false otherwise. 
*/ -static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev) +static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev) { - return hw_rev->letter != '\0'; + return isalnum(hw_rev->letter) && hw_rev->major < 1000 && + hw_rev->minor < 1000; } /** @@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; int ret = 0; - if (es58x_sw_version_is_set(fw_ver)) { + if (es58x_sw_version_is_valid(fw_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", fw_ver->major, fw_ver->minor, fw_ver->revision); ret = devlink_info_version_running_put(req, @@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, return ret; } - if (es58x_sw_version_is_set(bl_ver)) { + if (es58x_sw_version_is_valid(bl_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", bl_ver->major, bl_ver->minor, bl_ver->revision); ret = devlink_info_version_running_put(req, @@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, return ret; } - if (es58x_hw_revision_is_set(hw_rev)) { + if (es58x_hw_revision_is_valid(hw_rev)) { snprintf(buf, sizeof(buf), "%c%03u/%03u", hw_rev->letter, hw_rev->major, hw_rev->minor); ret = devlink_info_version_fixed_put(req, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 14b311196b8f8..22b00912f7ac8 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18078,7 +18078,8 @@ static void tg3_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); - tg3_power_down(tp); + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); rtnl_unlock(); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 7750702900fa6..6f6525983130e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) if (tp->snd_una != snd_una) { tp->snd_una = snd_una; - tp->rcv_tstamp = tcp_time_stamp(tp); + tp->rcv_tstamp = tcp_jiffies32; if (tp->snd_una == tp->snd_nxt && !csk_flag_nochk(csk, CSK_TX_FAILOVER)) csk_reset_flag(csk, CSK_TX_WAIT_IDLE); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 35461165de0d2..b92e3aa7cd041 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > priv->num_tx_rings) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)", + "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", num_xdp_tx_queues, priv->min_num_stack_tx_queues, priv->num_tx_rings); diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 5704b5f57cd0d..83b09dcfafc4f 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv) rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * priv->rx_cfg.num_queues; priv->stats_report_len = struct_size(priv->stats_report, stats, - tx_stats_num + 
rx_stats_num); + size_add(tx_stats_num, rx_stats_num)); priv->stats_report = dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, &priv->stats_report_bus, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index de7fd43dc11c8..00ca2b88165cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -16320,11 +16320,15 @@ static void i40e_remove(struct pci_dev *pdev) i40e_switch_branch_release(pf->veb[i]); } - /* Now we can shutdown the PF's VSI, just before we kill + /* Now we can shutdown the PF's VSIs, just before we kill * adminq and hmc. */ - if (pf->vsi[pf->lan_vsi]) - i40e_vsi_release(pf->vsi[pf->lan_vsi]); + for (i = pf->num_alloc_vsi; i--;) + if (pf->vsi[i]) { + i40e_vsi_close(pf->vsi[i]); + i40e_vsi_release(pf->vsi[i]); + pf->vsi[i] = NULL; + } i40e_cloud_filter_exit(pf); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e110ba3461857..d8d7b62ceb24e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -298,8 +298,6 @@ struct iavf_adapter { #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) -#define IAVF_FLAG_PROMISC_ON BIT(13) -#define IAVF_FLAG_ALLMULTI_ON BIT(14) #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) @@ -325,10 +323,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) -#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) -#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) -#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) -#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) +#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15) #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) @@ -365,6 +360,12 @@ struct iavf_adapter { (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \ IAVF_EXTENDED_CAP_RECV_VLAN_V2) + /* Lock to prevent possible clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + netdev_features_t current_netdev_promisc_flags; + /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; @@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter); void iavf_del_ether_addrs(struct iavf_adapter *adapter); void iavf_add_vlans(struct iavf_adapter *adapter); void iavf_del_vlans(struct iavf_adapter *adapter); -void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); +void iavf_set_promiscuous(struct iavf_adapter *adapter); +bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter); void iavf_request_stats(struct iavf_adapter *adapter); int iavf_request_reset(struct iavf_adapter *adapter); void iavf_get_hena(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index b3434dbc90d6f..68783a7b70962 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1186,6 +1186,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) return 0; } +/** + * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed + * @adapter: device specific adapter + */ +bool 
iavf_promiscuous_mode_changed(struct iavf_adapter *adapter) +{ + return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + /** * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure @@ -1199,19 +1209,10 @@ static void iavf_set_rx_mode(struct net_device *netdev) __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock); - if (netdev->flags & IFF_PROMISC && - !(adapter->flags & IAVF_FLAG_PROMISC_ON)) - adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; - else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & IAVF_FLAG_PROMISC_ON) - adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; - - if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; - else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & IAVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; + spin_lock_bh(&adapter->current_netdev_promisc_flags_lock); + if (iavf_promiscuous_mode_changed(adapter)) + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); } /** @@ -2162,19 +2163,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { - iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC); - return 0; - } - - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { - iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); - return 0; - } - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || - (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { - iavf_set_promiscuous(adapter, 0); + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) { + iavf_set_promiscuous(adapter); return 0; } @@ -4970,6 +4960,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->cloud_filter_list_lock); spin_lock_init(&adapter->fdir_fltr_lock); spin_lock_init(&adapter->adv_rss_lock); + spin_lock_init(&adapter->current_netdev_promisc_flags_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index f9727e9c3d630..0b97b424e487a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -936,14 +936,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter) /** * iavf_set_promiscuous * @adapter: adapter structure - * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. 
**/ -void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) +void iavf_set_promiscuous(struct iavf_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct virtchnl_promisc_info vpi; - int promisc_all; + unsigned int flags; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -952,36 +952,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) return; } - promisc_all = FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC; - if ((flags & promisc_all) == promisc_all) { - adapter->flags |= IAVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; - dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); - } + /* prevent changes to promiscuous flags */ + spin_lock_bh(&adapter->current_netdev_promisc_flags_lock); - if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= IAVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", - adapter->netdev->name); + /* sanity check to prevent duplicate AQ calls */ + if (!iavf_promiscuous_mode_changed(adapter)) { + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n"); + /* allow changes to promiscuous flags */ + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); + return; } - if (!flags) { - if (adapter->flags & IAVF_FLAG_PROMISC_ON) { - adapter->flags &= ~IAVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); - } + /* there are 2 bits, but only 3 states */ + if (!(netdev->flags & IFF_PROMISC) && + netdev->flags & IFF_ALLMULTI) { + /* State 1 - only multicast promiscuous mode enabled + * - !IFF_PROMISC && IFF_ALLMULTI + */ + flags = FLAG_VF_MULTICAST_PROMISC; + adapter->current_netdev_promisc_flags |= IFF_ALLMULTI; + adapter->current_netdev_promisc_flags &= ~IFF_PROMISC; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } else if (!(netdev->flags & IFF_PROMISC) && + !(netdev->flags & IFF_ALLMULTI)) { + /* State 2 - unicast/multicast promiscuous mode disabled + * - !IFF_PROMISC && !IFF_ALLMULTI + */ + flags = 0; + adapter->current_netdev_promisc_flags &= + ~(IFF_PROMISC | IFF_ALLMULTI); + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } else { + /* State 3 - unicast/multicast promiscuous mode enabled + * - IFF_PROMISC && IFF_ALLMULTI + * - IFF_PROMISC && !IFF_ALLMULTI + */ + flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; + adapter->current_netdev_promisc_flags |= IFF_PROMISC; + if (netdev->flags & IFF_ALLMULTI) + adapter->current_netdev_promisc_flags |= IFF_ALLMULTI; + else + adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI; - if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { - adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; - dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", - adapter->netdev->name); - } + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + + /* allow changes to promiscuous flags */ + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c 
b/drivers/net/ethernet/intel/ice/ice_lag.c index 7b1256992dcf6..fb40ad98e6aad 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -595,7 +595,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf) INIT_LIST_HEAD(&ndlist.node); rcu_read_lock(); for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { - nl = kzalloc(sizeof(*nl), GFP_KERNEL); + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); if (!nl) break; @@ -1529,18 +1529,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr) */ static void ice_lag_disable_sriov_bond(struct ice_lag *lag) { - struct ice_lag_netdev_list *entry; struct ice_netdev_priv *np; - struct net_device *netdev; struct ice_pf *pf; - list_for_each_entry(entry, lag->netdev_head, node) { - netdev = entry->netdev; - np = netdev_priv(netdev); - pf = np->vsi->back; - - ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); - } + np = netdev_priv(lag->netdev); + pf = np->vsi->back; + ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); } /** @@ -1672,7 +1666,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event, rcu_read_lock(); for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) { - nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL); + nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC); if (!nd_list) break; @@ -2046,7 +2040,7 @@ void ice_lag_rebuild(struct ice_pf *pf) INIT_LIST_HEAD(&ndlist.node); rcu_read_lock(); for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { - nl = kzalloc(sizeof(*nl), GFP_KERNEL); + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); if (!nl) break; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 81d96a40d5a74..c4270708a7694 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -2246,18 +2246,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) { - info->n_per_out = N_PER_OUT_E810; - - if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) - info->n_ext_ts = N_EXT_TS_E810; - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { info->n_ext_ts = N_EXT_TS_E810; + info->n_per_out = N_PER_OUT_E810T; info->n_pins = NUM_PTP_PINS_E810T; info->verify = ice_verify_pin_e810t; /* Complete setup of the SMA pins */ ice_ptp_setup_sma_pins_e810t(pf, info); + } else if (ice_is_e810t(&pf->hw)) { + info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; + info->n_per_out = N_PER_OUT_NO_SMA_E810T; + } else { + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; } } diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 37b54db91df27..dd03cb69ad26b 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev) return ice_tc_tun_get_type(dev) != TNL_LAST; } -static int -ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, - struct flow_action_entry *act) +static bool ice_tc_is_dev_uplink(struct net_device *dev) +{ + return netif_is_ice(dev) || ice_is_tunnel_supported(dev); +} + +static int ice_tc_setup_redirect_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev) { struct ice_repr *repr; + fltr->action.fltr_act = ICE_FWD_TO_VSI; + + if (ice_is_port_repr_netdev(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + 
fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_is_port_repr_netdev(filter_dev) && + ice_tc_is_dev_uplink(target_dev)) { + repr = ice_netdev_to_repr(filter_dev); + + fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_tc_is_dev_uplink(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int +ice_tc_setup_drop_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr) +{ + fltr->action.fltr_act = ICE_DROP_PACKET; + + if (ice_is_port_repr_netdev(filter_dev)) { + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_tc_is_dev_uplink(filter_dev)) { + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct flow_action_entry *act) +{ + int err; + switch (act->id) { case FLOW_ACTION_DROP: - fltr->action.fltr_act = ICE_DROP_PACKET; + err = ice_tc_setup_drop_action(filter_dev, fltr); + if (err) + return err; + break; case FLOW_ACTION_REDIRECT: - fltr->action.fltr_act = ICE_FWD_TO_VSI; - - if (ice_is_port_repr_netdev(act->dev)) { - repr = ice_netdev_to_repr(act->dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else if (netif_is_ice(act->dev) || - ice_is_tunnel_supported(act->dev)) { - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else { - NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); - return -EINVAL; - } + err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev); + if (err) + return err; break; @@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) goto exit; } - /* egress traffic is always redirect to uplink */ - if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) - fltr->dest_vsi = vsi->back->switchdev.uplink_vsi; - rule_info.sw_act.fltr_act = fltr->action.fltr_act; if (fltr->action.fltr_act != ICE_DROP_PACKET) rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; @@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.flags_info.act_valid = true; if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { + /* Uplink to VF */ rule_info.sw_act.flag |= ICE_FLTR_RX; rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; - } else { + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { + /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + } else { + /* VF to VF */ + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } /* specify the cookie as filter_rule_id */ @@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, /** * ice_parse_tc_flower_actions - Parse the actions for a TC filter + * @filter_dev: Pointer to device on which filter is being added * @vsi: Pointer to VSI * @cls_flower: Pointer to TC flower offload 
structure * @fltr: Pointer to TC flower filter structure * * Parse the actions for a TC filter */ -static int -ice_parse_tc_flower_actions(struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower, - struct ice_tc_flower_fltr *fltr) +static int ice_parse_tc_flower_actions(struct net_device *filter_dev, + struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); struct flow_action *flow_action = &rule->action; @@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi, flow_action_for_each(i, act, flow_action) { if (ice_is_eswitch_mode_switchdev(vsi->back)) - err = ice_eswitch_tc_parse_action(fltr, act); + err = ice_eswitch_tc_parse_action(filter_dev, fltr, act); else err = ice_tc_parse_action(vsi, fltr, act); if (err) @@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, if (err < 0) goto err; - err = ice_parse_tc_flower_actions(vsi, f, fltr); + err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr); if (err < 0) goto err; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 818ce76185b2f..629cf1659e5f9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; u64 incr, *ptr, val; - int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { @@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) continue; incr = (u64)qidx << 32; - while (timeout) { - val = otx2_atomic64_add(incr, ptr); - sqe_head = (val >> 20) & 0x3F; - sqe_tail = (val >> 28) & 0x3F; - if (sqe_head == sqe_tail) - break; - usleep_range(1, 3); - timeout--; - } + val = otx2_atomic64_add(incr, ptr); + sqe_head = (val >> 20) & 0x3F; + sqe_tail = (val >> 28) & 0x3F; + if (sqe_head != sqe_tail) + usleep_range(50, 60); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index c04a8ee53a82f..e7c69b57147e0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en); int otx2_txsch_alloc(struct otx2_nic *pfvf); void otx2_txschq_stop(struct otx2_nic *pfvf); void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq); +void otx2_free_pending_sqe(struct otx2_nic *pfvf); void otx2_sqb_flush(struct otx2_nic *pfvf); int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 6daf4d58c25d6..91b99fd703616 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1193,31 +1193,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { }; static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { - "NIX_SND_STATUS_GOOD", - "NIX_SND_STATUS_SQ_CTX_FAULT", - "NIX_SND_STATUS_SQ_CTX_POISON", - "NIX_SND_STATUS_SQB_FAULT", - "NIX_SND_STATUS_SQB_POISON", - "NIX_SND_STATUS_HDR_ERR", - "NIX_SND_STATUS_EXT_ERR", - "NIX_SND_STATUS_JUMP_FAULT", - "NIX_SND_STATUS_JUMP_POISON", - 
"NIX_SND_STATUS_CRC_ERR", - "NIX_SND_STATUS_IMM_ERR", - "NIX_SND_STATUS_SG_ERR", - "NIX_SND_STATUS_MEM_ERR", - "NIX_SND_STATUS_INVALID_SUBDC", - "NIX_SND_STATUS_SUBDC_ORDER_ERR", - "NIX_SND_STATUS_DATA_FAULT", - "NIX_SND_STATUS_DATA_POISON", - "NIX_SND_STATUS_NPC_DROP_ACTION", - "NIX_SND_STATUS_LOCK_VIOL", - "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", - "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", - "NIX_SND_STATUS_NPC_MCAST_ABORT", - "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", - "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", - "NIX_SND_STATUS_SEND_STATS_ERR", + [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD", + [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT", + [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON", + [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT", + [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON", + [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR", + [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR", + [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT", + [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON", + [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR", + [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR", + [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR", + [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR", + [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC", + [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR", + [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT", + [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON", + [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION", + [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL", + [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR", + [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR", + [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT", + [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", + [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", + [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT", + [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR", }; static irqreturn_t otx2_q_intr_handler(int irq, void *data) @@ -1238,14 +1239,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) continue; if (val & BIT_ULL(42)) { - netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + netdev_err(pf->netdev, + "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); } else { if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) netdev_err(pf->netdev, "CQ%lld: Doorbell error", qidx); if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) - netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", + netdev_err(pf->netdev, + "CQ%lld: Memory fault on CQE write to LLC/DRAM", qidx); } @@ -1272,7 +1275,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) (val & NIX_SQINT_BITS)); if (val & BIT_ULL(42)) { - netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + netdev_err(pf->netdev, + "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); goto done; } @@ -1282,8 +1286,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) goto chk_mnq_err_dbg; sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", - qidx, sq_op_err_dbg, 
nix_sqoperr_e_str[sq_op_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n", + qidx, sq_op_err_dbg, + nix_sqoperr_e_str[sq_op_err_code], + sq_op_err_code); otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); @@ -1300,16 +1307,21 @@ chk_mnq_err_dbg: goto chk_snd_err_dbg; mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", - qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n", + qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code], + mnq_err_code); otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); chk_snd_err_dbg: snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); if (snd_err_dbg & BIT(44)) { snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", - qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n", + qidx, snd_err_dbg, + nix_snd_status_e_str[snd_err_code], + snd_err_code); otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); } @@ -1589,6 +1601,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) else otx2_cleanup_tx_cqes(pf, cq); } + otx2_free_pending_sqe(pf); otx2_free_sq_res(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index fa37b9f312cae..4e5899d8fa2e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -318,23 +318,23 @@ enum nix_snd_status_e { NIX_SND_STATUS_EXT_ERR = 0x6, NIX_SND_STATUS_JUMP_FAULT = 0x7, NIX_SND_STATUS_JUMP_POISON = 0x8, - NIX_SND_STATUS_CRC_ERR = 0x9, - NIX_SND_STATUS_IMM_ERR = 0x10, - NIX_SND_STATUS_SG_ERR = 0x11, - NIX_SND_STATUS_MEM_ERR = 0x12, - NIX_SND_STATUS_INVALID_SUBDC = 0x13, - NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, - NIX_SND_STATUS_DATA_FAULT = 0x15, - NIX_SND_STATUS_DATA_POISON = 0x16, - NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, - NIX_SND_STATUS_LOCK_VIOL = 0x18, - NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, - NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, - NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, - NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, - NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, - NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, - NIX_SND_STATUS_SEND_STATS_ERR = 0x25, + NIX_SND_STATUS_CRC_ERR = 0x10, + NIX_SND_STATUS_IMM_ERR = 0x11, + NIX_SND_STATUS_SG_ERR = 0x12, + NIX_SND_STATUS_MEM_ERR = 0x13, + NIX_SND_STATUS_INVALID_SUBDC = 0x14, + NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15, + NIX_SND_STATUS_DATA_FAULT = 0x16, + NIX_SND_STATUS_DATA_POISON = 0x17, + NIX_SND_STATUS_NPC_DROP_ACTION = 0x20, + NIX_SND_STATUS_LOCK_VIOL = 0x21, + NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22, + NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23, + NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24, + NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25, + NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26, + NIX_SND_STATUS_SEND_MEM_FAULT = 0x27, + NIX_SND_STATUS_SEND_STATS_ERR = 0x28, NIX_SND_STATUS_MAX, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 53b2a4ef52985..6ee15f3c25ede 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q void otx2_cleanup_tx_cqes(struct otx2_nic 
*pfvf, struct otx2_cq_queue *cq) { + int tx_pkts = 0, tx_bytes = 0; struct sk_buff *skb = NULL; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; + struct netdev_queue *txq; int processed_cqe = 0; struct sg_list *sg; int qidx; @@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { + tx_bytes += skb->len; + tx_pkts++; otx2_dma_unmap_skb_frags(pfvf, sg); dev_kfree_skb_any(skb); sg->skb = (u64)NULL; } } + if (likely(tx_pkts)) { + if (qidx >= pfvf->hw.tx_queues) + qidx -= pfvf->hw.xdp_queues; + txq = netdev_get_tx_queue(pfvf->netdev, qidx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); @@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) return err; } +void otx2_free_pending_sqe(struct otx2_nic *pfvf) +{ + int tx_pkts = 0, tx_bytes = 0; + struct sk_buff *skb = NULL; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + struct sg_list *sg; + int sq_idx, sqe; + + for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { + sq = &pfvf->qset.sq[sq_idx]; + for (sqe = 0; sqe < sq->sqe_cnt; sqe++) { + sg = &sq->sg[sqe]; + skb = (struct sk_buff *)sg->skb; + if (skb) { + tx_bytes += skb->len; + tx_pkts++; + otx2_dma_unmap_skb_frags(pfvf, sg); + dev_kfree_skb_any(skb); + sg->skb = (u64)NULL; + } + } + + if (!tx_pkts) + continue; + txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + tx_pkts = 0; + tx_bytes = 0; + } +} + static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, int len, int *offset) { diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h index 47ea69feb3b24..f87ab9b8a5901 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -64,8 +64,8 @@ struct mtk_wdma_desc { #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */ +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index e2aced7ab4547..95f63fcf4ba1f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks) * is 2^ACL_MAX_BF_LOG */ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG); - bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks), + bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)), GFP_KERNEL); if (!bf) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 361b90007148b..0c76c162b8a9f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c 
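The octeontx2 hunks above convert the NIX send-status string table to designated initializers because the corrected status codes in nix_snd_status_e are sparse (for example 0x8 jumps to 0x10, and 0x17 to 0x20), so a purely positional array would pair codes with the wrong names once the enum is renumbered. Below is a minimal userspace sketch of that pattern, not driver code: the demo_* enum, tables and lookup helper are hypothetical, and the bounds/NULL fallback belongs to the sketch rather than to the driver.

/* Sparse status codes, loosely modelled on the renumbered enum above. */
#include <stdio.h>

enum demo_snd_status {
	DEMO_SND_STATUS_GOOD	= 0x0,
	DEMO_SND_STATUS_HDR_ERR	= 0x5,
	DEMO_SND_STATUS_CRC_ERR	= 0x10,	/* gap: 0x6..0xf are unused */
	DEMO_SND_STATUS_MAX,
};

/* Positional initialization puts "DEMO_SND_STATUS_CRC_ERR" at index 2, so
 * indexing this table with the real hardware code 0x10 reads out of bounds.
 */
static const char *demo_names_positional[] = {
	"DEMO_SND_STATUS_GOOD",
	"DEMO_SND_STATUS_HDR_ERR",
	"DEMO_SND_STATUS_CRC_ERR",
};

/* Designated initialization keys each string to its enum value; the unused
 * slots stay NULL and can be caught at lookup time.
 */
static const char *demo_names[DEMO_SND_STATUS_MAX] = {
	[DEMO_SND_STATUS_GOOD]		= "DEMO_SND_STATUS_GOOD",
	[DEMO_SND_STATUS_HDR_ERR]	= "DEMO_SND_STATUS_HDR_ERR",
	[DEMO_SND_STATUS_CRC_ERR]	= "DEMO_SND_STATUS_CRC_ERR",
};

static const char *demo_status_name(unsigned int code)
{
	if (code >= DEMO_SND_STATUS_MAX || !demo_names[code])
		return "UNKNOWN";
	return demo_names[code];
}

int main(void)
{
	/* Prints "err 0x10 = DEMO_SND_STATUS_CRC_ERR" */
	printf("err %#x = %s\n", (unsigned int)DEMO_SND_STATUS_CRC_ERR,
	       demo_status_name(DEMO_SND_STATUS_CRC_ERR));
	(void)demo_names_positional;
	return 0;
}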
@@ -2582,9 +2582,13 @@ static void rtl_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_PROMISC) { rx_mode |= AcceptAllPhys; + } else if (!(dev->flags & IFF_MULTICAST)) { + rx_mode &= ~AcceptMulticast; } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || dev->flags & IFF_ALLMULTI || - tp->mac_version == RTL_GIGA_MAC_VER_35) { + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_48) { /* accept all multicasts */ } else if (netdev_mc_empty(dev)) { rx_mode &= ~AcceptMulticast; @@ -4596,7 +4600,11 @@ static void r8169_phylink_handler(struct net_device *ndev) if (netif_carrier_ok(ndev)) { rtl_link_chg_patch(tp); pm_request_resume(d); + netif_wake_queue(tp->dev); } else { + /* In few cases rx is broken after link-down otherwise */ + if (rtl_is_8125(tp)) + rtl_reset_work(tp); pm_runtime_idle(d); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 7a8f47e7b728b..a4e8b498dea96 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -259,7 +259,7 @@ ((val) << XGMAC_PPS_MINIDX(x)) #define XGMAC_PPSCMD_START 0x2 #define XGMAC_PPSCMD_STOP 0x5 -#define XGMAC_PPSEN0 BIT(4) +#define XGMAC_PPSENx(x) BIT(4 + (x) * 8) #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) #define XGMAC_TRGTBUSY0 BIT(31) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f352be269deb5..453e88b75be08 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -1178,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index, val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START); val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START); - val |= XGMAC_PPSEN0; + + /* XGMAC Core has 4 PPS outputs at most. + * + * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for + * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default, + * and can not be switched to Fixed mode, since PPSEN{1,2,3} are + * read-only reserved to 0. + * But we always set PPSEN{1,2,3} do not make things worse ;-) + * + * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must + * be set, or the PPS outputs stay in Fixed PPS mode by default. 
+ */ + val |= XGMAC_PPSENx(index); writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 4cf2a52e43783..3025e9c189702 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns) if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]); - writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); + writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); } static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 50d7eacfec582..87e67121477cb 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2332,7 +2332,7 @@ spider_net_alloc_card(void) struct spider_net_card *card; netdev = alloc_etherdev(struct_size(card, darray, - tx_descriptors + rx_descriptors)); + size_add(tx_descriptors, rx_descriptors))); if (!netdev) return NULL; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index b22596b18ee8c..b1919278e931f 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -630,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk) gtp->sk0 = NULL; else gtp->sk1u = NULL; - udp_sk(sk)->encap_type = 0; + WRITE_ONCE(udp_sk(sk)->encap_type, 0); rcu_assign_sk_user_data(sk, NULL); release_sock(sk); sock_put(sk); @@ -682,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); - switch (udp_sk(sk)->encap_type) { + switch (READ_ONCE(udp_sk(sk)->encap_type)) { case UDP_ENCAP_GTP0: netdev_dbg(gtp->dev, "received GTP0 packet\n"); ret = gtp0_udp_encap_recv(gtp, skb); diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index c0c49f1813673..21e9cac731218 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -441,12 +441,12 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) err = ip_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out; err: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out: return ret; @@ -482,12 +482,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) err = ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out; err: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out: return ret; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1b55928e89b8a..57c79f5f29916 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev, s->rx_dropped = rx_errs; s->tx_dropped = tx_drps; } + s->tx_errors = DEV_STATS_READ(dev, tx_errors); } static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index c5cd4551c67ca..9663050a852d8 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3657,9 +3657,9 @@ static 
void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); - s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); - s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); + s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); + s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); + s->rx_errors = DEV_STATS_READ(dev, rx_errors); } static int macsec_get_iflink(const struct net_device *dev) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index afb20c0ed688d..be18d72cefcce 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2543,7 +2543,7 @@ static int rx_bottom(struct r8152 *tp, int budget) } } - if (list_empty(&tp->rx_done)) + if (list_empty(&tp->rx_done) || work_done >= budget) goto out1; clear_bit(RX_EPROTO, &tp->flags); @@ -2559,6 +2559,15 @@ static int rx_bottom(struct r8152 *tp, int budget) struct urb *urb; u8 *rx_data; + /* A bulk transfer of USB may contain many packets, so the + * total packets may be more than the budget. Deal with all + * packets in the current bulk transfer, and stop handling the + * next bulk transfer until the next schedule, if the budget is + * exhausted. + */ + if (work_done >= budget) + break; + list_del_init(cursor); agg = list_entry(cursor, struct rx_agg, list); @@ -2578,9 +2587,7 @@ static int rx_bottom(struct r8152 *tp, int budget) unsigned int pkt_len, rx_frag_head_sz; struct sk_buff *skb; - /* limit the skb numbers for rx_queue */ - if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000)) - break; + WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000); pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) @@ -2658,9 +2665,10 @@ submit: } } + /* Splice the remaining list back to rx_done for the next schedule */ if (!list_empty(&rx_queue)) { spin_lock_irqsave(&tp->rx_lock, flags); - list_splice_tail(&rx_queue, &tp->rx_done); + list_splice(&rx_queue, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d67f742fbd4c5..0c0be6b872c6a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -81,24 +81,24 @@ struct virtnet_stat_desc { struct virtnet_sq_stats { struct u64_stats_sync syncp; - u64 packets; - u64 bytes; - u64 xdp_tx; - u64 xdp_tx_drops; - u64 kicks; - u64 tx_timeouts; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; }; struct virtnet_rq_stats { struct u64_stats_sync syncp; - u64 packets; - u64 bytes; - u64 drops; - u64 xdp_packets; - u64 xdp_tx; - u64 xdp_redirects; - u64 xdp_drops; - u64 kicks; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) @@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) return; u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; + u64_stats_add(&sq->stats.bytes, bytes); + u64_stats_add(&sq->stats.packets, packets); u64_stats_update_end(&sq->stats.syncp); } @@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; - sq->stats.xdp_tx += n; - sq->stats.xdp_tx_drops += n - nxmit; - sq->stats.kicks += kicks; +
u64_stats_add(&sq->stats.bytes, bytes); + u64_stats_add(&sq->stats.packets, packets); + u64_stats_add(&sq->stats.xdp_tx, n); + u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); + u64_stats_add(&sq->stats.kicks, kicks); u64_stats_update_end(&sq->stats.syncp); virtnet_xdp_put_sq(vi, sq); @@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act; act = bpf_prog_run_xdp(xdp_prog, xdp); - stats->xdp_packets++; + u64_stats_inc(&stats->xdp_packets); switch (act) { case XDP_PASS: return act; case XDP_TX: - stats->xdp_tx++; + u64_stats_inc(&stats->xdp_tx); xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { netdev_dbg(dev, "convert buff to frame failed for xdp\n"); @@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, return act; case XDP_REDIRECT: - stats->xdp_redirects++; + u64_stats_inc(&stats->xdp_redirects); err = xdp_do_redirect(dev, xdp, xdp_prog); if (err) return XDP_DROP; @@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, return skb; err_xdp: - stats->xdp_drops++; + u64_stats_inc(&stats->xdp_drops); err: - stats->drops++; + u64_stats_inc(&stats->drops); put_page(page); xdp_xmit: return NULL; @@ -1253,7 +1253,7 @@ static struct sk_buff *receive_small(struct net_device *dev, struct sk_buff *skb; len -= vi->hdr_len; - stats->bytes += len; + u64_stats_add(&stats->bytes, len); if (unlikely(len > GOOD_PACKET_LEN)) { pr_debug("%s: rx error: len %u exceeds max size %d\n", @@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev, return skb; err: - stats->drops++; + u64_stats_inc(&stats->drops); put_page(page); return NULL; } @@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev, struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); - stats->bytes += len - vi->hdr_len; + u64_stats_add(&stats->bytes, len - vi->hdr_len); if (unlikely(!skb)) goto err; return skb; err: - stats->drops++; + u64_stats_inc(&stats->drops); give_pages(rq, page); return NULL; } @@ -1326,7 +1326,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf, dev->stats.rx_length_errors++; break; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); put_page(page); } @@ -1436,7 +1436,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, goto err; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); offset = buf - page_address(page); @@ -1600,8 +1600,8 @@ err_xdp: put_page(page); mergeable_buf_free(rq, num_buf, dev, stats); - stats->xdp_drops++; - stats->drops++; + u64_stats_inc(&stats->xdp_drops); + u64_stats_inc(&stats->drops); return NULL; } @@ -1625,7 +1625,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); head_skb = NULL; - stats->bytes += len - vi->hdr_len; + u64_stats_add(&stats->bytes, len - vi->hdr_len); if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", @@ -1666,7 +1666,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -1718,7 +1718,7 @@ err_skb: mergeable_buf_free(rq, num_buf, dev, stats); err_buf: - stats->drops++; + u64_stats_inc(&stats->drops); dev_kfree_skb(head_skb); return NULL; } @@ -1985,7 +1985,7 @@ 
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, unsigned long flags; flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); - rq->stats.kicks++; + u64_stats_inc(&rq->stats.kicks); u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } @@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget, struct virtnet_info *vi = rq->vq->vdev->priv; struct virtnet_rq_stats stats = {}; unsigned int len; + int packets = 0; void *buf; int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (stats.packets < budget && + while (packets < budget && (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); - stats.packets++; + packets++; } } else { - while (stats.packets < budget && + while (packets < budget && (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); - stats.packets++; + packets++; } } @@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } } + u64_stats_set(&stats.packets, packets); u64_stats_update_begin(&rq->stats.syncp); for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { size_t offset = virtnet_rq_stats_desc[i].offset; - u64 *item; + u64_stats_t *item, *src; - item = (u64 *)((u8 *)&rq->stats + offset); - *item += *(u64 *)((u8 *)&stats + offset); + item = (u64_stats_t *)((u8 *)&rq->stats + offset); + src = (u64_stats_t *)((u8 *)&stats + offset); + u64_stats_add(item, u64_stats_read(src)); } u64_stats_update_end(&rq->stats.syncp); - return stats.packets; + return packets; } static void virtnet_poll_cleantx(struct receive_queue *rq) @@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); - sq->stats.kicks++; + u64_stats_inc(&sq->stats.kicks); u64_stats_update_end(&sq->stats.syncp); } virtnet_xdp_put_sq(vi, sq); @@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); - sq->stats.kicks++; + u64_stats_inc(&sq->stats.kicks); u64_stats_update_end(&sq->stats.syncp); } } @@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev, do { start = u64_stats_fetch_begin(&sq->stats.syncp); - tpackets = sq->stats.packets; - tbytes = sq->stats.bytes; - terrors = sq->stats.tx_timeouts; + tpackets = u64_stats_read(&sq->stats.packets); + tbytes = u64_stats_read(&sq->stats.bytes); + terrors = u64_stats_read(&sq->stats.tx_timeouts); } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); do { start = u64_stats_fetch_begin(&rq->stats.syncp); - rpackets = rq->stats.packets; - rbytes = rq->stats.bytes; - rdrops = rq->stats.drops; + rpackets = u64_stats_read(&rq->stats.packets); + rbytes = u64_stats_read(&rq->stats.bytes); + rdrops = u64_stats_read(&rq->stats.drops); } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); tot->rx_packets += rpackets; @@ -2855,6 +2858,9 @@ static void virtnet_get_ringparam(struct net_device *dev, ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); } +static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, + u16 vqn, u32 max_usecs, u32 max_packets); + static int virtnet_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -2890,12 
+2896,36 @@ static int virtnet_set_ringparam(struct net_device *dev, err = virtnet_tx_resize(vi, sq, ring->tx_pending); if (err) return err; + + /* Upon disabling and re-enabling a transmit virtqueue, the device must + * set the coalescing parameters of the virtqueue to those configured + * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver + * did not set any TX coalescing parameters, to 0. + */ + err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i), + vi->intr_coal_tx.max_usecs, + vi->intr_coal_tx.max_packets); + if (err) + return err; + + vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs; + vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets; } if (ring->rx_pending != rx_pending) { err = virtnet_rx_resize(vi, rq, ring->rx_pending); if (err) return err; + + /* The reason is same as the transmit virtqueue reset */ + err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i), + vi->intr_coal_rx.max_usecs, + vi->intr_coal_rx.max_packets); + if (err) + return err; + + vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs; + vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets; } } @@ -3164,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); unsigned int idx = 0, start, i, j; const u8 *stats_base; + const u64_stats_t *p; size_t offset; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; - stats_base = (u8 *)&rq->stats; + stats_base = (const u8 *)&rq->stats; do { start = u64_stats_fetch_begin(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { offset = virtnet_rq_stats_desc[j].offset; - data[idx + j] = *(u64 *)(stats_base + offset); + p = (const u64_stats_t *)(stats_base + offset); + data[idx + j] = u64_stats_read(p); } } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); idx += VIRTNET_RQ_STATS_LEN; @@ -3183,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, for (i = 0; i < vi->curr_queue_pairs; i++) { struct send_queue *sq = &vi->sq[i]; - stats_base = (u8 *)&sq->stats; + stats_base = (const u8 *)&sq->stats; do { start = u64_stats_fetch_begin(&sq->stats.syncp); for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { offset = virtnet_sq_stats_desc[j].offset; - data[idx + j] = *(u64 *)(stats_base + offset); + p = (const u64_stats_t *)(stats_base + offset); + data[idx + j] = u64_stats_read(p); } } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); idx += VIRTNET_SQ_STATS_LEN; @@ -3233,6 +3266,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { struct scatterlist sgs_tx, sgs_rx; + int i; vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); @@ -3246,6 +3280,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, /* Save parameters */ vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; + vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; + } vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); @@ -3259,6 +3297,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, /* Save parameters */ vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; + 
for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; + vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; + } return 0; } @@ -3287,27 +3329,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, { int err; - if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) { - err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), - ec->rx_coalesce_usecs, - ec->rx_max_coalesced_frames); - if (err) - return err; - /* Save parameters */ - vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; - vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; - } + err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), + ec->rx_coalesce_usecs, + ec->rx_max_coalesced_frames); + if (err) + return err; - if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) { - err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), - ec->tx_coalesce_usecs, - ec->tx_max_coalesced_frames); - if (err) - return err; - /* Save parameters */ - vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; - vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; - } + vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; + vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; + + err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), + ec->tx_coalesce_usecs, + ec->tx_max_coalesced_frames); + if (err) + return err; + + vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; + vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; return 0; } @@ -3453,7 +3491,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev, } else { ec->rx_max_coalesced_frames = 1; - if (vi->sq[0].napi.weight) + if (vi->sq[queue].napi.weight) ec->tx_max_coalesced_frames = 1; } @@ -3866,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); u64_stats_update_begin(&sq->stats.syncp); - sq->stats.tx_timeouts++; + u64_stats_inc(&sq->stats.tx_timeouts); u64_stats_update_end(&sq->stats.syncp); netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index c071bf5841af6..b328a0599818b 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -9042,6 +9042,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, if (ar->state != ATH11K_STATE_ON) goto err_fallback; + /* Firmware doesn't provide Tx power during CAC hence no need to fetch + * the stats. 
+ */ + if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { + mutex_unlock(&ar->conf_mutex); + return -EAGAIN; + } + req_param.pdev_id = ar->pdev->pdev_id; req_param.stats_id = WMI_REQUEST_PDEV_STAT; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index a5aa1857ec14b..09e65c5e55c4a 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -854,10 +854,16 @@ unsupported_wcn6855_soc: if (ret) goto err_pci_disable_msi; + ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath11k_err(ab, "failed to set irq affinity %d\n", ret); + goto err_pci_disable_msi; + } + ret = ath11k_mhi_register(ab_pci); if (ret) { ath11k_err(ab, "failed to register mhi: %d\n", ret); - goto err_pci_disable_msi; + goto err_irq_affinity_cleanup; } ret = ath11k_hal_srng_init(ab); @@ -878,12 +884,6 @@ unsupported_wcn6855_soc: goto err_ce_free; } - ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); - if (ret) { - ath11k_err(ab, "failed to set irq affinity %d\n", ret); - goto err_free_irq; - } - /* kernel may allocate a dummy vector before request_irq and * then allocate a real vector when request_irq is called. * So get msi_data here again to avoid spurious interrupt @@ -892,20 +892,17 @@ unsupported_wcn6855_soc: ret = ath11k_pci_config_msi_data(ab_pci); if (ret) { ath11k_err(ab, "failed to config msi_data: %d\n", ret); - goto err_irq_affinity_cleanup; + goto err_free_irq; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); - goto err_irq_affinity_cleanup; + goto err_free_irq; } ath11k_qmi_fwreset_from_cold_boot(ab); return 0; -err_irq_affinity_cleanup: - ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); - err_free_irq: ath11k_pcic_free_irq(ab); @@ -918,6 +915,9 @@ err_hal_srng_deinit: err_mhi_unregister: ath11k_mhi_unregister(ab_pci); +err_irq_affinity_cleanup: + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + err_pci_disable_msi: ath11k_pci_free_msi(ab_pci); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index e6e64d437c47a..17da39bd3ab4d 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -3229,7 +3229,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, goto out_unlock; } - if (frag_no > __fls(rx_tid->rx_frag_bitmap)) + if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 8874c815d7faf..16d889fc20433 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -330,8 +330,11 @@ tcl_ring_sel: fail_unmap_dma: dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); - dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, - sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), + DMA_TO_DEVICE); fail_remove_tx_buf: ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 27f4d74a41c80..2788a1b06c17c 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -206,7 +206,7 @@ channel_detector_create(struct 
dfs_pattern_detector *dpd, u16 freq) INIT_LIST_HEAD(&cd->head); cd->freq = freq; - cd->detectors = kmalloc_array(dpd->num_radar_types, + cd->detectors = kcalloc(dpd->num_radar_types, sizeof(*cd->detectors), GFP_ATOMIC); if (cd->detectors == NULL) goto fail; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index b9893b22e41da..42e765fe3cfe1 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = { .ht_params = &iwl_gl_a_ht_params /* - * If the device doesn't support HE, no need to have that many buffers. - * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an * A-MPDU, with additional overhead to account for processing time. */ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_BZ_HE 4096 +#define IWL_NUM_RBDS_BZ_EHT (512 * 16) const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, @@ -160,16 +158,16 @@ const struct iwl_cfg iwl_cfg_bz = { .fw_name_mac = "bz", .uhb_supported = true, IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_EHT, }; const struct iwl_cfg iwl_cfg_gl = { .fw_name_mac = "gl", .uhb_supported = true, IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_EHT, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index ad283fd22e2a2..604e9cef6baac 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = { .ht_params = &iwl_22000_ht_params /* - * If the device doesn't support HE, no need to have that many buffers. - * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an * A-MPDU, with additional overhead to account for processing time. */ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_SC_HE 4096 +#define IWL_NUM_RBDS_SC_EHT (512 * 16) const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_SC, @@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", .uhb_supported = true, IWL_DEVICE_SC, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_SC_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_SC_EHT, }; MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 60a7b61d59aa3..ca1daec641c4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -3,6 +3,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
* Copyright (C) 2019 Intel Corporation + * Copyright (C) 2023 Intel Corporation *****************************************************************************/ #include @@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) iwlagn_check_ratid_empty(priv, sta_id, tid); } - iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false); freed = 0; @@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn, - &reclaimed_skbs); + &reclaimed_skbs, false); IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index ba538d70985f4..39bee9c00e071 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -13,6 +13,7 @@ #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0 #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0) #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16) +#define IWL_FW_INI_PRESET_DISABLE 0xff /** * struct iwl_fw_ini_hcmd diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 241a9e3f2a1a7..f45f645ca6485 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -86,10 +86,7 @@ enum iwl_nvm_type { #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) -#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6) -#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \ - IWL_TX_CSUM_NETIF_FLAGS_BZ | \ - NETIF_F_RXCSUM) +#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM) /* Antenna presence definitions */ #define ANT_NONE 0x0 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 128059ca77e60..06fb7d6653905 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ @@ -10,7 +10,8 @@ #include #include -#define IWL_DBG_TLV_MAX_PRESET 15 +#define IWL_DBG_TLV_MAX_PRESET 15 +#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) /** * struct iwl_dbg_tlv_node - debug TLV node diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 3d87d26845e74..fb5e254757e71 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1795,6 +1795,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) #endif drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans); + if (iwlwifi_mod_params.enable_ini != ENABLE_INI) { + /* We have a non-default value in the module parameter, + * take its value + */ + drv->trans->dbg.domains_bitmap &= 0xffff; + if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) { + if (iwlwifi_mod_params.enable_ini > ENABLE_INI) { + IWL_ERR(trans, + "invalid enable_ini module parameter 
value: max = %d, using 0 instead\n", + ENABLE_INI); + iwlwifi_mod_params.enable_ini = 0; + } + drv->trans->dbg.domains_bitmap = + BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini); + } + } ret = iwl_request_firmware(drv, true); if (ret) { @@ -1843,8 +1859,6 @@ void iwl_drv_stop(struct iwl_drv *drv) kfree(drv); } -#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) - /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { .fw_restart = true, @@ -1964,38 +1978,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); -static int enable_ini_set(const char *arg, const struct kernel_param *kp) -{ - int ret = 0; - bool res; - __u32 new_enable_ini; - - /* in case the argument type is a number */ - ret = kstrtou32(arg, 0, &new_enable_ini); - if (!ret) { - if (new_enable_ini > ENABLE_INI) { - pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini); - return -EINVAL; - } - goto out; - } - - /* in case the argument type is boolean */ - ret = kstrtobool(arg, &res); - if (ret) - return ret; - new_enable_ini = (res ? ENABLE_INI : 0); - -out: - iwlwifi_mod_params.enable_ini = new_enable_ini; - return 0; -} - -static const struct kernel_param_ops enable_ini_ops = { - .set = enable_ini_set -}; - -module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644); +module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444); MODULE_PARM_DESC(enable_ini, "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined," "Debug INI TLV FW debug infrastructure (default: 16)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 6dd381ff0f9e7..2a63968b0e55b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -348,8 +348,8 @@ #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 -#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) -#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4) #define WFPM_OTP_BZ_BNJ_JACKET_BIT 5 #define WFPM_OTP_BZ_BNJ_CDB_BIT 4 #define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 3b6b0e03037f1..168eda2132fb8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -56,6 +56,10 @@ * 6) Eventually, the free function will be called. 
*/ +/* default preset 0 (start from bit 16)*/ +#define IWL_FW_DBG_DOMAIN_POS 16 +#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS) + #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ @@ -584,7 +588,7 @@ struct iwl_trans_ops { int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, - struct sk_buff_head *skbs); + struct sk_buff_head *skbs, bool is_flush); void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr); @@ -1269,14 +1273,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, } static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, - int ssn, struct sk_buff_head *skbs) + int ssn, struct sk_buff_head *skbs, + bool is_flush) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } - trans->ops->reclaim(trans, queue, ssn, skbs); + trans->ops->reclaim(trans, queue, ssn, skbs, is_flush); } static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index f6488b4bbe68b..be2602d8c5bfa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2012,6 +2012,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, if (IS_ERR(key_config)) return false; ieee80211_set_key_rx_seq(key_config, 0, &seq); + + if (key_config->keyidx == 4 || key_config->keyidx == 5) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id = vif->active_links ? __ffs(vif->active_links) : 0; + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_id]; + + mvm_link->igtk = key_config; + } + return true; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index b49781d1a07a7..10b9219b3bfd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include #include @@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, struct iwl_mvm_pasn_sta *sta) { list_del(&sta->list); - iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); + + if (iwl_mvm_has_mld_api(mvm->fw)) + iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id); + else + iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); + iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta); kfree(sta); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index ace82e2c5bd91..6e1ad65527d12 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned int link_id = link_conf->link_id; struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; struct iwl_link_config_cmd cmd = {}; - struct iwl_mvm_phy_ctxt *phyctxt; if (WARN_ON_ONCE(!link_info)) return -EINVAL; @@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd.link_id = cpu_to_le32(link_info->fw_link_id); 
cmd.mac_id = cpu_to_le32(mvmvif->id); cmd.spec_link_id = link_conf->link_id; - /* P2P-Device already has a valid PHY context during add */ - phyctxt = link_info->phy_ctxt; - if (phyctxt) - cmd.phy_id = cpu_to_le32(phyctxt->id); - else - cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + WARN_ON_ONCE(link_info->phy_ctxt); + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); @@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, flags_mask |= LINK_FLG_MU_EDCA_CW; } - if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be) - cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing); - else - /* This flag can be set only if the MAC has eht support */ - changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) { + if (iwlwifi_mod_params.disable_11be || + !link_conf->eht_support) + changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + else + cmd.puncture_mask = + cpu_to_le16(link_conf->eht_puncturing); + } cmd.bss_color = link_conf->he_bss_color.color; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 7369a45f7f2bd..9c97691e60384 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; + mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; + /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; @@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5918c1f2b10c3..a25ea638229b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1589,32 +1589,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - /* - * P2P_DEVICE interface does not have a channel context assigned to it, - * so a dedicated PHY context is allocated to it and the corresponding - * MAC context is bound to it at this stage. 
- */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - - mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->deflink.phy_ctxt) { - ret = -ENOSPC; - goto out_free_bf; - } - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (ret) - goto out_unref_phy; - - ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Save a pointer to p2p device vif, so it can later be used to - * update the p2p device MAC when a GO is started/stopped */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; - } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, @@ -1643,11 +1619,6 @@ out: goto out_unlock; - out_unbind: - iwl_mvm_binding_remove_vif(mvm, vif); - out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | @@ -1744,12 +1715,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (iwl_mvm_mac_remove_interface_common(hw, vif)) goto out; + /* Before the interface removal, mac80211 would cancel the ROC, and the + * ROC worker would be scheduled if needed. The worker would be flushed + * in iwl_mvm_prepare_mac_removal() and thus at this point there is no + * binding etc. so nothing needs to be done here. + */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (mvmvif->deflink.phy_ctxt) { + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } mvm->p2p_device_vif = NULL; - iwl_mvm_rm_p2p_bcast_sta(mvm, vif); - iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - mvmvif->deflink.phy_ctxt = NULL; } iwl_mvm_mac_ctxt_remove(mvm, vif); @@ -3791,6 +3767,12 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + /* MFP is set by default before the station is authorized. + * Clear it here in case it's not used. + */ + if (!sta->mfp) + return callbacks->update_sta(mvm, vif, sta); + return 0; } @@ -4531,30 +4513,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) return ret; } -static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt) +static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret = 0; + int ret; lockdep_assert_held(&mvm->mutex); - /* Unbind the P2P_DEVICE from the current PHY context, - * and if the PHY context is not used remove it. - */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) return ret; - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - - /* Bind the P2P_DEVICE to the current PHY Context */ - mvmvif->deflink.phy_ctxt = new_phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - WARN(ret, "Failed binding P2P_DEVICE\n"); - return ret; + /* The station and queue allocation must be done only after the binding + * is done, as otherwise the FW might incorrectly configure its state. 
+ */ + return iwl_mvm_add_p2p_bcast_sta(mvm, vif); } static int iwl_mvm_roc(struct ieee80211_hw *hw, @@ -4565,7 +4537,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, { static const struct iwl_mvm_roc_ops ops = { .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20, - .switch_phy_ctxt = iwl_mvm_roc_switch_binding, + .link = iwl_mvm_roc_link, }; return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); @@ -4581,7 +4553,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; - bool band_change_removal; int ret, i; u32 lmac_id; @@ -4610,82 +4581,61 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* handle below */ break; default: - IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); + IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type); ret = -EINVAL; goto out_unlock; } + /* Try using a PHY context that is already in use */ for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; - if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt) + if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt) continue; - if (phy_ctxt->ref && channel == phy_ctxt->channel) { - ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); - if (ret) - goto out_unlock; + if (channel == phy_ctxt->channel) { + if (mvmvif->deflink.phy_ctxt) + iwl_mvm_phy_ctxt_unref(mvm, + mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = phy_ctxt; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - goto schedule_time_event; + goto link_and_start_p2p_roc; } } - /* Need to update the PHY context only if the ROC channel changed */ - if (channel == mvmvif->deflink.phy_ctxt->channel) - goto schedule_time_event; - - cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - - /* - * Check if the remain-on-channel is on a different band and that - * requires context removal, see iwl_mvm_phy_ctxt_changed(). If - * so, we'll need to release and then re-configure here, since we - * must not remove a PHY context that's part of a binding. + /* If the currently used PHY context is configured with a matching + * channel use it */ - band_change_removal = - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && - mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band; - - if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) { - /* - * Change the PHY context configuration as it is currently - * referenced only by the P2P Device MAC (and we can modify it) - */ - ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt, - &chandef, 1, 1); - if (ret) - goto out_unlock; + if (mvmvif->deflink.phy_ctxt) { + if (channel == mvmvif->deflink.phy_ctxt->channel) + goto link_and_start_p2p_roc; } else { - /* - * The PHY context is shared with other MACs (or we're trying to - * switch bands), so remove the P2P Device from the binding, - * allocate an new PHY context and create a new binding. 
- */ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out_unlock; } - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, - 1, 1); - if (ret) { - IWL_ERR(mvm, "Failed to change PHY context\n"); - goto out_unlock; - } + mvmvif->deflink.phy_ctxt = phy_ctxt; + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + } - ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); - if (ret) - goto out_unlock; + /* Configure the PHY context */ + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, + 1, 1); + if (ret) { + IWL_ERR(mvm, "Failed to change PHY context\n"); + goto out_unlock; } -schedule_time_event: - /* Schedule the time events */ - ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); +link_and_start_p2p_roc: + ret = ops->link(mvm, vif); + if (ret) + goto out_unlock; + ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); @@ -5629,7 +5579,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } if (drop) { - if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id, + mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } else { if (iwl_mvm_has_new_tx_api(mvm)) @@ -5651,22 +5602,21 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int i; + struct iwl_mvm_link_sta *mvm_link_sta; + struct ieee80211_link_sta *link_sta; + int link_id; mutex_lock(&mvm->mutex); - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { - struct iwl_mvm_sta *mvmsta; - struct ieee80211_sta *tmp; - - tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (tmp != sta) + for_each_sta_active_link(vif, sta, link_sta, link_id) { + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!mvm_link_sta) continue; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id, + mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c index 2c9f2f71b083a..ea3e9e9c6e26c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c @@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm, return 0; } - /* AP group keys are per link and should be on the mcast STA */ + /* AP group keys are per link and should be on the mcast/bcast STA */ if (vif->type == NL80211_IFTYPE_AP && - !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + /* IGTK/BIGTK to bcast STA */ + if (keyconf->keyidx >= 4) + return BIT(link_info->bcast_sta.sta_id); + /* GTK for data to mcast STA */ return BIT(link_info->mcast_sta.sta_id); + } /* for client mode use the AP STA also for group keys */ if (!sta && vif->type == NL80211_IFTYPE_STATION) @@ -91,7 +96,12 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, if (!sta && vif->type == NL80211_IFTYPE_STATION) sta = mvmvif->ap_sta; - if 
(!IS_ERR_OR_NULL(sta) && sta->mfp) + /* Set the MFP flag also for an AP interface where the key is an IGTK + * key as in such a case the station would always be NULL + */ + if ((!IS_ERR_OR_NULL(sta) && sta->mfp) || + (vif->type == NL80211_IFTYPE_AP && + (keyconf->keyidx == 4 || keyconf->keyidx == 5))) flags |= IWL_SEC_KEY_FLAG_MFP; return flags; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index b719843e94576..2ddb6f763a0b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -56,43 +56,15 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - /* - * P2P_DEVICE interface does not have a channel context assigned to it, - * so a dedicated PHY context is allocated to it and the corresponding - * MAC context is bound to it at this stage. - */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->deflink.phy_ctxt) { - ret = -ENOSPC; - goto out_free_bf; - } - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_unref_phy; - - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE | - LINK_CONTEXT_MODIFY_RATES_INFO, - true); - if (ret) - goto out_remove_link; - - ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); - if (ret) - goto out_remove_link; + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; - /* Save a pointer to p2p device vif, so it can later be used to - * update the p2p device MAC when a GO is started/stopped - */ + /* Save a pointer to p2p device vif, so it can later be used to + * update the p2p device MAC when a GO is started/stopped + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; - } else { - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_free_bf; - } ret = iwl_mvm_power_update_mac(mvm); if (ret) @@ -119,10 +91,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_unlock; - out_remove_link: - iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); - out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; @@ -130,7 +98,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: - mvmvif->deflink.phy_ctxt = NULL; mvmvif->link[0] = NULL; iwl_mvm_mld_mac_ctxt_remove(mvm, vif); out_unlock: @@ -185,14 +152,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); + /* Before the interface removal, mac80211 would cancel the ROC, and the + * ROC worker would be scheduled if needed. The worker would be flushed + * in iwl_mvm_prepare_mac_removal() and thus at this point the link is + * not active. So need only to remove the link. 
+ */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (mvmvif->deflink.phy_ctxt) { + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } mvm->p2p_device_vif = NULL; - - /* P2P device uses only one link */ - iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf); - iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - mvmvif->deflink.phy_ctxt = NULL; + iwl_mvm_remove_link(mvm, vif, &vif->bss_conf); } else { iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); } @@ -653,7 +624,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, } /* Update EHT Puncturing info */ - if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht) + if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc) link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; if (link_changes) { @@ -968,36 +939,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, return 0; } -static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt) +static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret = 0; + int ret; lockdep_assert_held(&mvm->mutex); - /* Inorder to change the phy_ctx of a link, the link needs to be - * inactive. Therefore, first deactivate the link, then change its - * phy_ctx, and then activate it again. - */ - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE, false); - if (WARN(ret, "Failed to deactivate link\n")) + /* The PHY context ID might have changed so need to set it */ + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); + if (WARN(ret, "Failed to set PHY context ID\n")) return ret; - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - - mvmvif->deflink.phy_ctxt = new_phy_ctxt; + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); - if (WARN(ret, "Failed to deactivate link\n")) + if (WARN(ret, "Failed linking P2P_DEVICE\n")) return ret; - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE, true); - WARN(ret, "Failed binding P2P_DEVICE\n"); - return ret; + /* The station and queue allocation must be done only after the linking + * is done, as otherwise the FW might incorrectly configure its state. 
+ */ + return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); } static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1006,7 +970,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { static const struct iwl_mvm_roc_ops ops = { .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta, - .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx, + .link = iwl_mvm_mld_roc_link, }; return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); @@ -1089,9 +1053,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, } } - if (err) - goto out_err; - err = 0; if (new_links == 0) { mvmvif->link[0] = &mvmvif->deflink; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 524852cf5cd2d..56f51344c193c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -347,7 +347,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, return -EINVAL; if (flush) - iwl_mvm_flush_sta(mvm, int_sta, true); + iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk); iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b18c91c5dd5d1..218f3bc31104b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1658,7 +1658,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status); static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); -int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); +int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids); /* Utils to extract sta related data */ @@ -1942,13 +1942,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, * * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta * for Hot Spot 2.0 - * @switch_phy_ctxt: pointer to the function that switches a vif from one - * phy_ctx to another + * @link: For a P2P Device interface, pointer to a function that links the + * MAC/Link to the PHY context */ struct iwl_mvm_roc_ops { int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id); - int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt); + int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif); }; int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3b9a343d4f672..2c231f4623893 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2059,7 +2059,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, *status = IWL_MVM_QUEUE_FREE; } - if (vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION && + mvm_link->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->cfg.assoc) return true; @@ -2097,7 +2098,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); + ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id, + mvm_sta->tfd_queue_msk); if (ret) return ret; if 
(iwl_mvm_has_new_tx_api(mvm)) { @@ -2408,7 +2410,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, + mvmvif->deflink.bcast_sta.tfd_queue_msk); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -2664,7 +2667,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id, + mvmvif->deflink.mcast_sta.tfd_queue_msk); iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id, &mvmvif->deflink.cab_queue, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 5f0e7144a951c..158266719ffd7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -78,9 +78,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) */ if (!WARN_ON(!mvm->p2p_device_vif)) { - mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, - true); + struct ieee80211_vif *vif = mvm->p2p_device_vif; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, + mvmvif->deflink.bcast_sta.tfd_queue_msk); + + if (mvm->mld_api_is_used) { + iwl_mvm_mld_rm_bcast_sta(mvm, vif, + &vif->bss_conf); + + iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, + false); + } else { + iwl_mvm_rm_p2p_bcast_sta(mvm, vif); + iwl_mvm_binding_remove_vif(mvm, vif); + } + + /* Do not remove the PHY context as removing and adding + * a PHY context has timing overheads. Leaving it + * configured in FW would be useful in case the next ROC + * is with the same channel. + */ } } @@ -93,7 +113,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) */ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ - iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id, + mvm->aux_sta.tfd_queue_msk); if (mvm->mld_api_is_used) { iwl_mvm_mld_rm_aux_sta(mvm); @@ -880,8 +901,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; - ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_p2p_roc_finished(mvm); + ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 898dca3936435..2ede69132fee9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1599,7 +1599,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ - iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); + iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); @@ -1951,7 +1951,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, * block-ack window (we assume that they've been successfully * transmitted ... 
if not, it's too late anyway). */ - iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); + iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -2293,24 +2293,10 @@ free_rsp: return ret; } -int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) +int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask) { - u32 sta_id, tfd_queue_msk; - - if (internal) { - struct iwl_mvm_int_sta *int_sta = sta; - - sta_id = int_sta->sta_id; - tfd_queue_msk = int_sta->tfd_queue_msk; - } else { - struct iwl_mvm_sta *mvm_sta = sta; - - sta_id = mvm_sta->deflink.sta_id; - tfd_queue_msk = mvm_sta->tfd_queue_msk; - } - if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff); - return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk); + return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index fa46dad5fd680..2ecf6db95fb31 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 198933f853c55..583d1011963ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1263,6 +1263,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 340240b8954f6..ca74b1b63cac1 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1575,7 +1575,7 @@ void iwl_txq_progress(struct iwl_txq *txq) /* Frees buffers until index _not_ inclusive */ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs) + struct sk_buff_head *skbs, bool is_flush) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; int tfd_num, read_ptr, last_to_free; @@ -1650,9 +1650,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (iwl_txq_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; + struct sk_buff *skb; __skb_queue_head_init(&overflow_skbs); - skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, + is_flush ? skbs : &overflow_skbs); /* * We are going to transmit from the overflow queue. 
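
The iwl_txq_reclaim() hunk above threads a new is_flush flag down from iwl_mvm_tx_reclaim(), so that when a station is being flushed the frames still parked on the software overflow queue are spliced straight into the caller's reclaim list instead of being resubmitted to the hardware. The standalone sketch below is not kernel code and the toy_* names are invented purely for illustration; it only models that splice decision under the assumption that both destinations are plain lists.

/*
 * Toy model of the is_flush branch added to iwl_txq_reclaim(): on a flush,
 * overflow frames join the reclaimed list; otherwise they are handed back
 * for retransmission.
 */
#include <stdio.h>

#define QLEN 8

struct toy_queue {
        int frames[QLEN];
        int count;
};

static void toy_splice(struct toy_queue *dst, struct toy_queue *src)
{
        for (int i = 0; i < src->count && dst->count < QLEN; i++)
                dst->frames[dst->count++] = src->frames[i];
        src->count = 0;
}

static void toy_reclaim_overflow(struct toy_queue *reclaimed,
                                 struct toy_queue *overflow,
                                 struct toy_queue *retransmit,
                                 int is_flush)
{
        /* Same destination selection as the kernel hunk above. */
        toy_splice(is_flush ? reclaimed : retransmit, overflow);
}

int main(void)
{
        struct toy_queue reclaimed  = { .frames = { 1, 2 }, .count = 2 };
        struct toy_queue overflow   = { .frames = { 3, 4 }, .count = 2 };
        struct toy_queue retransmit = { .count = 0 };

        toy_reclaim_overflow(&reclaimed, &overflow, &retransmit, 1 /* flush */);
        printf("flushed: reclaimed=%d retransmit=%d\n",
               reclaimed.count, retransmit.count);
        return 0;
}

Keeping the destination choice in one place means a flush completes everything queued behind the flushed frames in a single pass, rather than draining the overflow queue back onto a TX queue that is about to be emptied again.
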
@@ -1672,8 +1674,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, */ spin_unlock_bh(&txq->lock); - while (!skb_queue_empty(&overflow_skbs)) { - struct sk_buff *skb = __skb_dequeue(&overflow_skbs); + while ((skb = __skb_dequeue(&overflow_skbs))) { struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index b7d3808588bfb..4c09bc1930fa1 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -179,7 +179,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs); void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs); + struct sk_buff_head *skbs, bool is_flush); void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index dc8f4e157eb29..6ca7b494c2c26 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -330,9 +330,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, if (e->txwi == DMA_DUMMY_DATA) e->txwi = NULL; - if (e->skb == DMA_DUMMY_DATA) - e->skb = NULL; - *prev_e = *e; memset(e, 0, sizeof(*e)); } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d158320bc15db..dbab400969202 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1697,11 +1697,16 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, } EXPORT_SYMBOL_GPL(mt76_init_queue); -u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx) +u16 mt76_calculate_default_rate(struct mt76_phy *phy, + struct ieee80211_vif *vif, int rateidx) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? 
+ &mvif->ctx->def : + &phy->chandef; int offset = 0; - if (phy->chandef.chan->band != NL80211_BAND_2GHZ) + if (chandef->chan->band != NL80211_BAND_2GHZ) offset = 4; /* pick the lowest rate for hidden nodes */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index e8757865a3d06..dae5410d67e83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -709,6 +709,7 @@ struct mt76_vif { u8 basic_rates_idx; u8 mcast_rates_idx; u8 beacon_rates_idx; + struct ieee80211_chanctx_conf *ctx; }; struct mt76_phy { @@ -1100,7 +1101,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, int ring_base, u32 flags); -u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx); +u16 mt76_calculate_default_rate(struct mt76_phy *phy, + struct ieee80211_vif *vif, int rateidx); static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, int n_desc, int ring_base, u32 flags) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index 888678732f290..c223f7c19e6da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -9,6 +9,23 @@ struct beacon_bc_data { int count[MT7603_MAX_INTERFACES]; }; +static void +mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev) +{ + if (dev->beacon_check % 5 != 4) + return; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN); + mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET); + mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET); + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN); + + mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS); + mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE); + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE); + mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS); +} + static void mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { @@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) struct mt76_dev *mdev = &dev->mt76; struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; struct sk_buff *skb = NULL; + u32 om_idx = mvif->idx; + u32 val; if (!(mdev->beacon_mask & BIT(mvif->idx))) return; @@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!skb) return; - mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], - MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL); + if (om_idx) + om_idx |= 0x10; + val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE | + FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) | + FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | + FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8); spin_lock_bh(&dev->ps_lock); - mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | - FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | - FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, - dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) | - FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | - FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); - if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) + mt76_wr(dev, MT_DMA_FQCR0, val | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN)); + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) { dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; + goto out; + } + + mt76_wr(dev, MT_DMA_FQCR0, val | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, 
MT_TX_HW_QUEUE_BMC)); + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) { + dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; + goto out; + } + mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], + MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL); + +out: spin_unlock_bh(&dev->ps_lock); } @@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) data.dev = dev; __skb_queue_head_init(&data.q); + /* Flush all previous CAB queue packets and beacons */ + mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); + + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false); + + if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0) + dev->beacon_check++; + else + dev->beacon_check = 0; + mt7603_mac_stuck_beacon_recovery(dev); + q = dev->mphy.q_tx[MT_TXQ_BEACON]; spin_lock(&q->lock); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), @@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_queue_kick(dev, q); spin_unlock(&q->lock); - /* Flush all previous CAB queue packets */ - mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); - - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false); - mt76_csa_check(mdev); if (mdev->csa_complete) - goto out; + return; q = dev->mphy.q_tx[MT_TXQ_CAB]; do { @@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) skb_queue_len(&data.q) < 8); if (skb_queue_empty(&data.q)) - goto out; + return; for (i = 0; i < ARRAY_SIZE(data.tail); i++) { if (!data.tail[i]) @@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) MT_WF_ARB_CAB_START_BSSn(0) | (MT_WF_ARB_CAB_START_BSS0n(1) * ((1 << (MT7603_MAX_INTERFACES - 1)) - 1))); - -out: - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false); - if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask)) - dev->beacon_check++; } void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c index 60a996b63c0c0..915b8349146af 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c @@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance) } if (intr & MT_INT_RX_DONE(0)) { + dev->rx_pse_check = 0; mt7603_irq_disable(dev, MT_INT_RX_DONE(0)); napi_schedule(&dev->mt76.napi[0]); } if (intr & MT_INT_RX_DONE(1)) { + dev->rx_pse_check = 0; mt7603_irq_disable(dev, MT_INT_RX_DONE(1)); napi_schedule(&dev->mt76.napi[1]); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 99ae080502d80..cf21d06257e53 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1441,15 +1441,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt7603_beacon_set_timer(dev, -1, 0); - if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || - dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY || - dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK || - dev->cur_reset_cause == RESET_CAUSE_TX_HANG) - mt7603_pse_reset(dev); - - if (dev->reset_cause[RESET_CAUSE_RESET_FAILED]) - goto skip_dma_reset; - mt7603_mac_stop(dev); mt76_clear(dev, MT_WPDMA_GLO_CFG, @@ -1459,28 +1450,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt7603_irq_disable(dev, mask); - mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); - 
mt7603_pse_client_reset(dev); mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); for (i = 0; i < __MT_TXQ_MAX; i++) mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + mt7603_dma_sched_reset(dev); + + mt76_tx_status_check(&dev->mt76, true); + mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); } - mt76_tx_status_check(&dev->mt76, true); + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || + dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY) + mt7603_pse_reset(dev); - mt7603_dma_sched_reset(dev); + if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) { + mt7603_mac_dma_start(dev); - mt7603_mac_dma_start(dev); + mt7603_irq_enable(dev, mask); - mt7603_irq_enable(dev, mask); + clear_bit(MT76_RESET, &dev->mphy.state); + } -skip_dma_reset: - clear_bit(MT76_RESET, &dev->mphy.state); mutex_unlock(&dev->mt76.mutex); mt76_worker_enable(&dev->mt76.tx_worker); @@ -1570,20 +1565,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev) { u32 addr, val; - if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES) - return true; - if (mt7603_rx_fifo_busy(dev)) - return false; + goto out; addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS); mt76_wr(dev, addr, 3); val = mt76_rr(dev, addr) >> 16; - if (is_mt7628(dev) && (val & 0x4001) == 0x4001) - return true; + if (!(val & BIT(0))) + return false; - return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001; + if (is_mt7628(dev)) + val &= 0xa000; + else + val &= 0x8000; + if (!val) + return false; + +out: + if (mt76_rr(dev, MT_INT_SOURCE_CSR) & + (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1))) + return false; + + return true; } static bool diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h index a39c9a0fcb1cb..524bceb8e9581 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h @@ -469,6 +469,11 @@ enum { #define MT_WF_SEC_BASE 0x21a00 #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs)) +#define MT_WF_CFG_OFF_BASE 0x21e00 +#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs)) +#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004) +#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4) + #define MT_SEC_SCR MT_WF_SEC(0x004) #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 8d745c9730c72..955974a82180f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) }; if (cmd == MCU_EXT_CMD(SET_RX_PATH) || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 0019890fdb784..fbb1181c58ff3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, else mt76_connac_write_hw_txp(mdev, tx_info, txp, id); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 
68ca0844cbbfa..87bfa441a9374 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -257,6 +257,8 @@ enum tx_mgnt_type { #define MT_TXD7_UDP_TCP_SUM BIT(15) #define MT_TXD7_TX_TIME GENMASK(9, 0) +#define MT_TXD9_WLAN_IDX GENMASK(23, 8) + #define MT_TX_RATE_STBC BIT(14) #define MT_TX_RATE_NSS GENMASK(13, 10) #define MT_TX_RATE_MODE GENMASK(9, 6) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index ee5177fd6ddea..87479c6c2b505 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev, return; } - /* error path */ - if (e->skb == DMA_DUMMY_DATA) { - struct mt76_connac_txp_common *txp; - struct mt76_txwi_cache *t; - u16 token; - - txp = mt76_connac_txwi_to_txp(mdev, e->txwi); - if (is_mt76_fw_txp(mdev)) - token = le16_to_cpu(txp->fw.token); - else - token = le16_to_cpu(txp->hw.msdu_id[0]) & - ~MT_MSDU_ID_VALID; - - t = mt76_token_put(mdev, token); - e->skb = t ? t->skb : NULL; - } - if (e->skb) mt76_tx_complete_skb(mdev, e->wcid, e->skb); } @@ -310,7 +293,10 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif, bool beacon, bool mcast) { - u8 nss = 0, mode = 0, band = mphy->chandef.chan->band; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? + &mvif->ctx->def : &mphy->chandef; + u8 nss = 0, mode = 0, band = chandef->chan->band; int rateidx = 0, mcast_rate; if (!vif) @@ -343,7 +329,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, rateidx = ffs(vif->bss_conf.basic_rates) - 1; legacy: - rateidx = mt76_calculate_default_rate(mphy, rateidx); + rateidx = mt76_calculate_default_rate(mphy, vif, rateidx); mode = rateidx >> 8; rateidx &= GENMASK(7, 0); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 0f0a519f956f8..8274a57e1f0fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -829,7 +829,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_vif *vif, u8 rcpi, u8 sta_state) { - struct cfg80211_chan_def *chandef = &mphy->chandef; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? + &mvif->ctx->def : &mphy->chandef; enum nl80211_band band = chandef->chan->band; struct mt76_dev *dev = mphy->dev; struct sta_rec_ra_info *ra_info; @@ -1369,7 +1371,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext); const struct ieee80211_sta_he_cap * mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif) { - enum nl80211_band band = phy->chandef.chan->band; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? 
+ &mvif->ctx->def : &phy->chandef; + enum nl80211_band band = chandef->chan->band; struct ieee80211_supported_band *sband; sband = phy->hw->wiphy->bands[band]; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index b8b0c0fda7522..2222fb9aa103e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -809,7 +809,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, txp->rept_wds_wcid = cpu_to_le16(wcid->idx); else txp->rept_wds_wcid = cpu_to_le16(0x3ff); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; /* pass partial skb header to fw */ tx_info->buf[1].len = MT_CT_PARSE_LEN; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 8ebbf186fab23..d06c25dda325e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -646,11 +646,13 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, mt7915_update_bss_color(hw, vif, &info->he_bss_color); if (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_BEACON_ENABLED | - BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | - BSS_CHANGED_FILS_DISCOVERY)) + BSS_CHANGED_BEACON_ENABLED)) mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed); + if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)) + mt7915_mcu_add_inband_discov(dev, vif, changed); + if (set_bss_info == 0) mt7915_mcu_add_bss_info(phy, vif, false); if (set_sta == 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 50ae7bf3af91c..5d8e985cd7d45 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1015,13 +1015,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool bfee) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - int tx_ant = hweight8(phy->mt76->chainmask) - 1; + int sts = hweight16(phy->mt76->chainmask); if (vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP) return false; - if (!bfee && tx_ant < 2) + if (!bfee && sts < 2) return false; if (sta->deflink.he_cap.has_he) { @@ -1882,10 +1882,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); } -static void -mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct sk_buff *rskb, struct bss_info_bcn *bcn, - u32 changed) +int +mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, + u32 changed) { #define OFFLOAD_TX_MODE_SU BIT(0) #define OFFLOAD_TX_MODE_MU BIT(1) @@ -1895,14 +1894,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; enum nl80211_band band = chandef->chan->band; struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct bss_info_bcn *bcn; struct bss_info_inband_discovery *discov; struct ieee80211_tx_info *info; - struct sk_buff *skb = NULL; - struct tlv *tlv; + struct sk_buff *rskb, *skb = NULL; + struct tlv *tlv, *sub_tlv; bool ext_phy = phy != &dev->phy; u8 *buf, interval; int len; + if (vif->bss_conf.nontransmitted) + return 0; + + rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL, + MT7915_MAX_BSS_OFFLOAD_SIZE); + if (IS_ERR(rskb)) + return PTR_ERR(rskb); + + tlv 
= mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); + bcn = (struct bss_info_bcn *)tlv; + bcn->enable = true; + if (changed & BSS_CHANGED_FILS_DISCOVERY && vif->bss_conf.fils_discovery.max_interval) { interval = vif->bss_conf.fils_discovery.max_interval; @@ -1913,27 +1925,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); } - if (!skb) - return; + if (!skb) { + dev_kfree_skb(rskb); + return -EINVAL; + } info = IEEE80211_SKB_CB(skb); info->control.vif = vif; info->band = band; - info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); len = sizeof(*discov) + MT_TXD_SIZE + skb->len; len = (len & 0x3) ? ((len | 0x3) + 1) : len; - if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) { + if (skb->len > MT7915_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "inband discovery size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); - return; + return -EINVAL; } - tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, - len, &bcn->sub_ntlv, &bcn->len); - discov = (struct bss_info_inband_discovery *)tlv; + sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, + len, &bcn->sub_ntlv, &bcn->len); + discov = (struct bss_info_inband_discovery *)sub_tlv; discov->tx_mode = OFFLOAD_TX_MODE_SU; /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); @@ -1941,13 +1955,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); discov->enable = true; - buf = (u8 *)tlv + sizeof(*discov); + buf = (u8 *)sub_tlv + sizeof(*discov); mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 0, changed); memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); dev_kfree_skb(skb); + + return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, + MCU_EXT_CMD(BSS_INFO_UPDATE), true); } int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1980,11 +1997,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out; skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7915_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -1997,11 +2017,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); dev_kfree_skb(skb); - if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP || - changed & BSS_CHANGED_FILS_DISCOVERY) - mt7915_mcu_beacon_inband_discov(dev, vif, rskb, - bcn, changed); - out: return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_EXT_CMD(BSS_INFO_UPDATE), true); @@ -2725,10 +2740,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) if (mt76_connac_spe_idx(phy->mt76->antenna_mask)) req.tx_path_num = fls(phy->mt76->antenna_mask); - if (cmd == MCU_EXT_CMD(SET_RX_PATH) || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if 
(!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, NL80211_IFTYPE_AP)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index b9ea297f382c3..1592b5d6751a0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -495,10 +495,14 @@ enum { SER_RECOVER }; -#define MT7915_MAX_BEACON_SIZE 512 -#define MT7915_MAX_INBAND_FRAME_SIZE 256 -#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \ - MT7915_MAX_INBAND_FRAME_SIZE + \ +#define MT7915_MAX_BEACON_SIZE 1308 +#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct bss_info_bcn) + \ + sizeof(struct bss_info_bcn_cntdwn) + \ + sizeof(struct bss_info_bcn_mbss) + \ + MT_TXD_SIZE + \ + sizeof(struct bss_info_bcn_cont)) +#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \ MT7915_BEACON_UPDATE_SIZE) #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ @@ -511,12 +515,6 @@ enum { sizeof(struct bss_info_bmc_rate) +\ sizeof(struct bss_info_ext_bss)) -#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct bss_info_bcn_cntdwn) + \ - sizeof(struct bss_info_bcn_mbss) + \ - sizeof(struct bss_info_bcn_cont) + \ - sizeof(struct bss_info_inband_discovery)) - static inline s8 mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 0456e56f63480..21984e9723709 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -447,6 +447,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, bool add); int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct cfg80211_he_bss_color *he_bss_color); +int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, + u32 changed); int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int enable, u32 changed); int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 0844d28b3223d..d8851cb5f400b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -756,7 +756,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - true, mvif->ctx); + true, mvif->mt76.ctx); ewma_avg_signal_init(&msta->avg_ack_signal); @@ -791,7 +791,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (!sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, false, - mvif->ctx); + mvif->mt76.ctx); } spin_lock_bh(&dev->mt76.sta_poll_lock); @@ -1208,7 +1208,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, - true, mvif->ctx); + true, mvif->mt76.ctx); if (err) goto out; @@ -1240,7 +1240,7 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out; mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false, - mvif->ctx); + mvif->mt76.ctx); out: mt792x_mutex_release(dev); @@ -1265,7 +1265,7 @@ static void mt7921_ctx_iter(void *priv, u8 *mac, struct 
mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct ieee80211_chanctx_conf *ctx = priv; - if (ctx != mvif->ctx) + if (ctx != mvif->mt76.ctx) return; if (vif->type == NL80211_IFTYPE_MONITOR) @@ -1298,7 +1298,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw, jiffies_to_msecs(HZ); mt792x_mutex_acquire(dev); - mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration, + mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, MT7921_ROC_REQ_JOIN); mt792x_mutex_release(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index e7a995e7e70a3..c866144ff0613 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -48,7 +48,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, memset(txp, 0, sizeof(struct mt76_connac_hw_txp)); mt76_connac_write_hw_txp(mdev, tx_info, txp, id); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index 5d5ab8630041b..6c347495e1185 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -91,7 +91,6 @@ struct mt792x_vif { struct ewma_rssi rssi; struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; - struct ieee80211_chanctx_conf *ctx; }; struct mt792x_phy { diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index 46be7f996c7e1..ec98450a938ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -243,7 +243,7 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mvif->ctx = ctx; + mvif->mt76.ctx = ctx; mutex_unlock(&dev->mt76.mutex); return 0; @@ -259,7 +259,7 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mvif->ctx = NULL; + mvif->mt76.ctx = NULL; mutex_unlock(&dev->mt76.mutex); } EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 26e03b28935f2..66d8cc0eeabee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -733,16 +733,17 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; + val = max_t(u8, sts - 1, 3); eht_cap_elem->phy_cap_info[0] |= - u8_encode_bits(u8_get_bits(sts - 1, BIT(0)), + u8_encode_bits(u8_get_bits(val, BIT(0)), IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); eht_cap_elem->phy_cap_info[1] = - u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)), + u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)), IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | - u8_encode_bits(sts - 1, + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) | - u8_encode_bits(sts - 1, + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); eht_cap_elem->phy_cap_info[2] = diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index ac8759febe485..c43839a205088 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -433,7 +433,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev, case IEEE80211_STA_RX_BW_160: status->bw = RATE_INFO_BW_160; break; + /* rxv reports bw 320-1 and 320-2 separately */ case IEEE80211_STA_RX_BW_320: + case IEEE80211_STA_RX_BW_320 + 1: status->bw = RATE_INFO_BW_320; break; default: @@ -991,11 +993,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } txp->fw.token = cpu_to_le16(id); - if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) - txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx); - else - txp->fw.rept_wds_wcid = cpu_to_le16(0xfff); - tx_info->skb = DMA_DUMMY_DATA; + txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff); + + tx_info->skb = NULL; /* pass partial skb header to fw */ tx_info->buf[1].len = MT_CT_PARSE_LEN; @@ -1051,7 +1051,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t, if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) mt7996_tx_check_aggr(sta, txwi); } else { - wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX); } __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index c3a479dc3f533..6e0f0c100db84 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->phy = phy; mvif->mt76.band_idx = band_idx; - mvif->mt76.wmm_idx = band_idx; + mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; ret = mt7996_mcu_add_dev_info(phy, vif, true); if (ret) @@ -414,10 +414,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_tx_queue_params *params) { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + const u8 mq_to_aci[] = { + [IEEE80211_AC_VO] = 3, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + }; + /* firmware uses access class index */ + mvif->queue_params[mq_to_aci[queue]] = *params; /* no need to update right away, we'll get BSS_CHANGED_QOS */ - queue = mt76_connac_lmac_mapping(queue); - mvif->queue_params[queue] = *params; return 0; } @@ -618,8 +624,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, mt7996_mcu_add_beacon(hw, vif, info->enable_beacon); } - if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP || - changed & BSS_CHANGED_FILS_DISCOVERY) + if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)) mt7996_mcu_beacon_inband_discov(dev, vif, changed); if (changed & BSS_CHANGED_MU_GROUPS) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 4a30db49ef33f..7575d3506ea4e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -2016,7 +2016,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif, bcn->bcc_ie_pos = cpu_to_le16(offset - 3); } - buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE; + buf = (u8 *)bcn + sizeof(*bcn); mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON); @@ -2034,26 +2034,22 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct sk_buff *skb, *rskb; struct tlv *tlv; struct bss_bcn_content_tlv *bcn; + int len; rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, - MT7996_BEACON_UPDATE_SIZE); + 
MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) return PTR_ERR(rskb); - tlv = mt7996_mcu_add_uni_tlv(rskb, - UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn)); - bcn = (struct bss_bcn_content_tlv *)tlv; - bcn->enable = en; - - if (!en) - goto out; - skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7996_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -2061,11 +2057,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, info = IEEE80211_SKB_CB(skb); info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); + len = sizeof(*bcn) + MT_TXD_SIZE + skb->len; + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len); + bcn = (struct bss_bcn_content_tlv *)tlv; + bcn->enable = en; + if (!en) + goto out; + mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); /* TODO: subtag - 11v MBSSID */ mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs); - dev_kfree_skb(skb); out: + dev_kfree_skb(skb); return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); } @@ -2086,9 +2089,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, struct sk_buff *rskb, *skb = NULL; struct tlv *tlv; u8 *buf, interval; + int len; + + if (vif->bss_conf.nontransmitted) + return 0; rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, - MT7996_INBAND_FRAME_SIZE); + MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) return PTR_ERR(rskb); @@ -2102,11 +2109,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); } - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7996_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "inband discovery size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -2116,7 +2126,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, info->band = band; info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); - tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov)); + len = sizeof(*discov) + MT_TXD_SIZE + skb->len; + + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len); discov = (struct bss_inband_discovery_tlv *)tlv; discov->tx_mode = OFFLOAD_TX_MODE_SU; @@ -2127,7 +2139,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, discov->enable = true; discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED); - buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE; + buf = (u8 *)tlv + sizeof(*discov); mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed); @@ -2679,7 +2691,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif) e = (struct edca *)tlv; e->set = WMM_PARAM_SET; - e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS; + e->queue = ac; e->aifs = q->aifs; e->txop = cpu_to_le16(q->txop); @@ -2960,10 +2972,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag) .channel_band = ch_band[chandef->chan->band], }; - if (tag == UNI_CHANNEL_RX_PATH || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + 
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, NL80211_IFTYPE_AP)) @@ -3307,8 +3319,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action) tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en)); req_mod_en = (struct bf_mod_en_ctrl *)tlv; - req_mod_en->bf_num = 2; - req_mod_en->bf_bitmap = GENMASK(0, 0); + req_mod_en->bf_num = 3; + req_mod_en->bf_bitmap = GENMASK(2, 0); break; } default: @@ -3548,7 +3560,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, int cmd) { struct { - u8 _rsv[4]; + /* fixed field */ + u8 bss; + u8 _rsv[3]; __le16 tag; __le16 len; @@ -3566,7 +3580,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, u8 exponent; u8 is_ap; u8 agrt_params; - u8 __rsv2[135]; + u8 __rsv2[23]; } __packed req = { .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE), .len = cpu_to_le16(sizeof(req) - 4), @@ -3576,6 +3590,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, .flowid = flow->id, .peer_id = cpu_to_le16(flow->wcid), .duration = flow->duration, + .bss = mvif->mt76.idx, .bss_idx = mvif->mt76.idx, .start_tsf = cpu_to_le64(flow->tsf), .mantissa = flow->mantissa, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index 078f828586212..e4b31228ba0d2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -270,8 +270,6 @@ struct bss_inband_discovery_tlv { u8 enable; __le16 wcid; __le16 prob_rsp_len; -#define MAX_INBAND_FRAME_SIZE 512 - u8 pkt[MAX_INBAND_FRAME_SIZE]; } __packed; struct bss_bcn_content_tlv { @@ -283,8 +281,6 @@ struct bss_bcn_content_tlv { u8 enable; u8 type; __le16 pkt_len; -#define MAX_BEACON_SIZE 512 - u8 pkt[MAX_BEACON_SIZE]; } __packed; struct bss_bcn_cntdwn_tlv { @@ -591,13 +587,14 @@ enum { sizeof(struct sta_rec_hdr_trans) + \ sizeof(struct tlv)) +#define MT7996_MAX_BEACON_SIZE 1342 #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct bss_bcn_content_tlv) + \ + MT_TXD_SIZE + \ sizeof(struct bss_bcn_cntdwn_tlv) + \ sizeof(struct bss_bcn_mbss_tlv)) - -#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \ - sizeof(struct bss_inband_discovery_tlv)) +#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ + MT7996_BEACON_UPDATE_SIZE) enum { UNI_BAND_CONFIG_RADIO_ENABLE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index 6f61d6a106272..5a34894a533be 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 0b6a15c2e5ccd..d92aad60edfe9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 8ada31380efa4..0ff8e355c23a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index f8ba133baff06..35bc37a3c469d 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ rtw_debug_priv_ ##name.rtwdev = rtwdev; \ - if (!debugfs_create_file(#name, mode, \ + if (IS_ERR(debugfs_create_file(#name, mode, \ parent, &rtw_debug_priv_ ##name,\ - &file_ops_ ##fopname)) \ + &file_ops_ ##fopname))) \ pr_debug("Unable to initialize debugfs:%s\n", \ #name); \ } while (0) diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index d879d7e3dc81f..e6ab1ac6d7093 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -611,8 +611,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb) for (i = 0; i < RTW_USB_RXCB_NUM; i++) { rxcb = &rtwusb->rx_cb[i]; - if (rxcb->rx_urb) - usb_kill_urb(rxcb->rx_urb); + usb_kill_urb(rxcb->rx_urb); } } @@ -623,10 +622,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb) for (i = 0; i < RTW_USB_RXCB_NUM; i++) { rxcb = &rtwusb->rx_cb[i]; - if (rxcb->rx_urb) { - usb_kill_urb(rxcb->rx_urb); - usb_free_urb(rxcb->rx_urb); - } + usb_kill_urb(rxcb->rx_urb); + usb_free_urb(rxcb->rx_urb); } } diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c index 6a5e52a96d183..caa22226b01bc 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.c +++ b/drivers/net/wireless/silabs/wfx/data_tx.c @@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) { - int i; - bool finished; + bool has_rate0 = false; + int i, j; - /* Firmware is not able to mix rates with different flags */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rates[i].flags |= IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) + for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) { + if (rates[j].idx == -1) + break; + /* The device use the rates in descending order, whatever the request from minstrel. + * We have to trade off here. Most important is to respect the primary rate + * requested by minstrel. So, we drops the entries with rate higher than the + * previous. 
+ */ + if (rates[j].idx >= rates[i - 1].idx) { + rates[i - 1].count += rates[j].count; + rates[i - 1].count = min_t(u16, 15, rates[i - 1].count); + } else { + memcpy(rates + i, rates + j, sizeof(rates[i])); + if (rates[i].idx == 0) + has_rate0 = true; + /* The device apply Short GI only on the first rate */ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)) - rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; - } - - /* Sort rates and remove duplicates */ - do { - finished = true; - for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { - if (rates[i + 1].idx == rates[i].idx && - rates[i].idx != -1) { - rates[i].count += rates[i + 1].count; - if (rates[i].count > 15) - rates[i].count = 15; - rates[i + 1].idx = -1; - rates[i + 1].count = 0; - - finished = false; - } - if (rates[i + 1].idx > rates[i].idx) { - swap(rates[i + 1], rates[i]); - finished = false; - } + i++; } - } while (!finished); + } /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[i].idx == 0) - break; - if (rates[i].idx == -1) { - rates[i].idx = 0; - rates[i].count = 8; /* == hw->max_rate_tries */ - rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; - break; - } + if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) { + rates[i].idx = 0; + rates[i].count = 8; /* == hw->max_rate_tries */ + rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS; + i++; + } + for (; i < IEEE80211_TX_MAX_RATES; i++) { + memset(rates + i, 0, sizeof(rates[i])); + rates[i].idx = -1; } - /* All retries use long GI */ - for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) - rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; } static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info) diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 1b9f5b8a6167e..d3fca0ab62900 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL); + priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name, + GFP_KERNEL); + if (!priv->bus_desc.provider_name) { + kfree(priv); + return -ENOMEM; + } + priv->bus_desc.module = THIS_MODULE; priv->bus_desc.of_node = np; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 0a81f87f6f6c0..e2f1fb99707fc 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -939,7 +939,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region) { unsigned int cpu, lane; - cpu = get_cpu(); + migrate_disable(); + cpu = smp_processor_id(); if (nd_region->num_lanes < nr_cpu_ids) { struct nd_percpu_lane *ndl_lock, *ndl_count; @@ -958,16 +959,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) { if (nd_region->num_lanes < nr_cpu_ids) { - unsigned int cpu = get_cpu(); + unsigned int cpu = smp_processor_id(); struct nd_percpu_lane *ndl_lock, *ndl_count; ndl_count = per_cpu_ptr(nd_region->lane, cpu); ndl_lock = per_cpu_ptr(nd_region->lane, lane); if (--ndl_count->count == 0) spin_unlock(&ndl_lock->lock); - put_cpu(); } - put_cpu(); + migrate_enable(); } EXPORT_SYMBOL(nd_region_release_lane); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 747c879e8982b..529b9954d2b8c 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -510,10 +510,13 @@ 
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); req->bio = pdu->bio; - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { pdu->nvme_status = -EINTR; - else + } else { pdu->nvme_status = nvme_req(req)->status; + if (!pdu->nvme_status) + pdu->nvme_status = blk_status_to_errno(err); + } pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); /* diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index ad56df98b8e63..1c1c1aa940a51 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -525,8 +525,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd) base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus, PCI_DEVFN(dev, 0), 0); - hdr_type = readb(base + PCI_HEADER_TYPE) & - PCI_HEADER_TYPE_MASK; + hdr_type = readb(base + PCI_HEADER_TYPE); functions = (hdr_type & 0x80) ? 8 : 1; for (fn = 0; fn < functions; fn++) { diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 5a4a8b0be6262..a7d3a92391a41 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -869,7 +869,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, put_dev: put_device(&epc->dev); - kfree(epc); err_ret: return ERR_PTR(ret); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 1bf6300592644..530c3bb5708c5 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1059,7 +1059,8 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) if (state & PCIE_LINK_STATE_L0S) link->aspm_disable |= ASPM_STATE_L0S; if (state & PCIE_LINK_STATE_L1) - link->aspm_disable |= ASPM_STATE_L1; + /* L1 PM substates require L1 */ + link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS; if (state & PCIE_LINK_STATE_L1_1) link->aspm_disable |= ASPM_STATE_L1_1; if (state & PCIE_LINK_STATE_L1_2) diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 5658745c398f5..b33be1e63c98f 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -605,6 +605,7 @@ static int pccardd(void *__skt) dev_warn(&skt->dev, "PCMCIA: unable to register socket\n"); skt->thread = NULL; complete(&skt->thread_done); + put_device(&skt->dev); return 0; } ret = pccard_sysfs_add_socket(&skt->dev); diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index d500e5dbbc3f5..b4b8363d1de21 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, /* by default don't allow DMA */ p_dev->dma_mask = 0; p_dev->dev.dma_mask = &p_dev->dma_mask; - dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); - if (!dev_name(&p_dev->dev)) - goto err_free; p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); if (!p_dev->devname) goto err_free; @@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, pcmcia_device_query(p_dev); - if (device_register(&p_dev->dev)) - goto err_unreg; + dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); + if (device_register(&p_dev->dev)) { + mutex_lock(&s->ops_mutex); + list_del(&p_dev->socket_device_list); + s->device_count--; + mutex_unlock(&s->ops_mutex); + put_device(&p_dev->dev); + return NULL; + } return p_dev; diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 6b50bc5519846..caae2d3e9d3ea 100644 --- a/drivers/perf/arm-cmn.c +++ 
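The pci-epc-core.c and pcmcia/ds.c hunks above apply the same device-model rule: once device_register() (or device_initialize()) has run, the object is owned by its reference count, so an error path must drop it with put_device() and let the release() callback free the memory; an extra kfree() is a double free. A sketch of the pattern with hypothetical names, not taken from either driver:

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
        struct device dev;
        /* driver-private state ... */
};

static void my_obj_release(struct device *dev)
{
        kfree(container_of(dev, struct my_obj, dev));   /* the only place that frees */
}

static struct my_obj *my_obj_add(struct device *parent)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        obj->dev.parent = parent;
        obj->dev.release = my_obj_release;
        dev_set_name(&obj->dev, "my_obj");

        if (device_register(&obj->dev)) {
                /* unwind any bookkeeping done before registering, then ... */
                put_device(&obj->dev);  /* release() does the kfree(); no kfree(obj) here */
                return NULL;
        }
        return obj;
}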
b/drivers/perf/arm-cmn.c @@ -112,7 +112,9 @@ #define CMN_DTM_PMEVCNTSR 0x240 -#define CMN_DTM_UNIT_INFO 0x0910 +#define CMN650_DTM_UNIT_INFO 0x0910 +#define CMN_DTM_UNIT_INFO 0x0960 +#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0) #define CMN_DTM_NUM_COUNTERS 4 /* Want more local counters? Why not replicate the whole DTM! Ugh... */ @@ -2117,6 +2119,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) return 0; } +static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region) +{ + int offset = CMN_DTM_UNIT_INFO; + + if (cmn->part == PART_CMN650 || cmn->part == PART_CI700) + offset = CMN650_DTM_UNIT_INFO; + + return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset)); +} + static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node) { int level; @@ -2248,7 +2260,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) if (cmn->part == PART_CMN600) xp->dtc = 0xf; else - xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO); + xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region); xp->dtm = dtm - cmn->dtms; arm_cmn_init_dtm(dtm++, xp, 0); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 8fcaa26f0f8a6..d681638ec6b82 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) -static inline u32 armv8pmu_pmcr_read(void) +static inline u64 armv8pmu_pmcr_read(void) { return read_pmcr(); } -static inline void armv8pmu_pmcr_write(u32 val) +static inline void armv8pmu_pmcr_write(u64 val) { val &= ARMV8_PMU_PMCR_MASK; isb(); @@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, static void armv8pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; - u32 pmcr; + u64 pmcr; /* The counter and interrupt enable registers are unknown at reset. */ armv8pmu_disable_counter(U32_MAX); diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index 5a00adb2de8c9..051efffc44c82 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -353,6 +353,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event) struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; + /* Check the type first before going on, otherwise it's not our event */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + event->cpu = pcie_pmu->on_cpu; if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event))) @@ -360,9 +364,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event) else hwc->event_base = HISI_PCIE_CNT; - if (event->attr.type != event->pmu->type) - return -ENOENT; - /* Sampling is not supported. 
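The hisi_pcie_pmu hunk just above moves the event->attr.type check to the top of event_init(). The perf core may offer an event to every registered PMU while looking for a match, so a PMU that is not the intended owner has to return -ENOENT before modifying anything in the event (such as event->cpu); doing the check after the assignment corrupts events meant for other PMUs. A sketch of the expected shape, with a hypothetical my_pmu wrapper:

#include <linux/perf_event.h>

struct my_pmu {
        struct pmu pmu;
        int on_cpu;
};

static int my_pmu_event_init(struct perf_event *event)
{
        struct my_pmu *mp = container_of(event->pmu, struct my_pmu, pmu);

        /* Not one of ours? Say so before touching the event at all. */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* Uncore-style PMUs typically cannot sample or follow a task. */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        /* Only now is it safe to adjust the event for this PMU. */
        event->cpu = mp->on_cpu;
        return 0;
}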
*/ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index d941e746b4248..797cf201996a9 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -505,8 +505,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) ret = perf_pmu_register(&pa_pmu->pmu, name, -1); if (ret) { dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, - &pa_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + &pa_pmu->node); return ret; } diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index 6fe534a665eda..e706ca5676764 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); if (ret) { dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, - &sllc_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, + &sllc_pmu->node); return ret; } diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c index e0457d84af6b3..16869bf5bf4cc 100644 --- a/drivers/perf/hisilicon/hns3_pmu.c +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1); if (ret) { pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, - &hns3_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); } return ret; @@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); perf_pmu_unregister(&hns3_pmu->pmu); - cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, - &hns3_pmu->node); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); } static int hns3_pmu_init_dev(struct pci_dev *pdev) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 96c7f670c8f0d..fcb0c70ca2225 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_set_scounteren, (void *)event, 1); + pmu_sbi_set_scounteren((void *)event); } static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) @@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_reset_scounteren, (void *)event, 1); + pmu_sbi_reset_scounteren((void *)event); ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c 
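The three HiSilicon hunks above switch the error and teardown paths from cpuhp_state_remove_instance() to cpuhp_state_remove_instance_nocalls(). When perf_pmu_register() has failed (or perf_pmu_unregister() has already run), the hotplug teardown callback must not be invoked, since it would try to migrate perf context for a PMU that is not registered; the _nocalls variant only unhooks the instance. A probe-path sketch, assuming a hypothetical wrapper struct and a hotplug state obtained earlier via cpuhp_setup_state_multi():

#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

struct my_pmu {
        struct pmu pmu;
        struct hlist_node node;
};

/* Assumed to have been filled in by cpuhp_setup_state_multi() at module init. */
static enum cpuhp_state my_pmu_cpuhp_state;

static int my_pmu_probe(struct platform_device *pdev)
{
        struct my_pmu *mp = devm_kzalloc(&pdev->dev, sizeof(*mp), GFP_KERNEL);
        int ret;

        if (!mp)
                return -ENOMEM;

        /* Runs the online callback for this instance on each online CPU. */
        ret = cpuhp_state_add_instance(my_pmu_cpuhp_state, &mp->node);
        if (ret)
                return ret;

        ret = perf_pmu_register(&mp->pmu, "my_pmu", -1);
        if (ret)
                /* Unhook without the offline callback: the PMU never became
                 * visible, so there is no perf context to migrate. */
                cpuhp_state_remove_instance_nocalls(my_pmu_cpuhp_state, &mp->node);

        return ret;
}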
b/drivers/pinctrl/intel/pinctrl-baytrail.c index faa8b7ff5bcf3..ec76e43527c5c 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -983,11 +983,18 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, break; case PIN_CONFIG_INPUT_DEBOUNCE: - if (arg) + if (arg) { conf |= BYT_DEBOUNCE_EN; - else + } else { conf &= ~BYT_DEBOUNCE_EN; + /* + * No need to update the pulse value. + * Debounce is going to be disabled. + */ + break; + } + switch (arg) { case 375: db_pulse = BYT_DEBOUNCE_PULSE_375US; diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 37cdfe4b04f9a..2ea6ef99cc70b 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1175,6 +1175,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d) u32 port; u8 bit; + irq_chip_disable_parent(d); + port = RZG2L_PIN_ID_TO_PORT(hwirq); bit = RZG2L_PIN_ID_TO_PIN(hwirq); @@ -1189,7 +1191,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d) spin_unlock_irqrestore(&pctrl->lock, flags); gpiochip_disable_irq(gc, hwirq); - irq_chip_disable_parent(d); } static void rzg2l_gpio_irq_enable(struct irq_data *d) diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index 5d36fbc75e1bb..badc68bbae8cc 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -321,17 +321,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev) EXPORT_SYMBOL(cros_ec_unregister); #ifdef CONFIG_PM_SLEEP -/** - * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. - * @ec_dev: Device to suspend. - * - * This can be called by drivers to handle a suspend event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_suspend(struct cros_ec_device *ec_dev) +static void cros_ec_send_suspend_event(struct cros_ec_device *ec_dev) { - struct device *dev = ec_dev->dev; int ret; u8 sleep_event; @@ -343,7 +334,26 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev) if (ret < 0) dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n", ret); +} +/** + * cros_ec_suspend_prepare() - Handle a suspend prepare operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. + * + * This can be called by drivers to handle a suspend prepare stage of suspend. + * + * Return: 0 always. + */ +int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev) +{ + cros_ec_send_suspend_event(ec_dev); + return 0; +} +EXPORT_SYMBOL(cros_ec_suspend_prepare); + +static void cros_ec_disable_irq(struct cros_ec_device *ec_dev) +{ + struct device *dev = ec_dev->dev; if (device_may_wakeup(dev)) ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq); else @@ -351,7 +361,35 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev) disable_irq(ec_dev->irq); ec_dev->suspended = true; +} +/** + * cros_ec_suspend_late() - Handle a suspend late operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. + * + * This can be called by drivers to handle a suspend late stage of suspend. + * + * Return: 0 always. + */ +int cros_ec_suspend_late(struct cros_ec_device *ec_dev) +{ + cros_ec_disable_irq(ec_dev); + return 0; +} +EXPORT_SYMBOL(cros_ec_suspend_late); + +/** + * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. + * + * This can be called by drivers to handle a suspend event. + * + * Return: 0 always. 
+ */ +int cros_ec_suspend(struct cros_ec_device *ec_dev) +{ + cros_ec_send_suspend_event(ec_dev); + cros_ec_disable_irq(ec_dev); return 0; } EXPORT_SYMBOL(cros_ec_suspend); @@ -370,22 +408,11 @@ static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev) } } -/** - * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. - * @ec_dev: Device to resume. - * - * This can be called by drivers to handle a resume event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_resume(struct cros_ec_device *ec_dev) +static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev) { int ret; u8 sleep_event; - ec_dev->suspended = false; - enable_irq(ec_dev->irq); - sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ? HOST_SLEEP_EVENT_S3_RESUME : HOST_SLEEP_EVENT_S0IX_RESUME; @@ -394,6 +421,24 @@ int cros_ec_resume(struct cros_ec_device *ec_dev) if (ret < 0) dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n", ret); +} + +/** + * cros_ec_resume_complete() - Handle a resume complete operation for the ChromeOS EC device. + * @ec_dev: Device to resume. + * + * This can be called by drivers to handle a resume complete stage of resume. + */ +void cros_ec_resume_complete(struct cros_ec_device *ec_dev) +{ + cros_ec_send_resume_event(ec_dev); +} +EXPORT_SYMBOL(cros_ec_resume_complete); + +static void cros_ec_enable_irq(struct cros_ec_device *ec_dev) +{ + ec_dev->suspended = false; + enable_irq(ec_dev->irq); if (ec_dev->wake_enabled) disable_irq_wake(ec_dev->irq); @@ -403,8 +448,35 @@ int cros_ec_resume(struct cros_ec_device *ec_dev) * suspend. This way the clients know what to do with them. */ cros_ec_report_events_during_suspend(ec_dev); +} +/** + * cros_ec_resume_early() - Handle a resume early operation for the ChromeOS EC device. + * @ec_dev: Device to resume. + * + * This can be called by drivers to handle a resume early stage of resume. + * + * Return: 0 always. + */ +int cros_ec_resume_early(struct cros_ec_device *ec_dev) +{ + cros_ec_enable_irq(ec_dev); + return 0; +} +EXPORT_SYMBOL(cros_ec_resume_early); +/** + * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. + * @ec_dev: Device to resume. + * + * This can be called by drivers to handle a resume event. + * + * Return: 0 always. 
+ */ +int cros_ec_resume(struct cros_ec_device *ec_dev) +{ + cros_ec_enable_irq(ec_dev); + cros_ec_send_resume_event(ec_dev); return 0; } EXPORT_SYMBOL(cros_ec_resume); diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h index bbca0096868ac..566332f487892 100644 --- a/drivers/platform/chrome/cros_ec.h +++ b/drivers/platform/chrome/cros_ec.h @@ -14,7 +14,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev); void cros_ec_unregister(struct cros_ec_device *ec_dev); int cros_ec_suspend(struct cros_ec_device *ec_dev); +int cros_ec_suspend_late(struct cros_ec_device *ec_dev); +int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev); int cros_ec_resume(struct cros_ec_device *ec_dev); +int cros_ec_resume_early(struct cros_ec_device *ec_dev); +void cros_ec_resume_complete(struct cros_ec_device *ec_dev); irqreturn_t cros_ec_irq_thread(int irq, void *data); diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 356572452898d..42e1770887fb0 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -549,22 +549,36 @@ MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table); static int cros_ec_lpc_prepare(struct device *dev) { struct cros_ec_device *ec_dev = dev_get_drvdata(dev); - - return cros_ec_suspend(ec_dev); + return cros_ec_suspend_prepare(ec_dev); } static void cros_ec_lpc_complete(struct device *dev) { struct cros_ec_device *ec_dev = dev_get_drvdata(dev); - cros_ec_resume(ec_dev); + cros_ec_resume_complete(ec_dev); +} + +static int cros_ec_lpc_suspend_late(struct device *dev) +{ + struct cros_ec_device *ec_dev = dev_get_drvdata(dev); + + return cros_ec_suspend_late(ec_dev); +} + +static int cros_ec_lpc_resume_early(struct device *dev) +{ + struct cros_ec_device *ec_dev = dev_get_drvdata(dev); + + return cros_ec_resume_early(ec_dev); } #endif static const struct dev_pm_ops cros_ec_lpc_pm_ops = { #ifdef CONFIG_PM_SLEEP .prepare = cros_ec_lpc_prepare, - .complete = cros_ec_lpc_complete + .complete = cros_ec_lpc_complete, #endif + SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend_late, cros_ec_lpc_resume_early) }; static struct platform_driver cros_ec_lpc_driver = { diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index a78ddd83cda02..317c907304149 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -911,21 +911,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver) } static int wmi_char_open(struct inode *inode, struct file *filp) { - const char *driver_name = filp->f_path.dentry->d_iname; - struct wmi_block *wblock; - struct wmi_block *next; - - list_for_each_entry_safe(wblock, next, &wmi_block_list, list) { - if (!wblock->dev.dev.driver) - continue; - if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) { - filp->private_data = wblock; - break; - } - } + /* + * The miscdevice already stores a pointer to itself + * inside filp->private_data + */ + struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev); - if (!filp->private_data) - return -ENODEV; + filp->private_data = wblock; return nonseekable_open(inode, filp); } @@ -1270,8 +1262,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) struct wmi_block *wblock, *next; union acpi_object *obj; acpi_status status; - int retval = 0; u32 i, total; + int retval; status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out); if (ACPI_FAILURE(status)) @@ -1282,8 +1274,8 @@ static int parse_wdg(struct 
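In the wmi.c hunk above, wmi_char_open() stops scanning a global list by driver name. The misc-device core stores a pointer to the registered struct miscdevice in filp->private_data before calling the driver's open(), so a driver that embeds its miscdevice can recover the containing object with container_of(). A sketch of that idiom with a hypothetical wrapper:

#include <linux/fs.h>
#include <linux/miscdevice.h>

struct my_dev {
        /* driver state ... */
        struct miscdevice misc;         /* embedded and registered with misc_register() */
};

static int my_open(struct inode *inode, struct file *filp)
{
        /* misc core has already set filp->private_data to &my_dev->misc */
        struct my_dev *d = container_of(filp->private_data,
                                        struct my_dev, misc);

        filp->private_data = d;         /* hand the wrapper to read/write/ioctl */
        return nonseekable_open(inode, filp);
}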
device *wmi_bus_dev, struct acpi_device *device) return -ENXIO; if (obj->type != ACPI_TYPE_BUFFER) { - retval = -ENXIO; - goto out_free_pointer; + kfree(obj); + return -ENXIO; } gblock = (const struct guid_block *)obj->buffer.pointer; @@ -1298,8 +1290,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) wblock = kzalloc(sizeof(*wblock), GFP_KERNEL); if (!wblock) { - retval = -ENOMEM; - break; + dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid); + continue; } wblock->acpi_device = device; @@ -1338,9 +1330,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) } } -out_free_pointer: - kfree(out.pointer); - return retval; + kfree(obj); + + return 0; } /* diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c index a3faa9a3de7cc..a7d529bf76adc 100644 --- a/drivers/pwm/pwm-brcmstb.c +++ b/drivers/pwm/pwm-brcmstb.c @@ -288,7 +288,7 @@ static int brcmstb_pwm_suspend(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); - clk_disable(p->clk); + clk_disable_unprepare(p->clk); return 0; } @@ -297,7 +297,7 @@ static int brcmstb_pwm_resume(struct device *dev) { struct brcmstb_pwm *p = dev_get_drvdata(dev); - clk_enable(p->clk); + clk_prepare_enable(p->clk); return 0; } diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index b1d1373648a38..c8800f84b917f 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -79,6 +79,7 @@ struct sti_pwm_compat_data { unsigned int cpt_num_devs; unsigned int max_pwm_cnt; unsigned int max_prescale; + struct sti_cpt_ddata *ddata; }; struct sti_pwm_chip { @@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm, { struct sti_pwm_chip *pc = to_sti_pwmchip(chip); struct sti_pwm_compat_data *cdata = pc->cdata; - struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm); + struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm]; struct device *dev = pc->dev; unsigned int effective_ticks; unsigned long long high, low; @@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data) while (cpt_int_stat) { devicenum = ffs(cpt_int_stat) - 1; - ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]); + ddata = &pc->cdata->ddata[devicenum]; /* * Capture input: @@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev) dev_err(dev, "failed to prepare clock\n"); return ret; } + + cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL); + if (!cdata->ddata) + return -ENOMEM; } pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; pc->chip.npwm = pc->cdata->pwm_num_devs; - ret = pwmchip_add(&pc->chip); - if (ret < 0) { - clk_unprepare(pc->pwm_clk); - clk_unprepare(pc->cpt_clk); - return ret; - } - for (i = 0; i < cdata->cpt_num_devs; i++) { - struct sti_cpt_ddata *ddata; - - ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; + struct sti_cpt_ddata *ddata = &cdata->ddata[i]; init_waitqueue_head(&ddata->wait); mutex_init(&ddata->lock); + } - pwm_set_chip_data(&pc->chip.pwms[i], ddata); + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + clk_unprepare(pc->pwm_clk); + clk_unprepare(pc->cpt_clk); + return ret; } platform_set_drvdata(pdev, pc); diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c index 65fbd95f1dbb0..4ca8fbf4b3e2e 100644 --- a/drivers/regulator/mt6358-regulator.c +++ b/drivers/regulator/mt6358-regulator.c @@ -688,12 +688,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev) const struct 
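The pwm-sti hunk above drops pwm_set_chip_data()/pwm_get_chip_data() in favour of one capture-state array owned by the driver and indexed by pwm->hwpwm, allocated and initialised before pwmchip_add() so the interrupt handler can never see half-initialised channel data. A simplified sketch of that layout, with hypothetical names and most of the registration omitted:

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pwm.h>
#include <linux/wait.h>

struct my_capture {
        struct mutex lock;
        wait_queue_head_t wait;
};

struct my_chip {
        struct pwm_chip chip;
        struct my_capture *cap;         /* one entry per hwpwm */
};

static int my_pwm_register(struct device *dev, unsigned int nchan)
{
        struct my_chip *mc = devm_kzalloc(dev, sizeof(*mc), GFP_KERNEL);
        unsigned int i;

        if (!mc)
                return -ENOMEM;

        mc->cap = devm_kcalloc(dev, nchan, sizeof(*mc->cap), GFP_KERNEL);
        if (!mc->cap)
                return -ENOMEM;

        for (i = 0; i < nchan; i++) {
                mutex_init(&mc->cap[i].lock);
                init_waitqueue_head(&mc->cap[i].wait);
        }

        mc->chip.dev = dev;
        mc->chip.npwm = nchan;
        /* mc->chip.ops etc. omitted; per-channel state exists before this call */
        return pwmchip_add(&mc->chip);
}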
mt6358_regulator_info *mt6358_info; int i, max_regulator, ret; - if (mt6397->chip_id == MT6366_CHIP_ID) { - max_regulator = MT6366_MAX_REGULATOR; - mt6358_info = mt6366_regulators; - } else { + switch (mt6397->chip_id) { + case MT6358_CHIP_ID: max_regulator = MT6358_MAX_REGULATOR; mt6358_info = mt6358_regulators; + break; + case MT6366_CHIP_ID: + max_regulator = MT6366_MAX_REGULATOR; + mt6358_info = mt6366_regulators; + break; + default: + dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id); + return -EINVAL; } ret = mt6358_sync_vcn33_setting(&pdev->dev); diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index d990ba19c50eb..b2e359ac31693 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -1095,7 +1095,7 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = { RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"), RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"), RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"), - RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"), + RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"), RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"), RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"), RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"), diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c index 3cdc015692ca6..1a65a4e0dc003 100644 --- a/drivers/rtc/rtc-brcmstb-waketimer.c +++ b/drivers/rtc/rtc-brcmstb-waketimer.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright © 2014-2017 Broadcom + * Copyright © 2014-2023 Broadcom */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -34,6 +34,7 @@ struct brcmstb_waketmr { u32 rate; unsigned long rtc_alarm; bool alarm_en; + bool alarm_expired; }; #define BRCMSTB_WKTMR_EVENT 0x00 @@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer) writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM); writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT); (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT); + if (timer->alarm_expired) { + timer->alarm_expired = false; + /* maintain call balance */ + enable_irq(timer->alarm_irq); + } } static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer, @@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data) return IRQ_HANDLED; if (timer->alarm_en) { - if (!device_may_wakeup(timer->dev)) + if (device_may_wakeup(timer->dev)) { + disable_irq_nosync(irq); + timer->alarm_expired = true; + } else { writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT); + } rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF); + } else { + writel_relaxed(WKTMR_ALARM_EVENT, + timer->base + BRCMSTB_WKTMR_EVENT); } return IRQ_HANDLED; @@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev, !brcmstb_waketmr_is_pending(timer)) return -EINVAL; timer->alarm_en = true; - if (timer->alarm_irq) + if (timer->alarm_irq) { + if (timer->alarm_expired) { + timer->alarm_expired = false; + /* maintain call balance */ + enable_irq(timer->alarm_irq); + } enable_irq(timer->alarm_irq); + } } else if (!enabled && timer->alarm_en) { if (timer->alarm_irq) disable_irq(timer->alarm_irq); @@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev) return brcmstb_waketmr_prepare_suspend(timer); } +static int brcmstb_waketmr_suspend_noirq(struct device 
*dev) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + + /* Catch any alarms occurring prior to noirq */ + if (timer->alarm_expired && device_may_wakeup(dev)) + return -EBUSY; + + return 0; +} + static int brcmstb_waketmr_resume(struct device *dev) { struct brcmstb_waketmr *timer = dev_get_drvdata(dev); @@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev) return ret; } +#else +#define brcmstb_waketmr_suspend NULL +#define brcmstb_waketmr_suspend_noirq NULL +#define brcmstb_waketmr_resume NULL #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops, - brcmstb_waketmr_suspend, brcmstb_waketmr_resume); +static const struct dev_pm_ops brcmstb_waketmr_pm_ops = { + .suspend = brcmstb_waketmr_suspend, + .suspend_noirq = brcmstb_waketmr_suspend_noirq, + .resume = brcmstb_waketmr_resume, +}; static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = { { .compatible = "brcm,brcmstb-waketimer" }, diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c index 06194674d71c5..540042b9eec8f 100644 --- a/drivers/rtc/rtc-pcf85363.c +++ b/drivers/rtc/rtc-pcf85363.c @@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client) if (client->irq > 0 || wakeup_source) { regmap_write(pcf85363->regmap, CTRL_FLAGS, 0); regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO, - PIN_IO_INTA_OUT, PIN_IO_INTAPM); + PIN_IO_INTAPM, PIN_IO_INTA_OUT); } if (client->irq > 0) { diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 339812efe8221..d09e08b71cfba 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1865,15 +1865,18 @@ static inline void ap_scan_domains(struct ap_card *ac) } /* get it and thus adjust reference counter */ get_device(dev); - if (decfg) + if (decfg) { AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n", __func__, ac->id, dom); - else if (chkstop) + } else if (chkstop) { AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n", __func__, ac->id, dom); - else + } else { + /* nudge the queue's state machine */ + ap_queue_init_state(aq); AP_DBF_INFO("%s(%d,%d) new queue dev created\n", __func__, ac->id, dom); + } goto put_dev_and_continue; } /* handle state changes on already existing queue device */ @@ -1895,10 +1898,8 @@ static inline void ap_scan_domains(struct ap_card *ac) } else if (!chkstop && aq->chkstop) { /* checkstop off */ aq->chkstop = false; - if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { - aq->dev_state = AP_DEV_STATE_OPERATING; - aq->sm_state = AP_SM_STATE_RESET_START; - } + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n", __func__, ac->id, dom); @@ -1922,10 +1923,8 @@ static inline void ap_scan_domains(struct ap_card *ac) } else if (!decfg && !aq->config) { /* config on this queue device */ aq->config = true; - if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { - aq->dev_state = AP_DEV_STATE_OPERATING; - aq->sm_state = AP_SM_STATE_RESET_START; - } + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); AP_DBF_DBG("%s(%d,%d) queue dev config on\n", __func__, ac->id, dom); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index be54b070c0316..3e34912a60506 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -287,6 +287,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); void ap_queue_prepare_remove(struct ap_queue *aq); void 
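The rtc-pcf85363.c hunk above only swaps two arguments, but that is the whole fix: the prototype is regmap_update_bits(map, reg, mask, val), where the mask selects the bits that may change and val supplies their new contents, so passing them in the wrong order programs the wrong bits. A minimal example with hypothetical field definitions (they are not the driver's real register values):

#include <linux/bits.h>
#include <linux/regmap.h>

#define MY_INTA_MODE_MASK       GENMASK(1, 0)   /* hypothetical: INTA pin-mode field */
#define MY_INTA_MODE_OUT        0x2             /* hypothetical: "interrupt output" value */

static int my_set_inta_output(struct regmap *map, unsigned int pin_io_reg)
{
        /* regmap_update_bits(map, reg, mask, val): mask first, then value */
        return regmap_update_bits(map, pin_io_reg, MY_INTA_MODE_MASK, MY_INTA_MODE_OUT);
}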
ap_queue_remove(struct ap_queue *aq); void ap_queue_init_state(struct ap_queue *aq); +void _ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, int comp_type, unsigned int functions, int ml); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 1336e632adc4a..2943b2529d3a0 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -1160,14 +1160,19 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_init_state(struct ap_queue *aq) +void _ap_queue_init_state(struct ap_queue *aq) { - spin_lock_bh(&aq->lock); aq->dev_state = AP_DEV_STATE_OPERATING; aq->sm_state = AP_SM_STATE_RESET_START; aq->last_err_rc = 0; aq->assoc_idx = ASSOC_IDX_INVALID; ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); +} + +void ap_queue_init_state(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + _ap_queue_init_state(aq); spin_unlock_bh(&aq->lock); } EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index ce9eb00e2ca04..470e8e6c41b62 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -5804,7 +5803,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, irq_failed: do { rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); - } while (rtas_busy_delay(rc)); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); reg_failed: LEAVE; return rc; diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index e32a4161a8d02..c61848595da06 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -944,6 +944,9 @@ static int qcom_llcc_probe(struct platform_device *pdev) u32 version; struct regmap *regmap; + if (!IS_ERR(drv_data)) + return -EBUSY; + drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) { ret = -ENOMEM; diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index d05e0d6edf493..974c14d1e0bfc 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -465,7 +465,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs; alt_port->bridge.of_node = to_of_node(fwnode); alt_port->bridge.ops = DRM_BRIDGE_OP_HPD; - alt_port->bridge.type = DRM_MODE_CONNECTOR_USB; + alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; ret = devm_drm_bridge_add(dev, &alt_port->bridge); if (ret) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 2c21d5b96fdce..bcbf840cd41c8 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1157,6 +1157,7 @@ config SPI_XTENSA_XTFPGA config SPI_ZYNQ_QSPI tristate "Xilinx Zynq QSPI controller" depends on ARCH_ZYNQ || COMPILE_TEST + depends on SPI_MEM help This enables support for the Zynq Quad SPI controller in master mode. diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index c964f41dcc428..168eff721ed37 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -759,7 +759,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) f->memmap_len = len > NXP_FSPI_MIN_IOMAP ? 
len : NXP_FSPI_MIN_IOMAP; - f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start, + f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start, f->memmap_len); if (!f->ahb_addr) { diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index e5cd82eb9e549..ddf1c684bcc7d 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -117,7 +117,7 @@ struct omap2_mcspi_regs { struct omap2_mcspi { struct completion txdone; - struct spi_master *master; + struct spi_controller *ctlr; /* Virtual base address of the controller */ void __iomem *base; unsigned long phys; @@ -125,10 +125,12 @@ struct omap2_mcspi { struct omap2_mcspi_dma *dma_channels; struct device *dev; struct omap2_mcspi_regs ctx; + struct clk *ref_clk; int fifo_depth; - bool slave_aborted; + bool target_aborted; unsigned int pin_dir:1; size_t max_xfer_len; + u32 ref_clk_hz; }; struct omap2_mcspi_cs { @@ -141,17 +143,17 @@ struct omap2_mcspi_cs { u32 chconf0, chctrl0; }; -static inline void mcspi_write_reg(struct spi_master *master, +static inline void mcspi_write_reg(struct spi_controller *ctlr, int idx, u32 val) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); writel_relaxed(val, mcspi->base + idx); } -static inline u32 mcspi_read_reg(struct spi_master *master, int idx) +static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); return readl_relaxed(mcspi->base + idx); } @@ -235,7 +237,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); u32 l; /* The controller handles the inverted chip selects @@ -266,24 +268,24 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable) } } -static void omap2_mcspi_set_mode(struct spi_master *master) +static void omap2_mcspi_set_mode(struct spi_controller *ctlr) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_regs *ctx = &mcspi->ctx; u32 l; /* - * Choose master or slave mode + * Choose host or target mode */ - l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL); + l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL); l &= ~(OMAP2_MCSPI_MODULCTRL_STEST); - if (spi_controller_is_slave(master)) { + if (spi_controller_is_target(ctlr)) { l |= (OMAP2_MCSPI_MODULCTRL_MS); } else { l &= ~(OMAP2_MCSPI_MODULCTRL_MS); l |= OMAP2_MCSPI_MODULCTRL_SINGLE; } - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); + mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l); ctx->modulctrl = l; } @@ -291,14 +293,14 @@ static void omap2_mcspi_set_mode(struct spi_master *master) static void omap2_mcspi_set_fifo(const struct spi_device *spi, struct spi_transfer *t, int enable) { - struct spi_master *master = spi->master; + struct spi_controller *ctlr = spi->controller; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi *mcspi; unsigned int wcnt; int max_fifo_depth, bytes_per_word; u32 chconf, xferlevel; - mcspi = spi_master_get_devdata(master); + mcspi = spi_controller_get_devdata(ctlr); chconf = mcspi_cached_chconf0(spi); if (enable) { @@ -326,7 +328,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, 
xferlevel |= bytes_per_word - 1; } - mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel); + mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel); mcspi_write_chconf0(spi, chconf); mcspi->fifo_depth = max_fifo_depth; @@ -364,9 +366,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi, struct completion *x) { - if (spi_controller_is_slave(mcspi->master)) { + if (spi_controller_is_target(mcspi->ctlr)) { if (wait_for_completion_interruptible(x) || - mcspi->slave_aborted) + mcspi->target_aborted) return -EINTR; } else { wait_for_completion(x); @@ -378,7 +380,7 @@ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi, static void omap2_mcspi_rx_callback(void *data) { struct spi_device *spi = data; - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; /* We must disable the DMA RX request */ @@ -390,7 +392,7 @@ static void omap2_mcspi_rx_callback(void *data) static void omap2_mcspi_tx_callback(void *data) { struct spi_device *spi = data; - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; /* We must disable the DMA TX request */ @@ -407,7 +409,7 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi, struct omap2_mcspi_dma *mcspi_dma; struct dma_async_tx_descriptor *tx; - mcspi = spi_master_get_devdata(spi->master); + mcspi = spi_controller_get_devdata(spi->controller); mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); @@ -445,13 +447,13 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; struct dma_async_tx_descriptor *tx; - mcspi = spi_master_get_devdata(spi->master); + mcspi = spi_controller_get_devdata(spi->controller); mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; count = xfer->len; /* * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM - * it mentions reducing DMA transfer length by one element in master + * it mentions reducing DMA transfer length by one element in host * normal mode. 
*/ if (mcspi->fifo_depth == 0) @@ -514,7 +516,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, omap2_mcspi_set_dma_req(spi, 1, 1); ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion); - if (ret || mcspi->slave_aborted) { + if (ret || mcspi->target_aborted) { dmaengine_terminate_sync(mcspi_dma->dma_rx); omap2_mcspi_set_dma_req(spi, 1, 0); return 0; @@ -590,7 +592,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) void __iomem *irqstat_reg; int wait_res; - mcspi = spi_master_get_devdata(spi->master); + mcspi = spi_controller_get_devdata(spi->controller); mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; if (cs->word_len <= 8) { @@ -617,14 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) rx = xfer->rx_buf; tx = xfer->tx_buf; - mcspi->slave_aborted = false; + mcspi->target_aborted = false; reinit_completion(&mcspi_dma->dma_tx_completion); reinit_completion(&mcspi_dma->dma_rx_completion); reinit_completion(&mcspi->txdone); if (tx) { - /* Enable EOW IRQ to know end of tx in slave mode */ - if (spi_controller_is_slave(spi->master)) - mcspi_write_reg(spi->master, + /* Enable EOW IRQ to know end of tx in target mode */ + if (spi_controller_is_target(spi->controller)) + mcspi_write_reg(spi->controller, OMAP2_MCSPI_IRQENABLE, OMAP2_MCSPI_IRQSTATUS_EOW); omap2_mcspi_tx_dma(spi, xfer, cfg); @@ -637,15 +639,15 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) int ret; ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion); - if (ret || mcspi->slave_aborted) { + if (ret || mcspi->target_aborted) { dmaengine_terminate_sync(mcspi_dma->dma_tx); omap2_mcspi_set_dma_req(spi, 0, 0); return 0; } - if (spi_controller_is_slave(mcspi->master)) { + if (spi_controller_is_target(mcspi->ctlr)) { ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone); - if (ret || mcspi->slave_aborted) + if (ret || mcspi->target_aborted) return 0; } @@ -656,7 +658,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) OMAP2_MCSPI_IRQSTATUS_EOW) < 0) dev_err(&spi->dev, "EOW timed out\n"); - mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS, + mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS, OMAP2_MCSPI_IRQSTATUS_EOW); } @@ -880,12 +882,12 @@ out: return count - c; } -static u32 omap2_mcspi_calc_divisor(u32 speed_hz) +static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz) { u32 div; for (div = 0; div < 15; div++) - if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div)) + if (speed_hz >= (ref_clk_hz >> div)) return div; return 15; @@ -897,11 +899,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, { struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi *mcspi; - u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0; + u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0; u8 word_len = spi->bits_per_word; u32 speed_hz = spi->max_speed_hz; - mcspi = spi_master_get_devdata(spi->master); + mcspi = spi_controller_get_devdata(spi->controller); if (t != NULL && t->bits_per_word) word_len = t->bits_per_word; @@ -911,14 +913,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, if (t && t->speed_hz) speed_hz = t->speed_hz; - speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ); - if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) { - clkd = omap2_mcspi_calc_divisor(speed_hz); - speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd; + ref_clk_hz = mcspi->ref_clk_hz; + speed_hz = min_t(u32, speed_hz, ref_clk_hz); + if 
(speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) { + clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz); + speed_hz = ref_clk_hz >> clkd; clkg = 0; } else { - div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz; - speed_hz = OMAP2_MCSPI_MAX_FREQ / div; + div = (ref_clk_hz + speed_hz - 1) / speed_hz; + speed_hz = ref_clk_hz / div; clkd = (div - 1) & 0xf; extclk = (div - 1) >> 4; clkg = OMAP2_MCSPI_CHCONF_CLKG; @@ -926,7 +929,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, l = mcspi_cached_chconf0(spi); - /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS + /* standard 4-wire host mode: SCK, MOSI/out, MISO/in, nCS * REVISIT: this controller could support SPI_3WIRE mode. */ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) { @@ -1017,13 +1020,13 @@ no_dma: return ret; } -static void omap2_mcspi_release_dma(struct spi_master *master) +static void omap2_mcspi_release_dma(struct spi_controller *ctlr) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_dma *mcspi_dma; int i; - for (i = 0; i < master->num_chipselect; i++) { + for (i = 0; i < ctlr->num_chipselect; i++) { mcspi_dma = &mcspi->dma_channels[i]; if (mcspi_dma->dma_rx) { @@ -1054,7 +1057,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) { bool initial_setup = false; int ret; - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); struct omap2_mcspi_regs *ctx = &mcspi->ctx; struct omap2_mcspi_cs *cs = spi->controller_state; @@ -1096,24 +1099,24 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data) struct omap2_mcspi *mcspi = data; u32 irqstat; - irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS); + irqstat = mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS); if (!irqstat) return IRQ_NONE; - /* Disable IRQ and wakeup slave xfer task */ - mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0); + /* Disable IRQ and wakeup target xfer task */ + mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0); if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW) complete(&mcspi->txdone); return IRQ_HANDLED; } -static int omap2_mcspi_slave_abort(struct spi_master *master) +static int omap2_mcspi_target_abort(struct spi_controller *ctlr) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels; - mcspi->slave_aborted = true; + mcspi->target_aborted = true; complete(&mcspi_dma->dma_rx_completion); complete(&mcspi_dma->dma_tx_completion); complete(&mcspi->txdone); @@ -1121,7 +1124,7 @@ static int omap2_mcspi_slave_abort(struct spi_master *master) return 0; } -static int omap2_mcspi_transfer_one(struct spi_master *master, +static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *t) { @@ -1129,7 +1132,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, /* We only enable one channel at a time -- the one whose message is * -- although this controller would gladly * arbitrate among multiple channels. This corresponds to "single - * channel" master mode. As a side effect, we need to manage the + * channel" host mode. As a side effect, we need to manage the * chipselect with the FORCE bit ... CS != channel enable. 
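The omap2-mcspi changes above parameterise the bit-rate setup on a reference clock read from the clock framework instead of the fixed OMAP2_MCSPI_MAX_FREQ. The divider selection itself is plain arithmetic: choose the smallest power-of-two divider (capped at divide-by-32768) whose output does not exceed the requested speed, and fall back to the finer-grained divider path otherwise. A small self-contained C program mirroring that selection; the 48 MHz reference is just an example value:

#include <stdint.h>
#include <stdio.h>

/* Smallest div in [0, 15] such that (ref_clk_hz >> div) does not exceed
 * speed_hz, i.e. the fastest power-of-two rate at or below the request. */
static uint32_t calc_pow2_div(uint32_t speed_hz, uint32_t ref_clk_hz)
{
        uint32_t div;

        for (div = 0; div < 15; div++)
                if (speed_hz >= (ref_clk_hz >> div))
                        return div;
        return 15;
}

int main(void)
{
        uint32_t ref = 48000000;        /* example reference clock */
        uint32_t want = 10000000;       /* requested SPI clock */
        uint32_t div = calc_pow2_div(want, ref);

        /* 48 MHz >> 3 = 6 MHz is the fastest power-of-two rate not above 10 MHz */
        printf("div=%u -> %u Hz\n", div, ref >> div);
        return 0;
}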
*/ @@ -1141,13 +1144,13 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, int status = 0; u32 chconf; - mcspi = spi_master_get_devdata(master); + mcspi = spi_controller_get_devdata(ctlr); mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0); cs = spi->controller_state; cd = spi->controller_data; /* - * The slave driver could have changed spi->mode in which case + * The target driver could have changed spi->mode in which case * it will be different from cs->mode (the current hardware setup). * If so, set par_override (even though its not a parity issue) so * omap2_mcspi_setup_transfer will be called to configure the hardware @@ -1175,7 +1178,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, if (cd && cd->cs_per_word) { chconf = mcspi->ctx.modulctrl; chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf); + mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); mcspi->ctx.modulctrl = mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); } @@ -1201,8 +1204,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, unsigned count; if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && - master->cur_msg_mapped && - master->can_dma(master, spi, t)) + ctlr->cur_msg_mapped && + ctlr->can_dma(ctlr, spi, t)) omap2_mcspi_set_fifo(spi, t, 1); omap2_mcspi_set_enable(spi, 1); @@ -1213,8 +1216,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, + OMAP2_MCSPI_TX0); if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && - master->cur_msg_mapped && - master->can_dma(master, spi, t)) + ctlr->cur_msg_mapped && + ctlr->can_dma(ctlr, spi, t)) count = omap2_mcspi_txrx_dma(spi, t); else count = omap2_mcspi_txrx_pio(spi, t); @@ -1240,7 +1243,7 @@ out: if (cd && cd->cs_per_word) { chconf = mcspi->ctx.modulctrl; chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE; - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf); + mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); mcspi->ctx.modulctrl = mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); } @@ -1256,10 +1259,10 @@ out: return status; } -static int omap2_mcspi_prepare_message(struct spi_master *master, +static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_regs *ctx = &mcspi->ctx; struct omap2_mcspi_cs *cs; @@ -1283,29 +1286,29 @@ static int omap2_mcspi_prepare_message(struct spi_master *master, return 0; } -static bool omap2_mcspi_can_dma(struct spi_master *master, +static bool omap2_mcspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)]; if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) return false; - if (spi_controller_is_slave(master)) + if (spi_controller_is_target(ctlr)) return true; - master->dma_rx = mcspi_dma->dma_rx; - master->dma_tx = mcspi_dma->dma_tx; + ctlr->dma_rx = mcspi_dma->dma_rx; + ctlr->dma_tx = mcspi_dma->dma_tx; return (xfer->len >= DMA_MIN_BYTES); } static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi) { - struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller); struct omap2_mcspi_dma *mcspi_dma = 
&mcspi->dma_channels[spi_get_chipselect(spi, 0)]; @@ -1317,7 +1320,7 @@ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi) static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi) { - struct spi_master *master = mcspi->master; + struct spi_controller *ctlr = mcspi->ctlr; struct omap2_mcspi_regs *ctx = &mcspi->ctx; int ret = 0; @@ -1325,11 +1328,11 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi) if (ret < 0) return ret; - mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, + mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, OMAP2_MCSPI_WAKEUPENABLE_WKEN); ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN; - omap2_mcspi_set_mode(master); + omap2_mcspi_set_mode(ctlr); pm_runtime_mark_last_busy(mcspi->dev); pm_runtime_put_autosuspend(mcspi->dev); return 0; @@ -1353,8 +1356,8 @@ static int omap_mcspi_runtime_suspend(struct device *dev) */ static int omap_mcspi_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); struct omap2_mcspi_regs *ctx = &mcspi->ctx; struct omap2_mcspi_cs *cs; int error; @@ -1364,8 +1367,8 @@ static int omap_mcspi_runtime_resume(struct device *dev) dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error); /* McSPI: context restore */ - mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl); - mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable); + mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl); + mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable); list_for_each_entry(cs, &ctx->cs, node) { /* @@ -1420,7 +1423,7 @@ MODULE_DEVICE_TABLE(of, omap_mcspi_of_match); static int omap2_mcspi_probe(struct platform_device *pdev) { - struct spi_master *master; + struct spi_controller *ctlr; const struct omap2_mcspi_platform_config *pdata; struct omap2_mcspi *mcspi; struct resource *r; @@ -1430,32 +1433,30 @@ static int omap2_mcspi_probe(struct platform_device *pdev) const struct of_device_id *match; if (of_property_read_bool(node, "spi-slave")) - master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi)); + ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi)); else - master = spi_alloc_master(&pdev->dev, sizeof(*mcspi)); - if (!master) + ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi)); + if (!ctlr) return -ENOMEM; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); - master->setup = omap2_mcspi_setup; - master->auto_runtime_pm = true; - master->prepare_message = omap2_mcspi_prepare_message; - master->can_dma = omap2_mcspi_can_dma; - master->transfer_one = omap2_mcspi_transfer_one; - master->set_cs = omap2_mcspi_set_cs; - master->cleanup = omap2_mcspi_cleanup; - master->slave_abort = omap2_mcspi_slave_abort; - master->dev.of_node = node; - master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; - master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; - master->use_gpio_descriptors = true; - - platform_set_drvdata(pdev, master); - - mcspi = spi_master_get_devdata(master); - mcspi->master = master; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + ctlr->setup = omap2_mcspi_setup; + ctlr->auto_runtime_pm = true; + ctlr->prepare_message = omap2_mcspi_prepare_message; + ctlr->can_dma = omap2_mcspi_can_dma; + ctlr->transfer_one 
= omap2_mcspi_transfer_one; + ctlr->set_cs = omap2_mcspi_set_cs; + ctlr->cleanup = omap2_mcspi_cleanup; + ctlr->target_abort = omap2_mcspi_target_abort; + ctlr->dev.of_node = node; + ctlr->use_gpio_descriptors = true; + + platform_set_drvdata(pdev, ctlr); + + mcspi = spi_controller_get_devdata(ctlr); + mcspi->ctlr = ctlr; match = of_match_device(omap_mcspi_of_match, &pdev->dev); if (match) { @@ -1463,24 +1464,24 @@ static int omap2_mcspi_probe(struct platform_device *pdev) pdata = match->data; of_property_read_u32(node, "ti,spi-num-cs", &num_cs); - master->num_chipselect = num_cs; + ctlr->num_chipselect = num_cs; if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in")) mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN; } else { pdata = dev_get_platdata(&pdev->dev); - master->num_chipselect = pdata->num_cs; + ctlr->num_chipselect = pdata->num_cs; mcspi->pin_dir = pdata->pin_dir; } regs_offset = pdata->regs_offset; if (pdata->max_xfer_len) { mcspi->max_xfer_len = pdata->max_xfer_len; - master->max_transfer_size = omap2_mcspi_max_xfer_size; + ctlr->max_transfer_size = omap2_mcspi_max_xfer_size; } mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(mcspi->base)) { status = PTR_ERR(mcspi->base); - goto free_master; + goto free_ctlr; } mcspi->phys = r->start + regs_offset; mcspi->base += regs_offset; @@ -1489,36 +1490,44 @@ static int omap2_mcspi_probe(struct platform_device *pdev) INIT_LIST_HEAD(&mcspi->ctx.cs); - mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect, + mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect, sizeof(struct omap2_mcspi_dma), GFP_KERNEL); if (mcspi->dma_channels == NULL) { status = -ENOMEM; - goto free_master; + goto free_ctlr; } - for (i = 0; i < master->num_chipselect; i++) { + for (i = 0; i < ctlr->num_chipselect; i++) { sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i); sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i); status = omap2_mcspi_request_dma(mcspi, &mcspi->dma_channels[i]); if (status == -EPROBE_DEFER) - goto free_master; + goto free_ctlr; } status = platform_get_irq(pdev, 0); if (status < 0) - goto free_master; + goto free_ctlr; init_completion(&mcspi->txdone); status = devm_request_irq(&pdev->dev, status, omap2_mcspi_irq_handler, 0, pdev->name, mcspi); if (status) { dev_err(&pdev->dev, "Cannot request IRQ"); - goto free_master; + goto free_ctlr; } + mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); + if (mcspi->ref_clk) + mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk); + else + mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ; + ctlr->max_speed_hz = mcspi->ref_clk_hz; + ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15; + pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); pm_runtime_enable(&pdev->dev); @@ -1527,7 +1536,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev) if (status < 0) goto disable_pm; - status = devm_spi_register_controller(&pdev->dev, master); + status = devm_spi_register_controller(&pdev->dev, ctlr); if (status < 0) goto disable_pm; @@ -1537,18 +1546,18 @@ disable_pm: pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); -free_master: - omap2_mcspi_release_dma(master); - spi_master_put(master); +free_ctlr: + omap2_mcspi_release_dma(ctlr); + spi_controller_put(ctlr); return status; } static void omap2_mcspi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); - struct omap2_mcspi *mcspi = 
spi_master_get_devdata(master); + struct spi_controller *ctlr = platform_get_drvdata(pdev); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); - omap2_mcspi_release_dma(master); + omap2_mcspi_release_dma(ctlr); pm_runtime_dont_use_autosuspend(mcspi->dev); pm_runtime_put_sync(mcspi->dev); @@ -1560,8 +1569,8 @@ MODULE_ALIAS("platform:omap2_mcspi"); static int __maybe_unused omap2_mcspi_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); int error; error = pinctrl_pm_select_sleep_state(dev); @@ -1569,9 +1578,9 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev) dev_warn(mcspi->dev, "%s: failed to set pins: %i\n", __func__, error); - error = spi_master_suspend(master); + error = spi_controller_suspend(ctlr); if (error) - dev_warn(mcspi->dev, "%s: master suspend failed: %i\n", + dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n", __func__, error); return pm_runtime_force_suspend(dev); @@ -1579,13 +1588,13 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev) static int __maybe_unused omap2_mcspi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); - struct omap2_mcspi *mcspi = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr); int error; - error = spi_master_resume(master); + error = spi_controller_resume(ctlr); if (error) - dev_warn(mcspi->dev, "%s: master resume failed: %i\n", + dev_warn(mcspi->dev, "%s: controller resume failed: %i\n", __func__, error); return pm_runtime_force_resume(dev); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 4d6db6182c5ed..f5cd365c913a8 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1086,6 +1086,8 @@ static int tegra_slink_probe(struct platform_device *pdev) reset_control_deassert(tspi->rst); spi_irq = platform_get_irq(pdev, 0); + if (spi_irq < 0) + return spi_irq; tspi->irq = spi_irq; ret = request_threaded_irq(tspi->irq, tegra_slink_isr, tegra_slink_isr_thread, IRQF_ONESHOT, diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c index b696bf884cbd6..32af0e96e762b 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c @@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device) { struct cedrus_dev *dev = dev_get_drvdata(device); - reset_control_assert(dev->rstc); - clk_disable_unprepare(dev->ram_clk); clk_disable_unprepare(dev->mod_clk); clk_disable_unprepare(dev->ahb_clk); + reset_control_assert(dev->rstc); + return 0; } @@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device) struct cedrus_dev *dev = dev_get_drvdata(device); int ret; + ret = reset_control_reset(dev->rstc); + if (ret) { + dev_err(dev->dev, "Failed to apply reset\n"); + + return ret; + } + ret = clk_prepare_enable(dev->ahb_clk); if (ret) { dev_err(dev->dev, "Failed to enable AHB clock\n"); - return ret; + goto err_rst; } ret = clk_prepare_enable(dev->mod_clk); @@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device) goto err_mod_clk; } - ret = reset_control_reset(dev->rstc); - if (ret) { - dev_err(dev->dev, "Failed to apply reset\n"); - - goto err_ram_clk; - } - return 0; -err_ram_clk: - 
clk_disable_unprepare(dev->ram_clk); err_mod_clk: clk_disable_unprepare(dev->mod_clk); err_ahb_clk: clk_disable_unprepare(dev->ahb_clk); +err_rst: + reset_control_assert(dev->rstc); return ret; } diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c index 843214d30bd8b..8b0edb2048443 100644 --- a/drivers/thermal/mediatek/auxadc_thermal.c +++ b/drivers/thermal/mediatek/auxadc_thermal.c @@ -1267,7 +1267,7 @@ static int mtk_thermal_probe(struct platform_device *pdev) mtk_thermal_turn_on_buffer(mt, apmixed_base); - if (mt->conf->version != MTK_THERMAL_V2) + if (mt->conf->version != MTK_THERMAL_V1) mtk_thermal_release_periodic_ts(mt, auxadc_base); if (mt->conf->version == MTK_THERMAL_V1) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 58533ea75cd92..e6f3166a9208f 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -689,7 +689,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto release_ida; - sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); + snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point", + dev->id); sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; dev->attr.attr.mode = 0444; @@ -698,7 +699,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto remove_symbol_link; - sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id); + snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name), + "cdev%d_weight", dev->id); sysfs_attr_init(&dev->weight_attr.attr); dev->weight_attr.attr.name = dev->weight_attr_name; dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO; diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index 024e2e365a26b..597ac4144e331 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -55,6 +55,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) { struct thermal_trip trip; int low = -INT_MAX, high = INT_MAX; + bool same_trip = false; int i, ret; lockdep_assert_held(&tz->lock); @@ -63,6 +64,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) return; for (i = 0; i < tz->num_trips; i++) { + bool low_set = false; int trip_low; ret = __thermal_zone_get_trip(tz, i , &trip); @@ -71,18 +73,31 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) trip_low = trip.temperature - trip.hysteresis; - if (trip_low < tz->temperature && trip_low > low) + if (trip_low < tz->temperature && trip_low > low) { low = trip_low; + low_set = true; + same_trip = false; + } if (trip.temperature > tz->temperature && - trip.temperature < high) + trip.temperature < high) { high = trip.temperature; + same_trip = low_set; + } } /* No need to change trip points */ if (tz->prev_low_trip == low && tz->prev_high_trip == high) return; + /* + * If "high" and "low" are the same, skip the change unless this is the + * first time. 
+ */ + if (same_trip && (tz->prev_low_trip != -INT_MAX || + tz->prev_high_trip != INT_MAX)) + return; + tz->prev_low_trip = low; tz->prev_high_trip = high; diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index 0d04287da0984..ef8741c3e6629 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit) return; } - spin_lock_irq(&current->sighand->siglock); - put_pid(current->signal->tty_old_pgrp); - current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); - spin_unlock_irq(&current->sighand->siglock); - + tty = get_current_tty(); if (tty) { unsigned long flags; @@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit) tty_kref_put(tty); } + /* If tty->ctrl.pgrp is not NULL, it may be assigned to + * current->signal->tty_old_pgrp in a race condition, and + * cause pid memleak. Release current->signal->tty_old_pgrp + * after tty->ctrl.pgrp set to NULL. + */ + spin_lock_irq(&current->sighand->siglock); + put_pid(current->signal->tty_old_pgrp); + current->signal->tty_old_pgrp = NULL; + spin_unlock_irq(&current->sighand->siglock); + /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 8382e8cfa414a..5767642982c13 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -3632,7 +3632,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, */ ret = utf16s_to_utf8s(uc_str->uc, uc_str->len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, str, ascii_len); + UTF16_BIG_ENDIAN, str, ascii_len - 1); /* replace non-printable or non-ASCII characters with spaces */ for (i = 0; i < ret; i++) diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 08af26b762a2d..0cce192083701 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -30,8 +30,7 @@ struct ehci_ci_priv { }; struct ci_hdrc_dma_aligned_buffer { - void *kmalloc_ptr; - void *old_xfer_buffer; + void *original_buffer; u8 data[]; }; @@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd) return 0; } -static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb) +static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back) { struct ci_hdrc_dma_aligned_buffer *temp; - size_t length; if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) return; + urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; temp = container_of(urb->transfer_buffer, struct ci_hdrc_dma_aligned_buffer, data); + urb->transfer_buffer = temp->original_buffer; + + if (copy_back && usb_urb_dir_in(urb)) { + size_t length; - if (usb_urb_dir_in(urb)) { if (usb_pipeisoc(urb->pipe)) length = urb->transfer_buffer_length; else length = urb->actual_length; - memcpy(temp->old_xfer_buffer, temp->data, length); + memcpy(temp->original_buffer, temp->data, length); } - urb->transfer_buffer = temp->old_xfer_buffer; - kfree(temp->kmalloc_ptr); - urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; + kfree(temp); } static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { - struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr; - const unsigned int ci_hdrc_usb_dma_align = 32; - size_t kmalloc_size; + struct ci_hdrc_dma_aligned_buffer *temp; - if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 || - !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1))) + if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0) + return 0; + if 
(IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4) + && IS_ALIGNED(urb->transfer_buffer_length, 4)) return 0; - /* Allocate a buffer with enough padding for alignment */ - kmalloc_size = urb->transfer_buffer_length + - sizeof(struct ci_hdrc_dma_aligned_buffer) + - ci_hdrc_usb_dma_align - 1; - - kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); - if (!kmalloc_ptr) + temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags); + if (!temp) return -ENOMEM; - /* Position our struct dma_aligned_buffer such that data is aligned */ - temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1; - temp->kmalloc_ptr = kmalloc_ptr; - temp->old_xfer_buffer = urb->transfer_buffer; if (usb_urb_dir_out(urb)) memcpy(temp->data, urb->transfer_buffer, urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; + temp->original_buffer = urb->transfer_buffer; + urb->transfer_buffer = temp->data; urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; return 0; @@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); if (ret) - ci_hdrc_free_dma_aligned_buffer(urb); + ci_hdrc_free_dma_aligned_buffer(urb, false); return ret; } @@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) { usb_hcd_unmap_urb_for_dma(hcd, urb); - ci_hdrc_free_dma_aligned_buffer(urb); + ci_hdrc_free_dma_aligned_buffer(urb, true); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 657f1f659ffaf..35c7a4df8e717 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4769,8 +4769,8 @@ fail3: if (qh_allocated && qh->channel && qh->channel->qh == qh) qh->channel->qh = NULL; fail2: - spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + spin_unlock_irqrestore(&hsotg->lock, flags); kfree(qtd); fail1: if (qh_allocated) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index b9ae5c2a25275..bde43cef8846c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -535,6 +535,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; + else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 28218c8f18376..b93161374293b 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -458,23 +458,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) int ret; if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { - clk_prepare_enable(xhci->clk); - clk_prepare_enable(xhci->reg_clk); + ret = clk_prepare_enable(xhci->clk); + if (ret) + return ret; + + ret = clk_prepare_enable(xhci->reg_clk); + if (ret) { + clk_disable_unprepare(xhci->clk); + return ret; + } } ret = xhci_priv_resume_quirk(hcd); if (ret) - return ret; + goto disable_clks; ret = xhci_resume(xhci, PMSG_RESUME); if (ret) - return ret; + goto disable_clks; pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); return 0; + +disable_clks: + if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { + 
clk_disable_unprepare(xhci->clk); + clk_disable_unprepare(xhci->reg_clk); + } + + return ret; } static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 9c6954aad6c88..ce625b1ce9a51 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev) /* release port */ rc = usb_hub_release_port(udev->parent, udev->portnum, (struct usb_dev_state *) udev); - if (rc) { - dev_dbg(&udev->dev, "unable to release port\n"); + /* + * NOTE: If a HUB disconnect triggered disconnect of the down stream + * device usb_hub_release_port will return -ENODEV so we can safely ignore + * that error here. + */ + if (rc && (rc != -ENODEV)) { + dev_dbg(&udev->dev, "unable to release port (%i)\n", rc); return; } diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index a51fbab963680..289bd9ce4d36d 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -626,9 +626,14 @@ static void pwm_backlight_remove(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); struct pwm_bl_data *pb = bl_get_data(bl); + struct pwm_state state; backlight_device_unregister(bl); pwm_backlight_power_off(pb); + pwm_get_state(pb->pwm, &state); + state.duty_cycle = 0; + state.enabled = false; + pwm_apply_state(pb->pwm, &state); if (pb->exit) pb->exit(&pdev->dev); @@ -638,8 +643,13 @@ static void pwm_backlight_shutdown(struct platform_device *pdev) { struct backlight_device *bl = platform_get_drvdata(pdev); struct pwm_bl_data *pb = bl_get_data(bl); + struct pwm_state state; pwm_backlight_power_off(pb); + pwm_get_state(pb->pwm, &state); + state.duty_cycle = 0; + state.enabled = false; + pwm_apply_state(pb->pwm, &state); } #ifdef CONFIG_PM_SLEEP @@ -647,12 +657,24 @@ static int pwm_backlight_suspend(struct device *dev) { struct backlight_device *bl = dev_get_drvdata(dev); struct pwm_bl_data *pb = bl_get_data(bl); + struct pwm_state state; if (pb->notify) pb->notify(pb->dev, 0); pwm_backlight_power_off(pb); + /* + * Note that disabling the PWM doesn't guarantee that the output stays + * at its inactive state. However without the PWM disabled, the PWM + * driver refuses to suspend. So disable here even though this might + * enable the backlight on poorly designed boards. + */ + pwm_get_state(pb->pwm, &state); + state.duty_cycle = 0; + state.enabled = false; + pwm_apply_state(pb->pwm, &state); + if (pb->notify_after) pb->notify_after(pb->dev, 0); diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fbd9f069ac2e..0bced82fa4940 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s) * Workaround for failed writing desc register of planes. * Needed with MPC5121 DIU rev 2.0 silicon. 
*/ -void wr_reg_wa(u32 *reg, u32 val) +static void wr_reg_wa(u32 *reg, u32 val) { do { out_be32(reg, val); diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index f4c8677488fb8..f5eaa58a808fb 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info) if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); - framebuffer_release(info); return -ENODEV; } @@ -1451,14 +1450,11 @@ static int init_imstt(struct fb_info *info) FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN; - if (fb_alloc_cmap(&info->cmap, 0, 0)) { - framebuffer_release(info); + if (fb_alloc_cmap(&info->cmap, 0, 0)) return -ENODEV; - } if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); return -ENODEV; } @@ -1498,8 +1494,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!request_mem_region(addr, size, "imsttfb")) { printk(KERN_ERR "imsttfb: Can't reserve memory region\n"); - framebuffer_release(info); - return -ENODEV; + ret = -ENODEV; + goto release_info; } switch (pdev->device) { @@ -1516,36 +1512,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) printk(KERN_INFO "imsttfb: Device 0x%x unknown, " "contact maintainer.\n", pdev->device); ret = -ENODEV; - goto error; + goto release_mem_region; } info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 0x400000 : 0x800000); if (!info->screen_base) - goto error; + goto release_mem_region; info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); if (!par->dc_regs) - goto error; + goto unmap_screen_base; par->cmap_regs_phys = addr + 0x840000; par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000); if (!par->cmap_regs) - goto error; + goto unmap_dc_regs; info->pseudo_palette = par->palette; ret = init_imstt(info); if (ret) - goto error; + goto unmap_cmap_regs; pci_set_drvdata(pdev, info); - return ret; + return 0; -error: - if (par->dc_regs) - iounmap(par->dc_regs); - if (info->screen_base) - iounmap(info->screen_base); +unmap_cmap_regs: + iounmap(par->cmap_regs); +unmap_dc_regs: + iounmap(par->dc_regs); +unmap_screen_base: + iounmap(info->screen_base); +release_mem_region: release_mem_region(addr, size); +release_info: framebuffer_release(info); return ret; } diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 97dbe715e96ad..5bee58ef5f1e3 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -57,6 +57,11 @@ struct snp_guest_dev { struct snp_secrets_page_layout *layout; struct snp_req_data input; + union { + struct snp_report_req report; + struct snp_derived_key_req derived_key; + struct snp_ext_report_req ext_report; + } req; u32 *os_area_msg_seqno; u8 *vmpck; }; @@ -473,8 +478,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { struct snp_guest_crypto *crypto = snp_dev->crypto; + struct snp_report_req *req = &snp_dev->req.report; struct snp_report_resp *resp; - struct snp_report_req req; int rc, resp_len; lockdep_assert_held(&snp_cmd_mutex); @@ -482,7 +487,7 @@ static int 
get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io if (!arg->req_data || !arg->resp_data) return -EINVAL; - if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req))) return -EFAULT; /* @@ -496,7 +501,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io return -ENOMEM; rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, - SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data, + SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data, resp_len); if (rc) goto e_free; @@ -511,9 +516,9 @@ e_free: static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { + struct snp_derived_key_req *req = &snp_dev->req.derived_key; struct snp_guest_crypto *crypto = snp_dev->crypto; struct snp_derived_key_resp resp = {0}; - struct snp_derived_key_req req; int rc, resp_len; /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ u8 buf[64 + 16]; @@ -532,11 +537,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque if (sizeof(buf) < resp_len) return -ENOMEM; - if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req))) return -EFAULT; rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, - SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len); + SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len); if (rc) return rc; @@ -552,8 +557,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { + struct snp_ext_report_req *req = &snp_dev->req.ext_report; struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_ext_report_req req; struct snp_report_resp *resp; int ret, npages = 0, resp_len; @@ -562,18 +567,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques if (!arg->req_data || !arg->resp_data) return -EINVAL; - if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req))) + if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req))) return -EFAULT; /* userspace does not want certificate data */ - if (!req.certs_len || !req.certs_address) + if (!req->certs_len || !req->certs_address) goto cmd; - if (req.certs_len > SEV_FW_BLOB_MAX_SIZE || - !IS_ALIGNED(req.certs_len, PAGE_SIZE)) + if (req->certs_len > SEV_FW_BLOB_MAX_SIZE || + !IS_ALIGNED(req->certs_len, PAGE_SIZE)) return -EINVAL; - if (!access_ok((const void __user *)req.certs_address, req.certs_len)) + if (!access_ok((const void __user *)req->certs_address, req->certs_len)) return -EFAULT; /* @@ -582,8 +587,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques * the host. If host does not supply any certs in it, then copy * zeros to indicate that certificate data was not provided. 
*/ - memset(snp_dev->certs_data, 0, req.certs_len); - npages = req.certs_len >> PAGE_SHIFT; + memset(snp_dev->certs_data, 0, req->certs_len); + npages = req->certs_len >> PAGE_SHIFT; cmd: /* * The intermediate response buffer is used while decrypting the @@ -597,14 +602,14 @@ cmd: snp_dev->input.data_npages = npages; ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg, - SNP_MSG_REPORT_REQ, &req.data, - sizeof(req.data), resp->data, resp_len); + SNP_MSG_REPORT_REQ, &req->data, + sizeof(req->data), resp->data, resp_len); /* If certs length is invalid then copy the returned length */ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { - req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT; + req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT; - if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req))) + if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req))) ret = -EFAULT; } @@ -612,8 +617,8 @@ cmd: goto e_free; if (npages && - copy_to_user((void __user *)req.certs_address, snp_dev->certs_data, - req.certs_len)) { + copy_to_user((void __user *)req->certs_address, snp_dev->certs_data, + req->certs_len)) { ret = -EFAULT; goto e_free; } diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c index 607ce4b8df574..ec0c08652ec2f 100644 --- a/drivers/watchdog/ixp4xx_wdt.c +++ b/drivers/watchdog/ixp4xx_wdt.c @@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = { .owner = THIS_MODULE, }; +/* + * The A0 version of the IXP422 had a bug in the watchdog making + * is useless, but we still need to use it to restart the system + * as it is the only way, so in this special case we register a + * "dummy" watchdog that doesn't really work, but will support + * the restart operation. + */ +static int ixp4xx_wdt_dummy(struct watchdog_device *wdd) +{ + return 0; +} + +static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = { + .start = ixp4xx_wdt_dummy, + .stop = ixp4xx_wdt_dummy, + .restart = ixp4xx_wdt_restart, + .owner = THIS_MODULE, +}; + static const struct watchdog_info ixp4xx_wdt_info = { .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE @@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = { static int ixp4xx_wdt_probe(struct platform_device *pdev) { + static const struct watchdog_ops *iwdt_ops; struct device *dev = &pdev->dev; struct ixp4xx_wdt *iwdt; struct clk *clk; int ret; if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) { - dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n"); - return -ENODEV; + dev_info(dev, "Rev. 
A0 IXP42x CPU detected - only restart supported\n"); + iwdt_ops = &ixp4xx_wdt_restart_only_ops; + } else { + iwdt_ops = &ixp4xx_wdt_ops; } iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL); @@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev) iwdt->rate = IXP4XX_TIMER_FREQ; iwdt->wdd.info = &ixp4xx_wdt_info; - iwdt->wdd.ops = &ixp4xx_wdt_ops; + iwdt->wdd.ops = iwdt_ops; iwdt->wdd.min_timeout = 1; iwdt->wdd.max_timeout = U32_MAX / iwdt->rate; iwdt->wdd.parent = dev; diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c index d7eb8286e11ec..1ec1e014ba831 100644 --- a/drivers/watchdog/marvell_gti_wdt.c +++ b/drivers/watchdog/marvell_gti_wdt.c @@ -271,7 +271,7 @@ static int gti_wdt_probe(struct platform_device *pdev) &wdt_idx); if (!err) { if (wdt_idx >= priv->data->gti_num_timers) - return dev_err_probe(&pdev->dev, err, + return dev_err_probe(&pdev->dev, -EINVAL, "GTI wdog timer index not valid"); priv->wdt_timer_idx = wdt_idx; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index f00ad5f5f1d4a..da88173bac432 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -935,7 +935,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd) return -ENOMEM; dm_op = kirqfd + 1; - if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) { + if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) { ret = -EFAULT; goto error_kfree; } diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 059de92aea7d0..d47eee6c51435 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev) u16 val; int ret = 0; - err = pci_read_config_word(dev, PCI_COMMAND, &val); - if (err) - return err; - if (!(val & PCI_COMMAND_INTX_DISABLE)) - ret |= INTERRUPT_TYPE_INTX; - /* * Do not trust dev->msi(x)_enabled here, as enabling could be done * bypassing the pci_*msi* functions, by the qemu. @@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev) if (val & PCI_MSIX_FLAGS_ENABLE) ret |= INTERRUPT_TYPE_MSIX; } + + /* + * PCIe spec says device cannot use INTx if MSI/MSI-X is enabled, + * so check for INTx only when both are disabled. + */ + if (!ret) { + err = pci_read_config_word(dev, PCI_COMMAND, &val); + if (err) + return err; + if (!(val & PCI_COMMAND_INTX_DISABLE)) + ret |= INTERRUPT_TYPE_INTX; + } + return ret ?: INTERRUPT_TYPE_NONE; } diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 097316a741268..1948a9700c8fa 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value, return PCIBIOS_SET_FAILED; if (new_value & field_config->enable_bit) { - /* don't allow enabling together with other interrupt types */ + /* + * Don't allow enabling together with other interrupt type, but do + * allow enabling MSI(-X) while INTx is still active to please Linuxes + * MSI(-X) startup sequence. It is safe to do, as according to PCI + * spec, device with enabled MSI(-X) shouldn't use INTx. 
+ */ int int_type = xen_pcibk_get_interrupt_type(dev); if (int_type == INTERRUPT_TYPE_NONE || + int_type == INTERRUPT_TYPE_INTX || int_type == field_config->int_type) goto write; return PCIBIOS_SET_FAILED; diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index 981435103af1a..fc03326459664 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) pci_clear_mwi(dev); } - if (dev_data && dev_data->allow_interrupt_control) { - if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) { - if (value & PCI_COMMAND_INTX_DISABLE) { - pci_intx(dev, 0); - } else { - /* Do not allow enabling INTx together with MSI or MSI-X. */ - switch (xen_pcibk_get_interrupt_type(dev)) { - case INTERRUPT_TYPE_NONE: - pci_intx(dev, 1); - break; - case INTERRUPT_TYPE_INTX: - break; - default: - return PCIBIOS_SET_FAILED; - } - } - } - } + if (dev_data && dev_data->allow_interrupt_control && + ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE)) + pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE)); cmd->val = value; diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 639bf628389ba..3205e5d724c8c 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -1025,7 +1025,7 @@ static int __init xenbus_init(void) if (err < 0) { pr_err("xenstore_late_init couldn't bind irq err=%d\n", err); - return err; + goto out_error; } xs_init_irq = err; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8e7d03bc1b565..200dd780bc06f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1528,7 +1528,7 @@ static noinline int key_in_sk(struct btrfs_key *key, static noinline int copy_to_sk(struct btrfs_path *path, struct btrfs_key *key, struct btrfs_ioctl_search_key *sk, - size_t *buf_size, + u64 *buf_size, char __user *ubuf, unsigned long *sk_offset, int *num_found) @@ -1660,7 +1660,7 @@ out: static noinline int search_ioctl(struct inode *inode, struct btrfs_ioctl_search_key *sk, - size_t *buf_size, + u64 *buf_size, char __user *ubuf) { struct btrfs_fs_info *info = btrfs_sb(inode->i_sb); @@ -1733,7 +1733,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode, struct btrfs_ioctl_search_args __user *uargs = argp; struct btrfs_ioctl_search_key sk; int ret; - size_t buf_size; + u64 buf_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1763,8 +1763,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode, struct btrfs_ioctl_search_args_v2 __user *uarg = argp; struct btrfs_ioctl_search_args_v2 args; int ret; - size_t buf_size; - const size_t buf_limit = SZ_16M; + u64 buf_size; + const u64 buf_limit = SZ_16M; if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index b877203f1dc5a..4445a52a07076 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1798,6 +1798,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group * */ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); + /* @found_logical_ret must be specified. */ + ASSERT(found_logical_ret); + stripe = &sctx->stripes[sctx->cur_stripe]; scrub_reset_stripe(stripe); ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, @@ -1806,8 +1809,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group * /* Either >0 as no more extents or <0 for error. 
*/ if (ret) return ret; - if (found_logical_ret) - *found_logical_ret = stripe->logical; + *found_logical_ret = stripe->logical; sctx->cur_stripe++; /* We filled one group, submit it. */ @@ -2010,7 +2012,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx, /* Go through each extent items inside the logical range */ while (cur_logical < logical_end) { - u64 found_logical; + u64 found_logical = U64_MAX; u64 cur_physical = physical + cur_logical - logical_start; /* Canceled? */ @@ -2045,6 +2047,8 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx, if (ret < 0) break; + /* queue_scrub_stripe() returned 0, @found_logical must be updated. */ + ASSERT(found_logical != U64_MAX); cur_logical = found_logical + BTRFS_STRIPE_LEN; /* Don't hold CPU for too long time */ diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 87b3753aa4b1e..c45e8c2d62e11 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -939,7 +939,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user new[pos + count] = '\0'; strim(new); - rcu_assign_pointer(*(char **)file->private_data, new); + rcu_assign_pointer(*(char __rcu **)file->private_data, new); synchronize_rcu(); kfree(old); diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 5aabcb6f0f157..c93359ceaae61 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -973,7 +973,8 @@ void dlm_delete_debug_comms_file(void *ctx) void dlm_create_debug_file(struct dlm_ls *ls) { - char name[DLM_LOCKSPACE_LEN + 8]; + /* Reserve enough space for the longest file name */ + char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")]; /* format 1 */ @@ -986,7 +987,7 @@ void dlm_create_debug_file(struct dlm_ls *ls) /* format 2 */ memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name); + snprintf(name, sizeof(name), "%s_locks", ls->ls_name); ls->ls_debug_locks_dentry = debugfs_create_file(name, 0644, @@ -997,7 +998,7 @@ void dlm_create_debug_file(struct dlm_ls *ls) /* format 3 */ memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name); + snprintf(name, sizeof(name), "%s_all", ls->ls_name); ls->ls_debug_all_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, @@ -1008,7 +1009,7 @@ void dlm_create_debug_file(struct dlm_ls *ls) /* format 4 */ memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name); + snprintf(name, sizeof(name), "%s_toss", ls->ls_name); ls->ls_debug_toss_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, @@ -1017,7 +1018,7 @@ void dlm_create_debug_file(struct dlm_ls *ls) &format4_fops); memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name); + snprintf(name, sizeof(name), "%s_waiters", ls->ls_name); ls->ls_debug_waiters_dentry = debugfs_create_file(name, 0644, @@ -1028,7 +1029,7 @@ void dlm_create_debug_file(struct dlm_ls *ls) /* format 5 */ memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_queued_asts", ls->ls_name); + snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name); ls->ls_debug_queued_asts_dentry = debugfs_create_file(name, 0644, diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index f641b36a36db0..2247ebb61be1e 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -337,13 +337,21 @@ static struct midcomms_node *nodeid2node(int nodeid) int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) { - int ret, r = nodeid_hash(nodeid); + int ret, idx, r = nodeid_hash(nodeid); struct midcomms_node *node; ret = 
dlm_lowcomms_addr(nodeid, addr, len); if (ret) return ret; + idx = srcu_read_lock(&nodes_srcu); + node = __find_node(nodeid, r); + if (node) { + srcu_read_unlock(&nodes_srcu, idx); + return 0; + } + srcu_read_unlock(&nodes_srcu, idx); + node = kmalloc(sizeof(*node), GFP_NOFS); if (!node) return -ENOMEM; @@ -1030,15 +1038,15 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, break; case DLM_VERSION_3_2: + /* send ack back if necessary */ + dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD); + msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation, ppc); if (!msg) { dlm_free_mhandle(mh); goto err; } - - /* send ack back if necessary */ - dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD); break; default: dlm_free_mhandle(mh); @@ -1260,12 +1268,23 @@ void dlm_midcomms_remove_member(int nodeid) idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); - if (WARN_ON_ONCE(!node)) { + /* in case of dlm_midcomms_close() removes node */ + if (!node) { srcu_read_unlock(&nodes_srcu, idx); return; } spin_lock(&node->state_lock); + /* case of dlm_midcomms_addr() created node but + * was not added before because dlm_midcomms_close() + * removed the node + */ + if (!node->users) { + spin_unlock(&node->state_lock); + srcu_read_unlock(&nodes_srcu, idx); + return; + } + node->users--; pr_debug("node %d users dec count %d\n", nodeid, node->users); @@ -1386,10 +1405,16 @@ void dlm_midcomms_shutdown(void) midcomms_shutdown(node); } } - srcu_read_unlock(&nodes_srcu, idx); - mutex_unlock(&close_lock); dlm_lowcomms_shutdown(); + + for (i = 0; i < CONN_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(node, &node_hash[i], hlist) { + midcomms_node_reset(node); + } + } + srcu_read_unlock(&nodes_srcu, idx); + mutex_unlock(&close_lock); } int dlm_midcomms_close(int nodeid) diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index cc6fb9e988991..4256a85719a1d 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb, struct erofs_sb_info *const sbi = EROFS_SB(sb); struct erofs_workgroup *pre; - /* - * Bump up before making this visible to others for the XArray in order - * to avoid potential UAF without serialized by xa_lock. 
- */ - lockref_get(&grp->lockref); - + DBG_BUGON(grp->lockref.count < 1); repeat: xa_lock(&sbi->managed_pslots); pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index, @@ -96,7 +91,6 @@ repeat: cond_resched(); goto repeat; } - lockref_put_return(&grp->lockref); grp = pre; } xa_unlock(&sbi->managed_pslots); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 036f610e044b6..a7e6847f6f8f1 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) return PTR_ERR(pcl); spin_lock_init(&pcl->obj.lockref.lock); + pcl->obj.lockref.count = 1; /* one ref for this request */ pcl->algorithmformat = map->m_algorithmformat; pcl->length = 0; pcl->partial = true; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 202c76996b621..4d8496d1a8ac4 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, ix = curp->p_idx; } + if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { + EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); + return -EFSCORRUPTED; + } + len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; BUG_ON(len < 0); if (len > 0) { @@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); } - if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { - EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); - return -EFSCORRUPTED; - } - ix->ei_block = cpu_to_le32(logical); ext4_idx_store_pblock(ix, ptr); le16_add_cpu(&curp->p_hdr->eh_entries, 1); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index dbebd8b3127e5..6f48dec19f4a2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -768,7 +768,8 @@ static void update_super_work(struct work_struct *work) */ if (!sb_rdonly(sbi->s_sb) && journal) { struct buffer_head *sbh = sbi->s_sbh; - bool call_notify_err; + bool call_notify_err = false; + handle = jbd2_journal_start(journal, 1); if (IS_ERR(handle)) goto write_directly; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 916e317ac925f..1ac34eb49a0e8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2344,8 +2344,10 @@ skip_reading_dnode: f2fs_wait_on_block_writeback(inode, blkaddr); if (f2fs_load_compressed_page(sbi, page, blkaddr)) { - if (atomic_dec_and_test(&dic->remaining_pages)) + if (atomic_dec_and_test(&dic->remaining_pages)) { f2fs_decompress_cluster(dic, true); + break; + } continue; } @@ -3023,7 +3025,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping, { int ret = 0; int done = 0, retry = 0; - struct page *pages[F2FS_ONSTACK_PAGES]; + struct page *pages_local[F2FS_ONSTACK_PAGES]; + struct page **pages = pages_local; struct folio_batch fbatch; struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct bio *bio = NULL; @@ -3047,6 +3050,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping, #endif int nr_folios, p, idx; int nr_pages; + unsigned int max_pages = F2FS_ONSTACK_PAGES; pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; @@ -3056,6 +3060,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping, int submitted = 0; int i; +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (f2fs_compressed_file(inode) && + 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) { + pages = f2fs_kzalloc(sbi, sizeof(struct page *) << + cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL); + max_pages = 1 << cc.log_cluster_size; + } +#endif + folio_batch_init(&fbatch); if (get_dirty_pages(mapping->host) <= @@ -3101,7 +3114,7 @@ again: 
add_more: pages[nr_pages] = folio_page(folio, idx); folio_get(folio); - if (++nr_pages == F2FS_ONSTACK_PAGES) { + if (++nr_pages == max_pages) { index = folio->index + idx + 1; folio_batch_release(&fbatch); goto write; @@ -3283,6 +3296,11 @@ next: if (bio) f2fs_submit_merged_ipu_write(sbi, &bio, NULL); +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (pages != pages_local) + kfree(pages); +#endif + return ret; } diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index ca5904129b162..d034703eb323a 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -3258,6 +3258,7 @@ int f2fs_precache_extents(struct inode *inode) return -EOPNOTSUPP; map.m_lblk = 0; + map.m_pblk = 0; map.m_next_pgofs = NULL; map.m_next_extent = &m_next_extent; map.m_seg_type = NO_CHECK_TYPE; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a8c8232852bb1..bc303a0522155 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -547,6 +547,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb, } #ifdef CONFIG_F2FS_FS_COMPRESSION +static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, + const char *new_ext, bool is_ext) +{ + unsigned char (*ext)[F2FS_EXTENSION_LEN]; + int ext_cnt; + int i; + + if (is_ext) { + ext = F2FS_OPTION(sbi).extensions; + ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; + } else { + ext = F2FS_OPTION(sbi).noextensions; + ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; + } + + for (i = 0; i < ext_cnt; i++) { + if (!strcasecmp(new_ext, ext[i])) + return true; + } + + return false; +} + /* * 1. The same extension name cannot not appear in both compress and non-compress extension * at the same time. @@ -1149,6 +1172,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) return -EINVAL; } + if (is_compress_extension_exist(sbi, name, true)) { + kfree(name); + break; + } + strcpy(ext[ext_cnt], name); F2FS_OPTION(sbi).compress_ext_cnt++; kfree(name); @@ -1173,6 +1201,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) return -EINVAL; } + if (is_compress_extension_exist(sbi, name, false)) { + kfree(name); + break; + } + strcpy(noext[noext_cnt], name); F2FS_OPTION(sbi).nocompress_ext_cnt++; kfree(name); @@ -1629,7 +1662,7 @@ static void f2fs_put_super(struct super_block *sb) f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); - if (err) { + if (err || f2fs_cp_error(sbi)) { truncate_inode_pages_final(NODE_MAPPING(sbi)); truncate_inode_pages_final(META_MAPPING(sbi)); } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index c1af01b2c42d7..1767493dffda7 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -613,6 +613,24 @@ out_free: kfree(isw); } +static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw, + struct list_head *list, int *nr) +{ + struct inode *inode; + + list_for_each_entry(inode, list, i_io_list) { + if (!inode_prepare_wbs_switch(inode, isw->new_wb)) + continue; + + isw->inodes[*nr] = inode; + (*nr)++; + + if (*nr >= WB_MAX_INODES_PER_ISW - 1) + return true; + } + return false; +} + /** * cleanup_offline_cgwb - detach associated inodes * @wb: target wb @@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb) { struct cgroup_subsys_state *memcg_css; struct inode_switch_wbs_context *isw; - struct inode *inode; int nr; bool restart = false; @@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb) nr = 0; spin_lock(&wb->list_lock); - list_for_each_entry(inode, &wb->b_attached, i_io_list) { - if (!inode_prepare_wbs_switch(inode, isw->new_wb)) - continue; - - isw->inodes[nr++] = inode; - - 
if (nr >= WB_MAX_INODES_PER_ISW - 1) { - restart = true; - break; - } - } + /* + * In addition to the inodes that have completed writeback, also switch + * cgwbs for those inodes only with dirty timestamps. Otherwise, those + * inodes won't be written back for a long time when lazytime is + * enabled, and thus pinning the dying cgwbs. It won't break the + * bandwidth restrictions, as writeback of inode metadata is not + * accounted for. + */ + restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr); + if (!restart) + restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr); spin_unlock(&wb->list_lock); /* no attached inodes? bail out */ diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index ee9c923192e08..07bf219f9ae48 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, unsigned char need = may_flags & NFSD_FILE_MAY_MASK; struct net *net = SVC_NET(rqstp); struct nfsd_file *new, *nf; - const struct cred *cred; + bool stale_retry = true; bool open_retry = true; struct inode *inode; __be32 status; int ret; +retry: status = fh_verify(rqstp, fhp, S_IFREG, may_flags|NFSD_MAY_OWNER_OVERRIDE); if (status != nfs_ok) return status; inode = d_inode(fhp->fh_dentry); - cred = get_current_cred(); -retry: rcu_read_lock(); - nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc); + nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc); rcu_read_unlock(); if (nf) { @@ -1026,7 +1025,7 @@ retry: rcu_read_lock(); spin_lock(&inode->i_lock); - nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc); + nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc); if (unlikely(nf)) { spin_unlock(&inode->i_lock); rcu_read_unlock(); @@ -1058,6 +1057,7 @@ wait_for_construction: goto construction_err; } open_retry = false; + fh_put(fhp); goto retry; } this_cpu_inc(nfsd_file_cache_hits); @@ -1074,7 +1074,6 @@ out: nfsd_file_check_write_error(nf); *pnf = nf; } - put_cred(cred); trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status); return status; @@ -1088,8 +1087,20 @@ open_file: status = nfs_ok; trace_nfsd_file_opened(nf, status); } else { - status = nfsd_open_verified(rqstp, fhp, may_flags, - &nf->nf_file); + ret = nfsd_open_verified(rqstp, fhp, may_flags, + &nf->nf_file); + if (ret == -EOPENSTALE && stale_retry) { + stale_retry = false; + nfsd_file_unhash(nf); + clear_and_wake_up_bit(NFSD_FILE_PENDING, + &nf->nf_flags); + if (refcount_dec_and_test(&nf->nf_ref)) + nfsd_file_free(nf); + nf = NULL; + fh_put(fhp); + goto retry; + } + status = nfserrno(ret); trace_nfsd_file_open(nf, status); } } else diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 02f5fcaad03f3..b24462efa1781 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -823,7 +823,7 @@ int nfsd_open_break_lease(struct inode *inode, int access) * and additional flags. * N.B. 
After this call fhp needs an fh_put */ -static __be32 +static int __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp) { @@ -831,14 +831,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, struct inode *inode; struct file *file; int flags = O_RDONLY|O_LARGEFILE; - __be32 err; - int host_err = 0; + int host_err = -EPERM; path.mnt = fhp->fh_export->ex_path.mnt; path.dentry = fhp->fh_dentry; inode = d_inode(path.dentry); - err = nfserr_perm; if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE)) goto out; @@ -847,7 +845,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, host_err = nfsd_open_break_lease(inode, may_flags); if (host_err) /* NOMEM or WOULDBLOCK */ - goto out_nfserr; + goto out; if (may_flags & NFSD_MAY_WRITE) { if (may_flags & NFSD_MAY_READ) @@ -859,13 +857,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, file = dentry_open(&path, flags, current_cred()); if (IS_ERR(file)) { host_err = PTR_ERR(file); - goto out_nfserr; + goto out; } host_err = ima_file_check(file, may_flags); if (host_err) { fput(file); - goto out_nfserr; + goto out; } if (may_flags & NFSD_MAY_64BIT_COOKIE) @@ -874,10 +872,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, file->f_mode |= FMODE_32BITHASH; *filp = file; -out_nfserr: - err = nfserrno(host_err); out: - return err; + return host_err; } __be32 @@ -885,6 +881,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp) { __be32 err; + int host_err; bool retried = false; validate_process_creds(); @@ -904,12 +901,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, retry: err = fh_verify(rqstp, fhp, type, may_flags); if (!err) { - err = __nfsd_open(rqstp, fhp, type, may_flags, filp); - if (err == nfserr_stale && !retried) { + host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp); + if (host_err == -EOPENSTALE && !retried) { retried = true; fh_put(fhp); goto retry; } + err = nfserrno(host_err); } validate_process_creds(); return err; @@ -922,13 +920,13 @@ retry: * @may_flags: internal permission flags * @filp: OUT: open "struct file *" * - * Returns an nfsstat value in network byte order. + * Returns zero on success, or a negative errno value. 
*/ -__be32 +int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags, struct file **filp) { - __be32 err; + int err; validate_process_creds(); err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index a6890ea7b765b..e3c29596f4df1 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, int nfsd_open_break_lease(struct inode *, int); __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t, int, struct file **); -__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *, - int, struct file **); +int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, + int may_flags, struct file **filp); __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, unsigned long *count, diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index e5bca9a004ccc..03425928d2fb3 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -464,6 +464,8 @@ out: */ int pstore_register(struct pstore_info *psi) { + char *new_backend; + if (backend && strcmp(backend, psi->name)) { pr_warn("backend '%s' already in use: ignoring '%s'\n", backend, psi->name); @@ -484,11 +486,16 @@ int pstore_register(struct pstore_info *psi) return -EINVAL; } + new_backend = kstrdup(psi->name, GFP_KERNEL); + if (!new_backend) + return -ENOMEM; + mutex_lock(&psinfo_lock); if (psinfo) { pr_warn("backend '%s' already loaded: ignoring '%s'\n", psinfo->name, psi->name); mutex_unlock(&psinfo_lock); + kfree(new_backend); return -EBUSY; } @@ -521,7 +528,7 @@ int pstore_register(struct pstore_info *psi) * Update the module parameter backend, so it is visible * through /sys/module/pstore/parameters/backend */ - backend = kstrdup(psi->name, GFP_KERNEL); + backend = new_backend; pr_info("Registered %s as persistent store backend\n", psi->name); diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c index 5fcfb634fec26..efbdc47c74dcf 100644 --- a/fs/tracefs/event_inode.c +++ b/fs/tracefs/event_inode.c @@ -113,14 +113,14 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry, mutex_lock(&eventfs_mutex); ef = dentry->d_fsdata; - if (ef->is_freed) { + if (ef && ef->is_freed) { /* Do not allow changes if the event is about to be removed. */ mutex_unlock(&eventfs_mutex); return -ENODEV; } ret = simple_setattr(idmap, dentry, iattr); - if (!ret) + if (!ret && ef) update_attr(ef, iattr); mutex_unlock(&eventfs_mutex); return ret; diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index 05100e91ecb96..6fc9bb2979e45 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -53,6 +53,7 @@ struct samsung_dsim_driver_data { unsigned int plltmr_reg; unsigned int has_freqband:1; unsigned int has_clklane_stop:1; + unsigned int has_broken_fifoctrl_emptyhdr:1; unsigned int num_clks; unsigned int min_freq; unsigned int max_freq; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 49f8b691496c4..76055186d6248 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1029,6 +1029,11 @@ struct btf_func_model { */ #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) +/* Indicate that current trampoline is in a tail call context. Then, it has to + * cache and restore tail_call_cnt to avoid infinite tail call loop. + */ +#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
*/ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index ec32ec58c59f7..ace3a4ce2fc98 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core, unsigned long parent_rate); /** - * struct clk_duty - Struture encoding the duty cycle ratio of a clock + * struct clk_duty - Structure encoding the duty cycle ratio of a clock * * @num: Numerator of the duty cycle ratio * @den: Denominator of the duty cycle ratio @@ -129,7 +129,7 @@ struct clk_duty { * @restore_context: Restore the context of the clock after a restoration * of power. * - * @recalc_rate Recalculate the rate of this clock, by querying hardware. The + * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The * parent rate is an input parameter. It is up to the caller to * ensure that the prepare_mutex is held across this call. If the * driver cannot figure out a rate for this clock, it must return @@ -456,7 +456,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, * clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate * @fixed_accuracy: non-adjustable clock accuracy @@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, * the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate */ @@ -649,7 +649,7 @@ struct clk_div_table { * Clock with an adjustable divider affecting its output frequency. Implements * .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is * the raw value read from the register, with the value of zero considered @@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev, * @mwidth: width of the numerator bit field * @nshift: shift to the denominator bit field * @nwidth: width of the denominator bit field + * @approximation: clk driver's callback for calculating the divider clock * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. * - * Flags: + * @flags: * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED * is set then the numerator and denominator are both the value read @@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw); * Clock with an adjustable multiplier affecting its output frequency. * Implements .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read * from the register, with 0 being a valid value effectively * zeroing the output clock rate. 
If CLK_MULTIPLIER_ZERO_BYPASS is diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 068f7738be22a..28c1d3d77b70f 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -189,6 +189,7 @@ enum cpuhp_state { /* Must be the last timer callback */ CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_XEN_RUNSTATE_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 39fbfb4be944b..9da4f3f1e6d61 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -144,6 +144,13 @@ enum qm_vf_state { QM_NOT_READY, }; +enum qm_misc_ctl_bits { + QM_DRIVER_REMOVING = 0x0, + QM_RST_SCHED, + QM_RESETTING, + QM_MODULE_PARAM, +}; + enum qm_cap_bits { QM_SUPPORT_DB_ISOLATION = 0x0, QM_SUPPORT_FUNC_QOS, diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8a3115516a1ba..136e9842120e8 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs); +extern long hwrng_yield(struct hwrng *rng); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index a0dce14090a9e..da5f5fa4a3a6a 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -200,7 +200,7 @@ static inline void idr_preload_end(void) */ #define idr_for_each_entry_ul(idr, entry, tmp, id) \ for (tmp = 0, id = 0; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ tmp = id, ++id) /** @@ -224,10 +224,12 @@ static inline void idr_preload_end(void) * @id: Entry ID. * * Continue to iterate over entries, continuing after the current position. + * After normal termination @entry is left with the value NULL. This + * is convenient for a "not found" value. */ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ for (tmp = id; \ - tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + ((entry) = tmp <= id ? 
idr_get_next_ul(idr, &(id)) : NULL) != NULL; \ tmp = id, ++id) /* diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 47e7a3a61ce69..e8bcad641d8c2 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -92,7 +92,7 @@ struct mfd_cell { * (above) when matching OF nodes with devices that have identical * compatible strings */ - const u64 of_reg; + u64 of_reg; /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ bool use_of_reg; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0896aaa91dd7b..b646609f09c05 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -5214,5 +5214,6 @@ extern struct net_device *blackhole_netdev; #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) #define DEV_STATS_ADD(DEV, FIELD, VAL) \ atomic_long_add((VAL), &(DEV)->stats.__##FIELD) +#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h index 59df211d051fa..a904861de8000 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -12,6 +12,7 @@ #define MAX_NUMNODES (1 << NODES_SHIFT) #define NUMA_NO_NODE (-1) +#define NUMA_NO_MEMBLK (-1) /* optionally keep NUMA memory info available post init */ #ifdef CONFIG_NUMA_KEEP_MEMINFO @@ -25,7 +26,7 @@ #include /* Generic implementation available */ -int numa_map_to_online_node(int node); +int numa_nearest_node(int node, unsigned int state); #ifndef memory_add_physaddr_to_nid static inline int memory_add_physaddr_to_nid(u64 start) @@ -43,11 +44,18 @@ static inline int phys_to_target_node(u64 start) return 0; } #endif +#ifndef numa_fill_memblks +static inline int __init numa_fill_memblks(u64 start, u64 end) +{ + return NUMA_NO_MEMBLK; +} +#endif #else /* !CONFIG_NUMA */ -static inline int numa_map_to_online_node(int node) +static inline int numa_nearest_node(int node, unsigned int state) { return NUMA_NO_NODE; } + static inline int memory_add_physaddr_to_nid(u64 start) { return 0; @@ -58,6 +66,8 @@ static inline int phys_to_target_node(u64 start) } #endif +#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE) + #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP extern const struct attribute_group arch_node_dev_group; #endif diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 03f82c2c2ebf6..b5440e7da55bf 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -130,7 +130,8 @@ * it will be ignored. */ .macro VALIDATE_UNRET_BEGIN -#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) +#if defined(CONFIG_NOINSTR_VALIDATION) && \ + (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) .Lhere_\@: .pushsection .discard.validate_unret .long .Lhere_\@ - . 
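[Editorial note, not part of the patch] The idr_for_each_entry_ul()/idr_for_each_entry_continue_ul() rework in the hunk above guarantees that the loop cursor is NULL after normal termination, even when ++id wraps past ULONG_MAX; the old form could exit with the last visited entry still in the cursor. A minimal hypothetical caller (struct foo and foo_lookup() are made-up names) can therefore use the cursor directly as a "not found" result:

#include <linux/idr.h>

/* Illustrative sketch only; 'struct foo' and foo_lookup() are invented. */
struct foo {
	int key;
};

static struct foo *foo_lookup(struct idr *idr, int key)
{
	struct foo *f;
	unsigned long tmp, id;

	idr_for_each_entry_ul(idr, f, tmp, id) {
		if (f->key == key)
			return f;
	}
	/*
	 * With the reworked macro, f is NULL here even if the walk ended
	 * because the ID wrapped, so it doubles as the "not found" value.
	 */
	return f;
}
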
diff --git a/include/linux/pci.h b/include/linux/pci.h index 8c7c2c3c6c652..b56417276042d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1624,6 +1624,8 @@ struct msix_entry { u16 entry; /* Driver uses to specify entry, OS writes */ }; +struct msi_domain_template; + #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); @@ -1656,6 +1658,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, + unsigned int hwsize, void *data); +struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, + const struct irq_affinity_desc *affdesc); +void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1719,6 +1726,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline bool pci_create_ims_domain(struct pci_dev *pdev, + const struct msi_domain_template *template, + unsigned int hwsize, void *data) +{ return false; } + +static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, + union msi_instance_cookie *icookie, + const struct irq_affinity_desc *affdesc) +{ + struct msi_map map = { .index = -ENOSYS, }; + + return map; +} + +static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map) +{ +} + #endif /** @@ -2616,14 +2642,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #endif -struct msi_domain_template; - -bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, - unsigned int hwsize, void *data); -struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, - const struct irq_affinity_desc *affdesc); -void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); - #include #define pci_printk(level, pdev, fmt, arg...) 
\ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7b5406e3288d9..16913318af930 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -879,6 +879,7 @@ struct perf_event_pmu_context { unsigned int embedded : 1; unsigned int nr_events; + unsigned int nr_cgroups; atomic_t refcount; /* event <-> epc */ struct rcu_head rcu_head; diff --git a/include/linux/pm.h b/include/linux/pm.h index 1400c37b29c75..629c1633bbd00 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \ } -#ifdef CONFIG_PM -#define _EXPORT_DEV_PM_OPS(name, license, ns) \ +#define _EXPORT_PM_OPS(name, license, ns) \ const struct dev_pm_ops name; \ __EXPORT_SYMBOL(name, license, ns); \ const struct dev_pm_ops name -#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) -#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) -#else -#define _EXPORT_DEV_PM_OPS(name, license, ns) \ + +#define _DISCARD_PM_OPS(name, license, ns) \ static __maybe_unused const struct dev_pm_ops __static_##name + +#ifdef CONFIG_PM +#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) +#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) +#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) +#else +#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) #define EXPORT_PM_FN_NS_GPL(name, ns) #endif -#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "") -#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "") -#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns) -#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns) +#ifdef CONFIG_PM_SLEEP +#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) +#else +#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) +#endif + +#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "") +#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "") +#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns) +#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns) + +#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "") +#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "") +#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns) +#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns) /* * Use this if you want to use the same suspend and resume callbacks for suspend @@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL) #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ - EXPORT_DEV_PM_OPS(name) = { \ + EXPORT_DEV_SLEEP_PM_OPS(name) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ - EXPORT_GPL_DEV_PM_OPS(name) = { \ + EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \ - EXPORT_NS_DEV_PM_OPS(name, ns) = { \ + EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \ - 
EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \ + EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } diff --git a/include/linux/string.h b/include/linux/string.h index dbfc66400050f..9e3cb6923b0ef 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -277,10 +277,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, */ #define strtomem_pad(dest, src, pad) do { \ const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ \ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ _dest_len == (size_t)-1); \ - memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \ + memcpy_and_pad(dest, _dest_len, src, \ + strnlen(src, min(_src_len, _dest_len)), pad); \ } while (0) /** @@ -298,10 +300,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, */ #define strtomem(dest, src) do { \ const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ \ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ _dest_len == (size_t)-1); \ - memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \ + memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \ } while (0) /** diff --git a/include/linux/topology.h b/include/linux/topology.h index fea32377f7c77..52f5850730b3e 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int #else static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) { - return cpumask_nth(cpu, cpus); + return cpumask_nth_and(cpu, cpus, cpu_online_mask); } static inline const struct cpumask * diff --git a/include/linux/udp.h b/include/linux/udp.h index 43c1fb2d2c21a..d04188714dca1 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) return (num + net_hash_mix(net)) & mask; } +enum { + UDP_FLAGS_CORK, /* Cork is required */ + UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */ + UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */ + UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */ + UDP_FLAGS_ACCEPT_FRAGLIST, + UDP_FLAGS_ACCEPT_L4, + UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */ + UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */ + UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */ +}; + struct udp_sock { /* inet_sock has to be the first member */ struct inet_sock inet; #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0] #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1] #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node + + unsigned long udp_flags; + int pending; /* Any pending frames ? */ - unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ - unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ - encap_enabled:1, /* This socket enabled encap - * processing; UDP tunnels and - * different encapsulation layer set - * this - */ - gro_enabled:1, /* Request GRO aggregation */ - accept_udp_l4:1, - accept_udp_fraglist:1; + /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
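[Editorial note, not part of the patch] The strtomem()/strtomem_pad() hardening in the include/linux/string.h hunk above now bounds strnlen() by the source object size as well as the destination size. A minimal hypothetical use (struct src_cfg and struct wire_rec are invented for illustration) looks like this:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative sketch only; both structures are made up. */
struct src_cfg {
	char label[16];		/* may lack a terminating NUL */
};

struct wire_rec {
	u8 name[32];		/* fixed-size wire field, not NUL-terminated */
};

static void fill_name(struct wire_rec *r, const struct src_cfg *c)
{
	/*
	 * Copies strnlen(c->label, min(16, 32)) bytes and zero-pads the
	 * rest of r->name. Before this change the strnlen() bound was the
	 * destination size alone (32), so an unterminated 16-byte label
	 * could be read past its end.
	 */
	strtomem_pad(r->name, c->label, 0);
}
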
@@ -62,12 +67,6 @@ struct udp_sock { */ __u16 pcslen; __u16 pcrlen; -/* indicator bits used by pcflag: */ -#define UDPLITE_BIT 0x1 /* set by udplite proto init function */ -#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */ -#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */ - __u8 pcflag; /* marks socket as UDP-Lite if > 0 */ - __u8 unused[3]; /* * For encapsulation sockets. */ @@ -95,28 +94,39 @@ struct udp_sock { int forward_threshold; }; +#define udp_test_bit(nr, sk) \ + test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_set_bit(nr, sk) \ + set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_test_and_set_bit(nr, sk) \ + test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_clear_bit(nr, sk) \ + clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags) +#define udp_assign_bit(nr, sk, val) \ + assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val) + #define UDP_MAX_SEGMENTS (1 << 6UL) #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk) static inline void udp_set_no_check6_tx(struct sock *sk, bool val) { - udp_sk(sk)->no_check6_tx = val; + udp_assign_bit(NO_CHECK6_TX, sk, val); } static inline void udp_set_no_check6_rx(struct sock *sk, bool val) { - udp_sk(sk)->no_check6_rx = val; + udp_assign_bit(NO_CHECK6_RX, sk, val); } -static inline bool udp_get_no_check6_tx(struct sock *sk) +static inline bool udp_get_no_check6_tx(const struct sock *sk) { - return udp_sk(sk)->no_check6_tx; + return udp_test_bit(NO_CHECK6_TX, sk); } -static inline bool udp_get_no_check6_rx(struct sock *sk) +static inline bool udp_get_no_check6_rx(const struct sock *sk) { - return udp_sk(sk)->no_check6_rx; + return udp_test_bit(NO_CHECK6_RX, sk); } static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, @@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) if (!skb_is_gso(skb)) return false; - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4) + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && + !udp_test_bit(ACCEPT_L4, sk)) return true; - if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) + if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && + !udp_test_bit(ACCEPT_FRAGLIST, sk)) return true; return false; @@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) static inline void udp_allow_gso(struct sock *sk) { - udp_sk(sk)->accept_udp_l4 = 1; - udp_sk(sk)->accept_udp_fraglist = 1; + udp_set_bit(ACCEPT_L4, sk); + udp_set_bit(ACCEPT_FRAGLIST, sk); } #define udp_portaddr_for_each_entry(__sk, list) \ diff --git a/include/linux/verification.h b/include/linux/verification.h index f34e50ebcf60a..cb2d47f280910 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -8,6 +8,7 @@ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H +#include #include /* diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 87d92accc26ea..bdee5d649cc61 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky @@ -673,6 +674,8 @@ enum { #define HCI_TX_POWER_INVALID 127 #define HCI_RSSI_INVALID 127 +#define HCI_SYNC_HANDLE_INVALID 0xffff + #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 
c33348ba1657e..7fa95b72e5c85 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -350,6 +350,8 @@ struct hci_dev { struct list_head list; struct mutex lock; + struct ida unset_handle_ida; + const char *name; unsigned long flags; __u16 id; @@ -1314,7 +1316,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev * } static inline struct hci_conn * -hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) +hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1336,6 +1338,29 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big) return NULL; } +static inline struct hci_conn * +hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK || + !test_bit(HCI_CONN_PA_SYNC, &c->flags)) + continue; + + if (c->sync_handle == sync_handle) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -1426,7 +1451,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, - u8 role); + u8 role, u16 handle); +struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 role); void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7192346e4a22d..153a8c3e7213d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5826,6 +5826,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work); */ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work); +/** + * wiphy_work_flush - flush previously queued work + * @wiphy: the wiphy, for debug purposes + * @work: the work to flush, this can be %NULL to flush all work + * + * Flush the work (i.e. run it if pending). This must be called + * under the wiphy mutex acquired by wiphy_lock(). + */ +void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work); + struct wiphy_delayed_work { struct wiphy_work work; struct wiphy *wiphy; @@ -5869,6 +5879,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy, void wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *dwork); +/** + * wiphy_delayed_work_flush - flush previously queued delayed work + * @wiphy: the wiphy, for debug purposes + * @work: the work to flush + * + * Flush the work (i.e. run it if pending). This must be called + * under the wiphy mutex acquired by wiphy_lock(). 
+ */ +void wiphy_delayed_work_flush(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork); + /** * struct wireless_dev - wireless device state * diff --git a/include/net/flow.h b/include/net/flow.h index 7f0adda3bf2fe..335bbc52171c1 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -40,8 +40,8 @@ struct flowi_common { #define FLOWI_FLAG_KNOWN_NH 0x02 __u32 flowic_secid; kuid_t flowic_uid; - struct flowi_tunnel flowic_tun_key; __u32 flowic_multipath_hash; + struct flowi_tunnel flowic_tun_key; }; union flowi_uli { diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h index 078d3c52c03f9..e5f2f0b73a9a0 100644 --- a/include/net/netfilter/nf_conntrack_act_ct.h +++ b/include/net/netfilter/nf_conntrack_act_ct.h @@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf #endif } -static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct) +static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo) +{ +#if IS_ENABLED(CONFIG_NET_ACT_CT) + struct nf_conn_act_ct_ext *act_ct_ext; + + act_ct_ext = nf_conn_act_ct_ext_find(ct); + if (dev_net(skb->dev) == &init_net && act_ct_ext) + act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex; +#endif +} + +static inline struct +nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo) { #if IS_ENABLED(CONFIG_NET_ACT_CT) struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT); @@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn * return act_ct; act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC); + nf_conn_act_ct_ext_fill(skb, ct, ctinfo); return act_ct; #else return NULL; #endif } -static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ -#if IS_ENABLED(CONFIG_NET_ACT_CT) - struct nf_conn_act_ct_ext *act_ct_ext; - - act_ct_ext = nf_conn_act_ct_ext_find(ct); - if (dev_net(skb->dev) == &init_net && act_ct_ext) - act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex; -#endif -} - #endif /* _NF_CONNTRACK_ACT_CT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 4b03ca7cb8a5e..0239e815edf71 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -801,7 +801,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp) } /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */ -static inline u32 tcp_ns_to_ts(u64 ns) +static inline u64 tcp_ns_to_ts(u64 ns) { return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ); } diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 0ca9b7a11baf5..29251c3519cf0 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) } #endif -static inline void udp_tunnel_encap_enable(struct socket *sock) +static inline void udp_tunnel_encap_enable(struct sock *sk) { - struct udp_sock *up = udp_sk(sock->sk); - - if (up->encap_enabled) + if (udp_test_and_set_bit(ENCAP_ENABLED, sk)) return; - up->encap_enabled = 1; #if IS_ENABLED(CONFIG_IPV6) - if (sock->sk->sk_family == PF_INET6) + if (READ_ONCE(sk->sk_family) == PF_INET6) ipv6_stub->udpv6_encap_enable(); #endif udp_encap_enable(); diff --git a/include/net/udplite.h b/include/net/udplite.h index bd33ff2b8f426..786919d29f8de 100644 --- a/include/net/udplite.h +++ 
b/include/net/udplite.h @@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* Fast-path computation of checksum. Socket may not be locked. */ static inline __wsum udplite_csum(struct sk_buff *skb) { - const struct udp_sock *up = udp_sk(skb->sk); const int off = skb_transport_offset(skb); + const struct sock *sk = skb->sk; int len = skb->len - off; - if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { - if (0 < up->pcslen) - len = up->pcslen; - udp_hdr(skb)->len = htons(up->pcslen); + if (udp_test_bit(UDPLITE_SEND_CC, sk)) { + u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen); + + if (pcslen < len) { + if (pcslen > 0) + len = pcslen; + udp_hdr(skb)->len = htons(pcslen); + } } skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index 5842e38bb2880..f5e4ac5b8cce8 100644 --- a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -102,8 +102,12 @@ struct tegra_bpmp { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_mirror; #endif + + bool suspended; }; +#define TEGRA_BPMP_MESSAGE_RESET BIT(0) + struct tegra_bpmp_message { unsigned int mrq; @@ -117,6 +121,8 @@ struct tegra_bpmp_message { size_t size; int ret; } rx; + + unsigned long flags; }; #if IS_ENABLED(CONFIG_TEGRA_BPMP) diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 1bf757901d024..2fe8c6b0d4cf3 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -11,7 +11,6 @@ #define __CS35L41_H #include -#include #include #define CS35L41_FIRSTREG 0x00000000 @@ -902,7 +901,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap); int cs35l41_init_boost(struct device *dev, struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg); bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type); +int cs35l41_mdsync_up(struct regmap *regmap); int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, struct completion *pll_lock, bool firmware_running); + int enable, bool firmware_running); #endif /* __CS35L41_H */ diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h index 375718ba4ab62..e145bca5105c5 100644 --- a/include/uapi/xen/privcmd.h +++ b/include/uapi/xen/privcmd.h @@ -102,7 +102,7 @@ struct privcmd_mmap_resource { #define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0) struct privcmd_irqfd { - void __user *dm_op; + __u64 dm_op; __u32 size; /* Size of structure pointed by dm_op */ __u32 fd; __u32 flags; @@ -138,6 +138,6 @@ struct privcmd_irqfd { #define IOCTL_PRIVCMD_MMAP_RESOURCE \ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource)) #define IOCTL_PRIVCMD_IRQFD \ - _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd)) + _IOW('P', 8, struct privcmd_irqfd) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 9123138aa9f48..f6e5ae026e4be 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -19,12 +19,15 @@ #define BGID_ARRAY 64 +/* BIDs are addressed by a 16-bit field in a CQE */ +#define MAX_BIDS_PER_BGID (1 << 16) + struct io_provide_buf { struct file *file; __u64 addr; __u32 len; __u32 bgid; - __u16 nbufs; + __u32 nbufs; __u16 bid; }; @@ -289,7 +292,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EINVAL; tmp = READ_ONCE(sqe->fd); - if (!tmp || tmp > USHRT_MAX) + if (!tmp || tmp > MAX_BIDS_PER_BGID) return -EINVAL; memset(p, 0, sizeof(*p)); @@ -332,7 +335,7 @@ int io_provide_buffers_prep(struct 
io_kiocb *req, const struct io_uring_sqe *sqe return -EINVAL; tmp = READ_ONCE(sqe->fd); - if (!tmp || tmp > USHRT_MAX) + if (!tmp || tmp > MAX_BIDS_PER_BGID) return -E2BIG; p->nbufs = tmp; p->addr = READ_ONCE(sqe->addr); @@ -352,7 +355,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe tmp = READ_ONCE(sqe->off); if (tmp > USHRT_MAX) return -E2BIG; - if (tmp + p->nbufs >= USHRT_MAX) + if (tmp + p->nbufs > MAX_BIDS_PER_BGID) return -EINVAL; p->bid = tmp; return 0; diff --git a/io_uring/net.c b/io_uring/net.c index 7a8e298af81b3..75d494dad7e2c 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) int ret; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - if (connect->in_progress) { - struct socket *socket; - - ret = -ENOTSOCK; - socket = sock_from_file(req->file); - if (socket) - ret = sock_error(socket->sk); - goto out; - } - if (req_has_async_data(req)) { io = req->async_data; } else { @@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) && force_nonblock) { if (ret == -EINPROGRESS) { connect->in_progress = true; - return -EAGAIN; - } - if (ret == -ECONNABORTED) { + } else if (ret == -ECONNABORTED) { if (connect->seen_econnaborted) goto out; connect->seen_econnaborted = true; @@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) memcpy(req->async_data, &__io, sizeof(__io)); return -EAGAIN; } + if (connect->in_progress) { + /* + * At least bluetooth will return -EBADFD on a re-connect + * attempt, and it's (supposedly) also valid to get -EISCONN + * which means the previous result is good. For both of these, + * grab the sock_error() and use that for the completion. + */ + if (ret == -EBADFD || ret == -EISCONN) + ret = sock_error(sock_from_file(req->file)->sk); + } if (ret == -ERESTARTSYS) ret = -EINTR; out: diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index a8c7e1c5abfac..fd8d4b0addfca 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab, hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); preempt_disable(); + local_irq_save(flags); if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { __this_cpu_dec(*(htab->map_locked[hash])); + local_irq_restore(flags); preempt_enable(); return -EBUSY; } - raw_spin_lock_irqsave(&b->raw_lock, flags); + raw_spin_lock(&b->raw_lock); *pflags = flags; return 0; @@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab, unsigned long flags) { hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); - raw_spin_unlock_irqrestore(&b->raw_lock, flags); + raw_spin_unlock(&b->raw_lock); __this_cpu_dec(*(htab->map_locked[hash])); + local_irq_restore(flags); preempt_enable(); } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8bd3812fb8df4..607be04db75b9 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1176,13 +1176,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map ret = -EBUSY; goto out; } - if (!atomic64_read(&map->usercnt)) { - /* maps with timers must be either held by user space - * or pinned in bpffs. 
- */ - ret = -EPERM; - goto out; - } /* allocate hrtimer via map_kmalloc to use memcg accounting */ t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); if (!t) { @@ -1195,7 +1188,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map rcu_assign_pointer(t->callback_fn, NULL); hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); t->timer.function = bpf_timer_cb; - timer->timer = t; + WRITE_ONCE(timer->timer, t); + /* Guarantee the order between timer->timer and map->usercnt. So + * when there are concurrent uref release and bpf timer init, either + * bpf_timer_cancel_and_free() called by uref release reads a no-NULL + * timer or atomic64_read() below returns a zero usercnt. + */ + smp_mb(); + if (!atomic64_read(&map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. + */ + WRITE_ONCE(timer->timer, NULL); + kfree(t); + ret = -EPERM; + } out: __bpf_spin_unlock_irqrestore(&timer->lock); return ret; @@ -1370,7 +1377,7 @@ void bpf_timer_cancel_and_free(void *val) /* The subsequent bpf_timer_start/cancel() helpers won't be able to use * this timer, since it won't be initialized. */ - timer->timer = NULL; + WRITE_ONCE(timer->timer, NULL); out: __bpf_spin_unlock_irqrestore(&timer->lock); if (!t) @@ -2197,7 +2204,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) { - return task_under_cgroup_hierarchy(task, ancestor); + long ret; + + rcu_read_lock(); + ret = task_under_cgroup_hierarchy(task, ancestor); + rcu_read_unlock(); + return ret; } #endif /* CONFIG_CGROUPS */ diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 53ff50cac61ea..e97aeda3a86b5 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut goto out; } - /* clear all bits except SHARE_IPMODIFY */ - tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY; + /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */ + tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX); if (tlinks[BPF_TRAMP_FEXIT].nr_links || tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 873ade146f3de..82c9e5c470319 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11202,6 +11202,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ break; } case KF_ARG_PTR_TO_CALLBACK: + if (reg->type != PTR_TO_FUNC) { + verbose(env, "arg%d expected pointer to func\n", i); + return -EINVAL; + } meta->subprogno = reg->subprogno; break; case KF_ARG_PTR_TO_REFCOUNTED_KPTR: @@ -19641,6 +19645,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) if (!tr) return -ENOMEM; + if (tgt_prog && tgt_prog->aux->tail_call_reachable) + tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; + prog->aux->dst_trampoline = tr; return 0; } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 58ec88efa4f82..4749e0c86c62c 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1304,13 +1304,23 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs) * * Changing load balance flag will automatically call * rebuild_sched_domains_locked(). + * This function is for cgroup v2 only. 
*/ static void update_partition_sd_lb(struct cpuset *cs, int old_prs) { int new_prs = cs->partition_root_state; - bool new_lb = (new_prs != PRS_ISOLATED); bool rebuild_domains = (new_prs > 0) || (old_prs > 0); + bool new_lb; + /* + * If cs is not a valid partition root, the load balance state + * will follow its parent. + */ + if (new_prs > 0) { + new_lb = (new_prs != PRS_ISOLATED); + } else { + new_lb = is_sched_load_balance(parent_cs(cs)); + } if (new_lb != !!is_sched_load_balance(cs)) { rebuild_domains = true; if (new_lb) diff --git a/kernel/cpu.c b/kernel/cpu.c index 6de7c6bb74eee..1a189da3bdac5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu) #endif } -static inline bool cpu_smt_allowed(unsigned int cpu) +static inline bool cpu_bootable(unsigned int cpu) { if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) return true; + /* All CPUs are bootable if controls are not configured */ + if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED) + return true; + + /* All CPUs are bootable if CPU is not SMT capable */ + if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + return true; + if (topology_is_primary_thread(cpu)) return true; @@ -685,7 +693,7 @@ bool cpu_smt_possible(void) EXPORT_SYMBOL_GPL(cpu_smt_possible); #else -static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } +static inline bool cpu_bootable(unsigned int cpu) { return true; } #endif static inline enum cpuhp_state @@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu) * SMT soft disabling on X86 requires to bring the CPU out of the * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The * CPU marked itself as booted_once in notify_cpu_starting() so the - * cpu_smt_allowed() check will now return false if this is not the + * cpu_bootable() check will now return false if this is not the * primary sibling. 
*/ - if (!cpu_smt_allowed(cpu)) + if (!cpu_bootable(cpu)) return -ECANCELED; return 0; } @@ -1741,7 +1749,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target) err = -EBUSY; goto out; } - if (!cpu_smt_allowed(cpu)) { + if (!cpu_bootable(cpu)) { err = -EPERM; goto out; } diff --git a/kernel/events/core.c b/kernel/events/core.c index a2f2a9525d72e..452c15d747328 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -375,6 +375,7 @@ enum event_type_t { EVENT_TIME = 0x4, /* see ctx_resched() for details */ EVENT_CPU = 0x8, + EVENT_CGROUP = 0x10, EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, }; @@ -684,20 +685,26 @@ do { \ ___p; \ }) -static void perf_ctx_disable(struct perf_event_context *ctx) +static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup) { struct perf_event_pmu_context *pmu_ctx; - list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) + list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { + if (cgroup && !pmu_ctx->nr_cgroups) + continue; perf_pmu_disable(pmu_ctx->pmu); + } } -static void perf_ctx_enable(struct perf_event_context *ctx) +static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup) { struct perf_event_pmu_context *pmu_ctx; - list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) + list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { + if (cgroup && !pmu_ctx->nr_cgroups) + continue; perf_pmu_enable(pmu_ctx->pmu); + } } static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type); @@ -856,9 +863,9 @@ static void perf_cgroup_switch(struct task_struct *task) return; perf_ctx_lock(cpuctx, cpuctx->task_ctx); - perf_ctx_disable(&cpuctx->ctx); + perf_ctx_disable(&cpuctx->ctx, true); - ctx_sched_out(&cpuctx->ctx, EVENT_ALL); + ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP); /* * must not be done before ctxswout due * to update_cgrp_time_from_cpuctx() in @@ -870,9 +877,9 @@ static void perf_cgroup_switch(struct task_struct *task) * perf_cgroup_set_timestamp() in ctx_sched_in() * to not have to pass task around */ - ctx_sched_in(&cpuctx->ctx, EVENT_ALL); + ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP); - perf_ctx_enable(&cpuctx->ctx); + perf_ctx_enable(&cpuctx->ctx, true); perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } @@ -965,6 +972,8 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct if (!is_cgroup_event(event)) return; + event->pmu_ctx->nr_cgroups++; + /* * Because cgroup events are always per-cpu events, * @ctx == &cpuctx->ctx. @@ -985,6 +994,8 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c if (!is_cgroup_event(event)) return; + event->pmu_ctx->nr_cgroups--; + /* * Because cgroup events are always per-cpu events, * @ctx == &cpuctx->ctx. 
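[Editorial note, not part of the patch] For context on the nr_cgroups bookkeeping added above: a perf "cgroup event" is created from user space by passing a cgroup directory file descriptor together with PERF_FLAG_PID_CGROUP to perf_event_open(), and such events are always per-CPU. A rough user-space sketch (error handling trimmed, the cgroup path is an example):

#include <linux/perf_event.h>
#include <fcntl.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cgroup_cycles(const char *cgroup_path, int cpu)
{
	struct perf_event_attr attr;
	int cgrp_fd, fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	cgrp_fd = open(cgroup_path, O_RDONLY);	/* e.g. /sys/fs/cgroup/mygrp */
	if (cgrp_fd < 0)
		return -1;

	/* The pid argument carries the cgroup fd when PERF_FLAG_PID_CGROUP is set. */
	fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
		     PERF_FLAG_PID_CGROUP);
	close(cgrp_fd);		/* the event holds its own cgroup reference */
	return fd;
}
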
@@ -2679,9 +2690,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, event_type &= EVENT_ALL; - perf_ctx_disable(&cpuctx->ctx); + perf_ctx_disable(&cpuctx->ctx, false); if (task_ctx) { - perf_ctx_disable(task_ctx); + perf_ctx_disable(task_ctx, false); task_ctx_sched_out(task_ctx, event_type); } @@ -2699,9 +2710,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, perf_event_sched_in(cpuctx, task_ctx); - perf_ctx_enable(&cpuctx->ctx); + perf_ctx_enable(&cpuctx->ctx, false); if (task_ctx) - perf_ctx_enable(task_ctx); + perf_ctx_enable(task_ctx, false); } void perf_pmu_resched(struct pmu *pmu) @@ -3246,6 +3257,9 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context); struct perf_event_pmu_context *pmu_ctx; int is_active = ctx->is_active; + bool cgroup = event_type & EVENT_CGROUP; + + event_type &= ~EVENT_CGROUP; lockdep_assert_held(&ctx->lock); @@ -3292,8 +3306,11 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type) is_active ^= ctx->is_active; /* changed bits */ - list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) + list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { + if (cgroup && !pmu_ctx->nr_cgroups) + continue; __pmu_ctx_sched_out(pmu_ctx, is_active); + } } /* @@ -3484,7 +3501,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { - perf_ctx_disable(ctx); + perf_ctx_disable(ctx, false); /* PMIs are disabled; ctx->nr_pending is stable. */ if (local_read(&ctx->nr_pending) || @@ -3504,7 +3521,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next) perf_ctx_sched_task_cb(ctx, false); perf_event_swap_task_ctx_data(ctx, next_ctx); - perf_ctx_enable(ctx); + perf_ctx_enable(ctx, false); /* * RCU_INIT_POINTER here is safe because we've not @@ -3528,13 +3545,13 @@ unlock: if (do_switch) { raw_spin_lock(&ctx->lock); - perf_ctx_disable(ctx); + perf_ctx_disable(ctx, false); inside_switch: perf_ctx_sched_task_cb(ctx, false); task_ctx_sched_out(ctx, EVENT_ALL); - perf_ctx_enable(ctx); + perf_ctx_enable(ctx, false); raw_spin_unlock(&ctx->lock); } } @@ -3820,47 +3837,32 @@ static int merge_sched_in(struct perf_event *event, void *data) return 0; } -static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu) +static void pmu_groups_sched_in(struct perf_event_context *ctx, + struct perf_event_groups *groups, + struct pmu *pmu) { - struct perf_event_pmu_context *pmu_ctx; int can_add_hw = 1; - - if (pmu) { - visit_groups_merge(ctx, &ctx->pinned_groups, - smp_processor_id(), pmu, - merge_sched_in, &can_add_hw); - } else { - list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { - can_add_hw = 1; - visit_groups_merge(ctx, &ctx->pinned_groups, - smp_processor_id(), pmu_ctx->pmu, - merge_sched_in, &can_add_hw); - } - } + visit_groups_merge(ctx, groups, smp_processor_id(), pmu, + merge_sched_in, &can_add_hw); } -static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu) +static void ctx_groups_sched_in(struct perf_event_context *ctx, + struct perf_event_groups *groups, + bool cgroup) { struct perf_event_pmu_context *pmu_ctx; - int can_add_hw = 1; - if (pmu) { - visit_groups_merge(ctx, &ctx->flexible_groups, - smp_processor_id(), pmu, - merge_sched_in, &can_add_hw); - } else { - list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { 
- can_add_hw = 1; - visit_groups_merge(ctx, &ctx->flexible_groups, - smp_processor_id(), pmu_ctx->pmu, - merge_sched_in, &can_add_hw); - } + list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { + if (cgroup && !pmu_ctx->nr_cgroups) + continue; + pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu); } } -static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu) +static void __pmu_ctx_sched_in(struct perf_event_context *ctx, + struct pmu *pmu) { - ctx_flexible_sched_in(ctx, pmu); + pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu); } static void @@ -3868,6 +3870,9 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type) { struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context); int is_active = ctx->is_active; + bool cgroup = event_type & EVENT_CGROUP; + + event_type &= ~EVENT_CGROUP; lockdep_assert_held(&ctx->lock); @@ -3900,11 +3905,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type) * in order to give them the best chance of going on. */ if (is_active & EVENT_PINNED) - ctx_pinned_sched_in(ctx, NULL); + ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup); /* Then walk through the lower prio flexible groups */ if (is_active & EVENT_FLEXIBLE) - ctx_flexible_sched_in(ctx, NULL); + ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup); } static void perf_event_context_sched_in(struct task_struct *task) @@ -3919,11 +3924,11 @@ static void perf_event_context_sched_in(struct task_struct *task) if (cpuctx->task_ctx == ctx) { perf_ctx_lock(cpuctx, ctx); - perf_ctx_disable(ctx); + perf_ctx_disable(ctx, false); perf_ctx_sched_task_cb(ctx, true); - perf_ctx_enable(ctx); + perf_ctx_enable(ctx, false); perf_ctx_unlock(cpuctx, ctx); goto rcu_unlock; } @@ -3936,7 +3941,7 @@ static void perf_event_context_sched_in(struct task_struct *task) if (!ctx->nr_events) goto unlock; - perf_ctx_disable(ctx); + perf_ctx_disable(ctx, false); /* * We want to keep the following priority order: * cpu pinned (that don't need to move), task pinned, @@ -3946,7 +3951,7 @@ static void perf_event_context_sched_in(struct task_struct *task) * events, no need to flip the cpuctx's events around. */ if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) { - perf_ctx_disable(&cpuctx->ctx); + perf_ctx_disable(&cpuctx->ctx, false); ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE); } @@ -3955,9 +3960,9 @@ static void perf_event_context_sched_in(struct task_struct *task) perf_ctx_sched_task_cb(cpuctx->task_ctx, true); if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) - perf_ctx_enable(&cpuctx->ctx); + perf_ctx_enable(&cpuctx->ctx, false); - perf_ctx_enable(ctx); + perf_ctx_enable(ctx, false); unlock: perf_ctx_unlock(cpuctx, ctx); diff --git a/kernel/futex/core.c b/kernel/futex/core.c index f10587d1d4817..f30a93e50f65e 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, * but access_ok() should be faster than find_vma() */ if (!fshared) { - key->private.mm = mm; + /* + * On no-MMU, shared futexes are treated as private, therefore + * we must not include the current process in the key. Since + * there is only one address space, the address is a unique key + * on its own. 
+ */ + if (IS_ENABLED(CONFIG_MMU)) + key->private.mm = mm; + else + key->private.mm = NULL; + key->private.address = address; return 0; } diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 1698e77645acf..75d0ae490e29c 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m) } /** - * irq_matrix_allocated - Get the number of allocated irqs on the local cpu + * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU * @m: Pointer to the matrix to search * - * This returns number of allocated irqs + * This returns number of allocated non-managed interrupts. */ unsigned int irq_matrix_allocated(struct irq_matrix *m) { struct cpumap *cm = this_cpu_ptr(m->maps); - return cm->allocated; + return cm->allocated - cm->managed_allocated; } #ifdef CONFIG_GENERIC_IRQ_DEBUGFS diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 61328328c474c..ecbc9b6aba3a1 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, * symbols are exported and normal relas can be used instead. */ if (!sec_vmlinux && sym_vmlinux) { - pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section", + pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n", sym_name); return -EINVAL; } diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c index 87440f714c0ca..474e68f0f0634 100644 --- a/kernel/module/decompress.c +++ b/kernel/module/decompress.c @@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info, s.next_in = buf + gzip_hdr_len; s.avail_in = size - gzip_hdr_len; - s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); + s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); if (!s.workspace) return -ENOMEM; @@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info, out_inflate_end: zlib_inflateEnd(&s); out: - kfree(s.workspace); + kvfree(s.workspace); return retval; } #elif defined(CONFIG_MODULE_COMPRESS_XZ) @@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info, } wksp_size = zstd_dstream_workspace_bound(header.windowSize); - wksp = vmalloc(wksp_size); + wksp = kvmalloc(wksp_size, GFP_KERNEL); if (!wksp) { retval = -ENOMEM; goto out; @@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info, retval = new_size; out: - vfree(wksp); + kvfree(wksp); return retval; } #else diff --git a/kernel/padata.c b/kernel/padata.c index 222d60195de66..ff349e1084c1d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -1102,12 +1102,16 @@ EXPORT_SYMBOL(padata_alloc_shell); */ void padata_free_shell(struct padata_shell *ps) { + struct parallel_data *pd; + if (!ps) return; mutex_lock(&ps->pinst->lock); list_del(&ps->list); - padata_free_pd(rcu_dereference_protected(ps->pd, 1)); + pd = rcu_dereference_protected(ps->pd, 1); + if (refcount_dec_and_test(&pd->refcnt)) + padata_free_pd(pd); mutex_unlock(&ps->pinst->lock); kfree(ps); diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 20d7a238d675a..253ed509b6abb 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1242,10 +1242,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, spin_lock_irqsave_sdp_contention(sdp, &flags); if (rhp) rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); + /* + * The snapshot for 
acceleration must be taken _before_ the read of the + * current gp sequence used for advancing, otherwise advancing may fail + * and acceleration may then fail too. + * + * This could happen if: + * + * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the + * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8). + * + * 2) The grace period for RCU_WAIT_TAIL is seen as started but not + * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1. + * + * 3) This value is passed to rcu_segcblist_advance() which can't move + * any segment forward and fails. + * + * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration. + * But then the call to rcu_seq_snap() observes the grace period for the + * RCU_WAIT_TAIL segment as completed and the subsequent one for the + * RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1) + * so it returns a snapshot of the next grace period, which is X + 12. + * + * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the + * freshly enqueued callback in RCU_NEXT_TAIL can't move to + * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace + * period (gp_num = X + 8). So acceleration fails. + */ + s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); rcu_segcblist_advance(&sdp->srcu_cblist, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); - s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); - (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); + WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp); if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { sdp->srcu_gp_seq_needed = s; needgp = true; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 802551e0009bf..9e9a45a3394fe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2664,9 +2664,11 @@ static int migration_cpu_stop(void *data) * it. */ WARN_ON_ONCE(!pending->stop_pending); + preempt_disable(); task_rq_unlock(rq, p, &rf); stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, &pending->arg, &pending->stop_work); + preempt_enable(); return 0; } out: @@ -2986,12 +2988,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag complete = true; } + preempt_disable(); task_rq_unlock(rq, p, rf); - if (push_task) { stop_one_cpu_nowait(rq->cpu, push_cpu_stop, p, &rq->push_work); } + preempt_enable(); if (complete) complete_all(&pending->done); @@ -3057,12 +3060,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag if (flags & SCA_MIGRATE_ENABLE) p->migration_flags &= ~MDF_PUSH; + preempt_disable(); task_rq_unlock(rq, p, rf); - if (!stop_pending) { stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, &pending->arg, &pending->stop_work); } + preempt_enable(); if (flags & SCA_MIGRATE_ENABLE) return 0; @@ -9505,9 +9509,11 @@ static void balance_push(struct rq *rq) * Temporarily drop rq->lock such that we can wake-up the stop task. * Both preemption and IRQs are still disabled. */ + preempt_disable(); raw_spin_rq_unlock(rq); stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, this_cpu_ptr(&push_work)); + preempt_enable(); /* * At this point need_resched() is true and we'll take the loop in * schedule(). 
The next pick is obviously going to be the stop task diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 58b542bf28934..d78f2e8769fb4 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2449,9 +2449,11 @@ skip: double_unlock_balance(this_rq, src_rq); if (push_task) { + preempt_disable(); raw_spin_rq_unlock(this_rq); stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, push_task, &src_rq->push_work); + preempt_enable(); raw_spin_rq_lock(this_rq); } } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index df348aa55d3c7..1795f6fe991f3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4626,22 +4626,6 @@ static inline unsigned long task_util_est(struct task_struct *p) return max(task_util(p), _task_util_est(p)); } -#ifdef CONFIG_UCLAMP_TASK -static inline unsigned long uclamp_task_util(struct task_struct *p, - unsigned long uclamp_min, - unsigned long uclamp_max) -{ - return clamp(task_util_est(p), uclamp_min, uclamp_max); -} -#else -static inline unsigned long uclamp_task_util(struct task_struct *p, - unsigned long uclamp_min, - unsigned long uclamp_max) -{ - return task_util_est(p); -} -#endif - static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) { @@ -4932,7 +4916,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) { - return true; + return !cfs_rq->nr_running; } #define UPDATE_TG 0x0 @@ -7756,7 +7740,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) target = prev_cpu; sync_entity_load_avg(&p->se); - if (!uclamp_task_util(p, p_util_min, p_util_max)) + if (!task_util_est(p) && p_util_min == 0) goto unlock; eenv_task_busy_time(&eenv, p, prev_cpu); @@ -7764,11 +7748,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) for (; pd; pd = pd->next) { unsigned long util_min = p_util_min, util_max = p_util_max; unsigned long cpu_cap, cpu_thermal_cap, util; - unsigned long cur_delta, max_spare_cap = 0; + long prev_spare_cap = -1, max_spare_cap = -1; unsigned long rq_util_min, rq_util_max; - unsigned long prev_spare_cap = 0; + unsigned long cur_delta, base_energy; int max_spare_cap_cpu = -1; - unsigned long base_energy; int fits, max_fits = -1; cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask); @@ -7831,7 +7814,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) prev_spare_cap = cpu_cap; prev_fits = fits; } else if ((fits > max_fits) || - ((fits == max_fits) && (cpu_cap > max_spare_cap))) { + ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) { /* * Find the CPU with the maximum spare capacity * among the remaining CPUs in the performance @@ -7843,7 +7826,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) } } - if (max_spare_cap_cpu < 0 && prev_spare_cap == 0) + if (max_spare_cap_cpu < 0 && prev_spare_cap < 0) continue; eenv_pd_busy_time(&eenv, cpus, p); @@ -7851,7 +7834,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) base_energy = compute_energy(&eenv, pd, cpus, p, -1); /* Evaluate the energy impact of using prev_cpu. 
*/ - if (prev_spare_cap > 0) { + if (prev_spare_cap > -1) { prev_delta = compute_energy(&eenv, pd, cpus, p, prev_cpu); /* CPU utilization has changed */ @@ -11251,13 +11234,15 @@ more_balance: busiest->push_cpu = this_cpu; active_balance = 1; } - raw_spin_rq_unlock_irqrestore(busiest, flags); + preempt_disable(); + raw_spin_rq_unlock_irqrestore(busiest, flags); if (active_balance) { stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest, &busiest->active_balance_work); } + preempt_enable(); } } else { sd->nr_balance_failed = 0; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 0597ba0f85ff3..904dd85345973 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2109,9 +2109,11 @@ retry: */ push_task = get_push_task(rq); if (push_task) { + preempt_disable(); raw_spin_rq_unlock(rq); stop_one_cpu_nowait(rq->cpu, push_cpu_stop, push_task, &rq->push_work); + preempt_enable(); raw_spin_rq_lock(rq); } @@ -2448,9 +2450,11 @@ skip: double_unlock_balance(this_rq, src_rq); if (push_task) { + preempt_disable(); raw_spin_rq_unlock(this_rq); stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, push_task, &src_rq->push_work); + preempt_enable(); raw_spin_rq_lock(this_rq); } } diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 05a5bc678c089..423d08947962c 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2122,12 +2122,16 @@ static int hop_cmp(const void *a, const void *b) */ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) { - struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu }; + struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; struct cpumask ***hop_masks; int hop, ret = nr_cpu_ids; rcu_read_lock(); + /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ + node = numa_nearest_node(node, N_CPU); + k.node = node; + k.masks = rcu_dereference(sched_domains_numa_masks); if (!k.masks) goto unlock; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index e834f149695b7..47812aa16bb57 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init); /** * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @kretprobe: Is this a return probe? * @name: The name of the kprobe event * @loc: The location of the kprobe event - * @kretprobe: Is this a return probe? 
* @...: Variable number of arg (pairs), one pair for each field * * NOTE: Users normally won't want to call this function directly, but diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index a6348489d45fe..1236b3cd2fbb2 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -137,8 +137,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set) { struct kunit_suite * const *suites; - for (suites = suite_set.start; suites < suite_set.end; suites++) + for (suites = suite_set.start; suites < suite_set.end; suites++) { + kfree((*suites)->test_cases); kfree(*suites); + } kfree(suite_set.start); } @@ -155,10 +157,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, struct kunit_suite_set filtered = {NULL, NULL}; struct kunit_glob_filter parsed_glob; struct kunit_attr_filter *parsed_filters = NULL; + struct kunit_suite * const *suites; const size_t max = suite_set->end - suite_set->start; - copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL); + copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL); if (!copy) { /* won't be able to run anything, return an empty set */ return filtered; } @@ -193,7 +196,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, parsed_glob.test_glob); if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto free_parsed_filters; + goto free_filtered_suite; } } if (filter_count > 0 && parsed_filters != NULL) { @@ -210,11 +213,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered_suite = new_filtered_suite; if (*err) - goto free_parsed_filters; + goto free_filtered_suite; if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto free_parsed_filters; + goto free_filtered_suite; } if (!filtered_suite) break; @@ -229,6 +232,14 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered.start = copy_start; filtered.end = copy; +free_filtered_suite: + if (*err) { + for (suites = copy_start; suites < copy; suites++) { + kfree((*suites)->test_cases); + kfree(*suites); + } + } + free_parsed_filters: if (filter_count) kfree(parsed_filters); @@ -241,7 +252,7 @@ free_parsed_glob: free_copy: if (*err) - kfree(copy); + kfree(copy_start); return filtered; } diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index b4f6f96b28445..22d4ee86dbedd 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -9,7 +9,7 @@ #include #include -static void kfree_at_end(struct kunit *test, const void *to_free); +static void free_suite_set_at_end(struct kunit *test, const void *to_free); static struct kunit_suite *alloc_fake_suite(struct kunit *test, const char *suite_name, struct kunit_case *test_cases); @@ -56,7 +56,7 @@ static void filter_suites_test(struct kunit *test) got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); + free_suite_set_at_end(test, &got); /* Validate we just have suite2 */ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); @@ -82,7 +82,7 @@ static void filter_suites_test_glob_test(struct kunit *test) got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); + free_suite_set_at_end(test, &got); /* Validate we just have suite2 */ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); @@ -109,7 +109,7 @@ static void filter_suites_to_empty_test(struct kunit *test) got = 
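The lib/kunit/executor.c changes above make kunit_free_suite_set() and the kunit_filter_suites() error path release each copied suite's test_cases array in addition to the suite itself. A small userspace sketch of that two-level ownership rule (struct and function names are invented for illustration):

    #include <stdlib.h>

    struct fake_case  { const char *name; };
    struct fake_suite { struct fake_case *test_cases; };   /* suite owns its cases */

    /* Free one suite: the inner array first, then the suite. */
    static void free_fake_suite(struct fake_suite *suite)
    {
        if (!suite)
            return;
        free(suite->test_cases);
        free(suite);
    }

    /* Free a NULL-terminated array of suite pointers, then the array itself. */
    static void free_fake_suite_set(struct fake_suite **suites)
    {
        for (size_t i = 0; suites && suites[i]; i++)
            free_fake_suite(suites[i]);
        free(suites);
    }

    int main(void)
    {
        struct fake_suite **set = calloc(3, sizeof(*set));  /* two suites + NULL slot */

        if (!set)
            return 1;
        for (int i = 0; i < 2; i++) {
            set[i] = calloc(1, sizeof(**set));
            if (!set[i])
                break;
            set[i]->test_cases = calloc(4, sizeof(struct fake_case));
        }
        free_fake_suite_set(set);   /* nothing leaks: cases, suites, then the array */
        return 0;
    }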
kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); /* just in case */ + free_suite_set_at_end(test, &got); /* just in case */ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end, "should be empty to indicate no match"); @@ -172,7 +172,7 @@ static void filter_attr_test(struct kunit *test) got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); + free_suite_set_at_end(test, &got); /* Validate we just have normal_suite */ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); @@ -200,7 +200,7 @@ static void filter_attr_empty_test(struct kunit *test) got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); /* just in case */ + free_suite_set_at_end(test, &got); /* just in case */ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end, "should be empty to indicate no match"); @@ -222,7 +222,7 @@ static void filter_attr_skip_test(struct kunit *test) got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); - kfree_at_end(test, got.start); + free_suite_set_at_end(test, &got); /* Validate we have both the slow and normal test */ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases); @@ -256,18 +256,26 @@ kunit_test_suites(&executor_test_suite); /* Test helpers */ -/* Use the resource API to register a call to kfree(to_free). +static void free_suite_set(void *suite_set) +{ + kunit_free_suite_set(*(struct kunit_suite_set *)suite_set); + kfree(suite_set); +} + +/* Use the resource API to register a call to free_suite_set. * Since we never actually use the resource, it's safe to use on const data. */ -static void kfree_at_end(struct kunit *test, const void *to_free) +static void free_suite_set_at_end(struct kunit *test, const void *to_free) { - /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */ - if (IS_ERR_OR_NULL(to_free)) + struct kunit_suite_set *free; + + if (!((struct kunit_suite_set *)to_free)->start) return; - kunit_add_action(test, - (kunit_action_t *)kfree, - (void *)to_free); + free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL); + *free = *(struct kunit_suite_set *)to_free; + + kunit_add_action(test, free_suite_set, (void *)free); } static struct kunit_suite *alloc_fake_suite(struct kunit *test, diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 29ebf1e7898cf..e52e3a0b8f2e6 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -131,22 +131,26 @@ static struct mempolicy default_policy = { static struct mempolicy preferred_node_policy[MAX_NUMNODES]; /** - * numa_map_to_online_node - Find closest online node + * numa_nearest_node - Find nearest node by state * @node: Node id to start the search + * @state: State to filter the search * - * Lookup the next closest node by distance if @nid is not online. + * Lookup the closest node by distance if @nid is not in state. 
* - * Return: this @node if it is online, otherwise the closest node by distance + * Return: this @node if it is in state, otherwise the closest node by distance */ -int numa_map_to_online_node(int node) +int numa_nearest_node(int node, unsigned int state) { int min_dist = INT_MAX, dist, n, min_node; - if (node == NUMA_NO_NODE || node_online(node)) + if (state >= NR_NODE_STATES) + return -EINVAL; + + if (node == NUMA_NO_NODE || node_state(node, state)) return node; min_node = node; - for_each_online_node(n) { + for_each_node_state(n, state) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; @@ -156,7 +160,7 @@ int numa_map_to_online_node(int node) return min_node; } -EXPORT_SYMBOL_GPL(numa_map_to_online_node); +EXPORT_SYMBOL_GPL(numa_nearest_node); struct mempolicy *get_task_policy(struct task_struct *p) { diff --git a/mm/readahead.c b/mm/readahead.c index e815c114de21e..6925e6959fd3f 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -735,7 +735,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count) */ ret = -EINVAL; if (!f.file->f_mapping || !f.file->f_mapping->a_ops || - !S_ISREG(file_inode(f.file)->i_mode)) + (!S_ISREG(file_inode(f.file)->i_mode) && + !S_ISBLK(file_inode(f.file)->i_mode))) goto out; ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED); diff --git a/net/9p/client.c b/net/9p/client.c index 86bbc7147fc14..b0e7cb7e1a54a 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) return 0; if (!p9_is_proto_dotl(c)) { - char *ename; + char *ename = NULL; err = p9pdu_readf(&req->rc, c->proto_version, "s?d", &ename, &ecode); - if (err) + if (err) { + kfree(ename); goto out_err; + } if (p9_is_proto_dotu(c) && ecode < 512) err = -ecode; diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 2134f92bd7ac2..5d698f19868c5 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -109,7 +109,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, struct hci_conn *hcon; u8 role = out ? 
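The mm/mempolicy.c hunk above turns numa_map_to_online_node() into numa_nearest_node(): if the starting node is not in the requested node state, it walks the nodes that are and returns the one at the smallest distance. A userspace sketch of that selection over a made-up distance table (the node count, distances and the "state" bitmask are all invented):

    #include <stdio.h>

    #define NR_NODES 4

    /* Made-up symmetric NUMA distance table (10 = local, as in an ACPI SLIT). */
    static const int distance[NR_NODES][NR_NODES] = {
        { 10, 20, 40, 30 },
        { 20, 10, 30, 40 },
        { 40, 30, 10, 20 },
        { 30, 40, 20, 10 },
    };

    /* Return @node if it is in @state_mask, otherwise the closest node that is. */
    static int nearest_node(int node, unsigned int state_mask)
    {
        int min_dist = 1 << 30, min_node = node;

        if (state_mask & (1u << node))
            return node;

        for (int n = 0; n < NR_NODES; n++) {
            if (!(state_mask & (1u << n)))
                continue;
            if (distance[node][n] < min_dist) {
                min_dist = distance[node][n];
                min_node = n;
            }
        }
        return min_node;
    }

    int main(void)
    {
        unsigned int has_cpu = (1u << 0) | (1u << 3);   /* pretend only nodes 0 and 3 have CPUs */

        printf("nearest to 2: %d\n", nearest_node(2, has_cpu));  /* 3: distance 20 beats 40 */
        printf("nearest to 0: %d\n", nearest_node(0, has_cpu));  /* 0: already in the state */
        return 0;
    }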
HCI_ROLE_MASTER : HCI_ROLE_SLAVE; - hcon = hci_conn_add(hdev, AMP_LINK, dst, role); + hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr)); if (!hcon) return NULL; @@ -117,7 +117,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, hcon->state = BT_CONNECT; hcon->attempt++; - hcon->handle = __next_handle(mgr); hcon->remote_id = remote_id; hcon->amp_mgr = amp_mgr_get(mgr); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 73470cc3518a7..7450b550cff6d 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -153,6 +153,9 @@ static void hci_conn_cleanup(struct hci_conn *conn) hci_conn_hash_del(hdev, conn); + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + ida_free(&hdev->unset_handle_ida, conn->handle); + if (conn->cleanup) conn->cleanup(conn); @@ -928,31 +931,18 @@ static void cis_cleanup(struct hci_conn *conn) hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); } -static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev) +static int hci_conn_hash_alloc_unset(struct hci_dev *hdev) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; - u16 handle = HCI_CONN_HANDLE_MAX + 1; - - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - /* Find the first unused handle */ - if (handle == 0xffff || c->handle != handle) - break; - handle++; - } - rcu_read_unlock(); - - return handle; + return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1, + U16_MAX, GFP_ATOMIC); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, - u8 role) + u8 role, u16 handle) { struct hci_conn *conn; - BT_DBG("%s dst %pMR", hdev->name, dst); + bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) @@ -960,7 +950,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, bacpy(&conn->dst, dst); bacpy(&conn->src, &hdev->bdaddr); - conn->handle = hci_conn_hash_alloc_unset(hdev); + conn->handle = handle; conn->hdev = hdev; conn->type = type; conn->role = role; @@ -973,6 +963,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, conn->rssi = HCI_RSSI_INVALID; conn->tx_power = HCI_TX_POWER_INVALID; conn->max_tx_power = HCI_TX_POWER_INVALID; + conn->sync_handle = HCI_SYNC_HANDLE_INVALID; set_bit(HCI_CONN_POWER_SAVE, &conn->flags); conn->disc_timeout = HCI_DISCONN_TIMEOUT; @@ -1044,6 +1035,20 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, return conn; } +struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, + bdaddr_t *dst, u8 role) +{ + int handle; + + bt_dev_dbg(hdev, "dst %pMR", dst); + + handle = hci_conn_hash_alloc_unset(hdev); + if (unlikely(handle < 0)) + return NULL; + + return hci_conn_add(hdev, type, dst, role, handle); +} + static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason) { if (!reason) @@ -1274,6 +1279,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle) if (conn->abort_reason) return conn->abort_reason; + if (HCI_CONN_HANDLE_UNSET(conn->handle)) + ida_free(&hdev->unset_handle_ida, conn->handle); + conn->handle = handle; return 0; @@ -1381,7 +1389,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, if (conn) { bacpy(&conn->dst, dst); } else { - conn = hci_conn_add(hdev, LE_LINK, dst, role); + conn = hci_conn_add_unset(hdev, LE_LINK, dst, role); if (!conn) return ERR_PTR(-ENOMEM); hci_conn_hold(conn); @@ -1546,7 +1554,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, 
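The hci_conn changes above have hci_conn_add() take the handle as an argument, while hci_conn_add_unset() draws a temporary placeholder from an IDA covering HCI_CONN_HANDLE_MAX + 1 through U16_MAX and releases it once the controller supplies the real handle. A toy userspace stand-in for that allocate-from-a-range/free pattern (the bitmap allocator and constants below are illustrative, not the kernel's ida_alloc_range()):

    #include <stdio.h>

    #define HANDLE_MIN 0x0f00                       /* HCI_CONN_HANDLE_MAX (0x0eff) + 1 */
    #define HANDLE_MAX 0xffff
    #define RANGE      (HANDLE_MAX - HANDLE_MIN + 1)

    static unsigned char used[RANGE];               /* toy replacement for the IDA */

    static int alloc_unset_handle(void)
    {
        for (int i = 0; i < RANGE; i++) {
            if (!used[i]) {
                used[i] = 1;
                return HANDLE_MIN + i;              /* unique placeholder handle */
            }
        }
        return -1;                                  /* range exhausted */
    }

    static void free_unset_handle(int handle)
    {
        if (handle >= HANDLE_MIN && handle <= HANDLE_MAX)
            used[handle - HANDLE_MIN] = 0;
    }

    int main(void)
    {
        int a = alloc_unset_handle();
        int b = alloc_unset_handle();

        printf("placeholders: 0x%04x 0x%04x\n", a, b);
        free_unset_handle(a);                       /* real handle was assigned */
        printf("reused:       0x%04x\n", alloc_unset_handle());
        return 0;
    }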
bdaddr_t *dst, memcmp(conn->le_per_adv_data, base, base_len))) return ERR_PTR(-EADDRINUSE); - conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); if (!conn) return ERR_PTR(-ENOMEM); @@ -1590,7 +1598,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, BT_DBG("requesting refresh of dst_addr"); - conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER); + conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER); if (!conn) return ERR_PTR(-ENOMEM); @@ -1638,7 +1646,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (!acl) { - acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); + acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); if (!acl) return ERR_PTR(-ENOMEM); } @@ -1698,7 +1706,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, sco = hci_conn_hash_lookup_ba(hdev, type, dst); if (!sco) { - sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER); + sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER); if (!sco) { hci_conn_drop(acl); return ERR_PTR(-ENOMEM); @@ -1890,7 +1898,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, qos->ucast.cis); if (!cis) { - cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); if (!cis) return ERR_PTR(-ENOMEM); cis->cleanup = cis_cleanup; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 195aea2198a96..65601aa52e0d8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2535,6 +2535,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); + ida_init(&hdev->unset_handle_ida); + INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->reject_list); @@ -2789,6 +2791,7 @@ void hci_release_dev(struct hci_dev *hdev) hci_codec_list_clear(&hdev->local_codecs); hci_dev_unlock(hdev); + ida_destroy(&hdev->unset_handle_ida); ida_simple_remove(&hci_index_ida, hdev->id); kfree_skb(hdev->sent_cmd); kfree_skb(hdev->recv_event); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 1e1c9147356c3..f6d3150bcbb03 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2335,8 +2335,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) } } else { if (!conn) { - conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, - HCI_ROLE_MASTER); + conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr, + HCI_ROLE_MASTER); if (!conn) bt_dev_err(hdev, "no memory for new connection"); } @@ -3151,8 +3151,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { - conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, - HCI_ROLE_SLAVE); + conn = hci_conn_add_unset(hdev, ev->link_type, + &ev->bdaddr, HCI_ROLE_SLAVE); if (!conn) { bt_dev_err(hdev, "no memory for new conn"); goto unlock; @@ -3317,8 +3317,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { - conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, - HCI_ROLE_SLAVE); + conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, + HCI_ROLE_SLAVE); if (!conn) { bt_dev_err(hdev, 
"no memory for new connection"); goto unlock; @@ -5890,7 +5890,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, if (status) goto unlock; - conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); + conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role); if (!conn) { bt_dev_err(hdev, "no memory for new connection"); goto unlock; @@ -5952,17 +5952,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); - if (handle > HCI_CONN_HANDLE_MAX) { - bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, - HCI_CONN_HANDLE_MAX); - status = HCI_ERROR_INVALID_PARAMETERS; - } - /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. */ - if (status) + if (status || hci_conn_set_handle(conn, handle)) goto unlock; /* Drop the connection if it has been aborted */ @@ -5986,7 +5980,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, mgmt_device_connected(hdev, conn, NULL, 0); conn->sec_level = BT_SECURITY_LOW; - conn->handle = handle; conn->state = BT_CONFIG; /* Store current advertising instance as connection advertising instance @@ -6603,7 +6596,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, struct hci_ev_le_pa_sync_established *ev = data; int mask = hdev->link_mode; __u8 flags = 0; - struct hci_conn *bis; + struct hci_conn *pa_sync; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); @@ -6620,20 +6613,19 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, if (!(flags & HCI_PROTO_DEFER)) goto unlock; - /* Add connection to indicate the PA sync event */ - bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, - HCI_ROLE_SLAVE); + if (ev->status) { + /* Add connection to indicate the failed PA sync event */ + pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); - if (!bis) - goto unlock; + if (!pa_sync) + goto unlock; - if (ev->status) - set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags); - else - set_bit(HCI_CONN_PA_SYNC, &bis->flags); + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); - /* Notify connection to iso layer */ - hci_connect_cfm(bis, ev->status); + /* Notify iso layer */ + hci_connect_cfm(pa_sync, ev->status); + } unlock: hci_dev_unlock(hdev); @@ -7020,12 +7012,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, cis = hci_conn_hash_lookup_handle(hdev, cis_handle); if (!cis) { - cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); + cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE, + cis_handle); if (!cis) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } - cis->handle = cis_handle; } cis->iso_qos.ucast.cig = ev->cig_id; @@ -7125,7 +7117,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!ev->status) { - pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle); + pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle); if (pa_sync) /* Also mark the BIG sync established event on the * associated PA sync hcon @@ -7140,10 +7132,9 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bis = hci_conn_hash_lookup_handle(hdev, handle); if (!bis) { bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, - HCI_ROLE_SLAVE); + HCI_ROLE_SLAVE, handle); if (!bis) continue; - bis->handle = handle; } if (ev->status != 0x42) @@ -7186,15 +7177,42 @@ static void 
hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, struct hci_evt_le_big_info_adv_report *ev = data; int mask = hdev->link_mode; __u8 flags = 0; + struct hci_conn *pa_sync; bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); hci_dev_lock(hdev); mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); - if (!(mask & HCI_LM_ACCEPT)) + if (!(mask & HCI_LM_ACCEPT)) { hci_le_pa_term_sync(hdev, ev->sync_handle); + goto unlock; + } + + if (!(flags & HCI_PROTO_DEFER)) + goto unlock; + + pa_sync = hci_conn_hash_lookup_pa_sync_handle + (hdev, + le16_to_cpu(ev->sync_handle)); + + if (pa_sync) + goto unlock; + /* Add connection to indicate the PA sync event */ + pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); + + if (!pa_sync) + goto unlock; + + pa_sync->sync_handle = le16_to_cpu(ev->sync_handle); + set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags); + + /* Notify iso layer */ + hci_connect_cfm(pa_sync, 0x00); + +unlock: hci_dev_unlock(hdev); } diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index a15ab0b874a9d..9e71362c04b48 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -152,7 +152,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, struct sk_buff *skb; int err = 0; - bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); + bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); hci_req_init(&req, hdev); @@ -248,7 +248,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); if (IS_ERR(skb)) { if (!event) - bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, + bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode, PTR_ERR(skb)); return PTR_ERR(skb); } diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 71248163ce9a5..2132a16be93cd 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -77,6 +77,7 @@ static struct bt_iso_qos default_qos; static bool check_ucast_qos(struct bt_iso_qos *qos); static bool check_bcast_qos(struct bt_iso_qos *qos); static bool iso_match_sid(struct sock *sk, void *data); +static bool iso_match_sync_handle(struct sock *sk, void *data); static void iso_sock_disconn(struct sock *sk); /* ---- ISO timers ---- */ @@ -1202,7 +1203,6 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) { iso_conn_big_sync(sk); sk->sk_state = BT_LISTEN; - set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } else { iso_conn_defer_accept(pi->conn->hcon); sk->sk_state = BT_CONFIG; @@ -1579,6 +1579,7 @@ static void iso_conn_ready(struct iso_conn *conn) struct sock *sk = conn->sk; struct hci_ev_le_big_sync_estabilished *ev = NULL; struct hci_ev_le_pa_sync_established *ev2 = NULL; + struct hci_evt_le_big_info_adv_report *ev3 = NULL; struct hci_conn *hcon; BT_DBG("conn %p", conn); @@ -1603,14 +1604,20 @@ static void iso_conn_ready(struct iso_conn *conn) parent = iso_get_sock_listen(&hcon->src, &hcon->dst, iso_match_big, ev); - } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) || - test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { + } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { ev2 = hci_recv_event_data(hcon->hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev2) parent = iso_get_sock_listen(&hcon->src, &hcon->dst, iso_match_sid, ev2); + } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { + ev3 = hci_recv_event_data(hcon->hdev, + HCI_EVT_LE_BIG_INFO_ADV_REPORT); + if (ev3) + parent = iso_get_sock_listen(&hcon->src, 
+ &hcon->dst, + iso_match_sync_handle, ev3); } if (!parent) @@ -1650,11 +1657,13 @@ static void iso_conn_ready(struct iso_conn *conn) hcon->sync_handle = iso_pi(parent)->sync_handle; } - if (ev2 && !ev2->status) { - iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; + if (ev3) { iso_pi(sk)->qos = iso_pi(parent)->qos; + iso_pi(sk)->qos.bcast.encryption = ev3->encryption; + hcon->iso_qos = iso_pi(sk)->qos; iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis; memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS); + set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } bacpy(&iso_pi(sk)->dst, &hcon->dst); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 77cb75e63aca1..31f923e7b5c40 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -221,8 +221,12 @@ static int page_pool_init(struct page_pool *pool, return -ENOMEM; #endif - if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) { +#ifdef CONFIG_PAGE_POOL_STATS + free_percpu(pool->recycle_stats); +#endif return -ENOMEM; + } atomic_set(&pool->pages_state_release_cnt, 0); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4eaf7ed0d1f44..97b4a42e6e347 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4254,6 +4254,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config) { + unsigned int patlen = config->ops->get_pattern_len(config); struct ts_state state; unsigned int ret; @@ -4265,7 +4266,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); ret = textsearch_find(config, &state); - return (ret <= to - from ? ret : UINT_MAX); + return (ret + patlen <= to - from ? 
ret : UINT_MAX); } EXPORT_SYMBOL(skb_find_text); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 69453b936bd55..524b7e581a036 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (dccp_parse_options(sk, dreq, skb)) goto drop_and_free; - if (security_inet_conn_request(sk, skb, req)) - goto drop_and_free; - ireq = inet_rsk(req); sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); @@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ireq->ireq_family = AF_INET; ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if); + if (security_inet_conn_request(sk, skb, req)) + goto drop_and_free; + /* * Step 3: Process LISTEN state * diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index c693a570682fb..6f5a556f4f6d7 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (dccp_parse_options(sk, dreq, skb)) goto drop_and_free; - if (security_inet_conn_request(sk, skb, req)) - goto drop_and_free; - ireq = inet_rsk(req); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; ireq->ireq_family = AF_INET6; ireq->ir_mark = inet_request_mark(sk, skb); + if (security_inet_conn_request(sk, skb, req)) + goto drop_and_free; + if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index b71dab630a873..80cdc6f6b34c9 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame, skb = skb_copy_expand(frame->skb_std, 0, skb_tailroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC); - prp_fill_rct(skb, frame, port); - - return skb; + return prp_fill_rct(skb, frame, port); } static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index dc478a0574cbe..3b4dafefb4b03 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2]; * requested/supported by the syn/synack exchange. */ #define TSBITS 6 -#define TSMASK (((__u32)1 << TSBITS) - 1) static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) @@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, */ u64 cookie_init_timestamp(struct request_sock *req, u64 now) { - struct inet_request_sock *ireq; - u32 ts, ts_now = tcp_ns_to_ts(now); + const struct inet_request_sock *ireq = inet_rsk(req); + u64 ts, ts_now = tcp_ns_to_ts(now); u32 options = 0; - ireq = inet_rsk(req); - options = ireq->wscale_ok ? 
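The skb_find_text() change above asks the textsearch backend for its pattern length and reports a hit only when the whole match, not just its first byte, lies inside the [from, to) window. A userspace sketch of that bound using a naive scan (the buffer, window values and names are invented):

    #include <stdio.h>
    #include <string.h>
    #include <limits.h>

    /* A hit only counts if the entire pattern fits inside [from, to). */
    static unsigned int find_text(const char *buf, unsigned int from,
                                  unsigned int to, const char *pat)
    {
        size_t patlen = strlen(pat);

        for (unsigned int off = 0; from + off + patlen <= to; off++) {
            if (!memcmp(buf + from + off, pat, patlen))
                return off;                 /* offset relative to 'from' */
        }
        return UINT_MAX;                    /* no fully contained match */
    }

    int main(void)
    {
        const char *data = "GET /index.html HTTP/1.1";

        /* "HTTP" starts at offset 16; a window ending at 18 must not match,
         * because the tail of the pattern would fall outside it.
         */
        printf("window [0,18): %u\n", find_text(data, 0, 18, "HTTP"));
        printf("window [0,24): %u\n", find_text(data, 0, 24, "HTTP"));
        return 0;
    }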
ireq->snd_wscale : TS_OPT_WSCALE_MASK; if (ireq->sack_ok) options |= TS_OPT_SACK; if (ireq->ecn_ok) options |= TS_OPT_ECN; - ts = ts_now & ~TSMASK; + ts = (ts_now >> TSBITS) << TSBITS; ts |= options; - if (ts > ts_now) { - ts >>= TSBITS; - ts--; - ts <<= TSBITS; - ts |= options; - } - return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ); + if (ts > ts_now) + ts -= (1UL << TSBITS); + + return ts * (NSEC_PER_SEC / TCP_TS_HZ); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 804821d6bd4d4..1f9d1d445fb3b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6450,22 +6450,23 @@ reset_and_undo: static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); struct request_sock *req; /* If we are still handling the SYNACK RTO, see if timestamp ECR allows * undo. If peer SACKs triggered fast recovery, we can't undo here. */ - if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) - tcp_try_undo_loss(sk, false); + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out) + tcp_try_undo_recovery(sk); /* Reset rtx states to prevent spurious retransmits_timed_out() */ - tcp_sk(sk)->retrans_stamp = 0; + tp->retrans_stamp = 0; inet_csk(sk)->icsk_retransmits = 0; /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1, * we no longer need req so release it. */ - req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, + req = rcu_dereference_protected(tp->fastopen_rsk, lockdep_sock_is_held(sk)); reqsk_fastopen_remove(sk, req, false); diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index c196759f1d3bd..7aca12c59c184 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk) u32 val, crtt = 0; /* cached RTT scaled by 8 */ sk_dst_confirm(sk); + /* ssthresh may have been reduced unnecessarily during. + * 3WHS. Restore it back to its initial default. + */ + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; if (!dst) goto reset; rcu_read_lock(); - tm = tcp_get_metrics(sk, dst, true); + tm = tcp_get_metrics(sk, dst, false); if (!tm) { rcu_read_unlock(); goto reset; @@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk) tp->snd_ssthresh = val; if (tp->snd_ssthresh > tp->snd_cwnd_clamp) tp->snd_ssthresh = tp->snd_cwnd_clamp; - } else { - /* ssthresh may have been reduced unnecessarily during. - * 3WHS. Restore it back to its initial default. - */ - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; } val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) @@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net) match = net ? 
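The cookie_init_timestamp() rework above keeps the cookie timestamp in a u64 and rounds it with shifts: drop the low TSBITS bits, put the option flags there, and step back one full 2^TSBITS increment if that would run ahead of the current tick. A standalone sketch of the arithmetic (the sample tick value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define TSBITS 6    /* low bits of the cookie timestamp encode TCP option flags */

    /* Mirrors the new rounding: result <= ts_now and its low TSBITS == options. */
    static uint64_t cookie_ts(uint64_t ts_now, uint32_t options)
    {
        uint64_t ts = (ts_now >> TSBITS) << TSBITS;   /* round down to a step */

        ts |= options;
        if (ts > ts_now)            /* options exceeded the bits we dropped */
            ts -= 1ULL << TSBITS;
        return ts;
    }

    int main(void)
    {
        uint64_t ts_now = 123456789;    /* made-up tcp_ns_to_ts()-style tick */

        for (uint32_t opt = 0; opt < 64; opt += 21) {
            uint64_t ts = cookie_ts(ts_now, opt);

            printf("opt=%2u ts=%llu below_now=%d low_bits=%llu\n",
                   (unsigned)opt, (unsigned long long)ts, ts <= ts_now,
                   (unsigned long long)(ts & ((1ULL << TSBITS) - 1)));
        }
        return 0;
    }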
net_eq(tm_net(tm), net) : !refcount_read(&tm_net(tm)->ns.count); if (match) { - *pp = tm->tcpm_next; + rcu_assign_pointer(*pp, tm->tcpm_next); kfree_rcu(tm, rcu_head); } else { pp = &tm->tcpm_next; @@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) if (addr_same(&tm->tcpm_daddr, &daddr) && (!src || addr_same(&tm->tcpm_saddr, &saddr)) && net_eq(tm_net(tm), net)) { - *pp = tm->tcpm_next; + rcu_assign_pointer(*pp, tm->tcpm_next); kfree_rcu(tm, rcu_head); found = true; } else { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f39b9c8445808..c3ff984b63547 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -714,7 +714,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) iph->saddr, uh->source, skb->dev->ifindex, inet_sdif(skb), udptable, NULL); - if (!sk || udp_sk(sk)->encap_type) { + if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) { /* No socket for error: try tunnels before discarding */ if (static_branch_unlikely(&udp_encap_needed_key)) { sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb, @@ -1051,7 +1051,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) u8 tos, scope; __be16 dport; int err, is_udplite = IS_UDPLITE(sk); - int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; + int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sk_buff *skb; struct ip_options_data opt_copy; @@ -1315,11 +1315,11 @@ void udp_splice_eof(struct socket *sock) struct sock *sk = sock->sk; struct udp_sock *up = udp_sk(sk); - if (!up->pending || READ_ONCE(up->corkflag)) + if (!up->pending || udp_test_bit(CORK, sk)) return; lock_sock(sk); - if (up->pending && !READ_ONCE(up->corkflag)) + if (up->pending && !udp_test_bit(CORK, sk)) udp_push_pending_frames(sk); release_sock(sk); } @@ -1868,7 +1868,7 @@ try_again: (struct sockaddr *)sin); } - if (udp_sk(sk)->gro_enabled) + if (udp_test_bit(GRO_ENABLED, sk)) udp_cmsg_recv(msg, sk, skb); if (inet_cmsg_flags(inet)) @@ -2081,7 +2081,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) } nf_reset_ct(skb); - if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) { + if (static_branch_unlikely(&udp_encap_needed_key) && + READ_ONCE(up->encap_type)) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* @@ -2119,7 +2120,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets */ - if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) { + u16 pcrlen = READ_ONCE(up->pcrlen); /* * MIB statistics other than incrementing the error count are @@ -2132,7 +2134,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) * delivery of packets with coverage values less than a value * provided by the application." */ - if (up->pcrlen == 0) { /* full coverage was set */ + if (pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; @@ -2143,9 +2145,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) * that it wants x while sender emits packets of smaller size y. * Therefore the above ...()->partial_cov statement is essential. 
*/ - if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { + if (UDP_SKB_CB(skb)->cscov < pcrlen) { net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + UDP_SKB_CB(skb)->cscov, pcrlen); goto drop; } } @@ -2618,7 +2620,7 @@ void udp_destroy_sock(struct sock *sk) if (encap_destroy) encap_destroy(sk); } - if (up->encap_enabled) + if (udp_test_bit(ENCAP_ENABLED, sk)) static_branch_dec(&udp_encap_needed_key); } } @@ -2658,9 +2660,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case UDP_CORK: if (val != 0) { - WRITE_ONCE(up->corkflag, 1); + udp_set_bit(CORK, sk); } else { - WRITE_ONCE(up->corkflag, 0); + udp_clear_bit(CORK, sk); lock_sock(sk); push_pending_frames(sk); release_sock(sk); @@ -2675,17 +2677,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, case UDP_ENCAP_ESPINUDP_NON_IKE: #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) - up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv; + WRITE_ONCE(up->encap_rcv, + ipv6_stub->xfrm6_udp_encap_rcv); else #endif - up->encap_rcv = xfrm4_udp_encap_rcv; + WRITE_ONCE(up->encap_rcv, + xfrm4_udp_encap_rcv); #endif fallthrough; case UDP_ENCAP_L2TPINUDP: - up->encap_type = val; - lock_sock(sk); - udp_tunnel_encap_enable(sk->sk_socket); - release_sock(sk); + WRITE_ONCE(up->encap_type, val); + udp_tunnel_encap_enable(sk); break; default: err = -ENOPROTOOPT; @@ -2694,11 +2696,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, break; case UDP_NO_CHECK6_TX: - up->no_check6_tx = valbool; + udp_set_no_check6_tx(sk, valbool); break; case UDP_NO_CHECK6_RX: - up->no_check6_rx = valbool; + udp_set_no_check6_rx(sk, valbool); break; case UDP_SEGMENT: @@ -2708,14 +2710,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, break; case UDP_GRO: - lock_sock(sk); /* when enabling GRO, accept the related GSO packet type */ if (valbool) - udp_tunnel_encap_enable(sk->sk_socket); - up->gro_enabled = valbool; - up->accept_udp_l4 = valbool; - release_sock(sk); + udp_tunnel_encap_enable(sk); + udp_assign_bit(GRO_ENABLED, sk, valbool); + udp_assign_bit(ACCEPT_L4, sk, valbool); break; /* @@ -2730,8 +2730,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; - up->pcslen = val; - up->pcflag |= UDPLITE_SEND_CC; + WRITE_ONCE(up->pcslen, val); + udp_set_bit(UDPLITE_SEND_CC, sk); break; /* The receiver specifies a minimum checksum coverage value. 
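The UDP hunks above replace individual socket fields such as corkflag, gro_enabled and the UDP-Lite pcflag with single-bit flags accessed through helpers like udp_test_bit()/udp_set_bit(), and wrap the remaining shared fields in READ_ONCE()/WRITE_ONCE(), so they can be read without holding the socket lock. A simplified userspace stand-in for the flag-word pattern using C11 atomics (the struct and helper names are invented, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* All boolean options live in one atomic word, so a lockless reader never
     * races with a writer updating a neighbouring field.
     */
    enum { FLAG_CORK, FLAG_NO_CHECK6_TX, FLAG_GRO_ENABLED };

    struct toy_udp_sock { atomic_ulong flags; };

    static void set_flag(struct toy_udp_sock *up, int nr)
    {
        atomic_fetch_or(&up->flags, 1UL << nr);
    }

    static void clear_flag(struct toy_udp_sock *up, int nr)
    {
        atomic_fetch_and(&up->flags, ~(1UL << nr));
    }

    static int test_flag(struct toy_udp_sock *up, int nr)
    {
        return !!(atomic_load(&up->flags) & (1UL << nr));
    }

    int main(void)
    {
        static struct toy_udp_sock up;      /* zero-initialised: all flags clear */

        set_flag(&up, FLAG_CORK);
        printf("cork=%d gro=%d\n", test_flag(&up, FLAG_CORK),
               test_flag(&up, FLAG_GRO_ENABLED));
        clear_flag(&up, FLAG_CORK);
        printf("cork=%d\n", test_flag(&up, FLAG_CORK));
        return 0;
    }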
To make @@ -2744,8 +2744,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; - up->pcrlen = val; - up->pcflag |= UDPLITE_RECV_CC; + WRITE_ONCE(up->pcrlen, val); + udp_set_bit(UDPLITE_RECV_CC, sk); break; default: @@ -2783,19 +2783,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, switch (optname) { case UDP_CORK: - val = READ_ONCE(up->corkflag); + val = udp_test_bit(CORK, sk); break; case UDP_ENCAP: - val = up->encap_type; + val = READ_ONCE(up->encap_type); break; case UDP_NO_CHECK6_TX: - val = up->no_check6_tx; + val = udp_get_no_check6_tx(sk); break; case UDP_NO_CHECK6_RX: - val = up->no_check6_rx; + val = udp_get_no_check6_rx(sk); break; case UDP_SEGMENT: @@ -2803,17 +2803,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, break; case UDP_GRO: - val = up->gro_enabled; + val = udp_test_bit(GRO_ENABLED, sk); break; /* The following two cannot be changed on UDP sockets, the return is * always 0 (which corresponds to the full checksum coverage of UDP). */ case UDPLITE_SEND_CSCOV: - val = up->pcslen; + val = READ_ONCE(up->pcslen); break; case UDPLITE_RECV_CSCOV: - val = up->pcrlen; + val = READ_ONCE(up->pcrlen); break; default: diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 0f46b3c2e4ac5..6c95d28d0c4a7 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -557,10 +557,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, NAPI_GRO_CB(skb)->is_flist = 0; if (!sk || !udp_sk(sk)->gro_receive) { if (skb->dev->features & NETIF_F_GRO_FRAGLIST) - NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1; + NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1; if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) || - (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) + (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist) return call_gro_receive(udp_gro_receive_segment, head, skb); /* no GRO, be sure flush the current packet */ diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 9b18f371af0d4..1e7e4aecdc48a 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, udp_sk(sk)->gro_receive = cfg->gro_receive; udp_sk(sk)->gro_complete = cfg->gro_complete; - udp_tunnel_encap_enable(sock); + udp_tunnel_encap_enable(sk); } EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 39ecdad1b50ce..af37af3ab727b 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table); static int udplite_sk_init(struct sock *sk) { udp_init_sock(sk); - udp_sk(sk)->pcflag = UDPLITE_BIT; pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, " "please contact the netdev mailing list\n"); return 0; diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index eac206a290d05..183f6dc372429 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) struct udphdr *uh; struct iphdr *iph; int iphlen, len; - __u8 *udpdata; __be32 *udpdata32; - __u16 encap_type = up->encap_type; + u16 encap_type; + encap_type = READ_ONCE(up->encap_type); /* if this is not encapsulated socket, then just return now */ if (!encap_type) return 1; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 
54fc4c711f2c5..1121082901b99 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -162,7 +162,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk, int err; skb_mark_not_on_list(segs); - err = ip6_fragment(net, sk, segs, ip6_finish_output2); + /* Last GSO segment can be smaller than gso_size (and MTU). + * Adding a fragment header would produce an "atomic fragment", + * which is considered harmful (RFC-8021). Avoid that. + */ + err = segs->len > mtu ? + ip6_fragment(net, sk, segs, ip6_finish_output2) : + ip6_finish_output2(net, sk, segs); if (err && ret == 0) ret = err; } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5014aa6634527..8698b49dfc8de 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq = tcp_rsk(req); treq->tfo_listener = false; - if (security_inet_conn_request(sk, skb, req)) - goto out_free; - req->mss = mss; ireq->ir_rmt_port = th->source; ireq->ir_num = ntohs(th->dest); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; + + if (security_inet_conn_request(sk, skb, req)) + goto out_free; + if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 86b5d509a4688..f60ba42954352 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -413,7 +413,7 @@ try_again: (struct sockaddr *)sin6); } - if (udp_sk(sk)->gro_enabled) + if (udp_test_bit(GRO_ENABLED, sk)) udp_cmsg_recv(msg, sk, skb); if (np->rxopt.all) @@ -571,7 +571,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, inet6_iif(skb), inet6_sdif(skb), udptable, NULL); - if (!sk || udp_sk(sk)->encap_type) { + if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) { /* No socket for error: try tunnels before discarding */ if (static_branch_unlikely(&udpv6_encap_needed_key)) { sk = __udp6_lib_err_encap(net, hdr, offset, uh, @@ -688,7 +688,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) } nf_reset_ct(skb); - if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { + if (static_branch_unlikely(&udpv6_encap_needed_key) && + READ_ONCE(up->encap_type)) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* @@ -726,16 +727,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). */ - if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) { + u16 pcrlen = READ_ONCE(up->pcrlen); - if (up->pcrlen == 0) { /* full coverage was set */ + if (pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } - if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { + if (UDP_SKB_CB(skb)->cscov < pcrlen) { net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + UDP_SKB_CB(skb)->cscov, pcrlen); goto drop; } } @@ -858,7 +860,7 @@ start_lookup: /* If zero checksum and no_check is not on for * the socket then skip it. 
*/ - if (!uh->check && !udp_sk(sk)->no_check6_rx) + if (!uh->check && !udp_get_no_check6_rx(sk)) continue; if (!first) { first = sk; @@ -980,7 +982,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp6_sk_rx_dst_set(sk, dst); - if (!uh->check && !udp_sk(sk)->no_check6_rx) { + if (!uh->check && !udp_get_no_check6_rx(sk)) { if (refcounted) sock_put(sk); goto report_csum_error; @@ -1002,7 +1004,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, /* Unicast */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { - if (!uh->check && !udp_sk(sk)->no_check6_rx) + if (!uh->check && !udp_get_no_check6_rx(sk)) goto report_csum_error; return udp6_unicast_rcv_skb(sk, skb, uh); } @@ -1241,7 +1243,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -EINVAL; } - if (udp_sk(sk)->no_check6_tx) { + if (udp_get_no_check6_tx(sk)) { kfree_skb(skb); return -EINVAL; } @@ -1262,7 +1264,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, if (is_udplite) csum = udplite_csum(skb); - else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ + else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ @@ -1332,7 +1334,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int addr_len = msg->msg_namelen; bool connected = false; int ulen = len; - int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; + int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE; int err; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); @@ -1644,11 +1646,11 @@ static void udpv6_splice_eof(struct socket *sock) struct sock *sk = sock->sk; struct udp_sock *up = udp_sk(sk); - if (!up->pending || READ_ONCE(up->corkflag)) + if (!up->pending || udp_test_bit(CORK, sk)) return; lock_sock(sk); - if (up->pending && !READ_ONCE(up->corkflag)) + if (up->pending && !udp_test_bit(CORK, sk)) udp_v6_push_pending_frames(sk); release_sock(sk); } @@ -1670,7 +1672,7 @@ void udpv6_destroy_sock(struct sock *sk) if (encap_destroy) encap_destroy(sk); } - if (up->encap_enabled) { + if (udp_test_bit(ENCAP_ENABLED, sk)) { static_branch_dec(&udpv6_encap_needed_key); udp_encap_disable(); } diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 267d491e97075..a60bec9b14f14 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -17,7 +17,6 @@ static int udplitev6_sk_init(struct sock *sk) { udpv6_init_sock(sk); - udp_sk(sk)->pcflag = UDPLITE_BIT; pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, " "please contact the netdev mailing list\n"); return 0; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 4907ab241d6be..4156387248e40 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) struct ipv6hdr *ip6h; int len; int ip6hlen = sizeof(struct ipv6hdr); - __u8 *udpdata; __be32 *udpdata32; - __u16 encap_type = up->encap_type; + u16 encap_type; if (skb->protocol == htons(ETH_P_IP)) return xfrm4_udp_encap_rcv(sk, skb); + encap_type = READ_ONCE(up->encap_type); /* if this is not encapsulated socket, then just return now */ if (!encap_type) return 1; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 03608d3ded4b8..8d21ff25f1602 100644 --- 
a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk) switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: /* No longer an encapsulation socket. See net/ipv4/udp.c */ - (udp_sk(sk))->encap_type = 0; - (udp_sk(sk))->encap_rcv = NULL; - (udp_sk(sk))->encap_destroy = NULL; + WRITE_ONCE(udp_sk(sk)->encap_type, 0); + udp_sk(sk)->encap_rcv = NULL; + udp_sk(sk)->encap_destroy = NULL; break; case L2TP_ENCAPTYPE_IP: break; diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 7cac441862e21..51bccfb00a9cd 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb) skb->transport_header += llc_len; skb_pull(skb, llc_len); if (skb->protocol == htons(ETH_P_802_2)) { - __be16 pdulen = eth_hdr(skb)->h_proto; - s32 data_size = ntohs(pdulen) - llc_len; + __be16 pdulen; + s32 data_size; + + if (skb->mac_len < ETH_HLEN) + return 0; + + pdulen = eth_hdr(skb)->h_proto; + data_size = ntohs(pdulen) - llc_len; if (data_size < 0 || !pskb_may_pull(skb, data_size)) diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index 79d1cef8f15a9..06fb8e6944b06 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) int rc = 1; u32 data_size; + if (skb->mac_len < ETH_HLEN) + return 1; + llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_da(skb, mac_sa); llc_pdu_decode_ssap(skb, &dsap); diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 05c6ae0920534..f506542925109 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb) u32 data_size; struct sk_buff *nskb; + if (skb->mac_len < ETH_HLEN) + goto out; + /* The test request command is type U (llc_len = 3) */ data_size = ntohs(eth_hdr(skb)->h_proto) - 3; nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 30cd0c905a24f..aa37a1410f377 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -510,10 +510,13 @@ int drv_change_vif_links(struct ieee80211_local *local, if (ret) return ret; - for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { - link = rcu_access_pointer(sdata->link[link_id]); + if (!local->in_reconfig) { + for_each_set_bit(link_id, &links_to_add, + IEEE80211_MLD_MAX_NUM_LINKS) { + link = rcu_access_pointer(sdata->link[link_id]); - ieee80211_link_debugfs_drv_add(link); + ieee80211_link_debugfs_drv_add(link); + } } return 0; diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h index 49dc809cab290..1570fac8411f4 100644 --- a/net/mac80211/drop.h +++ b/net/mac80211/drop.h @@ -53,4 +53,7 @@ enum mac80211_drop_reason { #undef DEF }; +#define RX_RES_IS_UNUSABLE(result) \ + (((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE) + #endif /* MAC80211_DROP_H */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 98ef1fe1226e7..07beb72ddd25a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1406,7 +1406,7 @@ struct ieee80211_local { /* wowlan is enabled -- don't reconfig on resume */ bool wowlan; - struct work_struct radar_detected_work; + struct wiphy_work radar_detected_work; /* number of RX chains the hardware has */ u8 rx_chains; @@ -1483,14 +1483,14 @@ struct ieee80211_local { int hw_scan_ies_bufsize; struct cfg80211_scan_info scan_info; - struct 
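The LLC hunks above add a skb->mac_len < ETH_HLEN guard before eth_hdr() is dereferenced, so a runt frame can no longer lead to reading a length/type field that was never captured. A userspace sketch of the same validate-before-parse rule (the frame struct and byte offsets are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN 14     /* bytes in an Ethernet header */

    struct toy_frame {
        const uint8_t *data;
        unsigned int mac_len;   /* link-layer header bytes actually present */
    };

    /* Read the 802.3 length/type field only if a full header was captured. */
    static int frame_pdulen(const struct toy_frame *f, unsigned int *pdulen)
    {
        if (f->mac_len < ETH_HLEN)
            return -1;          /* runt header: refuse to parse */

        *pdulen = (unsigned int)(f->data[12] << 8 | f->data[13]);
        return 0;
    }

    int main(void)
    {
        uint8_t hdr[ETH_HLEN] = { [12] = 0x00, [13] = 0x2e };
        struct toy_frame full = { .data = hdr, .mac_len = sizeof(hdr) };
        struct toy_frame runt = { .data = hdr, .mac_len = 4 };
        unsigned int len = 0;
        int ret = frame_pdulen(&full, &len);

        printf("full frame: ret=%d len=%u\n", ret, len);
        printf("runt frame: ret=%d\n", frame_pdulen(&runt, &len));
        return 0;
    }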
work_struct sched_scan_stopped_work; + struct wiphy_work sched_scan_stopped_work; struct ieee80211_sub_if_data __rcu *sched_scan_sdata; struct cfg80211_sched_scan_request __rcu *sched_scan_req; u8 scan_addr[ETH_ALEN]; unsigned long leave_oper_channel_time; enum mac80211_scan_state next_scan_state; - struct delayed_work scan_work; + struct wiphy_delayed_work scan_work; struct ieee80211_sub_if_data __rcu *scan_sdata; /* For backward compatibility only -- do not use */ struct cfg80211_chan_def _oper_chandef; @@ -1583,9 +1583,9 @@ struct ieee80211_local { /* * Remain-on-channel support */ - struct delayed_work roc_work; + struct wiphy_delayed_work roc_work; struct list_head roc_list; - struct work_struct hw_roc_start, hw_roc_done; + struct wiphy_work hw_roc_start, hw_roc_done; unsigned long hw_roc_start_time; u64 roc_cookie_counter; @@ -1929,7 +1929,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed); /* scan/BSS handling */ -void ieee80211_scan_work(struct work_struct *work); +void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work); int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, struct ieee80211_channel **channels, @@ -1962,7 +1962,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req); int ieee80211_request_sched_scan_stop(struct ieee80211_local *local); void ieee80211_sched_scan_end(struct ieee80211_local *local); -void ieee80211_sched_scan_stopped_work(struct work_struct *work); +void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, + struct wiphy_work *work); /* off-channel/mgmt-tx */ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); @@ -2566,7 +2567,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local); void ieee80211_dfs_cac_timer_work(struct work_struct *work); void ieee80211_dfs_cac_cancel(struct ieee80211_local *local); -void ieee80211_dfs_radar_detected_work(struct work_struct *work); +void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, + struct wiphy_work *work); int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index be586bc0b5b7d..6e3bfb46af44d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -691,7 +691,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do ieee80211_recalc_ps(local); if (cancel_scan) - flush_delayed_work(&local->scan_work); + wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); if (local->open_count == 0) { ieee80211_stop_device(local); diff --git a/net/mac80211/link.c b/net/mac80211/link.c index 6148208b320e3..16cbaea93fc32 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -195,7 +195,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, memset(to_free, 0, sizeof(links)); - if (old_links == new_links) + if (old_links == new_links && dormant_links == sdata->vif.dormant_links) return 0; /* if there were no old links, need to clear the pointers to deflink */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 24315d7b31263..4548f84451095 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -335,10 +335,7 @@ static void ieee80211_restart_work(struct work_struct *work) struct ieee80211_sub_if_data *sdata; int ret; - /* wait for scan work complete */ flush_workqueue(local->workqueue); - flush_work(&local->sched_scan_stopped_work); 
- flush_work(&local->radar_detected_work); rtnl_lock(); /* we might do interface manipulations, so need both */ @@ -379,8 +376,8 @@ static void ieee80211_restart_work(struct work_struct *work) ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ - flush_delayed_work(&local->roc_work); - flush_work(&local->hw_roc_done); + wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work); + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); /* wait for all packet processing to be done */ synchronize_net(); @@ -809,12 +806,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, INIT_LIST_HEAD(&local->chanctx_list); mutex_init(&local->chanctx_mtx); - INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); + wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work); INIT_WORK(&local->restart_work, ieee80211_restart_work); - INIT_WORK(&local->radar_detected_work, - ieee80211_dfs_radar_detected_work); + wiphy_work_init(&local->radar_detected_work, + ieee80211_dfs_radar_detected_work); INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); local->smps_mode = IEEE80211_SMPS_OFF; @@ -825,8 +822,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, ieee80211_dynamic_ps_disable_work); timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); - INIT_WORK(&local->sched_scan_stopped_work, - ieee80211_sched_scan_stopped_work); + wiphy_work_init(&local->sched_scan_stopped_work, + ieee80211_sched_scan_stopped_work); spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); @@ -1482,13 +1479,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) */ ieee80211_remove_interfaces(local); + wiphy_lock(local->hw.wiphy); + wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work); + wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work); + wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work); + wiphy_unlock(local->hw.wiphy); rtnl_unlock(); - cancel_delayed_work_sync(&local->roc_work); cancel_work_sync(&local->restart_work); cancel_work_sync(&local->reconfig_filter); - flush_work(&local->sched_scan_stopped_work); - flush_work(&local->radar_detected_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index d32e304eeb4ba..3e52aaa57b1fc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -648,7 +648,7 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata, cache = &sdata->u.mesh.tx_cache; spin_lock_bh(&cache->walk_lock); - entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params); + entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params); if (entry) mesh_fast_tx_entry_free(cache, entry); spin_unlock_bh(&cache->walk_lock); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cdf991e74ab99..5bedd9cef414d 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local, if (dur == LONG_MAX) return false; - mod_delayed_work(local->workqueue, &local->roc_work, dur); + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur); return true; } @@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, roc->notified = true; } -static void ieee80211_hw_roc_start(struct work_struct *work) +static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work) { struct 
ieee80211_local *local = container_of(work, struct ieee80211_local, hw_roc_start); @@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw) trace_api_ready_on_channel(local); - ieee80211_queue_work(hw, &local->hw_roc_start); + wiphy_work_queue(hw->wiphy, &local->hw_roc_start); } EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel); @@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local) tmp->started = true; tmp->abort = true; } - ieee80211_queue_work(&local->hw, &local->hw_roc_done); + wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done); return; } @@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local) ieee80211_hw_config(local, 0); } - ieee80211_queue_delayed_work(&local->hw, &local->roc_work, - msecs_to_jiffies(min_dur)); + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, + msecs_to_jiffies(min_dur)); /* tell userspace or send frame(s) */ list_for_each_entry(tmp, &local->roc_list, list) { @@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) _ieee80211_start_next_roc(local); } else { /* delay it a bit */ - ieee80211_queue_delayed_work(&local->hw, &local->roc_work, - round_jiffies_relative(HZ/2)); + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, + round_jiffies_relative(HZ / 2)); } } @@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local) } } -static void ieee80211_roc_work(struct work_struct *work) +static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, roc_work.work); @@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work) mutex_unlock(&local->mtx); } -static void ieee80211_hw_roc_done(struct work_struct *work) +static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, hw_roc_done); @@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw) trace_api_remain_on_channel_expired(local); - ieee80211_queue_work(hw, &local->hw_roc_done); + wiphy_work_queue(hw->wiphy, &local->hw_roc_done); } EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired); @@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, /* if not HW assist, just queue & schedule work */ if (!local->ops->remain_on_channel) { list_add_tail(&roc->list, &local->roc_list); - ieee80211_queue_delayed_work(&local->hw, - &local->roc_work, 0); + wiphy_delayed_work_queue(local->hw.wiphy, + &local->roc_work, 0); } else { /* otherwise actually kick it off here * (for error handling) @@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, if (!cookie) return -ENOENT; - flush_work(&local->hw_roc_start); + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start); mutex_lock(&local->mtx); list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { @@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, } else { /* go through work struct to return to the operating channel */ found->abort = true; - mod_delayed_work(local->workqueue, &local->roc_work, 0); + wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0); } out_unlock: @@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, void ieee80211_roc_setup(struct ieee80211_local *local) { - INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start); - INIT_WORK(&local->hw_roc_done, 
ieee80211_hw_roc_done); - INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work); + wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start); + wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done); + wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work); INIT_LIST_HEAD(&local->roc_list); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8f6b6f56b65b4..26ca2f5dc52b2 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2112,7 +2112,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) /* either the frame has been decrypted or will be dropped */ status->flag |= RX_FLAG_DECRYPTED; - if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) && + if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) && rx->sdata->dev)) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, skb->data, skb->len); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 0805aa8603c61..68ec2124c3db5 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -274,8 +274,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) * the beacon/proberesp rx gives us an opportunity to upgrade * to active scan */ - set_bit(SCAN_BEACON_DONE, &local->scanning); - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); + set_bit(SCAN_BEACON_DONE, &local->scanning); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); } if (ieee80211_is_probe_resp(mgmt->frame_control)) { @@ -505,7 +505,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, memcpy(&local->scan_info, info, sizeof(*info)); - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); } EXPORT_SYMBOL(ieee80211_scan_completed); @@ -545,8 +545,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local, /* We need to set power level at maximum rate for scanning. */ ieee80211_hw_config(local, 0); - ieee80211_queue_delayed_work(&local->hw, - &local->scan_work, 0); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); return 0; } @@ -603,8 +602,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local) lockdep_is_held(&local->mtx)))) return; - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, - round_jiffies_relative(0)); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + round_jiffies_relative(0)); } static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, @@ -795,8 +794,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, } /* Now, just wait a bit and we are all done! 
*/ - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, - next_delay); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + next_delay); return 0; } else { /* Do normal software scan */ @@ -1043,7 +1042,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local, local->next_scan_state = SCAN_SET_CHANNEL; } -void ieee80211_scan_work(struct work_struct *work) +void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, scan_work.work); @@ -1137,7 +1136,8 @@ void ieee80211_scan_work(struct work_struct *work) } } while (next_delay == 0); - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, + next_delay); goto out; out_complete: @@ -1280,12 +1280,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local) goto out; } - /* - * If the work is currently running, it must be blocked on - * the mutex, but we'll set scan_sdata = NULL and it'll - * simply exit once it acquires the mutex. - */ - cancel_delayed_work(&local->scan_work); + wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work); /* and clean up */ memset(&local->scan_info, 0, sizeof(local->scan_info)); __ieee80211_scan_completed(&local->hw, true); @@ -1427,10 +1422,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local) mutex_unlock(&local->mtx); - cfg80211_sched_scan_stopped(local->hw.wiphy, 0); + cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); } -void ieee80211_sched_scan_stopped_work(struct work_struct *work) +void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, @@ -1453,6 +1449,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) if (local->in_reconfig) return; - schedule_work(&local->sched_scan_stopped_work); + wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work); } EXPORT_SYMBOL(ieee80211_sched_scan_stopped); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 7751f8ba960ee..0c5cc75857e4f 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2990,7 +2990,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1; if (val) - sta->sta.max_amsdu_subframes = 4 << val; + sta->sta.max_amsdu_subframes = 4 << (4 - val); } #ifdef CONFIG_LOCKDEP diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8a6917cf63cf9..172173b2a9eb8 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2340,8 +2340,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local, */ if (aborted) set_bit(SCAN_ABORTED, &local->scanning); - ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); - flush_delayed_work(&local->scan_work); + wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); + wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); } } @@ -4356,7 +4356,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) mutex_unlock(&local->mtx); } -void ieee80211_dfs_radar_detected_work(struct work_struct *work) +void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, radar_detected_work); @@ -4374,9 +4375,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) } mutex_unlock(&local->chanctx_mtx); - wiphy_lock(local->hw.wiphy); 
ieee80211_dfs_cac_cancel(local); - wiphy_unlock(local->hw.wiphy); if (num_chanctx > 1) /* XXX: multi-channel is not supported yet */ @@ -4391,7 +4390,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw) trace_api_radar_detected(local); - schedule_work(&local->radar_detected_work); + wiphy_work_queue(hw->wiphy, &local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected); diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c index bceaab8dd8e46..74698582a2859 100644 --- a/net/mptcp/fastopen.c +++ b/net/mptcp/fastopen.c @@ -52,6 +52,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf mptcp_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); + mptcp_sk(sk)->bytes_received += skb->len; sk->sk_data_ready(sk); diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c index 6616ba5d0b049..5b37487d9d11f 100644 --- a/net/netfilter/nf_nat_redirect.c +++ b/net/netfilter/nf_nat_redirect.c @@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4); static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; +static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope) +{ + unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr); + + if (ifa_addr_type & IPV6_ADDR_MAPPED) + return false; + + if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC))) + return false; + + if (scope) { + unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK; + + if (!(scope & ifa_scope)) + return false; + } + + return true; +} + unsigned int nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, unsigned int hooknum) @@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, if (hooknum == NF_INET_LOCAL_OUT) { newdst.in6 = loopback_addr; } else { + unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr); struct inet6_dev *idev; - struct inet6_ifaddr *ifa; bool addr = false; idev = __in6_dev_get(skb->dev); if (idev != NULL) { + const struct inet6_ifaddr *ifa; + read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { + if (!nf_nat_redirect_ipv6_usable(ifa, scope)) + continue; + newdst.in6 = ifa->addr; addr = true; break; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 29c651804cb22..3bf428a188ccf 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3465,10 +3465,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, goto cont_skip; if (*idx < s_idx) goto cont; - if (*idx > s_idx) { - memset(&cb->args[1], 0, - sizeof(cb->args) - sizeof(cb->args[0])); - } if (prule) handle = prule->handle; else diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 7ddb9a78e3fc8..ef93e0d3bee04 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, { struct recent_table *t = pde_data(file_inode(file)); struct recent_entry *e; - char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; + char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")]; const char *c = buf; union nf_inet_addr addr = {}; u_int16_t family; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 0b9a785dea459..3019a4406ca4f 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, if (err) return err; - 
nf_conn_act_ct_ext_add(ct); + nf_conn_act_ct_ext_add(skb, ct, ctinfo); } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && labels_nonzero(&info->labels.mask)) { err = ovs_ct_set_labels(ct, key, &info->labels.value, diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index ac85d4644a3c3..df8a271948a1c 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) conn->idle_timestamp = jiffies; if (atomic_dec_and_test(&conn->active)) rxrpc_set_service_reap_timer(conn->rxnet, - jiffies + rxrpc_connection_expiry); + jiffies + rxrpc_connection_expiry * HZ); } rxrpc_put_call(call, rxrpc_call_put_io_thread); diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 7d910aee4f8cb..c553a30e9c838 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) struct rxrpc_local *local = container_of(timer, struct rxrpc_local, client_conn_reap_timer); - if (local->kill_all_client_conns && + if (!local->kill_all_client_conns && test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags)) rxrpc_wake_up_io_thread(local); } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index fb52d6f9aff93..3922d825ef2d8 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry, entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir]; } +static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry) +{ + struct nf_conn_act_ct_ext *act_ct_ext; + + act_ct_ext = nf_conn_act_ct_ext_find(entry->ct); + if (act_ct_ext) { + tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL); + tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY); + } +} + static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft, struct nf_conn *ct, bool tcp, bool bidirectional) @@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, else ctinfo = IP_CT_ESTABLISHED_REPLY; + nf_conn_act_ct_ext_fill(skb, ct, ctinfo); + tcf_ct_flow_ct_ext_ifidx_update(flow); flow_offload_refresh(nf_ft, flow, force_refresh); if (!test_bit(IPS_ASSURED_BIT, &ct->status)) { /* Process this flow in SW to allow promoting to ASSURED */ @@ -1030,7 +1043,7 @@ do_nat: tcf_ct_act_set_labels(ct, p->labels, p->labels_mask); if (!nf_ct_is_confirmed(ct)) - nf_conn_act_ct_ext_add(ct); + nf_conn_act_ct_ext_add(skb, ct, ctinfo); /* This will take care of sending queued events * even if the connection is already confirmed. 
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 35ddebae88941..4c047e0e1625e 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc) if (!smc->use_fallback) { rc = smc_close_active(smc); - sock_set_flag(sk, SOCK_DEAD); + smc_sock_set_flag(sk, SOCK_DEAD); sk->sk_shutdown |= SHUTDOWN_MASK; } else { if (sk->sk_state != SMC_CLOSED) { @@ -1743,7 +1743,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) if (new_clcsock) sock_release(new_clcsock); new_sk->sk_state = SMC_CLOSED; - sock_set_flag(new_sk, SOCK_DEAD); + smc_sock_set_flag(new_sk, SOCK_DEAD); sock_put(new_sk); /* final */ *new_smc = NULL; goto out; diff --git a/net/smc/smc.h b/net/smc/smc.h index 24745fde4ac26..e377980b84145 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb); int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info); int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info); +static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag) +{ + set_bit(flag, &sk->sk_flags); +} + #endif /* __SMC_H */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 89105e95b4523..3c06625ceb200 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, { struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd; struct smc_connection *conn = cdcpend->conn; + struct smc_buf_desc *sndbuf_desc; struct smc_sock *smc; int diff; + sndbuf_desc = conn->sndbuf_desc; smc = container_of(conn, struct smc_sock, conn); bh_lock_sock(&smc->sk); - if (!wc_status) { - diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len, + if (!wc_status && sndbuf_desc) { + diff = smc_curs_diff(sndbuf_desc->len, &cdcpend->conn->tx_curs_fin, &cdcpend->cursor); /* sndbuf_space is decreased in smc_sendmsg */ @@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn, union smc_host_cursor cfed; int rc; - if (unlikely(!READ_ONCE(conn->sndbuf_desc))) - return -ENOBUFS; - smc_cdc_add_pending_send(conn, pend); conn->tx_cdc_seq++; @@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, smc->sk.sk_shutdown |= RCV_SHUTDOWN; if (smc->clcsock && smc->clcsock->sk) smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; - sock_set_flag(&smc->sk, SOCK_DONE); + smc_sock_set_flag(&smc->sk, SOCK_DONE); sock_hold(&smc->sk); /* sock_put in close_work */ if (!queue_work(smc_close_wq, &conn->close_work)) sock_put(&smc->sk); diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index dbdf03e8aa5b5..10219f55aad14 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc) struct sock *sk = &smc->sk; release_sock(sk); - cancel_work_sync(&smc->conn.close_work); + if (cancel_work_sync(&smc->conn.close_work)) + sock_put(sk); cancel_delayed_work_sync(&smc->conn.tx_work); lock_sock(sk); } @@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc) break; } - sock_set_flag(sk, SOCK_DEAD); + smc_sock_set_flag(sk, SOCK_DEAD); sk->sk_state_change(sk); if (release_clcsock) { diff --git a/net/tipc/link.c b/net/tipc/link.c index e33b4f29f77cf..d0143823658d5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l, p = (struct tipc_gap_ack_blks *)msg_data(hdr); sz = 
ntohs(p->len); /* Sanity check */ - if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) { + if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) { /* Good, check if the desired type exists */ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt)) goto ok; @@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr) __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0; /* Total len */ - len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt); + len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt)); ga->len = htons(len); return len; } diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index e8fd257c0e688..1a9a5bdaccf4f 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, - [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING, + [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING, .len = TIPC_MAX_LINK_NAME }, [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, @@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = { [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC }, - [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING, + [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING, .len = TIPC_MAX_BEARER_NAME }, [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED }, [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index e9d1e83a859d1..9634dfd636fd6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1491,7 +1491,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov, */ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv); aead_size = ALIGN(aead_size, __alignof__(*dctx)); - mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout), + mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)), sk->sk_allocation); if (!mem) { err = -ENOMEM; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 352d042b130b5..8bc272b6003bb 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -68,6 +68,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info, hdr->dst_port = cpu_to_le32(dst_port); hdr->flags = cpu_to_le32(info->flags); hdr->len = cpu_to_le32(len); + hdr->buf_alloc = cpu_to_le32(0); + hdr->fwd_cnt = cpu_to_le32(0); if (info->msg && len > 0) { payload = skb_put(skb, len); @@ -1204,11 +1206,17 @@ virtio_transport_recv_connected(struct sock *sk, vsk->peer_shutdown |= RCV_SHUTDOWN; if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) vsk->peer_shutdown |= SEND_SHUTDOWN; - if (vsk->peer_shutdown == SHUTDOWN_MASK && - vsock_stream_has_data(vsk) <= 0 && - !sock_flag(sk, SOCK_DONE)) { - (void)virtio_transport_reset(vsk, NULL); - virtio_transport_do_close(vsk, true); + if (vsk->peer_shutdown == SHUTDOWN_MASK) { + if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) { + (void)virtio_transport_reset(vsk, NULL); + virtio_transport_do_close(vsk, true); + } + /* Remove this socket anyway because the remote peer sent + * the shutdown. 
This way a new connection will succeed + * if the remote peer uses the same source port, + * even if the old socket is still unreleased, but now disconnected. + */ + vsock_remove_sock(vsk); } if (le32_to_cpu(virtio_vsock_hdr(skb)->flags)) sk->sk_state_change(sk); diff --git a/net/wireless/core.c b/net/wireless/core.c index acec41c1809a8..563cfbe3237c9 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1049,7 +1049,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy) } EXPORT_SYMBOL(wiphy_rfkill_start_polling); -void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev) +void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev, + struct wiphy_work *end) { unsigned int runaway_limit = 100; unsigned long flags; @@ -1068,6 +1069,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev) wk->func(&rdev->wiphy, wk); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + + if (wk == end) + break; + if (WARN_ON(--runaway_limit == 0)) INIT_LIST_HEAD(&rdev->wiphy_work_list); } @@ -1118,7 +1123,7 @@ void wiphy_unregister(struct wiphy *wiphy) #endif /* surely nothing is reachable now, clean up work */ - cfg80211_process_wiphy_works(rdev); + cfg80211_process_wiphy_works(rdev, NULL); wiphy_unlock(&rdev->wiphy); rtnl_unlock(); @@ -1640,6 +1645,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work) } EXPORT_SYMBOL_GPL(wiphy_work_cancel); +void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + unsigned long flags; + bool run; + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + run = !work || !list_empty(&work->entry); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + + if (run) + cfg80211_process_wiphy_works(rdev, work); +} +EXPORT_SYMBOL_GPL(wiphy_work_flush); + void wiphy_delayed_work_timer(struct timer_list *t) { struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer); @@ -1672,6 +1692,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, } EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); +void wiphy_delayed_work_flush(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork) +{ + lockdep_assert_held(&wiphy->mtx); + + del_timer_sync(&dwork->timer); + wiphy_work_flush(wiphy, &dwork->work); +} +EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush); + static int __init cfg80211_init(void) { int err; diff --git a/net/wireless/core.h b/net/wireless/core.h index ba9c7170afa44..e536c0b615a09 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -464,7 +464,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); -void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev); +void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev, + struct wiphy_work *end); void cfg80211_process_wdev_events(struct wireless_dev *wdev); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 8210a6090ac16..e4cc6209c7b9b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2358,8 +2358,8 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, /* elem might be invalid after the memmove */ next = (void *)(elem->data + elem->datalen); - elem_datalen = elem->datalen; + if (elem->id == WLAN_EID_EXTENSION) { copied = elem->datalen - 
1; if (copied > data_len) @@ -2380,7 +2380,7 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, for (elem = next; elem->data < ies + ieslen && - elem->data + elem->datalen < ies + ieslen; + elem->data + elem->datalen <= ies + ieslen; elem = next) { /* elem might be invalid after the memmove */ next = (void *)(elem->data + elem->datalen); diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index c629bac3f2983..565511a3f461e 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev) cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); } - cfg80211_process_wiphy_works(rdev); + cfg80211_process_wiphy_works(rdev, NULL); if (rdev->ops->suspend) ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); - cfg80211_process_wiphy_works(rdev); + cfg80211_process_wiphy_works(rdev, NULL); ret = rdev_suspend(rdev, NULL); } if (ret == 0) diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o index 0edfdb40364b8..25b3b587d37c0 100644 --- a/scripts/Makefile.vmlinux_o +++ b/scripts/Makefile.vmlinux_o @@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION)) vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y) vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable -vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret) +vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \ + $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret) objtool-args = $(vmlinux-objtool-args-y) --link diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index e3517d4ab8ec9..04c87b570aabe 100644 --- a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -66,10 +66,11 @@ LX_GDBPARSED(IRQD_LEVEL) LX_GDBPARSED(IRQ_HIDDEN) /* linux/module.h */ -LX_GDBPARSED(MOD_TEXT) -LX_GDBPARSED(MOD_DATA) -LX_GDBPARSED(MOD_RODATA) -LX_GDBPARSED(MOD_RO_AFTER_INIT) +if IS_BUILTIN(CONFIG_MODULES): + LX_GDBPARSED(MOD_TEXT) + LX_GDBPARSED(MOD_DATA) + LX_GDBPARSED(MOD_RODATA) + LX_GDBPARSED(MOD_RO_AFTER_INIT) /* linux/mount.h */ LX_VALUE(MNT_NOSUID) diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 7056751c29b1f..6583b36dbe694 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias) /* Looks like: tee:uuid */ static int do_tee_entry(const char *filename, void *symval, char *alias) { - DEF_FIELD(symval, tee_client_device_id, uuid); + DEF_FIELD_ADDR(symval, tee_client_device_id, uuid); sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", - uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4], - uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9], - uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14], - uuid.b[15]); + uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4], + uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9], + uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14], + uuid->b[15]); add_wildcard(alias); return 1; @@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias) /* Looks like: ishtp:{guid} */ static int do_ishtp_entry(const char *filename, void *symval, char *alias) { - DEF_FIELD(symval, 
ishtp_device_id, guid); + DEF_FIELD_ADDR(symval, ishtp_device_id, guid); strcpy(alias, ISHTP_MODULE_PREFIX "{"); - add_guid(alias, guid); + add_guid(alias, *guid); strcat(alias, "}"); return 1; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index b38f7b2a5e1d5..ec695a6caac7d 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -255,6 +255,7 @@ void aa_free_profile(struct aa_profile *profile) aa_put_ns(profile->ns); kfree_sensitive(profile->rename); + kfree_sensitive(profile->disconnected); free_attachment(&profile->attach); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 8b8846073e142..b49201306753c 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -807,7 +807,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) const char *info = "failed to unpack profile"; size_t ns_len; struct rhashtable_params params = { 0 }; - char *key = NULL; + char *key = NULL, *disconnected = NULL; struct aa_data *data; int error = -EPROTO; kernel_cap_t tmpcap; @@ -873,7 +873,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) } /* disconnected attachment string is optional */ - (void) aa_unpack_str(e, &profile->disconnected, "disconnected"); + (void) aa_unpack_strdup(e, &disconnected, "disconnected"); + profile->disconnected = disconnected; /* per profile debug flags (complain, audit) */ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) { diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c index c6031f7440996..3c157b006a5a2 100644 --- a/sound/pci/hda/cs35l41_hda.c +++ b/sound/pci/hda/cs35l41_hda.c @@ -570,7 +570,7 @@ static void cs35l41_hda_play_done(struct device *dev) dev_dbg(dev, "Play (Complete)\n"); - cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, NULL, + cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, cs35l41->firmware_running); if (cs35l41->firmware_running) { regmap_multi_reg_write(reg, cs35l41_hda_unmute_dsp, @@ -589,7 +589,7 @@ static void cs35l41_hda_pause_start(struct device *dev) dev_dbg(dev, "Pause (Start)\n"); regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute)); - cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, NULL, + cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, cs35l41->firmware_running); } @@ -1668,8 +1668,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops); if (ret) { dev_err(cs35l41->dev, "Register component failed: %d\n", ret); - pm_runtime_disable(cs35l41->dev); - goto err; + goto err_pm; } dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid); @@ -1677,6 +1676,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i return 0; err_pm: + pm_runtime_dont_use_autosuspend(cs35l41->dev); pm_runtime_disable(cs35l41->dev); pm_runtime_put_noidle(cs35l41->dev); @@ -1695,6 +1695,7 @@ void cs35l41_hda_remove(struct device *dev) struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev); pm_runtime_get_sync(cs35l41->dev); + pm_runtime_dont_use_autosuspend(cs35l41->dev); pm_runtime_disable(cs35l41->dev); if (cs35l41->halo_initialized) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9677c09cf7a98..3ed2cba5ee8ad 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7262,8 +7262,10 @@ enum { ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, 
ALC299_FIXUP_PREDATOR_SPK, ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, + ALC289_FIXUP_DELL_SPK1, ALC289_FIXUP_DELL_SPK2, ALC289_FIXUP_DUAL_SPK, + ALC289_FIXUP_RTK_AMP_DUAL_SPK, ALC294_FIXUP_SPK2_TO_DAC1, ALC294_FIXUP_ASUS_DUAL_SPK, ALC285_FIXUP_THINKPAD_X1_GEN7, @@ -7363,6 +7365,7 @@ enum { ALC287_FIXUP_THINKPAD_I2S_SPK, ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD, ALC2XX_FIXUP_HEADSET_MIC, + ALC289_FIXUP_DELL_CS35L41_SPI_2, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -8589,6 +8592,15 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, + [ALC289_FIXUP_DELL_SPK1] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170140 }, + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE + }, [ALC289_FIXUP_DELL_SPK2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8604,6 +8616,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC289_FIXUP_DELL_SPK2 }, + [ALC289_FIXUP_RTK_AMP_DUAL_SPK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC289_FIXUP_DELL_SPK1 + }, [ALC294_FIXUP_SPK2_TO_DAC1] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_speaker2_to_dac1, @@ -9471,6 +9489,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mic, }, + [ALC289_FIXUP_DELL_CS35L41_SPI_2] = { + .type = HDA_FIXUP_FUNC, + .v.func = cs35l41_fixup_spi_two, + .chained = true, + .chain_id = ALC289_FIXUP_DUAL_SPK + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -9581,13 +9605,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), - SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c index 4ec306cd2f476..2ec5fdc875b13 100644 --- a/sound/soc/codecs/cs35l41-lib.c +++ b/sound/soc/codecs/cs35l41-lib.c @@ -1192,8 +1192,28 @@ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type) } EXPORT_SYMBOL_GPL(cs35l41_safe_reset); +/* + * Enabling the CS35L41_SHD_BOOST_ACTV and CS35L41_SHD_BOOST_PASS shared boosts + * does also require a call to cs35l41_mdsync_up(), but not before getting the + * PLL Lock signal. + * + * PLL Lock seems to be triggered soon after snd_pcm_start() is executed and + * SNDRV_PCM_TRIGGER_START command is processed, which happens (long) after the + * SND_SOC_DAPM_PRE_PMU event handler is invoked as part of snd_pcm_prepare(). + * + * This event handler is where cs35l41_global_enable() is normally called from, + * but waiting for PLL Lock here will time out. Increasing the wait duration + * will not help, as the only consequence of it would be to add an unnecessary + * delay in the invocation of snd_pcm_start(). + * + * Trying to move the wait in the SNDRV_PCM_TRIGGER_START callback is not a + * solution either, as the trigger is executed in an IRQ-off atomic context. + * + * The current approach is to invoke cs35l41_mdsync_up() right after receiving + * the PLL Lock interrupt, in the IRQ handler. + */ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, struct completion *pll_lock, bool firmware_running) + int enable, bool firmware_running) { int ret; unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask; @@ -1203,11 +1223,6 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4 {CS35L41_GPIO_PAD_CONTROL, 0}, {CS35L41_PWR_CTRL1, 0, 3000}, }; - struct reg_sequence cs35l41_mdsync_up_seq[] = { - {CS35L41_PWR_CTRL3, 0}, - {CS35L41_PWR_CTRL1, 0x00000000, 3000}, - {CS35L41_PWR_CTRL1, 0x00000001, 3000}, - }; pup_pdn_mask = enable ? 
CS35L41_PUP_DONE_MASK : CS35L41_PDN_DONE_MASK; @@ -1241,24 +1256,12 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4 cs35l41_mdsync_down_seq[0].def = pwr_ctrl3; cs35l41_mdsync_down_seq[1].def = pad_control; cs35l41_mdsync_down_seq[2].def = pwr_ctrl1; + ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_down_seq, ARRAY_SIZE(cs35l41_mdsync_down_seq)); - if (!enable) - break; - - if (!pll_lock) - return -EINVAL; - - ret = wait_for_completion_timeout(pll_lock, msecs_to_jiffies(1000)); - if (ret == 0) { - ret = -ETIMEDOUT; - } else { - regmap_read(regmap, CS35L41_PWR_CTRL3, &pwr_ctrl3); - pwr_ctrl3 |= CS35L41_SYNC_EN_MASK; - cs35l41_mdsync_up_seq[0].def = pwr_ctrl3; - ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_up_seq, - ARRAY_SIZE(cs35l41_mdsync_up_seq)); - } + /* Activation to be completed later via cs35l41_mdsync_up() */ + if (ret || enable) + return ret; ret = regmap_read_poll_timeout(regmap, CS35L41_IRQ1_STATUS1, int_status, int_status & pup_pdn_mask, @@ -1266,7 +1269,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4 if (ret) dev_err(dev, "Enable(%d) failed: %d\n", enable, ret); - // Clear PUP/PDN status + /* Clear PUP/PDN status */ regmap_write(regmap, CS35L41_IRQ1_STATUS1, pup_pdn_mask); break; case CS35L41_INT_BOOST: @@ -1348,6 +1351,17 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4 } EXPORT_SYMBOL_GPL(cs35l41_global_enable); +/* + * To be called after receiving the IRQ Lock interrupt, in order to complete + * any shared boost activation initiated by cs35l41_global_enable(). + */ +int cs35l41_mdsync_up(struct regmap *regmap) +{ + return regmap_update_bits(regmap, CS35L41_PWR_CTRL3, + CS35L41_SYNC_EN_MASK, CS35L41_SYNC_EN_MASK); +} +EXPORT_SYMBOL_GPL(cs35l41_mdsync_up); + int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg) { struct cs35l41_gpio_cfg *gpio1 = &hw_cfg->gpio1; diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index 722b69a6de26c..5456e6bfa242f 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -386,10 +386,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data) struct cs35l41_private *cs35l41 = data; unsigned int status[4] = { 0, 0, 0, 0 }; unsigned int masks[4] = { 0, 0, 0, 0 }; - int ret = IRQ_NONE; unsigned int i; + int ret; - pm_runtime_get_sync(cs35l41->dev); + ret = pm_runtime_resume_and_get(cs35l41->dev); + if (ret < 0) { + dev_err(cs35l41->dev, + "pm_runtime_resume_and_get failed in %s: %d\n", + __func__, ret); + return IRQ_NONE; + } + + ret = IRQ_NONE; for (i = 0; i < ARRAY_SIZE(status); i++) { regmap_read(cs35l41->regmap, @@ -459,7 +467,19 @@ static irqreturn_t cs35l41_irq(int irq, void *data) if (status[2] & CS35L41_PLL_LOCK) { regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS3, CS35L41_PLL_LOCK); - complete(&cs35l41->pll_lock); + + if (cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_ACTV || + cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_PASS) { + ret = cs35l41_mdsync_up(cs35l41->regmap); + if (ret) + dev_err(cs35l41->dev, "MDSYNC-up failed: %d\n", ret); + else + dev_dbg(cs35l41->dev, "MDSYNC-up done\n"); + + dev_dbg(cs35l41->dev, "PUP-done status: %d\n", + !!(status[0] & CS35L41_PUP_DONE_MASK)); + } + ret = IRQ_HANDLED; } @@ -500,11 +520,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w, ARRAY_SIZE(cs35l41_pup_patch)); ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type, - 1, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running); + 
1, cs35l41->dsp.cs_dsp.running); break; case SND_SOC_DAPM_POST_PMD: ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type, - 0, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running); + 0, cs35l41->dsp.cs_dsp.running); regmap_multi_reg_write_bypassed(cs35l41->regmap, cs35l41_pdn_patch, @@ -802,10 +822,6 @@ static const struct snd_pcm_hw_constraint_list cs35l41_constraints = { static int cs35l41_pcm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { - struct cs35l41_private *cs35l41 = snd_soc_component_get_drvdata(dai->component); - - reinit_completion(&cs35l41->pll_lock); - if (substream->runtime) return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, @@ -1295,8 +1311,6 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg * if (ret < 0) goto err; - init_completion(&cs35l41->pll_lock); - pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000); pm_runtime_use_autosuspend(cs35l41->dev); pm_runtime_mark_last_busy(cs35l41->dev); @@ -1320,6 +1334,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg * return 0; err_pm: + pm_runtime_dont_use_autosuspend(cs35l41->dev); pm_runtime_disable(cs35l41->dev); pm_runtime_put_noidle(cs35l41->dev); @@ -1336,6 +1351,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe); void cs35l41_remove(struct cs35l41_private *cs35l41) { pm_runtime_get_sync(cs35l41->dev); + pm_runtime_dont_use_autosuspend(cs35l41->dev); pm_runtime_disable(cs35l41->dev); regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF); diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h index 34d967d4372b2..c85cbc1dd333b 100644 --- a/sound/soc/codecs/cs35l41.h +++ b/sound/soc/codecs/cs35l41.h @@ -33,7 +33,6 @@ struct cs35l41_private { int irq; /* GPIO for /RST */ struct gpio_desc *reset_gpio; - struct completion pll_lock; }; int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg); diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 09eef6042aad6..20da1eaa4f1c7 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -877,18 +877,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component, void *data) { struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); - int ret = -ENOTSUPP; if (hcp->hcd.ops->hook_plugged_cb) { hcp->jack = jack; - ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent, - hcp->hcd.data, - plugged_cb, - component->dev); - if (ret) - hcp->jack = NULL; + return 0; } - return ret; + + return -ENOTSUPP; } static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai) @@ -982,6 +977,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component, return ret; } +static int hdmi_probe(struct snd_soc_component *component) +{ + struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); + int ret = 0; + + if (hcp->hcd.ops->hook_plugged_cb) { + ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent, + hcp->hcd.data, + plugged_cb, + component->dev); + } + + return ret; +} + static void hdmi_remove(struct snd_soc_component *component) { struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component); @@ -992,6 +1002,7 @@ static void hdmi_remove(struct snd_soc_component *component) } static const struct snd_soc_component_driver hdmi_driver = { + .probe = hdmi_probe, .remove = hdmi_remove, .dapm_widgets = hdmi_widgets, .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets), diff --git a/sound/soc/codecs/rt712-sdca.c 
b/sound/soc/codecs/rt712-sdca.c index 7077ff6ba1f4b..6954fbe7ec5f3 100644 --- a/sound/soc/codecs/rt712-sdca.c +++ b/sound/soc/codecs/rt712-sdca.c @@ -963,13 +963,6 @@ static int rt712_sdca_probe(struct snd_soc_component *component) rt712_sdca_parse_dt(rt712, &rt712->slave->dev); rt712->component = component; - if (!rt712->first_hw_init) - return 0; - - ret = pm_runtime_resume(component->dev); - if (ret < 0 && ret != -EACCES) - return ret; - /* add SPK route */ if (rt712->hw_id != RT712_DEV_ID_713) { snd_soc_add_component_controls(component, @@ -980,6 +973,13 @@ static int rt712_sdca_probe(struct snd_soc_component *component) rt712_sdca_spk_dapm_routes, ARRAY_SIZE(rt712_sdca_spk_dapm_routes)); } + if (!rt712->first_hw_init) + return 0; + + ret = pm_runtime_resume(component->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; } diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index bab7d34cf585b..5f181b89838ac 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -41,6 +41,7 @@ /** * struct codec_priv - CODEC private data + * @mclk: Main clock of the CODEC * @mclk_freq: Clock rate of MCLK * @free_freq: Clock rate of MCLK for hw_free() * @mclk_id: MCLK (or main clock) id for set_sysclk() diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c index ba62995c909ac..ec53bda46a467 100644 --- a/sound/soc/fsl/fsl_easrc.c +++ b/sound/soc/fsl/fsl_easrc.c @@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev) &fsl_easrc_dai, 1); if (ret) { dev_err(dev, "failed to register ASoC DAI\n"); - return ret; + goto err_pm_disable; } ret = devm_snd_soc_register_component(dev, &fsl_asrc_component, NULL, 0); if (ret) { dev_err(&pdev->dev, "failed to register ASoC platform\n"); - return ret; + goto err_pm_disable; } return 0; + +err_pm_disable: + pm_runtime_disable(&pdev->dev); + return ret; } static void fsl_easrc_remove(struct platform_device *pdev) diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c index 9014978100207..3f7ccae3f6b1a 100644 --- a/sound/soc/fsl/mpc5200_dma.c +++ b/sound/soc/fsl/mpc5200_dma.c @@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream) /** * psc_dma_trigger: start and stop the DMA transfer. + * @component: triggered component + * @substream: triggered substream + * @cmd: triggered command * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data. 
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 842649501e303..47d22cab5af62 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -1374,7 +1374,7 @@ static int create_sdw_dailink(struct snd_soc_card *card, int *link_index, continue; /* j reset after loop, adr_index only applies to first link */ - for (; j < adr_link_next->num_adr; j++) { + for (; j < adr_link_next->num_adr && codec_dlc_index < codec_num; j++) { const struct snd_soc_acpi_endpoint *endpoints; endpoints = adr_link_next->adr_d[j].endpoints; diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c index 623e3bebb8884..4360b9f5ff2c7 100644 --- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c +++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c @@ -58,6 +58,11 @@ static const struct snd_soc_dapm_route rt712_sdca_map[] = { { "rt712 MIC2", NULL, "Headset Mic" }, }; +static const struct snd_soc_dapm_route rt713_sdca_map[] = { + { "Headphone", NULL, "rt713 HP" }, + { "rt713 MIC2", NULL, "Headset Mic" }, +}; + static const struct snd_kcontrol_new rt_sdca_jack_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone"), SOC_DAPM_PIN_SWITCH("Headset Mic"), @@ -109,6 +114,9 @@ static int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd) } else if (strstr(component->name_prefix, "rt712")) { ret = snd_soc_dapm_add_routes(&card->dapm, rt712_sdca_map, ARRAY_SIZE(rt712_sdca_map)); + } else if (strstr(component->name_prefix, "rt713")) { + ret = snd_soc_dapm_add_routes(&card->dapm, rt713_sdca_map, + ARRAY_SIZE(rt713_sdca_map)); } else { dev_err(card->dev, "%s is not supported\n", component->name_prefix); return -EINVAL; diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c index 57ea815d3f041..b776c58dcf47a 100644 --- a/sound/soc/intel/skylake/skl-sst-utils.c +++ b/sound/soc/intel/skylake/skl-sst-utils.c @@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL); if (!module->instance_id) { ret = -ENOMEM; + kfree(module); goto free_uuid_list; } diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c index 9c11016f032c2..9777ba89e956c 100644 --- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c +++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c @@ -1179,7 +1179,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev) playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs"); if (!playback_codec) { ret = -EINVAL; - dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n"); + dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n"); goto err_playback_codec; } @@ -1193,7 +1193,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev) for_each_card_prelinks(card, i, dai_link) { ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3"); if (ret) { - dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n", + dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n", dai_link->name); goto err_probe; } diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 312e555798315..85e3bbf7e5f0e 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context 
*dapm, dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD); break; case snd_soc_dapm_clock_supply: - w->clk = devm_clk_get(dapm->dev, w->name); + w->clk = devm_clk_get(dapm->dev, widget->name); if (IS_ERR(w->clk)) { ret = PTR_ERR(w->clk); goto request_failed; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 54704250c0a2c..0a20122b3e555 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -698,14 +698,12 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd, if (!rollback) { snd_soc_runtime_deactivate(rtd, substream->stream); - /* clear the corresponding DAIs parameters when going to be inactive */ - for_each_rtd_dais(rtd, i, dai) { - if (snd_soc_dai_active(dai) == 0) - soc_pcm_set_dai_params(dai, NULL); - if (snd_soc_dai_stream_active(dai, substream->stream) == 0) - snd_soc_dai_digital_mute(dai, 1, substream->stream); - } + /* Make sure DAI parameters cleared if the DAI becomes inactive */ + for_each_rtd_dais(rtd, i, dai) + if (snd_soc_dai_active(dai) == 0 && + (dai->rate || dai->channels || dai->sample_bits)) + soc_pcm_set_dai_params(dai, NULL); } for_each_rtd_dais(rtd, i, dai) @@ -936,6 +934,15 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd, snd_soc_dpcm_mutex_assert_held(rtd); + /* clear the corresponding DAIs parameters when going to be inactive */ + for_each_rtd_dais(rtd, i, dai) { + if (snd_soc_dai_active(dai) == 1) + soc_pcm_set_dai_params(dai, NULL); + + if (snd_soc_dai_stream_active(dai, substream->stream) == 1) + snd_soc_dai_digital_mute(dai, 1, substream->stream); + } + /* run the stream event */ snd_soc_dapm_stream_stop(rtd, substream->stream); diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 2d1616b81485c..0938b259f7034 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -459,9 +459,10 @@ int snd_sof_device_remove(struct device *dev) struct snd_sof_dev *sdev = dev_get_drvdata(dev); struct snd_sof_pdata *pdata = sdev->pdata; int ret; + bool aborted = false; if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) - cancel_work_sync(&sdev->probe_work); + aborted = cancel_work_sync(&sdev->probe_work); /* * Unregister any registered client device first before IPC and debugfs @@ -487,6 +488,9 @@ int snd_sof_device_remove(struct device *dev) snd_sof_free_debug(sdev); snd_sof_remove(sdev); sof_ops_free(sdev); + } else if (aborted) { + /* probe_work never ran */ + sof_ops_free(sdev); } /* release firmware */ diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index 7cb63e6b24dc9..c9c1d2ec7af25 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -895,7 +895,8 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget) if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) { struct sof_ipc4_base_module_cfg_ext *base_cfg_ext; u32 ext_size = struct_size(base_cfg_ext, pin_formats, - swidget->num_input_pins + swidget->num_output_pins); + size_add(swidget->num_input_pins, + swidget->num_output_pins)); base_cfg_ext = kzalloc(ext_size, GFP_KERNEL); if (!base_cfg_ext) { diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c index 666057d50ea0d..dd3f59bb72faf 100644 --- a/sound/soc/ti/ams-delta.c +++ b/sound/soc/ti/ams-delta.c @@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty) static void cx81801_close(struct tty_struct *tty) { struct snd_soc_component *component = tty->disc_data; - struct snd_soc_dapm_context *dapm = &component->card->dapm; + struct snd_soc_dapm_context *dapm; del_timer_sync(&cx81801_timer); 
@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty) v253_ops.close(tty); + dapm = &component->card->dapm; + /* Revert back to default audio input/output constellation */ snd_soc_dapm_mutex_lock(dapm); diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c index 37e813175642f..a807df0f05974 100644 --- a/tools/crypto/ccp/dbc.c +++ b/tools/crypto/ccp/dbc.c @@ -8,6 +8,7 @@ */ #include +#include #include #include @@ -22,16 +23,14 @@ int get_nonce(int fd, void *nonce_out, void *signature) struct dbc_user_nonce tmp = { .auth_needed = !!signature, }; - int ret; assert(nonce_out); if (signature) memcpy(tmp.signature, signature, sizeof(tmp.signature)); - ret = ioctl(fd, DBCIOCNONCE, &tmp); - if (ret) - return ret; + if (ioctl(fd, DBCIOCNONCE, &tmp)) + return errno; memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce)); return 0; @@ -47,7 +46,9 @@ int set_uid(int fd, __u8 *uid, __u8 *signature) memcpy(tmp.uid, uid, sizeof(tmp.uid)); memcpy(tmp.signature, signature, sizeof(tmp.signature)); - return ioctl(fd, DBCIOCUID, &tmp); + if (ioctl(fd, DBCIOCUID, &tmp)) + return errno; + return 0; } int process_param(int fd, int msg_index, __u8 *signature, int *data) @@ -63,10 +64,10 @@ int process_param(int fd, int msg_index, __u8 *signature, int *data) memcpy(tmp.signature, signature, sizeof(tmp.signature)); - ret = ioctl(fd, DBCIOCPARAM, &tmp); - if (ret) - return ret; + if (ioctl(fd, DBCIOCPARAM, &tmp)) + return errno; *data = tmp.param; + memcpy(signature, tmp.signature, sizeof(tmp.signature)); return 0; } diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py index 3f6a825ffc9e4..2b91415b19407 100644 --- a/tools/crypto/ccp/dbc.py +++ b/tools/crypto/ccp/dbc.py @@ -27,8 +27,7 @@ lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL) def handle_error(code): - val = code * -1 - raise OSError(val, os.strerror(val)) + raise OSError(code, os.strerror(code)) def get_nonce(device, signature): @@ -58,7 +57,8 @@ def process_param(device, message, signature, data=None): if type(message) != tuple: raise ValueError("Expected message tuple") arg = ctypes.c_int(data if data else 0) - ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg)) + sig = ctypes.create_string_buffer(signature, len(signature)) + ret = lib.process_param(device.fileno(), message[0], ctypes.pointer(sig), ctypes.pointer(arg)) if ret: handle_error(ret) - return arg, signature + return arg.value, sig.value diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py index 998bb3e3cd040..79de3638a01ab 100755 --- a/tools/crypto/ccp/test_dbc.py +++ b/tools/crypto/ccp/test_dbc.py @@ -4,6 +4,12 @@ import unittest import os import time import glob +import fcntl +try: + import ioctl_opt as ioctl +except ImportError: + ioctl = None + pass from dbc import * # Artificial delay between set commands @@ -27,8 +33,8 @@ def system_is_secured() -> bool: class DynamicBoostControlTest(unittest.TestCase): def __init__(self, data) -> None: self.d = None - self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" - self.uid = "1111111111111111" + self.signature = b"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + self.uid = b"1111111111111111" super().__init__(data) def setUp(self) -> None: @@ -64,13 +70,16 @@ class TestInvalidIoctls(DynamicBoostControlTest): def setUp(self) -> None: if not os.path.exists(DEVICE_NODE): self.skipTest("system is unsupported") + if not ioctl: + self.skipTest("unable to test IOCTLs without ioctl_opt") + return super().setUp() def test_invalid_nonce_ioctl(self) -> None: """tries to call 
get_nonce ioctl with invalid data structures""" # 0x1 (get nonce), and invalid data - INVALID1 = IOWR(ord("D"), 0x01, invalid_param) + INVALID1 = ioctl.IOWR(ord("D"), 0x01, invalid_param) with self.assertRaises(OSError) as error: fcntl.ioctl(self.d, INVALID1, self.data, True) self.assertEqual(error.exception.errno, 22) @@ -79,7 +88,7 @@ class TestInvalidIoctls(DynamicBoostControlTest): """tries to call set_uid ioctl with invalid data structures""" # 0x2 (set uid), and invalid data - INVALID2 = IOW(ord("D"), 0x02, invalid_param) + INVALID2 = ioctl.IOW(ord("D"), 0x02, invalid_param) with self.assertRaises(OSError) as error: fcntl.ioctl(self.d, INVALID2, self.data, True) self.assertEqual(error.exception.errno, 22) @@ -88,7 +97,7 @@ class TestInvalidIoctls(DynamicBoostControlTest): """tries to call set_uid ioctl with invalid data structures""" # 0x2 as RW (set uid), and invalid data - INVALID3 = IOWR(ord("D"), 0x02, invalid_param) + INVALID3 = ioctl.IOWR(ord("D"), 0x02, invalid_param) with self.assertRaises(OSError) as error: fcntl.ioctl(self.d, INVALID3, self.data, True) self.assertEqual(error.exception.errno, 22) @@ -96,7 +105,7 @@ class TestInvalidIoctls(DynamicBoostControlTest): def test_invalid_param_ioctl(self) -> None: """tries to call param ioctl with invalid data structures""" # 0x3 (param), and invalid data - INVALID4 = IOWR(ord("D"), 0x03, invalid_param) + INVALID4 = ioctl.IOWR(ord("D"), 0x03, invalid_param) with self.assertRaises(OSError) as error: fcntl.ioctl(self.d, INVALID4, self.data, True) self.assertEqual(error.exception.errno, 22) @@ -104,7 +113,7 @@ class TestInvalidIoctls(DynamicBoostControlTest): def test_invalid_call_ioctl(self) -> None: """tries to call the DBC ioctl with invalid data structures""" # 0x4, and invalid data - INVALID5 = IOWR(ord("D"), 0x04, invalid_param) + INVALID5 = ioctl.IOWR(ord("D"), 0x04, invalid_param) with self.assertRaises(OSError) as error: fcntl.ioctl(self.d, INVALID5, self.data, True) self.assertEqual(error.exception.errno, 22) @@ -183,12 +192,12 @@ class TestUnFusedSystem(DynamicBoostControlTest): # SOC power soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature) soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature) - self.assertGreater(soc_power_max.parameter, soc_power_min.parameter) + self.assertGreater(soc_power_max[0], soc_power_min[0]) # fmax fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature) fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature) - self.assertGreater(fmax_max.parameter, fmax_min.parameter) + self.assertGreater(fmax_max[0], fmax_min[0]) # cap values keys = { @@ -199,7 +208,7 @@ class TestUnFusedSystem(DynamicBoostControlTest): } for k in keys: result = process_param(self.d, keys[k], self.signature) - self.assertGreater(result.parameter, 0) + self.assertGreater(result[0], 0) def test_get_invalid_param(self) -> None: """fetch an invalid parameter""" @@ -217,17 +226,17 @@ class TestUnFusedSystem(DynamicBoostControlTest): original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) # set the fmax - target = original.parameter - 100 + target = original[0] - 100 process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target) time.sleep(SET_DELAY) new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) - self.assertEqual(new.parameter, target) + self.assertEqual(new[0], target) # revert back to current - process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter) + process_param(self.d, PARAM_SET_FMAX_CAP, 
self.signature, original[0]) time.sleep(SET_DELAY) cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) - self.assertEqual(cur.parameter, original.parameter) + self.assertEqual(cur[0], original[0]) def test_set_power_cap(self) -> None: """get/set power cap limit""" @@ -235,17 +244,17 @@ class TestUnFusedSystem(DynamicBoostControlTest): original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) # set the fmax - target = original.parameter - 10 + target = original[0] - 10 process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target) time.sleep(SET_DELAY) new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) - self.assertEqual(new.parameter, target) + self.assertEqual(new[0], target) # revert back to current - process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter) + process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original[0]) time.sleep(SET_DELAY) cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) - self.assertEqual(cur.parameter, original.parameter) + self.assertEqual(cur[0], original[0]) def test_set_3d_graphics_mode(self) -> None: """set/get 3d graphics mode""" diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c index 44bbf80f0cfdd..0d0a7a19d6f95 100644 --- a/tools/iio/iio_generic_buffer.c +++ b/tools/iio/iio_generic_buffer.c @@ -54,9 +54,12 @@ enum autochan { static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels) { unsigned int bytes = 0; - int i = 0; + int i = 0, max = 0; + unsigned int misalignment; while (i < num_channels) { + if (channels[i].bytes > max) + max = channels[i].bytes; if (bytes % channels[i].bytes == 0) channels[i].location = bytes; else @@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in bytes = channels[i].location + channels[i].bytes; i++; } + /* + * We want the data in next sample to also be properly aligned so + * we'll add padding at the end if needed. Adding padding only + * works for channel data which size is 2^n bytes. + */ + misalignment = bytes % max; + if (misalignment) + bytes += max - misalignment; return bytes; } diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 3803479dbe106..1c13f8e88833b 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -362,8 +362,6 @@ struct pt_regs___arm64 { #define __PT_PARM7_REG a6 #define __PT_PARM8_REG a7 -/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ -#define PT_REGS_SYSCALL_REGS(ctx) ctx #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h index d5d771ccdc7b4..e88a6d8a0b0f9 100644 --- a/tools/lib/perf/include/internal/rc_check.h +++ b/tools/lib/perf/include/internal/rc_check.h @@ -9,8 +9,12 @@ * Enable reference count checking implicitly with leak checking, which is * integrated into address sanitizer. 
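The iio_generic_buffer change above pads the computed scan size to a multiple of the largest channel size, so the next sample in the buffer starts aligned as well. A standalone sketch of that rule (illustrative only; pad_scan_bytes() is a made-up name for the padding step):

#include <stdio.h>

/*
 * Illustrative only: the padding rule added to size_from_channelarray().
 * Each channel is naturally aligned inside the scan, and the scan itself
 * is padded to a multiple of the largest channel size; this works when
 * channel sizes are powers of two.
 */
static unsigned int pad_scan_bytes(unsigned int bytes, unsigned int largest)
{
	unsigned int misalignment = bytes % largest;

	return misalignment ? bytes + largest - misalignment : bytes;
}

int main(void)
{
	/*
	 * e.g. an 8-byte timestamp plus a 2-byte channel uses 10 bytes,
	 * padded to 16 so the following sample's timestamp stays aligned.
	 */
	printf("%u\n", pad_scan_bytes(10, 8));
	return 0;
}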
*/ -#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) +#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) #define REFCNT_CHECKING 1 +#elif defined(__has_feature) +#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer) +#define REFCNT_CHECKING 1 +#endif #endif /* diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index c54f7235c5d94..f40febdd6e36a 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -146,7 +146,5 @@ int main(int argc, const char **argv) exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED); pager_init(UNUSED); - objtool_run(argc, argv); - - return 0; + return objtool_run(argc, argv); } diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt index 3c36324712b6e..482d6c52e2edf 100644 --- a/tools/perf/Documentation/perf-kwork.txt +++ b/tools/perf/Documentation/perf-kwork.txt @@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies) SYNOPSIS -------- [verse] -'perf kwork' {record} +'perf kwork' {record|report|latency|timehist} DESCRIPTION ----------- diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 37af6df7b978d..86569f230e60d 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -69,6 +69,10 @@ include ../scripts/utilities.mak # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support # for dwarf backtrace post unwind. # +# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked, +# this will remove multiple features and tools, such as 'perf trace', +# that need it to read tracefs event format files, etc. +# # Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32 # for reading the 32-bit compatibility VDSO in 64-bit mode # diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c index 14bf7a8429e76..de2fbb7c56c32 100644 --- a/tools/perf/builtin-kwork.c +++ b/tools/perf/builtin-kwork.c @@ -406,12 +406,14 @@ static int work_push_atom(struct perf_kwork *kwork, work = work_findnew(&class->work_root, &key, &kwork->cmp_id); if (work == NULL) { - free(atom); + atom_free(atom); return -1; } - if (!profile_event_match(kwork, work, sample)) + if (!profile_event_match(kwork, work, sample)) { + atom_free(atom); return 0; + } if (dst_type < KWORK_TRACE_MAX) { dst_atom = list_last_entry_or_null(&work->atom_list[dst_type], @@ -1692,9 +1694,10 @@ int cmd_kwork(int argc, const char **argv) static struct perf_kwork kwork = { .class_list = LIST_HEAD_INIT(kwork.class_list), .tool = { - .mmap = perf_event__process_mmap, - .mmap2 = perf_event__process_mmap2, - .sample = perf_kwork__process_tracepoint_sample, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .sample = perf_kwork__process_tracepoint_sample, + .ordered_events = true, }, .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list), .sort_list = LIST_HEAD_INIT(kwork.sort_list), diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index b141f21342740..0b4b4445c5207 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -524,6 +524,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack) struct map *kmap; struct symbol *sym; u64 ip; + const char *arch = perf_env__arch(machine->env); if (list_empty(&callstack_filters)) return true; @@ -531,7 +532,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack) for (int i = 0; i < max_stack_depth; i++) { struct callstack_filter *filter; - if (!callstack || !callstack[i]) + 
/* + * In powerpc, the callchain saved by kernel always includes + * first three entries as the NIP (next instruction pointer), + * LR (link register), and the contents of LR save area in the + * second stack frame. In certain scenarios its possible to have + * invalid kernel instruction addresses in either LR or the second + * stack frame's LR. In that case, kernel will store that address as + * zero. + * + * The below check will continue to look into callstack, + * incase first or second callstack index entry has 0 + * address for powerpc. + */ + if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") || + (i != 1 && i != 2)))) break; ip = callstack[i]; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 07b48f6df48eb..a3af805a1d572 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void) * taking the highest cpu number to be the size of * the aggregation translate cpumap. */ - if (evsel_list->core.user_requested_cpus) + if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus)) nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; else nr = 0; diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json index 1e7e8901a4450..e2848a9d48487 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json @@ -1,362 +1,384 @@ [ { + "MetricName": "branch_miss_pred_rate", "MetricExpr": "BR_MIS_PRED / BR_PRED", "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch", - "MetricGroup": "Branch Prediction", - "MetricName": "Misprediction" + "MetricGroup": "branch", + "ScaleUnit": "100%" }, { - "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED", - "BriefDescription": "Branch predictor misprediction rate", - "MetricGroup": "Branch Prediction", - "MetricName": "Misprediction (retired)" - }, - { - "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)", + "MetricName": "bus_utilization", + "MetricExpr": "((BUS_ACCESS / (BUS_CYCLES * 1)) * 100)", "BriefDescription": "Core-to-uncore bus utilization", "MetricGroup": "Bus", - "MetricName": "Bus utilization" + "ScaleUnit": "1percent of bus cycles" }, { - "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE", - "BriefDescription": "L1D cache miss rate", - "MetricGroup": "Cache", - "MetricName": "L1D cache miss" + "MetricName": "l1d_cache_miss_ratio", + "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)", + "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.", + "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness", + "ScaleUnit": "1per cache access" + }, + { + "MetricName": "l1i_cache_miss_ratio", + "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)", + "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. 
This gives an indication of the effectiveness of the level 1 instruction cache.", + "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness", + "ScaleUnit": "1per cache access" }, { + "MetricName": "Miss_Ratio;l1d_cache_read_miss", "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD", "BriefDescription": "L1D cache read miss rate", "MetricGroup": "Cache", - "MetricName": "L1D cache read miss" + "ScaleUnit": "1per cache read access" }, { - "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE", - "BriefDescription": "L1I cache miss rate", - "MetricGroup": "Cache", - "MetricName": "L1I cache miss" - }, - { - "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE", - "BriefDescription": "L2 cache miss rate", - "MetricGroup": "Cache", - "MetricName": "L2 cache miss" + "MetricName": "l2_cache_miss_ratio", + "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)", + "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.", + "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness", + "ScaleUnit": "1per cache access" }, { + "MetricName": "l1i_cache_read_miss_rate", "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE", "BriefDescription": "L1I cache read miss rate", "MetricGroup": "Cache", - "MetricName": "L1I cache read miss" + "ScaleUnit": "1per cache access" }, { + "MetricName": "l2d_cache_read_miss_rate", "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD", "BriefDescription": "L2 cache read miss rate", "MetricGroup": "Cache", - "MetricName": "L2 cache read miss" + "ScaleUnit": "1per cache read access" }, { - "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED", + "MetricName": "l1d_cache_miss_mpki", + "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED", "BriefDescription": "Misses per thousand instructions (data)", "MetricGroup": "Cache", - "MetricName": "MPKI data" + "ScaleUnit": "1MPKI" }, { - "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED", + "MetricName": "l1i_cache_miss_mpki", + "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED", "BriefDescription": "Misses per thousand instructions (instruction)", "MetricGroup": "Cache", - "MetricName": "MPKI instruction" + "ScaleUnit": "1MPKI" }, { - "MetricExpr": "ASE_SPEC / OP_SPEC", - "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations", - "MetricGroup": "Instruction", - "MetricName": "ASE mix" + "MetricName": "simd_percentage", + "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "CRYPTO_SPEC / OP_SPEC", - "BriefDescription": "Proportion of crypto data processing operations", - "MetricGroup": "Instruction", - "MetricName": "Crypto mix" + "MetricName": "crypto_percentage", + "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "VFP_SPEC / (duration_time *1000000000)", + "MetricName": "gflops", + "MetricExpr": "VFP_SPEC / (duration_time * 1e9)", "BriefDescription": 
"Giga-floating point operations per second", - "MetricGroup": "Instruction", - "MetricName": "GFLOPS_ISSUED" + "MetricGroup": "InstructionMix" }, { - "MetricExpr": "DP_SPEC / OP_SPEC", - "BriefDescription": "Proportion of integer data processing operations", - "MetricGroup": "Instruction", - "MetricName": "Integer mix" + "MetricName": "integer_dp_percentage", + "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "INST_RETIRED / CPU_CYCLES", - "BriefDescription": "Instructions per cycle", - "MetricGroup": "Instruction", - "MetricName": "IPC" + "MetricName": "ipc", + "MetricExpr": "(INST_RETIRED / CPU_CYCLES)", + "BriefDescription": "This metric measures the number of instructions retired per cycle.", + "MetricGroup": "General", + "ScaleUnit": "1per cycle" }, { - "MetricExpr": "LD_SPEC / OP_SPEC", - "BriefDescription": "Proportion of load operations", - "MetricGroup": "Instruction", - "MetricName": "Load mix" + "MetricName": "load_percentage", + "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "LDST_SPEC/ OP_SPEC", - "BriefDescription": "Proportion of load & store operations", - "MetricGroup": "Instruction", - "MetricName": "Load-store mix" + "MetricName": "load_store_spec_rate", + "MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)", + "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "INST_RETIRED / (duration_time * 1000000)", + "MetricName": "retired_mips", + "MetricExpr": "INST_RETIRED / (duration_time * 1e6)", "BriefDescription": "Millions of instructions per second", - "MetricGroup": "Instruction", - "MetricName": "MIPS_RETIRED" + "MetricGroup": "InstructionMix" }, { - "MetricExpr": "INST_SPEC / (duration_time * 1000000)", + "MetricName": "spec_utilization_mips", + "MetricExpr": "INST_SPEC / (duration_time * 1e6)", "BriefDescription": "Millions of instructions per second", - "MetricGroup": "Instruction", - "MetricName": "MIPS_UTILIZATION" - }, - { - "MetricExpr": "PC_WRITE_SPEC / OP_SPEC", - "BriefDescription": "Proportion of software change of PC operations", - "MetricGroup": "Instruction", - "MetricName": "PC write mix" + "MetricGroup": "PEutilization" }, { - "MetricExpr": "ST_SPEC / OP_SPEC", - "BriefDescription": "Proportion of store operations", - "MetricGroup": "Instruction", - "MetricName": "Store mix" + "MetricName": "pc_write_spec_rate", + "MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)", + "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "VFP_SPEC / OP_SPEC", - "BriefDescription": "Proportion of FP operations", - "MetricGroup": "Instruction", - "MetricName": "VFP mix" + "MetricName": "store_percentage", + "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": 
"1percent of operations" }, { - "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))", - "BriefDescription": "Proportion of slots lost", - "MetricGroup": "Speculation / TDA", - "MetricName": "CPU lost" + "MetricName": "scalar_fp_percentage", + "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)", + "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "1percent of operations" }, { - "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)", - "BriefDescription": "Proportion of slots retiring", - "MetricGroup": "Speculation / TDA", - "MetricName": "CPU utilization" + "MetricName": "retired_rate", + "MetricExpr": "OP_RETIRED / OP_SPEC", + "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)", + "MetricGroup": "General", + "ScaleUnit": "100%" }, { - "MetricExpr": "OP_RETIRED - OP_SPEC", - "BriefDescription": "Operations lost due to misspeculation", - "MetricGroup": "Speculation / TDA", - "MetricName": "Operations lost" + "MetricName": "wasted", + "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))", + "BriefDescription": "Of all the micro-operations issued, what proportion are lost", + "MetricGroup": "General", + "ScaleUnit": "100%" }, { - "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)", - "BriefDescription": "Proportion of operations lost", - "MetricGroup": "Speculation / TDA", - "MetricName": "Operations lost (ratio)" + "MetricName": "wasted_rate", + "MetricExpr": "1 - OP_RETIRED / OP_SPEC", + "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)", + "MetricGroup": "General", + "ScaleUnit": "100%" }, { - "MetricExpr": "OP_RETIRED / OP_SPEC", - "BriefDescription": "Proportion of operations retired", - "MetricGroup": "Speculation / TDA", - "MetricName": "Operations retired" - }, - { - "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES", + "MetricName": "stall_backend_cache_rate", + "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)", "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss", "MetricGroup": "Stall", - "MetricName": "Stall backend cache cycles" + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES", + "MetricName": "stall_backend_resource_rate", + "MetricExpr": "((STALL_BACKEND_RESOURCE / CPU_CYCLES) * 100)", "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full", "MetricGroup": "Stall", - "MetricName": "Stall backend resource cycles" + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES", + "MetricName": "stall_backend_tlb_rate", + "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)", "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss", "MetricGroup": "Stall", - "MetricName": "Stall backend tlb cycles" + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES", + "MetricName": "stall_frontend_cache_rate", + "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)", "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss", "MetricGroup": "Stall", - "MetricName": "Stall frontend cache cycles" + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES", + "MetricName": "stall_frontend_tlb_rate", + "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)", 
"BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss", "MetricGroup": "Stall", - "MetricName": "Stall frontend tlb cycles" + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "DTLB_WALK / L1D_TLB", - "BriefDescription": "D-side walk per d-side translation request", - "MetricGroup": "TLB", - "MetricName": "DTLB walks" + "MetricName": "dtlb_walk_ratio", + "MetricExpr": "(DTLB_WALK / L1D_TLB)", + "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.", + "MetricGroup": "Miss_Ratio;DTLB_Effectiveness", + "ScaleUnit": "1per TLB access" }, { - "MetricExpr": "ITLB_WALK / L1I_TLB", - "BriefDescription": "I-side walk per i-side translation request", - "MetricGroup": "TLB", - "MetricName": "ITLB walks" + "MetricName": "itlb_walk_ratio", + "MetricExpr": "(ITLB_WALK / L1I_TLB)", + "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.", + "MetricGroup": "Miss_Ratio;ITLB_Effectiveness", + "ScaleUnit": "1per TLB access" }, { - "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)", - "BriefDescription": "Fraction of slots backend bound", - "MetricGroup": "TopDownL1", - "MetricName": "backend" + "ArchStdEvent": "backend_bound" }, { - "MetricExpr": "1 - (retiring + lost + backend)", - "BriefDescription": "Fraction of slots frontend bound", - "MetricGroup": "TopDownL1", - "MetricName": "frontend" + "ArchStdEvent": "frontend_bound", + "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)" }, { - "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))", + "MetricName": "slots_lost_misspeculation_fraction", + "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))", "BriefDescription": "Fraction of slots lost due to misspeculation", - "MetricGroup": "TopDownL1", - "MetricName": "lost" + "MetricGroup": "Default;TopdownL1", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))", + "MetricName": "retired_fraction", + "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))", "BriefDescription": "Fraction of slots retiring, useful work", - "MetricGroup": "TopDownL1", - "MetricName": "retiring" + "MetricGroup": "Default;TopdownL1", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "backend - backend_memory", + "MetricName": "backend_core", + "MetricExpr": "(backend_bound / 100) - backend_memory", "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues", - "MetricGroup": "TopDownL2", - "MetricName": "backend_core" + "MetricGroup": "TopdownL2", + "ScaleUnit": "100%" }, { - "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ", + "MetricName": "backend_memory", + "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES", "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)", - "MetricGroup": "TopDownL2", - "MetricName": "backend_memory" + "MetricGroup": "TopdownL2", + "ScaleUnit": "100%" }, { - "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost", + "MetricName": "branch_mispredict", + "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction", "BriefDescription": "Fraction of slots lost due to branch 
misprediciton", - "MetricGroup": "TopDownL2", - "MetricName": "branch_mispredict" + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "frontend - frontend_latency", + "MetricName": "frontend_bandwidth", + "MetricExpr": "frontend_bound - frontend_latency", "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)", - "MetricGroup": "TopDownL2", - "MetricName": "frontend_bandwidth" + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES", + "MetricName": "frontend_latency", + "MetricExpr": "((STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES) * 100", "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch", - "MetricGroup": "TopDownL2", - "MetricName": "frontend_latency" + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "lost - branch_mispredict", + "MetricName": "other_miss_pred", + "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict", "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation", - "MetricGroup": "TopDownL2", - "MetricName": "other_clears" + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)", + "MetricName": "pipe_utilization", + "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))", "BriefDescription": "Fraction of execute slots utilized", - "MetricGroup": "TopDownL2", - "MetricName": "pipe_utilization" + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES", + "MetricName": "d_cache_l2_miss_rate", + "MetricExpr": "((STALL_BACKEND_MEM / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss", - "MetricGroup": "TopDownL3", - "MetricName": "d_cache_l2_miss" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES", + "MetricName": "d_cache_miss_rate", + "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss", - "MetricGroup": "TopDownL3", - "MetricName": "d_cache_miss" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES", + "MetricName": "d_tlb_miss_rate", + "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss", - "MetricGroup": "TopDownL3", - "MetricName": "d_tlb_miss" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)", + "MetricName": "fsu_pipe_utilization", + "MetricExpr": "((FSU_ISSUED / (CPU_CYCLES * 2)) * 100)", "BriefDescription": "Fraction of FSU execute slots utilized", - "MetricGroup": "TopDownL3", - "MetricName": "fsu_pipe_utilization" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES", + "MetricName": "i_cache_miss_rate", + "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to 
instruction cache miss", - "MetricGroup": "TopDownL3", - "MetricName": "i_cache_miss" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ", + "MetricName": "i_tlb_miss_rate", + "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss", - "MetricGroup": "TopDownL3", - "MetricName": "i_tlb_miss" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)", + "MetricName": "ixu_pipe_utilization", + "MetricExpr": "((IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)) * 100)", "BriefDescription": "Fraction of IXU execute slots utilized", - "MetricGroup": "TopDownL3", - "MetricName": "ixu_pipe_utilization" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES", + "MetricName": "stall_recovery_rate", + "MetricExpr": "((IDR_STALL_FLUSH / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery", - "MetricGroup": "TopDownL3", - "MetricName": "recovery" - }, - { - "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES", - "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage", - "MetricGroup": "TopDownL3", - "MetricName": "resource" + "MetricGroup": "TopdownL3", + "ScaleUnit": "1percent of slots" }, { - "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ", + "MetricName": "stall_fsu_sched_rate", + "MetricExpr": "((IDR_STALL_FSU_SCHED / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full", - "MetricGroup": "TopDownL4", - "MetricName": "stall_fsu_sched" + "MetricGroup": "TopdownL4", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ", + "MetricName": "stall_ixu_sched_rate", + "MetricExpr": "((IDR_STALL_IXU_SCHED / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full", - "MetricGroup": "TopDownL4", - "MetricName": "stall_ixu_sched" + "MetricGroup": "TopdownL4", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ", + "MetricName": "stall_lob_id_rate", + "MetricExpr": "((IDR_STALL_LOB_ID / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full", - "MetricGroup": "TopDownL4", - "MetricName": "stall_lob_id" + "MetricGroup": "TopdownL4", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES", + "MetricName": "stall_rob_id_rate", + "MetricExpr": "((IDR_STALL_ROB_ID / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full", - "MetricGroup": "TopDownL4", - "MetricName": "stall_rob_id" + "MetricGroup": "TopdownL4", + "ScaleUnit": "1percent of cycles" }, { - "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ", + "MetricName": "stall_sob_id_rate", + "MetricExpr": "((IDR_STALL_SOB_ID / CPU_CYCLES) * 100)", "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full", - "MetricGroup": "TopDownL4", - "MetricName": "stall_sob_id" + "MetricGroup": "TopdownL4", + "ScaleUnit": "1percent of cycles" } ] diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json index c606ae03cd27d..0e0253d0e7577 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json @@ 
-195,7 +195,7 @@ "BriefDescription": "Threshold counter exceeded a value of 128." }, { - "EventCode": "0x400FA", + "EventCode": "0x500FA", "EventName": "PM_RUN_INST_CMPL", "BriefDescription": "PowerPC instruction completed while the run latch is set." } diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json index 8fc62b8f667d8..e1f55fcfa0d02 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json @@ -48,6 +48,12 @@ "MetricName": "C7_Pkg_Residency", "ScaleUnit": "100%" }, + { + "BriefDescription": "Uncore frequency per die [GHZ]", + "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9", + "MetricGroup": "SoC", + "MetricName": "UNCORE_FREQ" + }, { "BriefDescription": "Percentage of cycles spent in System Management Interrupts.", "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)", @@ -652,7 +658,7 @@ }, { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", - "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3", + "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time", "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW", "MetricName": "tma_info_system_dram_bw_use", "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full" @@ -690,6 +696,12 @@ "MetricGroup": "SMT", "MetricName": "tma_info_system_smt_2t_utilization" }, + { + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricExpr": "cbox_0@event\\=0x0@", + "MetricGroup": "SoC", + "MetricName": "tma_info_system_socket_clks" + }, { "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC", diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c index 01f70b8e705a8..21f4d9ba023d9 100644 --- a/tools/perf/util/bpf_off_cpu.c +++ b/tools/perf/util/bpf_off_cpu.c @@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused) /* v5.18 kernel added prev_state arg, so it needs to check the signature */ static void check_sched_switch_args(void) { - const struct btf *btf = bpf_object__btf(skel->obj); + const struct btf *btf = btf__load_vmlinux_btf(); const struct btf_type *t1, *t2, *t3; u32 type_id; @@ -116,7 +116,8 @@ static void check_sched_switch_args(void) return; t3 = btf__type_by_id(btf, t2->type); - if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) { + /* btf_trace func proto has one more argument for the context */ + if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) { /* new format: pass prev_state as 4th arg */ skel->rodata->has_prev_state = true; } diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c index 939ec769bf4a5..52c270330ae0d 100644 --- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c +++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c @@ -153,7 +153,7 @@ static inline unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len) { unsigned int augmented_len = sizeof(*augmented_arg); - int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg); + int string_len = 
bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg); augmented_arg->size = augmented_arg->err = 0; /* @@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args) _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two"); socklen &= sizeof(augmented_args->saddr) - 1; - bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg); + bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg); return augmented__output(args, augmented_args, len + socklen); } @@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args) socklen &= sizeof(augmented_args->saddr) - 1; - bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg); + bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg); return augmented__output(args, augmented_args, len + socklen); } @@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args) if (augmented_args == NULL) goto failure; - if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0) + if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0) goto failure; attr_read = (const struct perf_event_attr_size *)augmented_args->__data; @@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args) goto failure; // Now that we read attr->size and tested it against the size limits, read it completely - if (bpf_probe_read(&augmented_args->__data, size, attr) < 0) + if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0) goto failure; return augmented__output(args, augmented_args, len + size); @@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args) if (size > sizeof(augmented_args->__data)) goto failure; - bpf_probe_read(&augmented_args->__data, size, rqtp_arg); + bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg); return augmented__output(args, augmented_args, len + size); failure: @@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args) if (augmented_args == NULL) return 1; - bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args); + bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args); /* * Jump to syscall specific augmenter, even if the default one, @@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args) if (pid_filter__has(&pids_filtered, getpid())) return 0; - bpf_probe_read(&exit_args, sizeof(exit_args), args); + bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args); /* * Jump to syscall specific return augmenter, even if the default one, * "!raw_syscalls:unaugmented" that will just return 1 to return the diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore new file mode 100644 index 0000000000000..49502c04183a2 --- /dev/null +++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore @@ -0,0 +1 @@ +!vmlinux.h diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 3dc8a4968beb9..ac8c0ef48a7f3 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -2676,8 +2676,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, /* If we have branch cycles always annotate them. */ if (bs && bs->nr && entries[0].flags.cycles) { - int i; - bi = sample__resolve_bstack(sample, al); if (bi) { struct addr_map_symbol *prev = NULL; @@ -2692,7 +2690,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, * Note that perf stores branches reversed from * program order! 
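On the augmented_raw_syscalls.bpf.c hunks a little above: bpf_probe_read() is the legacy helper that does not state which address space it reads from; the patch switches to bpf_probe_read_user()/bpf_probe_read_user_str() for syscall arguments (user pointers) and bpf_probe_read_kernel() for the tracepoint's own argument block. A minimal BPF-C fragment showing the user-space case (illustrative only, assuming a libbpf-style build; copy_syscall_string() and struct filename_buf are hypothetical):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct filename_buf {
	char value[256];
};

/* user_ptr is a syscall argument, i.e. a user-space address, so the
 * explicit _user variant of the helper is the right one to use.
 */
static long copy_syscall_string(struct filename_buf *dst, const void *user_ptr)
{
	return bpf_probe_read_user_str(dst->value, sizeof(dst->value), user_ptr);
}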
*/ - for (i = bs->nr - 1; i >= 0; i--) { + for (int i = bs->nr - 1; i >= 0; i--) { addr_map_symbol__account_cycles(&bi[i].from, nonany_branch_mode ? NULL : prev, bi[i].flags.cycles); @@ -2701,6 +2699,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, if (total_cycles) *total_cycles += bi[i].flags.cycles; } + for (unsigned int i = 0; i < bs->nr; i++) { + map__put(bi[i].to.ms.map); + maps__put(bi[i].to.ms.maps); + map__put(bi[i].from.ms.map); + maps__put(bi[i].from.ms.maps); + } free(bi); } } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 88f31b3a63acb..e6a8d758f6fe4 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2624,16 +2624,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread, save_lbr_cursor_node(thread, cursor, i); } - /* Add LBR ip from first entries.to */ - ip = entries[0].to; - flags = &entries[0].flags; - *branch_from = entries[0].from; - err = add_callchain_ip(thread, cursor, parent, - root_al, &cpumode, ip, - true, flags, NULL, - *branch_from); - if (err) - return err; + if (lbr_nr > 0) { + /* Add LBR ip from first entries.to */ + ip = entries[0].to; + flags = &entries[0].flags; + *branch_from = entries[0].from; + err = add_callchain_ip(thread, cursor, parent, + root_al, &cpumode, ip, + true, flags, NULL, + *branch_from); + if (err) + return err; + } return 0; } diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 39ffe8ceb3809..954b235e12e51 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -185,7 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, { int i = *argv_nr, k = 0; struct perf_mem_event *e; - struct perf_pmu *pmu; for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { e = perf_mem_events__ptr(j); @@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, rec_argv[i++] = "-e"; rec_argv[i++] = perf_mem_events__name(j, NULL); } else { + struct perf_pmu *pmu = NULL; + if (!e->supported) { perf_mem_events__print_unsupport_hybrid(e, j); return -1; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 21bfe7e0d9444..c3a86ef4b7cf3 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -79,7 +79,7 @@ static void free_list_evsel(struct list_head* list_evsel) %type PE_MODIFIER_BP %type PE_EVENT_NAME %type PE_DRV_CFG_TERM -%type name_or_raw name_or_legacy +%type name_or_raw %destructor { free ($$); } %type event_term %destructor { parse_events_term__delete ($$); } @@ -104,6 +104,7 @@ static void free_list_evsel(struct list_head* list_evsel) %type groups %destructor { free_list_evsel ($$); } %type tracepoint_name +%destructor { free ($$.sys); free ($$.event); } %type PE_TERM_HW %destructor { free ($$.str); } @@ -679,8 +680,6 @@ event_term name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE -name_or_legacy: PE_NAME | PE_LEGACY_CACHE - event_term: PE_RAW { @@ -695,7 +694,7 @@ PE_RAW $$ = term; } | -name_or_raw '=' name_or_legacy +name_or_raw '=' name_or_raw { struct parse_events_term *term; int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3); @@ -775,7 +774,7 @@ PE_TERM_HW $$ = term; } | -PE_TERM '=' name_or_legacy +PE_TERM '=' name_or_raw { struct parse_events_term *term; int err = parse_events_term__str(&term, (enum parse_events__term_type)$1, diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1 index df3087000efb8..145d6f06fa72d 
100644 --- a/tools/power/cpupower/man/cpupower-powercap-info.1 +++ b/tools/power/cpupower/man/cpupower-powercap-info.1 @@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores. .SH "DOCUMENTATION" kernel sources: -Documentation/power/powercap/powercap.txt +Documentation/power/powercap/powercap.rst .SH "SEE ALSO" diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 464fc39ed2776..68118c37f0b56 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -1450,11 +1450,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) mdata->mes.mds = mds; cxl_mock_add_event_logs(&mdata->mes); - cxlmd = devm_cxl_add_memdev(cxlds); + cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); - rc = cxl_memdev_setup_fw_upload(mds); + rc = devm_cxl_setup_fw_upload(&pdev->dev, mds); if (rc) return rc; diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 18cf7b17463d9..98dde091d2825 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -94,14 +94,8 @@ static struct { { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, { "incorrect_head_off1", "bpf_list_head not found at offset=25" }, { "incorrect_head_off2", "bpf_list_head not found at offset=1" }, - { "pop_front_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" - "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, - { "pop_back_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" - "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, + { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" }, + { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" }, }; static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg) diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c index c7636e18b1ebd..aa9f67eb1c95b 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c +++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c @@ -61,6 +61,11 @@ void test_module_fentry_shadow(void) int link_fd[2] = {}; __s32 btf_id[2] = {}; + if (!env.has_testmod) { + test__skip(); + return; + } + LIBBPF_OPTS(bpf_prog_load_opts, load_opts, .expected_attach_type = BPF_TRACE_FENTRY, ); diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 58fe2c586ed76..09c189761926c 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which) data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) - return; + goto out; data_fd = bpf_map__fd(data_map); - if (CHECK_FAIL(map_fd < 0)) - return; + if (CHECK_FAIL(data_fd < 0)) + goto out; i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); @@ -352,11 +352,11 @@ static void test_tailcall_4(void) data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); 
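The tailcalls selftest hunks here and just below all correct the same slip: the fd that gets validated must be the one just obtained (data_fd, not an unrelated map_fd), and failure paths should unwind rather than return early. A small libbpf-based sketch of the corrected pattern (illustrative only; read_bss_word() is a hypothetical helper):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Look up a word in the object's "tailcall.bss" map, checking the fd we
 * actually obtained before using it.
 */
static int read_bss_word(struct bpf_object *obj, int key, int *val)
{
	struct bpf_map *data_map;
	int data_fd;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (!data_map || !bpf_map__is_internal(data_map))
		return -1;

	data_fd = bpf_map__fd(data_map);
	if (data_fd < 0)	/* the bug was testing map_fd here instead */
		return -1;

	return bpf_map_lookup_elem(data_fd, &key, val);
}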
if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) - return; + goto out; data_fd = bpf_map__fd(data_map); - if (CHECK_FAIL(map_fd < 0)) - return; + if (CHECK_FAIL(data_fd < 0)) + goto out; for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); @@ -442,11 +442,11 @@ static void test_tailcall_5(void) data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) - return; + goto out; data_fd = bpf_map__fd(data_map); - if (CHECK_FAIL(map_fd < 0)) - return; + if (CHECK_FAIL(data_fd < 0)) + goto out; for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); @@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void) data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) - return; + goto out; data_fd = bpf_map__fd(data_map); - if (CHECK_FAIL(map_fd < 0)) - return; + if (CHECK_FAIL(data_fd < 0)) + goto out; i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); @@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise) data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) - return; + goto out; data_fd = bpf_map__fd(data_map); - if (CHECK_FAIL(map_fd < 0)) - return; + if (CHECK_FAIL(data_fd < 0)) + goto out; i = 0; val.noise = noise; @@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void) ASSERT_EQ(topts.retval, 0, "tailcall retval"); data_fd = bpf_map__fd(obj->maps.bss); - if (!ASSERT_GE(map_fd, 0, "bss map fd")) + if (!ASSERT_GE(data_fd, 0, "bss map fd")) goto out; i = 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 38a57a2e70dbe..799fff4995d87 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -99,6 +99,9 @@ #elif defined(__TARGET_ARCH_arm64) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__arm64_" +#elif defined(__TARGET_ARCH_riscv) +#define SYSCALL_WRAPPER 1 +#define SYS_PREFIX "__riscv_" #else #define SYSCALL_WRAPPER 0 #define SYS_PREFIX "__se_" diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index f4c63daba2297..6438982b928bd 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head)) n = op(&p->head); bpf_spin_unlock(&p->lock); - bpf_this_cpu_ptr(n); + if (!n) + return 0; + bpf_spin_lock((void *)n); return 0; } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 77bd492c60248..2f9f6f250f171 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -417,6 +417,8 @@ int get_bpf_max_tramp_links(void); #define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep" #elif defined(__aarch64__) #define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep" +#elif defined(__riscv) +#define SYS_NANOSLEEP_KPROBE_NAME "__riscv_sys_nanosleep" #else #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep" #endif diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c index bc91bef5d254e..0c5e469ae38fa 100644 --- a/tools/testing/selftests/mm/mdwe_test.c +++ b/tools/testing/selftests/mm/mdwe_test.c @@ -168,13 +168,10 @@ 
TEST_F(mdwe, mmap_FIXED) self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0); ASSERT_NE(self->p, MAP_FAILED); - p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC, + /* MAP_FIXED unmaps the existing page before mapping which is allowed */ + p = mmap(self->p, self->size, PROT_READ | PROT_EXEC, self->flags | MAP_FIXED, 0, 0); - if (variant->enabled) { - EXPECT_EQ(p, MAP_FAILED); - } else { - EXPECT_EQ(p, self->p); - } + EXPECT_EQ(p, self->p); } TEST_F(mdwe, arm64_BTI) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index dc895b7b94e19..5917a74b749d0 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -3289,6 +3289,7 @@ userspace_pm_rm_sf_addr_ns1() local addr=$1 local id=$2 local tk sp da dp + local cnt_addr cnt_sf tk=$(grep "type:1," "$evts_ns1" | sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q') @@ -3298,11 +3299,13 @@ userspace_pm_rm_sf_addr_ns1() sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q') dp=$(grep "type:10" "$evts_ns1" | sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q') + cnt_addr=$(rm_addr_count ${ns1}) + cnt_sf=$(rm_sf_count ${ns1}) ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \ lport $sp rip $da rport $dp token $tk - wait_rm_addr $ns1 1 - wait_rm_sf $ns1 1 + wait_rm_addr $ns1 "${cnt_addr}" + wait_rm_sf $ns1 "${cnt_sf}" } userspace_pm_add_sf() @@ -3324,17 +3327,20 @@ userspace_pm_rm_sf_addr_ns2() local addr=$1 local id=$2 local tk da dp sp + local cnt_addr cnt_sf tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2") da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2") dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2") sp=$(grep "type:10" "$evts_ns2" | sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') + cnt_addr=$(rm_addr_count ${ns2}) + cnt_sf=$(rm_sf_count ${ns2}) ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \ rip $da rport $dp token $tk - wait_rm_addr $ns2 1 - wait_rm_sf $ns2 1 + wait_rm_addr $ns2 "${cnt_addr}" + wait_rm_sf $ns2 "${cnt_sf}" } userspace_tests() @@ -3417,7 +3423,7 @@ userspace_tests() continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then set_userspace_pm $ns1 pm_nl_set_limits $ns2 1 1 - speed=10 \ + speed=5 \ run_tests $ns1 $ns2 10.0.1.1 & local tests_pid=$! wait_mpj $ns1 @@ -3438,7 +3444,7 @@ userspace_tests() continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then set_userspace_pm $ns2 pm_nl_set_limits $ns1 0 1 - speed=10 \ + speed=5 \ run_tests $ns1 $ns2 10.0.1.1 & local tests_pid=$! 
wait_mpj $ns2 diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index f838dd370f6af..b3b2dc5a630cf 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -2048,7 +2048,7 @@ run_test() { case $ret in 0) all_skipped=false - [ $exitcode=$ksft_skip ] && exitcode=0 + [ $exitcode -eq $ksft_skip ] && exitcode=0 ;; $ksft_skip) [ $all_skipped = true ] && exitcode=$ksft_skip diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index ef90aca4cc96a..bced422b78f72 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -7,7 +7,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \ - conntrack_sctp_collision.sh + conntrack_sctp_collision.sh xt_string.sh HOSTPKG_CONFIG := pkg-config diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh new file mode 100755 index 0000000000000..1802653a47287 --- /dev/null +++ b/tools/testing/selftests/netfilter/xt_string.sh @@ -0,0 +1,128 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# return code to signal skipped test +ksft_skip=4 +rc=0 + +if ! iptables --version >/dev/null 2>&1; then + echo "SKIP: Test needs iptables" + exit $ksft_skip +fi +if ! ip -V >/dev/null 2>&1; then + echo "SKIP: Test needs iproute2" + exit $ksft_skip +fi +if ! nc -h >/dev/null 2>&1; then + echo "SKIP: Test needs netcat" + exit $ksft_skip +fi + +pattern="foo bar baz" +patlen=11 +hdrlen=$((20 + 8)) # IPv4 + UDP +ns="ns-$(mktemp -u XXXXXXXX)" +trap 'ip netns del $ns' EXIT +ip netns add "$ns" +ip -net "$ns" link add d0 type dummy +ip -net "$ns" link set d0 up +ip -net "$ns" addr add 10.1.2.1/24 dev d0 + +#ip netns exec "$ns" tcpdump -npXi d0 & +#tcpdump_pid=$! 
+#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
+
+add_rule() { # (alg, from, to)
+	ip netns exec "$ns" \
+		iptables -A OUTPUT -o d0 -m string \
+		--string "$pattern" --algo $1 --from $2 --to $3
+}
+showrules() { # ()
+	ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
+}
+zerorules() {
+	ip netns exec "$ns" iptables -Z OUTPUT
+}
+countrule() { # (pattern)
+	showrules | grep -c -- "$*"
+}
+send() { # (offset)
+	( for ((i = 0; i < $1 - $hdrlen; i++)); do
+		printf " "
+	  done
+	  printf "$pattern"
+	) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
+}
+
+add_rule bm 1000 1500
+add_rule bm 1400 1600
+add_rule kmp 1000 1500
+add_rule kmp 1400 1600
+
+zerorules
+send 0
+send $((1000 - $patlen))
+if [ $(countrule -c 0 0) -ne 4 ]; then
+	echo "FAIL: rules match data before --from"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send 1000
+send $((1400 - $patlen))
+if [ $(countrule -c 2) -ne 2 ]; then
+	echo "FAIL: only two rules should match at low offset"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send $((1500 - $patlen))
+if [ $(countrule -c 1) -ne 4 ]; then
+	echo "FAIL: all rules should match at end of packet"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send 1495
+if [ $(countrule -c 1) -ne 1 ]; then
+	echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send 1500
+if [ $(countrule -c 1) -ne 2 ]; then
+	echo "FAIL: two rules should match pattern at start of second fragment"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send $((1600 - $patlen))
+if [ $(countrule -c 1) -ne 2 ]; then
+	echo "FAIL: two rules should match pattern at end of largest --to"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send $((1600 - $patlen + 1))
+if [ $(countrule -c 1) -ne 0 ]; then
+	echo "FAIL: no rules should match pattern extending largest --to"
+	showrules
+	((rc--))
+fi
+
+zerorules
+send 1600
+if [ $(countrule -c 1) -ne 0 ]; then
+	echo "FAIL: no rule should match pattern past largest --to"
+	showrules
+	((rc--))
+fi
+
+exit $rc
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
index 4e86f927880c3..01cc37bf611c3 100644
--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
 		break;
 
 	case PIDFD_PASS:
-		ksft_test_result_pass("%s test: Passed\n");
+		ksft_test_result_pass("%s test: Passed\n", test_name);
 		break;
 
 	default:
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 00a07e7c571cd..c081ae91313aa 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
 
 static void *test_pidfd_poll_exec_thread(void *priv)
 {
-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
+	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
 			getpid(), syscall(SYS_gettid));
 	ksft_print_msg("Child Thread: doing exec of sleep\n");
 
 	execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
 
-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
+	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
 			getpid(), syscall(SYS_gettid));
 	return NULL;
 }
@@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
 {
 	pthread_t t1;
 
-	ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
+	ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
 			syscall(SYS_gettid));
 	pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
 	/*
@@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
 
 static void *test_pidfd_poll_leader_exit_thread(void *priv)
 {
-	ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
+	ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
 			getpid(), syscall(SYS_gettid));
 	sleep(CHILD_THREAD_MIN_WAIT);
-	ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
+	ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
 	return NULL;
 }
 
@@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
 {
 	pthread_t t1, t2;
 
-	ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
+	ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
 	pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
 	pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
 
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index d511daeb6851e..9e2bc8ba95f13 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -255,9 +255,14 @@ int main(int argc, char **argv)
 		return ksft_exit_skip("Not running as root. Skipping...\n");
 
 	if (has_ben) {
+		if (argc - ben_ind >= BENCHMARK_ARGS)
+			ksft_exit_fail_msg("Too long benchmark command.\n");
+
 		/* Extract benchmark command from command line. */
 		for (i = ben_ind; i < argc; i++) {
 			benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
+			if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
+				ksft_exit_fail_msg("Too long benchmark command argument.\n");
 			sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
 		}
 		benchmark_cmd[ben_count] = NULL;
diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
index eb0e46905bf9d..8f9b06d9ce039 100644
--- a/tools/testing/selftests/x86/lam.c
+++ b/tools/testing/selftests/x86/lam.c
@@ -573,7 +573,7 @@ int do_uring(unsigned long lam)
 	char path[PATH_MAX] = {0};
 
 	/* get current process path */
-	if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
+	if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
 		return 1;
 
 	int file_fd = open(path, O_RDONLY);
@@ -680,14 +680,14 @@ static int handle_execve(struct testcases *test)
 		perror("Fork failed.");
 		ret = 1;
 	} else if (pid == 0) {
-		char path[PATH_MAX];
+		char path[PATH_MAX] = {0};
 
 		/* Set LAM mode in parent process */
 		if (set_lam(lam) != 0)
 			return 1;
 
 		/* Get current binary's path and the binary was run by execve */
-		if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
+		if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
 			exit(-1);
 
 		/* run binary to get LAM mode and return to parent process */
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index 623a38908ed5b..c769d7b3842c0 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -538,7 +538,7 @@ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
 {
 	char mount_point[MAX_PATH];
 	char type[100];
-	int found;
+	int found = 0;
 	FILE *fp;
 
 	fp = fopen("/proc/mounts", "r");