diff --git a/.mailmap b/.mailmap index 0374777cc662..5e829da09e7f 100644 --- a/.mailmap +++ b/.mailmap @@ -73,6 +73,8 @@ Andrey Ryabinin Andrzej Hajda André Almeida Andy Adamson +Andy Chiu +Andy Chiu Andy Shevchenko Andy Shevchenko Anilkumar Kolli @@ -197,18 +199,23 @@ Elliot Berman Enric Balletbo i Serra Enric Balletbo i Serra Erik Kaneda -Eugen Hristev +Eugen Hristev +Eugen Hristev Evgeniy Polyakov Ezequiel Garcia Faith Ekstrand Faith Ekstrand Faith Ekstrand +Fangrui Song Felipe W Damasio Felix Kuhling Felix Moeller Fenglin Wu Filipe Lautert Finn Thain +Fiona Behrens +Fiona Behrens +Fiona Behrens Franck Bui-Huu Frank Rowand Frank Rowand @@ -276,7 +283,7 @@ Jan Glauber Jan Kuliga Jarkko Sakkinen Jarkko Sakkinen -Jarkko Sakkinen +Jarkko Sakkinen Jason Gunthorpe Jason Gunthorpe Jason Gunthorpe @@ -300,6 +307,11 @@ Jens Axboe Jens Axboe Jens Osterkamp Jernej Skrabec +Jesper Dangaard Brouer +Jesper Dangaard Brouer +Jesper Dangaard Brouer +Jesper Dangaard Brouer +Jesper Dangaard Brouer Jessica Zhang Jilai Wang Jiri Kosina @@ -653,6 +665,7 @@ Tomeu Vizoso Thomas Graf Thomas Körper Thomas Pedersen +Thorsten Blum Tiezhu Yang Tingwei Zhang Tirupathi Reddy diff --git a/CREDITS b/CREDITS index d439f5a1bc00..96660c63f5b9 100644 --- a/CREDITS +++ b/CREDITS @@ -1204,6 +1204,10 @@ S: Dreisbachstrasse 24 S: D-57250 Netphen S: Germany +N: Florian Fainelli +E: f.fainelli@gmail.com +D: DSA + N: Rik Faith E: faith@acm.org D: Future Domain TMC-16x0 SCSI driver (author) @@ -1358,10 +1362,6 @@ D: Major kbuild rework during the 2.5 cycle D: ISDN Maintainer S: USA -N: Gerrit Renker -E: gerrit@erg.abdn.ac.uk -D: DCCP protocol support. - N: Philip Gladstone E: philip@gladstonefamily.net D: Kernel / timekeeping stuff @@ -1677,11 +1677,6 @@ W: http://www.carumba.com/ D: bug toaster (A1 sauce makes all the difference) D: Random linux hacker -N: James Hogan -E: jhogan@kernel.org -D: Metag architecture maintainer -D: TZ1090 SoC maintainer - N: Tim Hockin E: thockin@hockin.org W: http://www.hockin.org/~thockin @@ -1697,6 +1692,11 @@ D: hwmon subsystem maintainer D: i2c-sis96x and i2c-stub SMBus drivers S: USA +N: James Hogan +E: jhogan@kernel.org +D: Metag architecture maintainer +D: TZ1090 SoC maintainer + N: Dirk Hohndel E: hohndel@suse.de D: The XFree86[tm] Project @@ -1872,6 +1872,10 @@ S: K osmidomkum 723 S: 160 00 Praha 6 S: Czech Republic +N: Seth Jennings +E: sjenning@redhat.com +D: Creation and maintenance of zswap + N: Jeremy Kerr D: Maintainer of SPU File System @@ -2188,19 +2192,6 @@ N: Mike Kravetz E: mike.kravetz@oracle.com D: Maintenance and development of the hugetlb subsystem -N: Seth Jennings -E: sjenning@redhat.com -D: Creation and maintenance of zswap - -N: Dan Streetman -E: ddstreet@ieee.org -D: Maintenance and development of zswap -D: Creation and maintenance of the zpool API - -N: Vitaly Wool -E: vitaly.wool@konsulko.com -D: Maintenance and development of zswap - N: Andreas S. Krebs E: akrebs@altavista.net D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards @@ -3191,6 +3182,11 @@ N: Ken Pizzini E: ken@halcyon.com D: CDROM driver "sonycd535" (Sony CDU-535/531) +N: Mathieu Poirier +E: mathieu.poirier@linaro.org +D: CoreSight kernel subsystem, Maintainer 2014-2022 +D: Perf tool support for CoreSight + N: Stelian Pop E: stelian@popies.net P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0 D3F7 7185 9E7A EDBB 6147 @@ -3300,6 +3296,10 @@ S: Schlossbergring 9 S: 79098 Freiburg S: Germany +N: Gerrit Renker +E: gerrit@erg.abdn.ac.uk +D: DCCP protocol support. 
+ N: Thomas Renninger E: trenn@suse.de D: cpupowerutils @@ -3576,11 +3576,6 @@ D: several improvements to system programs S: Oldenburg S: Germany -N: Mathieu Poirier -E: mathieu.poirier@linaro.org -D: CoreSight kernel subsystem, Maintainer 2014-2022 -D: Perf tool support for CoreSight - N: Robert Schwebel E: robert@schwebel.de W: https://www.schwebel.de @@ -3771,6 +3766,11 @@ S: Chr. Winthersvej 1 B, st.th. S: DK-1860 Frederiksberg C S: Denmark +N: Dan Streetman +E: ddstreet@ieee.org +D: Maintenance and development of zswap +D: Creation and maintenance of the zpool API + N: Drew Sullivan E: drew@ss.org W: http://www.ss.org/ @@ -4286,6 +4286,10 @@ S: Pipers Way S: Swindon. SN3 1RJ S: England +N: Vitaly Wool +E: vitaly.wool@konsulko.com +D: Maintenance and development of zswap + N: Chris Wright E: chrisw@sous-sol.org D: hacking on LSM framework and security modules. diff --git a/Documentation/admin-guide/LSM/ipe.rst b/Documentation/admin-guide/LSM/ipe.rst index f38e641df0e9..f93a467db628 100644 --- a/Documentation/admin-guide/LSM/ipe.rst +++ b/Documentation/admin-guide/LSM/ipe.rst @@ -223,7 +223,10 @@ are signed through the PKCS#7 message format to enforce some level of authorization of the policies (prohibiting an attacker from gaining unconstrained root, and deploying an "allow all" policy). These policies must be signed by a certificate that chains to the -``SYSTEM_TRUSTED_KEYRING``. With openssl, the policy can be signed by:: +``SYSTEM_TRUSTED_KEYRING``, or to the secondary and/or platform keyrings if +``CONFIG_IPE_POLICY_SIG_SECONDARY_KEYRING`` and/or +``CONFIG_IPE_POLICY_SIG_PLATFORM_KEYRING`` are enabled, respectively. +With openssl, the policy can be signed by:: openssl smime -sign \ -in "$MY_POLICY" \ @@ -266,7 +269,7 @@ in the kernel. This file is write-only and accepts a PKCS#7 signed policy. Two checks will always be performed on this policy: First, the ``policy_names`` must match with the updated version and the existing version. Second the updated policy must have a policy version greater than -or equal to the currently-running version. This is to prevent rollback attacks. +the currently-running version. This is to prevent rollback attacks. The ``delete`` file is used to remove a policy that is no longer needed. This file is write-only and accepts a value of ``1`` to delete the policy. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1518343bbe22..1666576acc0e 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6688,7 +6688,7 @@ 0: no polling (default) thp_anon= [KNL] - Format: ,[KMG]:;-[KMG]: + Format: [KMG],[KMG]:;[KMG]-[KMG]: state is one of "always", "madvise", "never" or "inherit". Control the default behavior of the system with respect to anonymous transparent hugepages. diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index cfdd16a52e39..a1bb495eab59 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -303,7 +303,7 @@ control by passing the parameter ``transparent_hugepage=always`` or kernel command line. Alternatively, each supported anonymous THP size can be controlled by -passing ``thp_anon=,[KMG]:;-[KMG]:``, +passing ``thp_anon=[KMG],[KMG]:;[KMG]-[KMG]:``, where ```` is the THP size (must be a power of 2 of PAGE_SIZE and supported anonymous THP) and ```` is one of ``always``, ``madvise``, ``never`` or ``inherit``. 
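For example, assuming a kernel built with 4K base pages on which these anonymous THP sizes are available, a boot command line entry following the format above might look like::

    thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never

This sets the 16K through 64K sizes to ``always``, lets 128K and 512K inherit the global setting, sets 256K to ``madvise`` and disables the 1M through 2M sizes.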
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst index fe1be4ad88cb..a21369eba034 100644 --- a/Documentation/admin-guide/pm/cpufreq.rst +++ b/Documentation/admin-guide/pm/cpufreq.rst @@ -425,8 +425,8 @@ This governor exposes only one tunable: ``rate_limit_us`` Minimum time (in microseconds) that has to pass between two consecutive - runs of governor computations (default: 1000 times the scaling driver's - transition latency). + runs of governor computations (default: 1.5 times the scaling driver's + transition latency or the maximum 2ms). The purpose of this tunable is to reduce the scheduler context overhead of the governor which might be excessive without it. @@ -474,17 +474,17 @@ This governor exposes the following tunables: This is how often the governor's worker routine should run, in microseconds. - Typically, it is set to values of the order of 10000 (10 ms). Its - default value is equal to the value of ``cpuinfo_transition_latency`` - for each policy this governor is attached to (but since the unit here - is greater by 1000, this means that the time represented by - ``sampling_rate`` is 1000 times greater than the transition latency by - default). + Typically, it is set to values of the order of 2000 (2 ms). Its + default value is to add a 50% breathing room + to ``cpuinfo_transition_latency`` on each policy this governor is + attached to. The minimum is typically the length of two scheduler + ticks. If this tunable is per-policy, the following shell command sets the time - represented by it to be 750 times as high as the transition latency:: + represented by it to be 1.5 times as high as the transition latency + (the default):: - # echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate + # echo `$(($(cat cpuinfo_transition_latency) * 3 / 2)) > ondemand/sampling_rate ``up_threshold`` If the estimated CPU load is above this value (in percent), the governor diff --git a/Documentation/core-api/protection-keys.rst b/Documentation/core-api/protection-keys.rst index bf28ac0401f3..7eb7c6023e09 100644 --- a/Documentation/core-api/protection-keys.rst +++ b/Documentation/core-api/protection-keys.rst @@ -12,7 +12,10 @@ Pkeys Userspace (PKU) is a feature which can be found on: * Intel server CPUs, Skylake and later * Intel client CPUs, Tiger Lake (11th Gen Core) and later * Future AMD CPUs + * arm64 CPUs implementing the Permission Overlay Extension (FEAT_S1POE) +x86_64 +====== Pkeys work by dedicating 4 previously Reserved bits in each page table entry to a "protection key", giving 16 possible keys. @@ -28,6 +31,22 @@ register. The feature is only available in 64-bit mode, even though there is theoretically space in the PAE PTEs. These permissions are enforced on data access only and have no effect on instruction fetches. +arm64 +===== + +Pkeys use 3 bits in each page table entry, to encode a "protection key index", +giving 8 possible keys. + +Protections for each key are defined with a per-CPU user-writable system +register (POR_EL0). This is a 64-bit register encoding read, write and execute +overlay permissions for each protection key index. + +Being a CPU register, POR_EL0 is inherently thread-local, potentially giving +each thread a different set of protections from every other thread. + +Unlike x86_64, the protection key permissions also apply to instruction +fetches. 
+ Syscalls ======== @@ -38,11 +57,10 @@ There are 3 system calls which directly interact with pkeys:: int pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); -Before a pkey can be used, it must first be allocated with -pkey_alloc(). An application calls the WRPKRU instruction -directly in order to change access permissions to memory covered -with a key. In this example WRPKRU is wrapped by a C function -called pkey_set(). +Before a pkey can be used, it must first be allocated with pkey_alloc(). An +application writes to the architecture specific CPU register directly in order +to change access permissions to memory covered with a key. In this example +this is wrapped by a C function called pkey_set(). :: int real_prot = PROT_READ|PROT_WRITE; @@ -64,9 +82,9 @@ is no longer in use:: munmap(ptr, PAGE_SIZE); pkey_free(pkey); -.. note:: pkey_set() is a wrapper for the RDPKRU and WRPKRU instructions. - An example implementation can be found in - tools/testing/selftests/x86/protection_keys.c. +.. note:: pkey_set() is a wrapper around writing to the CPU register. + Example implementations can be found in + tools/testing/selftests/mm/pkey-{arm64,powerpc,x86}.h Behavior ======== @@ -96,3 +114,7 @@ with a read():: The kernel will send a SIGSEGV in both cases, but si_code will be set to SEGV_PKERR when violating protection keys versus SEGV_ACCERR when the plain mprotect() permissions are violated. + +Note that kernel accesses from a kthread (such as io_uring) will use a default +value for the protection key register and so will not be consistent with +userspace's value of the register or mprotect(). diff --git a/Documentation/devicetree/bindings/display/elgin,jg10309-01.yaml b/Documentation/devicetree/bindings/display/elgin,jg10309-01.yaml new file mode 100644 index 000000000000..faca0cb3f154 --- /dev/null +++ b/Documentation/devicetree/bindings/display/elgin,jg10309-01.yaml @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/elgin,jg10309-01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Elgin JG10309-01 SPI-controlled display + +maintainers: + - Fabio Estevam + +description: | + The Elgin JG10309-01 SPI-controlled display is used on the RV1108-Elgin-r1 + board and is a custom display. + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: elgin,jg10309-01 + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 24000000 + + spi-cpha: true + + spi-cpol: true + +required: + - compatible + - reg + - spi-cpha + - spi-cpol + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + display@0 { + compatible = "elgin,jg10309-01"; + reg = <0>; + spi-max-frequency = <24000000>; + spi-cpha; + spi-cpol; + }; + }; diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml index 3a82aec9021c..497c0eb4ed0b 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml @@ -63,6 +63,16 @@ properties: - const: sleep power-domains: + description: | + The MediaTek DPI module is typically associated with one of the + following multimedia power domains: + POWER_DOMAIN_DISPLAY + POWER_DOMAIN_VDOSYS + POWER_DOMAIN_MM + The specific power domain used varies depending on the SoC design. 
+ + It is recommended to explicitly add the appropriate power domain + property to the DPI node in the device tree. maxItems: 1 port: @@ -79,20 +89,6 @@ required: - clock-names - port -allOf: - - if: - not: - properties: - compatible: - contains: - enum: - - mediatek,mt6795-dpi - - mediatek,mt8173-dpi - - mediatek,mt8186-dpi - then: - properties: - power-domains: false - additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml index e4affc854f3d..4b6ff546757e 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml @@ -38,6 +38,7 @@ properties: description: A phandle and PM domain specifier as defined by bindings of the power controller specified by phandle. See Documentation/devicetree/bindings/power/power-domain.yaml for details. + maxItems: 1 mediatek,gce-client-reg: description: @@ -57,6 +58,9 @@ properties: clocks: items: - description: SPLIT Clock + - description: Used for interfacing with the HDMI RX signal source. + - description: Paired with receiving HDMI RX metadata. + minItems: 1 required: - compatible @@ -72,9 +76,24 @@ allOf: const: mediatek,mt8195-mdp3-split then: + properties: + clocks: + minItems: 3 + required: - mediatek,gce-client-reg + - if: + properties: + compatible: + contains: + const: mediatek,mt8173-disp-split + + then: + properties: + clocks: + maxItems: 1 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml index 54d7d11bfed4..ff7a6f12cd00 100644 --- a/Documentation/devicetree/bindings/firmware/arm,scmi.yaml +++ b/Documentation/devicetree/bindings/firmware/arm,scmi.yaml @@ -124,7 +124,7 @@ properties: atomic mode of operation, even if requested. default: 0 - max-rx-timeout-ms: + arm,max-rx-timeout-ms: description: An optional time value, expressed in milliseconds, representing the transport maximum timeout value for the receive channel. 
The value should diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx.yaml index 85ee1282d6d2..0682a5a10d41 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-imx.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-imx.yaml @@ -18,6 +18,7 @@ properties: - const: fsl,imx1-i2c - const: fsl,imx21-i2c - const: fsl,vf610-i2c + - const: nxp,s32g2-i2c - items: - enum: - fsl,ls1012a-i2c @@ -54,6 +55,9 @@ properties: - fsl,imx8mn-i2c - fsl,imx8mp-i2c - const: fsl,imx21-i2c + - items: + - const: nxp,s32g3-i2c + - const: nxp,s32g2-i2c reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml b/Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml index afa3db726229..6ff58b64d496 100644 --- a/Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml +++ b/Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml @@ -16,7 +16,9 @@ properties: compatible: oneOf: - items: - - const: microchip,mpfs-i2c # Microchip PolarFire SoC compatible SoCs + - enum: + - microchip,pic64gx-i2c + - microchip,mpfs-i2c # Microchip PolarFire SoC compatible SoCs - const: microchip,corei2c-rtl-v7 # Microchip Fabric based i2c IP core - const: microchip,corei2c-rtl-v7 # Microchip Fabric based i2c IP core diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml index 7dab3852c7f8..ef26ba6eda28 100644 --- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml +++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml @@ -27,6 +27,7 @@ properties: - enum: - qcom,sc7280-cci - qcom,sc8280xp-cci + - qcom,sdm670-cci - qcom,sdm845-cci - qcom,sm6350-cci - qcom,sm8250-cci @@ -139,6 +140,24 @@ allOf: - const: cci - const: camss_ahb + - if: + properties: + compatible: + contains: + enum: + - qcom,sdm670-cci + then: + properties: + clocks: + minItems: 4 + maxItems: 4 + clock-names: + items: + - const: camnoc_axi + - const: soc_ahb + - const: cpas_ahb + - const: cci + - if: properties: compatible: diff --git a/Documentation/devicetree/bindings/i2c/realtek,rtl9301-i2c.yaml b/Documentation/devicetree/bindings/i2c/realtek,rtl9301-i2c.yaml new file mode 100644 index 000000000000..eddfd329c67b --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/realtek,rtl9301-i2c.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/realtek,rtl9301-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek RTL I2C Controller + +maintainers: + - Chris Packham + +description: + The RTL9300 SoC has two I2C controllers. Each of these has an SCL line (which + if not-used for SCL can be a GPIO). There are 8 common SDA lines that can be + assigned to either I2C controller. + +properties: + compatible: + oneOf: + - items: + - enum: + - realtek,rtl9302b-i2c + - realtek,rtl9302c-i2c + - realtek,rtl9303-i2c + - const: realtek,rtl9301-i2c + - const: realtek,rtl9301-i2c + + reg: + description: Register offset and size this I2C controller. + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +patternProperties: + '^i2c@[0-7]$': + $ref: /schemas/i2c/i2c-controller.yaml + unevaluatedProperties: false + + properties: + reg: + description: The SDA pin associated with the I2C bus. 
+ maxItems: 1 + + required: + - reg + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c@36c { + compatible = "realtek,rtl9301-i2c"; + reg = <0x36c 0x14>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@2 { + reg = <2>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml index bd19abb867d9..0065d6508824 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml @@ -67,6 +67,10 @@ properties: A 2.5V to 3.3V supply for the external reference voltage. When omitted, the internal 2.5V reference is used. + refin-supply: + description: + A 2.5V to 3.3V supply for external reference voltage, for ad7380-4 only. + aina-supply: description: The common mode voltage supply for the AINA- pin on pseudo-differential @@ -135,6 +139,23 @@ allOf: ainc-supply: false aind-supply: false + # ad7380-4 uses refin-supply as external reference. + # All other chips from ad738x family use refio as optional external reference. + # When refio-supply is omitted, internal reference is used. + - if: + properties: + compatible: + enum: + - adi,ad7380-4 + then: + properties: + refio-supply: false + required: + - refin-supply + else: + properties: + refin-supply: false + examples: - | #include diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml index b4400c52bec3..713f535bb33a 100644 --- a/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml +++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5686.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/dac/adi,ad5686.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices AD5360 and similar DACs +title: Analog Devices AD5360 and similar SPI DACs maintainers: - Michael Hennerich @@ -12,41 +12,22 @@ maintainers: properties: compatible: - oneOf: - - description: SPI devices - enum: - - adi,ad5310r - - adi,ad5672r - - adi,ad5674r - - adi,ad5676 - - adi,ad5676r - - adi,ad5679r - - adi,ad5681r - - adi,ad5682r - - adi,ad5683 - - adi,ad5683r - - adi,ad5684 - - adi,ad5684r - - adi,ad5685r - - adi,ad5686 - - adi,ad5686r - - description: I2C devices - enum: - - adi,ad5311r - - adi,ad5337r - - adi,ad5338r - - adi,ad5671r - - adi,ad5675r - - adi,ad5691r - - adi,ad5692r - - adi,ad5693 - - adi,ad5693r - - adi,ad5694 - - adi,ad5694r - - adi,ad5695r - - adi,ad5696 - - adi,ad5696r - + enum: + - adi,ad5310r + - adi,ad5672r + - adi,ad5674r + - adi,ad5676 + - adi,ad5676r + - adi,ad5679r + - adi,ad5681r + - adi,ad5682r + - adi,ad5683 + - adi,ad5683r + - adi,ad5684 + - adi,ad5684r + - adi,ad5685r + - adi,ad5686 + - adi,ad5686r reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml index 56b0cda0f30a..b5a88b03dc2f 100644 --- a/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml +++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5696.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/dac/adi,ad5696.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices AD5696 and similar multi-channel DACs +title: Analog Devices AD5696 and similar I2C multi-channel DACs maintainers: - Michael Auchter @@ -16,6 +16,7 @@ properties: compatible: enum: - adi,ad5311r + - adi,ad5337r - adi,ad5338r - adi,ad5671r - 
adi,ad5675r diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml index 199b34fdbefc..7ff4efc4758a 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.yaml @@ -82,9 +82,6 @@ allOf: enum: - fsl,ls1043a-extirq - fsl,ls1046a-extirq - - fsl,ls1088a-extirq - - fsl,ls2080a-extirq - - fsl,lx2160a-extirq then: properties: interrupt-map: @@ -95,6 +92,29 @@ allOf: - const: 0xf - const: 0 + - if: + properties: + compatible: + contains: + enum: + - fsl,ls1088a-extirq + - fsl,ls2080a-extirq + - fsl,lx2160a-extirq +# The driver(drivers/irqchip/irq-ls-extirq.c) have not use standard DT +# function to parser interrupt-map. So it doesn't consider '#address-size' +# in parent interrupt controller, such as GIC. +# +# When dt-binding verify interrupt-map, item data matrix is spitted at +# incorrect position. Remove interrupt-map restriction because it always +# wrong. + + then: + properties: + interrupt-map-mask: + items: + - const: 0xf + - const: 0 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.yaml b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.yaml index 01b00d89a992..df45ff56d444 100644 --- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.yaml +++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.yaml @@ -113,7 +113,7 @@ properties: msi-parent: deprecated: true - $ref: /schemas/types.yaml#/definitions/phandle + maxItems: 1 description: Describes the MSI controller node handling message interrupts for the MC. When there is no translation diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml index 23dfe0838dca..63bee5b542f5 100644 --- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml +++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml @@ -26,6 +26,7 @@ properties: - brcm,asp-v2.1-mdio - brcm,asp-v2.2-mdio - brcm,unimac-mdio + - brcm,bcm6846-mdio reg: minItems: 1 diff --git a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml index e95c21628281..fb02e579463c 100644 --- a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml +++ b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml @@ -61,7 +61,7 @@ properties: - gmii - rgmii - sgmii - - 1000BaseX + - 1000base-x xlnx,phy-type: description: diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml index dcf4fa55fbba..380a9222a51d 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml @@ -154,8 +154,6 @@ allOf: - qcom,sm8550-qmp-gen4x2-pcie-phy - qcom,sm8650-qmp-gen3x2-pcie-phy - qcom,sm8650-qmp-gen4x2-pcie-phy - - qcom,x1e80100-qmp-gen3x2-pcie-phy - - qcom,x1e80100-qmp-gen4x2-pcie-phy then: properties: clocks: @@ -171,6 +169,8 @@ allOf: - qcom,sc8280xp-qmp-gen3x1-pcie-phy - qcom,sc8280xp-qmp-gen3x2-pcie-phy - qcom,sc8280xp-qmp-gen3x4-pcie-phy + - qcom,x1e80100-qmp-gen3x2-pcie-phy + - qcom,x1e80100-qmp-gen4x2-pcie-phy - qcom,x1e80100-qmp-gen4x4-pcie-phy then: properties: @@ -201,6 +201,7 @@ allOf: - qcom,sm8550-qmp-gen4x2-pcie-phy - qcom,sm8650-qmp-gen4x2-pcie-phy - 
qcom,x1e80100-qmp-gen4x2-pcie-phy + - qcom,x1e80100-qmp-gen4x4-pcie-phy then: properties: resets: diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml index ab3206ffa4af..beef193aaaeb 100644 --- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml +++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml @@ -102,21 +102,21 @@ properties: default: 2 interrupts: - oneOf: - - minItems: 1 - items: - - description: TX interrupt - - description: RX interrupt - - items: - - description: common/combined interrupt + minItems: 1 + maxItems: 2 interrupt-names: oneOf: - - minItems: 1 + - description: TX interrupt + const: tx + - description: RX interrupt + const: rx + - description: TX and RX interrupts items: - const: tx - const: rx - - const: common + - description: Common/combined interrupt + const: common fck_parent: $ref: /schemas/types.yaml#/definitions/string diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml b/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml index ecf3d7d968c8..2cf229a076f0 100644 --- a/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml +++ b/Documentation/devicetree/bindings/sound/rockchip,rk3308-codec.yaml @@ -48,6 +48,10 @@ properties: - const: mclk_rx - const: hclk + port: + $ref: audio-graph-port.yaml# + unevaluatedProperties: false + resets: maxItems: 1 diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml index 0108d7507215..9bf0fb17a05e 100644 --- a/Documentation/devicetree/bindings/trivial-devices.yaml +++ b/Documentation/devicetree/bindings/trivial-devices.yaml @@ -101,8 +101,6 @@ properties: - domintech,dmard09 # DMARD10: 3-axis Accelerometer - domintech,dmard10 - # Elgin SPI-controlled LCD - - elgin,jg10309-01 # MMA7660FC: 3-Axis Orientation/Motion Detection Sensor - fsl,mma7660 # MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer diff --git a/Documentation/filesystems/caching/cachefiles.rst b/Documentation/filesystems/caching/cachefiles.rst index e04a27bdbe19..b3ccc782cb3b 100644 --- a/Documentation/filesystems/caching/cachefiles.rst +++ b/Documentation/filesystems/caching/cachefiles.rst @@ -115,7 +115,7 @@ set up cache ready for use. The following script commands are available: This mask can also be set through sysfs, eg:: - echo 5 >/sys/modules/cachefiles/parameters/debug + echo 5 > /sys/module/cachefiles/parameters/debug Starting the Cache diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst index 8e6c721d2330..b93115ab8748 100644 --- a/Documentation/filesystems/iomap/operations.rst +++ b/Documentation/filesystems/iomap/operations.rst @@ -208,7 +208,7 @@ The filesystem must arrange to `cancel such `reservations `_ because writeback will not consume the reservation. -The ``iomap_file_buffered_write_punch_delalloc`` can be called from a +The ``iomap_write_delalloc_release`` can be called from a ``->iomap_end`` function to find all the clean areas of the folios caching a fresh (``IOMAP_F_NEW``) delalloc mapping. It takes the ``invalidate_lock``. diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index f0d2cb257bb8..73f0bfd7e903 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -592,4 +592,3 @@ API Function Reference .. 
kernel-doc:: include/linux/netfs.h .. kernel-doc:: fs/netfs/buffered_read.c -.. kernel-doc:: fs/netfs/io.c diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst index c840b597912c..47e8ac5b7099 100644 --- a/Documentation/i2c/busses/i2c-i801.rst +++ b/Documentation/i2c/busses/i2c-i801.rst @@ -49,6 +49,7 @@ Supported adapters: * Intel Meteor Lake (SOC and PCH) * Intel Birch Stream (SOC) * Intel Arrow Lake (SOC) + * Intel Panther Lake (SOC) Datasheets: Publicly available at the Intel website diff --git a/Documentation/i2c/busses/i2c-piix4.rst b/Documentation/i2c/busses/i2c-piix4.rst index 07fe6f6f4b18..94e20b18c59a 100644 --- a/Documentation/i2c/busses/i2c-piix4.rst +++ b/Documentation/i2c/busses/i2c-piix4.rst @@ -109,3 +109,66 @@ which can easily get corrupted due to a state machine bug. These are mostly Thinkpad laptops, but desktop systems may also be affected. We have no list of all affected systems, so the only safe solution was to prevent access to the SMBus on all IBM systems (detected using DMI data.) + + +Description in the ACPI code +---------------------------- + +Device driver for the PIIX4 chip creates a separate I2C bus for each of its +ports:: + + $ i2cdetect -l + ... + i2c-7 unknown SMBus PIIX4 adapter port 0 at 0b00 N/A + i2c-8 unknown SMBus PIIX4 adapter port 2 at 0b00 N/A + i2c-9 unknown SMBus PIIX4 adapter port 1 at 0b20 N/A + ... + +Therefore if you want to access one of these busses in the ACPI code, port +subdevices are needed to be declared inside the PIIX device:: + + Scope (\_SB_.PCI0.SMBS) + { + Name (_ADR, 0x00140000) + + Device (SMB0) { + Name (_ADR, 0) + } + Device (SMB1) { + Name (_ADR, 1) + } + Device (SMB2) { + Name (_ADR, 2) + } + } + +If this is not the case for your UEFI firmware and you don't have access to the +source code, you can use ACPI SSDT Overlays to provide the missing parts. Just +keep in mind that in this case you would need to load your extra SSDT table +before the piix4 driver starts, i.e. you should provide SSDT via initrd or EFI +variable methods and not via configfs. + +As an example of usage here is the ACPI snippet code that would assign jc42 +driver to the 0x1C device on the I2C bus created by the PIIX port 0:: + + Device (JC42) { + Name (_HID, "PRP0001") + Name (_DDN, "JC42 Temperature sensor") + Name (_CRS, ResourceTemplate () { + I2cSerialBusV2 ( + 0x001c, + ControllerInitiated, + 100000, + AddressingMode7Bit, + "\\_SB.PCI0.SMBS.SMB0", + 0 + ) + }) + + Name (_DSD, Package () { + ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + Package () { + Package () { "compatible", Package() { "jedec,jc-42.4-temp" } }, + } + }) + } diff --git a/Documentation/iio/ad7380.rst b/Documentation/iio/ad7380.rst index 9c784c1e652e..6f70b49b9ef2 100644 --- a/Documentation/iio/ad7380.rst +++ b/Documentation/iio/ad7380.rst @@ -41,13 +41,22 @@ supports only 1 SDO line. Reference voltage ----------------- -2 possible reference voltage sources are supported: +ad7380-4 +~~~~~~~~ + +ad7380-4 supports only an external reference voltage (2.5V to 3.3V). It must be +declared in the device tree as ``refin-supply``. + +All other devices from ad738x family +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All other devices from ad738x support 2 possible reference voltage sources: - Internal reference (2.5V) - External reference (2.5V to 3.3V) The source is determined by the device tree. If ``refio-supply`` is present, -then the external reference is used, else the internal reference is used. 
+then it is used as external reference, else the internal reference is used. Oversampling and resolution boost --------------------------------- diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst index 2365c9a3c1f0..ce3e98458339 100644 --- a/Documentation/mm/damon/maintainer-profile.rst +++ b/Documentation/mm/damon/maintainer-profile.rst @@ -7,26 +7,26 @@ The DAMON subsystem covers the files that are listed in 'DATA ACCESS MONITOR' section of 'MAINTAINERS' file. The mailing lists for the subsystem are damon@lists.linux.dev and -linux-mm@kvack.org. Patches should be made against the mm-unstable `tree -` whenever possible and posted to -the mailing lists. +linux-mm@kvack.org. Patches should be made against the `mm-unstable tree +`_ whenever possible and posted +to the mailing lists. SCM Trees --------- There are multiple Linux trees for DAMON development. Patches under development or testing are queued in `damon/next -` by the DAMON maintainer. +`_ by the DAMON maintainer. Sufficiently reviewed patches will be queued in `mm-unstable -` by the memory management +`_ by the memory management subsystem maintainer. After more sufficient tests, the patches will be queued -in `mm-stable ` , and finally +in `mm-stable `_, and finally pull-requested to the mainline by the memory management subsystem maintainer. -Note again the patches for mm-unstable `tree -` are queued by the memory +Note again the patches for `mm-unstable tree +`_ are queued by the memory management subsystem maintainer. If the patches requires some patches in -damon/next `tree ` which not yet merged +`damon/next tree `_ which not yet merged in mm-unstable, please make sure the requirement is clearly specified. Submit checklist addendum @@ -37,25 +37,25 @@ When making DAMON changes, you should do below. - Build changes related outputs including kernel and documents. - Ensure the builds introduce no new errors or warnings. - Run and ensure no new failures for DAMON `selftests - ` and + `_ and `kunittests - `. + `_. Further doing below and putting the results will be helpful. - Run `damon-tests/corr - ` for normal + `_ for normal changes. - Run `damon-tests/perf - ` for performance + `_ for performance changes. Key cycle dates --------------- Patches can be sent anytime. Key cycle dates of the `mm-unstable -` and `mm-stable -` trees depend on the memory +`_ and `mm-stable +`_ trees depend on the memory management subsystem maintainer. Review cadence @@ -72,13 +72,13 @@ Mailing tool Like many other Linux kernel subsystems, DAMON uses the mailing lists (damon@lists.linux.dev and linux-mm@kvack.org) as the major communication channel. There is a simple tool called `HacKerMaiL -` (``hkml``), which is for people who +`_ (``hkml``), which is for people who are not very familiar with the mailing lists based communication. The tool could be particularly helpful for DAMON community members since it is developed and maintained by DAMON maintainer. The tool is also officially announced to support DAMON and general Linux kernel development workflow. -In other words, `hkml ` is a mailing +In other words, `hkml `_ is a mailing tool for DAMON community, which DAMON maintainer is committed to support. Please feel free to try and report issues or feature requests for the tool to the maintainer. @@ -98,8 +98,8 @@ slots, and attendees should reserve one of those at least 24 hours before the time slot, by reaching out to the maintainer. 
Schedules and available reservation time slots are available at the Google `doc -`. +`_. There is also a public Google `calendar -` +`_ that has the events. Anyone can subscribe it. DAMON maintainer will also provide periodic reminder to the mailing list (damon@lists.linux.dev). diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml index 30d8342cacc8..dc190bf838fe 100644 --- a/Documentation/netlink/specs/mptcp_pm.yaml +++ b/Documentation/netlink/specs/mptcp_pm.yaml @@ -293,7 +293,6 @@ operations: doc: Get endpoint information attribute-set: attr dont-validate: [ strict ] - flags: [ uns-admin-perm ] do: &get-addr-attrs request: attributes: diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index e4bd7aa1f5aa..544bad175aae 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -121,7 +121,7 @@ format, the Group Extension is set in the PS-field. On the other hand, when using PDU1 format, the PS-field contains a so-called Destination Address, which is _not_ part of the PGN. When communicating a PGN -from user space to kernel (or vice versa) and PDU2 format is used, the PS-field +from user space to kernel (or vice versa) and PDU1 format is used, the PS-field of the PGN shall be set to zero. The Destination Address shall be set elsewhere. diff --git a/Documentation/networking/packet_mmap.rst b/Documentation/networking/packet_mmap.rst index dca15d15feaf..02370786e77b 100644 --- a/Documentation/networking/packet_mmap.rst +++ b/Documentation/networking/packet_mmap.rst @@ -16,7 +16,7 @@ ii) transmit network traffic, or any other that needs raw Howto can be found at: - https://sites.google.com/site/packetmmap/ + https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/ Please send your comments to - Ulisses Alonso Camaró @@ -166,7 +166,8 @@ As capture, each frame contains two parts:: /* bind socket to eth0 */ bind(this->socket, (struct sockaddr *)&my_addr, sizeof(struct sockaddr_ll)); - A complete tutorial is available at: https://sites.google.com/site/packetmmap/ + A complete tutorial is available at: + https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/ By default, the user should put data at:: diff --git a/Documentation/networking/tcp_ao.rst b/Documentation/networking/tcp_ao.rst index e96e62d1dab3..d5b6d0df63c3 100644 --- a/Documentation/networking/tcp_ao.rst +++ b/Documentation/networking/tcp_ao.rst @@ -9,7 +9,7 @@ segments between trusted peers. It adds a new TCP header option with a Message Authentication Code (MAC). MACs are produced from the content of a TCP segment using a hashing function with a password known to both peers. The intent of TCP-AO is to deprecate TCP-MD5 providing better security, -key rotation and support for variety of hashing algorithms. +key rotation and support for a variety of hashing algorithms. 1. Introduction =============== @@ -164,9 +164,9 @@ A: It should not, no action needs to be performed [7.5.2.e]:: is not available, no action is required (RNextKeyID of a received segment needs to match the MKT’s SendID). -Q: How current_key is set and when does it change? It is a user-triggered -change, or is it by a request from the remote peer? Is it set by the user -explicitly, or by a matching rule? +Q: How is current_key set, and when does it change? Is it a user-triggered +change, or is it triggered by a request from the remote peer? Is it set by the +user explicitly, or by a matching rule? 
A: current_key is set by RNextKeyID [6.1]:: @@ -233,8 +233,8 @@ always have one current_key [3.3]:: Q: Can a non-TCP-AO connection become a TCP-AO-enabled one? -A: No: for already established non-TCP-AO connection it would be impossible -to switch using TCP-AO as the traffic key generation requires the initial +A: No: for an already established non-TCP-AO connection it would be impossible +to switch to using TCP-AO, as the traffic key generation requires the initial sequence numbers. Paraphrasing, starting using TCP-AO would require re-establishing the TCP connection. @@ -292,7 +292,7 @@ no transparency is really needed and modern BGP daemons already have Linux provides a set of ``setsockopt()s`` and ``getsockopt()s`` that let userspace manage TCP-AO on a per-socket basis. In order to add/delete MKTs -``TCP_AO_ADD_KEY`` and ``TCP_AO_DEL_KEY`` TCP socket options must be used +``TCP_AO_ADD_KEY`` and ``TCP_AO_DEL_KEY`` TCP socket options must be used. It is not allowed to add a key on an established non-TCP-AO connection as well as to remove the last key from TCP-AO connection. @@ -361,7 +361,7 @@ not implemented. 4. ``setsockopt()`` vs ``accept()`` race ======================================== -In contrast with TCP-MD5 established connection which has just one key, +In contrast with an established TCP-MD5 connection which has just one key, TCP-AO connections may have many keys, which means that accepted connections on a listen socket may have any amount of keys as well. As copying all those keys on a first properly signed SYN would make the request socket bigger, that @@ -374,7 +374,7 @@ keys from sockets that were already established, but not yet ``accept()``'ed, hanging in the accept queue. The reverse is valid as well: if userspace adds a new key for a peer on -a listener socket, the established sockets in accept queue won't +a listener socket, the established sockets in the accept queue won't have the new keys. At this moment, the resolution for the two races: @@ -382,7 +382,7 @@ At this moment, the resolution for the two races: and ``setsockopt(TCP_AO_DEL_KEY)`` vs ``accept()`` is delegated to userspace. This means that it's expected that userspace would check the MKTs on the socket that was returned by ``accept()`` to verify that any key rotation that -happened on listen socket is reflected on the newly established connection. +happened on the listen socket is reflected on the newly established connection. This is a similar "do-nothing" approach to TCP-MD5 from the kernel side and may be changed later by introducing new flags to ``tcp_ao_add`` diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index c9edf9e7362d..1ae71e31591c 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -355,6 +355,8 @@ just do it. As a result, a sequence of smaller series gets merged quicker and with better review coverage. Re-posting large series also increases the mailing list traffic. +.. _rcs: + Local variable ordering ("reverse xmas tree", "RCS") ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -391,6 +393,21 @@ APIs and helpers, especially scoped iterators. However, direct use of ``__free()`` within networking core and drivers is discouraged. Similar guidance applies to declaring variables mid-function. +Clean-up patches +~~~~~~~~~~~~~~~~ + +Netdev discourages patches which perform simple clean-ups, which are not in +the context of other work. 
For example: + +* Addressing ``checkpatch.pl`` warnings +* Addressing :ref:`Local variable ordering` issues +* Conversions to device-managed APIs (``devm_`` helpers) + +This is because it is felt that the churn that such changes produce comes +at a greater cost than the value of such clean-ups. + +Conversely, spelling and grammar fixes are not discouraged. + Resending after review ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/process/maintainer-soc.rst b/Documentation/process/maintainer-soc.rst index 12637530d68f..fe9d8bcfbd2b 100644 --- a/Documentation/process/maintainer-soc.rst +++ b/Documentation/process/maintainer-soc.rst @@ -30,10 +30,13 @@ tree as a dedicated branch covering multiple subsystems. The main SoC tree is housed on git.kernel.org: https://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git/ +Maintainers +----------- + Clearly this is quite a wide range of topics, which no one person, or even small group of people are capable of maintaining. Instead, the SoC subsystem -is comprised of many submaintainers, each taking care of individual platforms -and driver subdirectories. +is comprised of many submaintainers (platform maintainers), each taking care of +individual platforms and driver subdirectories. In this regard, "platform" usually refers to a series of SoCs from a given vendor, for example, Nvidia's series of Tegra SoCs. Many submaintainers operate on a vendor level, responsible for multiple product lines. For several reasons, @@ -43,14 +46,43 @@ MAINTAINERS file. Most of these submaintainers have their own trees where they stage patches, sending pull requests to the main SoC tree. These trees are usually, but not -always, listed in MAINTAINERS. The main SoC maintainers can be reached via the -alias soc@kernel.org if there is no platform-specific maintainer, or if they -are unresponsive. +always, listed in MAINTAINERS. What the SoC tree is not, however, is a location for architecture-specific code changes. Each architecture has its own maintainers that are responsible for architectural details, CPU errata and the like. +Submitting Patches for Given SoC +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All typical platform related patches should be sent via SoC submaintainers +(platform-specific maintainers). This includes also changes to per-platform or +shared defconfigs (scripts/get_maintainer.pl might not provide correct +addresses in such case). + +Submitting Patches to the Main SoC Maintainers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The main SoC maintainers can be reached via the alias soc@kernel.org only in +following cases: + +1. There are no platform-specific maintainers. + +2. Platform-specific maintainers are unresponsive. + +3. Introducing a completely new SoC platform. Such new SoC work should be sent + first to common mailing lists, pointed out by scripts/get_maintainer.pl, for + community review. After positive community review, work should be sent to + soc@kernel.org in one patchset containing new arch/foo/Kconfig entry, DTS + files, MAINTAINERS file entry and optionally initial drivers with their + Devicetree bindings. The MAINTAINERS file entry should list new + platform-specific maintainers, who are going to be responsible for handling + patches for the platform from now on. + +Note that the soc@kernel.org is usually not the place to discuss the patches, +thus work sent to this address should be already considered as acceptable by +the community. 
+ Information for (new) Submaintainers ------------------------------------ diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst index 750ff371570a..54be7ddf3e57 100644 --- a/Documentation/rust/arch-support.rst +++ b/Documentation/rust/arch-support.rst @@ -17,7 +17,7 @@ Architecture Level of support Constraints ============= ================ ============================================== ``arm64`` Maintained Little Endian only. ``loongarch`` Maintained \- -``riscv`` Maintained ``riscv64`` only. +``riscv`` Maintained ``riscv64`` and LLVM/Clang only. ``um`` Maintained \- ``x86`` Maintained ``x86_64`` only. ============= ================ ============================================== diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst index 6c0d70e2e27d..7b59bbd2e564 100644 --- a/Documentation/scheduler/sched-ext.rst +++ b/Documentation/scheduler/sched-ext.rst @@ -66,7 +66,7 @@ BPF scheduler and reverts all tasks back to CFS. .. code-block:: none # make -j16 -C tools/sched_ext - # tools/sched_ext/scx_simple + # tools/sched_ext/build/bin/scx_simple local=0 global=3 local=5 global=24 local=9 global=44 diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst index 4132eec995a3..41102f74c5e2 100644 --- a/Documentation/userspace-api/mseal.rst +++ b/Documentation/userspace-api/mseal.rst @@ -23,177 +23,166 @@ applications can additionally seal security critical data at runtime. A similar feature already exists in the XNU kernel with the VM_FLAGS_PERMANENT flag [1] and on OpenBSD with the mimmutable syscall [2]. -User API -======== -mseal() ------------ -The mseal() syscall has the following signature: +SYSCALL +======= +mseal syscall signature +----------------------- + ``int mseal(void \* addr, size_t len, unsigned long flags)`` -``int mseal(void addr, size_t len, unsigned long flags)`` + **addr**/**len**: virtual memory address range. + The address range set by **addr**/**len** must meet: + - The start address must be in an allocated VMA. + - The start address must be page aligned. + - The end address (**addr** + **len**) must be in an allocated VMA. + - no gap (unallocated memory) between start and end address. -**addr/len**: virtual memory address range. + The ``len`` will be paged aligned implicitly by the kernel. -The address range set by ``addr``/``len`` must meet: - - The start address must be in an allocated VMA. - - The start address must be page aligned. - - The end address (``addr`` + ``len``) must be in an allocated VMA. - - no gap (unallocated memory) between start and end address. + **flags**: reserved for future use. -The ``len`` will be paged aligned implicitly by the kernel. + **Return values**: + - **0**: Success. + - **-EINVAL**: + * Invalid input ``flags``. + * The start address (``addr``) is not page aligned. + * Address range (``addr`` + ``len``) overflow. + - **-ENOMEM**: + * The start address (``addr``) is not allocated. + * The end address (``addr`` + ``len``) is not allocated. + * A gap (unallocated memory) between start and end address. + - **-EPERM**: + * sealing is supported only on 64-bit CPUs, 32-bit is not supported. -**flags**: reserved for future use. + **Note about error return**: + - For above error cases, users can expect the given memory range is + unmodified, i.e. no partial update. + - There might be other internal errors/cases not listed here, e.g. + error during merging/splitting VMAs, or the process reaching the maximum + number of supported VMAs. 
In those cases, partial updates to the given + memory range could happen. However, those cases should be rare. -**return values**: + **Architecture support**: + mseal only works on 64-bit CPUs, not 32-bit CPUs. -- ``0``: Success. + **Idempotent**: + users can call mseal multiple times. mseal on an already sealed memory + is a no-action (not error). -- ``-EINVAL``: - - Invalid input ``flags``. - - The start address (``addr``) is not page aligned. - - Address range (``addr`` + ``len``) overflow. + **no munseal** + Once mapping is sealed, it can't be unsealed. The kernel should never + have munseal, this is consistent with other sealing feature, e.g. + F_SEAL_SEAL for file. -- ``-ENOMEM``: - - The start address (``addr``) is not allocated. - - The end address (``addr`` + ``len``) is not allocated. - - A gap (unallocated memory) between start and end address. +Blocked mm syscall for sealed mapping +------------------------------------- + It might be important to note: **once the mapping is sealed, it will + stay in the process's memory until the process terminates**. -- ``-EPERM``: - - sealing is supported only on 64-bit CPUs, 32-bit is not supported. + Example:: -- For above error cases, users can expect the given memory range is - unmodified, i.e. no partial update. + *ptr = mmap(0, 4096, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + rc = mseal(ptr, 4096, 0); + /* munmap will fail */ + rc = munmap(ptr, 4096); + assert(rc < 0); -- There might be other internal errors/cases not listed here, e.g. - error during merging/splitting VMAs, or the process reaching the max - number of supported VMAs. In those cases, partial updates to the given - memory range could happen. However, those cases should be rare. + Blocked mm syscall: + - munmap + - mmap + - mremap + - mprotect and pkey_mprotect + - some destructive madvise behaviors: MADV_DONTNEED, MADV_FREE, + MADV_DONTNEED_LOCKED, MADV_FREE, MADV_DONTFORK, MADV_WIPEONFORK -**Blocked operations after sealing**: - Unmapping, moving to another location, and shrinking the size, - via munmap() and mremap(), can leave an empty space, therefore - can be replaced with a VMA with a new set of attributes. + The first set of syscalls to block is munmap, mremap, mmap. They can + either leave an empty space in the address space, therefore allowing + replacement with a new mapping with new set of attributes, or can + overwrite the existing mapping with another mapping. - Moving or expanding a different VMA into the current location, - via mremap(). + mprotect and pkey_mprotect are blocked because they changes the + protection bits (RWX) of the mapping. - Modifying a VMA via mmap(MAP_FIXED). + Certain destructive madvise behaviors, specifically MADV_DONTNEED, + MADV_FREE, MADV_DONTNEED_LOCKED, and MADV_WIPEONFORK, can introduce + risks when applied to anonymous memory by threads lacking write + permissions. Consequently, these operations are prohibited under such + conditions. The aforementioned behaviors have the potential to modify + region contents by discarding pages, effectively performing a memset(0) + operation on the anonymous memory. - Size expansion, via mremap(), does not appear to pose any - specific risks to sealed VMAs. It is included anyway because - the use case is unclear. In any case, users can rely on - merging to expand a sealed VMA. + Kernel will return -EPERM for blocked syscalls. - mprotect() and pkey_mprotect(). 
+ When blocked syscall return -EPERM due to sealing, the memory regions may + or may not be changed, depends on the syscall being blocked: - Some destructive madvice() behaviors (e.g. MADV_DONTNEED) - for anonymous memory, when users don't have write permission to the - memory. Those behaviors can alter region contents by discarding pages, - effectively a memset(0) for anonymous memory. + - munmap: munmap is atomic. If one of VMAs in the given range is + sealed, none of VMAs are updated. + - mprotect, pkey_mprotect, madvise: partial update might happen, e.g. + when mprotect over multiple VMAs, mprotect might update the beginning + VMAs before reaching the sealed VMA and return -EPERM. + - mmap and mremap: undefined behavior. - Kernel will return -EPERM for blocked operations. - - For blocked operations, one can expect the given address is unmodified, - i.e. no partial update. Note, this is different from existing mm - system call behaviors, where partial updates are made till an error is - found and returned to userspace. To give an example: - - Assume following code sequence: - - - ptr = mmap(null, 8192, PROT_NONE); - - munmap(ptr + 4096, 4096); - - ret1 = mprotect(ptr, 8192, PROT_READ); - - mseal(ptr, 4096); - - ret2 = mprotect(ptr, 8192, PROT_NONE); - - ret1 will be -ENOMEM, the page from ptr is updated to PROT_READ. - - ret2 will be -EPERM, the page remains to be PROT_READ. - -**Note**: - -- mseal() only works on 64-bit CPUs, not 32-bit CPU. - -- users can call mseal() multiple times, mseal() on an already sealed memory - is a no-action (not error). - -- munseal() is not supported. - -Use cases: -========== +Use cases +========= - glibc: The dynamic linker, during loading ELF executables, can apply sealing to - non-writable memory segments. + mapping segments. -- Chrome browser: protect some security sensitive data-structures. +- Chrome browser: protect some security sensitive data structures. -Notes on which memory to seal: -============================== - -It might be important to note that sealing changes the lifetime of a mapping, -i.e. the sealed mapping won’t be unmapped till the process terminates or the -exec system call is invoked. Applications can apply sealing to any virtual -memory region from userspace, but it is crucial to thoroughly analyze the -mapping's lifetime prior to apply the sealing. +When not to use mseal +===================== +Applications can apply sealing to any virtual memory region from userspace, +but it is *crucial to thoroughly analyze the mapping's lifetime* prior to +apply the sealing. This is because the sealed mapping *won’t be unmapped* +until the process terminates or the exec system call is invoked. For example: + - aio/shm + aio/shm can call mmap and munmap on behalf of userspace, e.g. + ksys_shmdt() in shm.c. The lifetimes of those mapping are not tied to + the lifetime of the process. If those memories are sealed from userspace, + then munmap will fail, causing leaks in VMA address space during the + lifetime of the process. -- aio/shm + - ptr allocated by malloc (heap) + Don't use mseal on the memory ptr return from malloc(). + malloc() is implemented by allocator, e.g. by glibc. Heap manager might + allocate a ptr from brk or mapping created by mmap. + If an app calls mseal on a ptr returned from malloc(), this can affect + the heap manager's ability to manage the mappings; the outcome is + non-deterministic. - aio/shm can call mmap()/munmap() on behalf of userspace, e.g. ksys_shmdt() in - shm.c. 
The lifetime of those mapping are not tied to the lifetime of the - process. If those memories are sealed from userspace, then munmap() will fail, - causing leaks in VMA address space during the lifetime of the process. + Example:: -- Brk (heap) + ptr = malloc(size); + /* don't call mseal on a ptr returned from malloc. */ + mseal(ptr, size); + /* free will succeed, but the allocator can't shrink the heap below ptr */ + free(ptr); - Currently, userspace applications can seal parts of the heap by calling - malloc() and mseal(). - let's assume following calls from user space: +mseal doesn't block +=================== +In a nutshell, mseal blocks certain mm syscalls from modifying some of a VMA's +attributes, such as the protection bits (RWX). A sealed mapping doesn't mean the +memory is immutable. - - ptr = malloc(size); - - mprotect(ptr, size, RO); - - mseal(ptr, size); - - free(ptr); - - Technically, before mseal() is added, the user can change the protection of - the heap by calling mprotect(RO). As long as the user changes the protection - back to RW before free(), the memory range can be reused. - - Adding mseal() into the picture, however, the heap is then sealed partially, - the user can still free it, but the memory remains to be RO. If the address - is re-used by the heap manager for another malloc, the process might crash - soon after. Therefore, it is important not to apply sealing to any memory - that might get recycled. - - Furthermore, even if the application never calls the free() for the ptr, - the heap manager may invoke the brk system call to shrink the size of the - heap. In the kernel, the brk-shrink will call munmap(). Consequently, - depending on the location of the ptr, the outcome of brk-shrink is - nondeterministic. - - -Additional notes: -================= As Jann Horn pointed out in [3], there are still a few ways to write -to RO memory, which is, in a way, by design. Those cases are not covered -by mseal(). If applications want to block such cases, sandbox tools (such as -seccomp, LSM, etc) might be considered. +to RO memory, which is, in a way, by design. Those could be blocked +by different security measures. Those cases are: -- Write to read-only memory through /proc/self/mem interface. -- Write to read-only memory through ptrace (such as PTRACE_POKETEXT). -- userfaultfd. + - Write to read-only memory through /proc/self/mem interface (FOLL_FORCE). + - Write to read-only memory through ptrace (such as PTRACE_POKETEXT). + - userfaultfd. The idea that inspired this patch comes from Stephen Röttger’s work in V8 CFI [4]. Chrome browser in ChromeOS will be the first user of this API.
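As a usage sketch of the guidance above on which memory to seal (reusing the hypothetical sys_mseal() wrapper from the earlier example), the intended pattern is to seal only dedicated, process-lifetime mappings after dropping write permission, never heap memory handed out by malloc()::

    void *cfg = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

    /* ... populate the security-sensitive data ... */

    mprotect(cfg, 4096, PROT_READ);  /* make it read-only first            */
    sys_mseal(cfg, 4096, 0);         /* then seal; stays until exec/exit   */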
-Reference: -========== -[1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274 - -[2] https://man.openbsd.org/mimmutable.2 - -[3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com - -[4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc +Reference +========= +- [1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274 +- [2] https://man.openbsd.org/mimmutable.2 +- [3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com +- [4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index e32471977d0a..edc070c6e19b 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -8098,13 +8098,15 @@ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is disabled. -KVM_X86_QUIRK_SLOT_ZAP_ALL By default, KVM invalidates all SPTEs in - fast way for memslot deletion when VM type - is KVM_X86_DEFAULT_VM. - When this quirk is disabled or when VM type - is other than KVM_X86_DEFAULT_VM, KVM zaps - only leaf SPTEs that are within the range of - the memslot being deleted. +KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM + invalidates all SPTEs in all memslots and + address spaces when a memslot is deleted or + moved. When this quirk is disabled (or the + VM type isn't KVM_X86_DEFAULT_VM), KVM only + ensures the backing memory of the deleted + or moved memslot isn't reachable, i.e KVM + _may_ invalidate only SPTEs related to the + memslot. =================================== ============================================ 7.32 KVM_CAP_MAX_VCPU_ID diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 20a9a37d1cdd..1bedd56e2fe3 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -136,7 +136,7 @@ For direct sp, we can easily avoid it since the spte of direct sp is fixed to gfn. For indirect sp, we disabled fast page fault for simplicity. A solution for indirect sp could be to pin the gfn, for example via -kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning: +gfn_to_pfn_memslot_atomic, before the cmpxchg. After the pinning: - We have held the refcount of pfn; that means the pfn can not be freed and be reused for another gfn. 
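For the KVM_X86_QUIRK_SLOT_ZAP_ALL wording in the api.rst hunk above, a minimal sketch of how userspace might disable the quirk via KVM_CAP_DISABLE_QUIRKS2, assuming headers that define the quirk bit and a vm_fd previously obtained from KVM_CREATE_VM::

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int disable_slot_zap_all(int vm_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_DISABLE_QUIRKS2;
            cap.args[0] = KVM_X86_QUIRK_SLOT_ZAP_ALL;

            /* With the quirk disabled, KVM may zap only SPTEs of the memslot. */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }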
diff --git a/MAINTAINERS b/MAINTAINERS index a097afd76ded..043a5c3dcc12 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -258,12 +258,6 @@ L: linux-acenic@sunsite.dk S: Maintained F: drivers/net/ethernet/alteon/acenic* -ACER ASPIRE 1 EMBEDDED CONTROLLER DRIVER -M: Nikita Travkin -S: Maintained -F: Documentation/devicetree/bindings/platform/acer,aspire1-ec.yaml -F: drivers/platform/arm64/acer-aspire1-ec.c - ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER M: Peter Kaestle L: platform-driver-x86@vger.kernel.org @@ -888,7 +882,6 @@ F: drivers/staging/media/sunxi/cedrus/ ALPHA PORT M: Richard Henderson -M: Ivan Kokshaysky M: Matt Turner L: linux-alpha@vger.kernel.org S: Odd Fixes @@ -1113,6 +1106,12 @@ L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-amd-mp2* +AMD ASF I2C DRIVER +M: Shyam Sundar S K +L: linux-i2c@vger.kernel.org +S: Supported +F: drivers/i2c/busses/i2c-amd-asf-plat.c + AMD PDS CORE DRIVER M: Shannon Nelson M: Brett Creeley @@ -1181,8 +1180,9 @@ F: Documentation/hid/amd-sfh* F: drivers/hid/amd-sfh-hid/ AMD SPI DRIVER -M: Sanjay R Mehta -S: Maintained +M: Raju Rangoju +L: linux-spi@vger.kernel.org +S: Supported F: drivers/spi/spi-amd.c AMD XGBE DRIVER @@ -1761,8 +1761,8 @@ F: include/uapi/linux/if_arcnet.h ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS) M: Arnd Bergmann M: Olof Johansson -M: soc@kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: soc@lists.linux.dev S: Maintained P: Documentation/process/maintainer-soc.rst C: irc://irc.libera.chat/armlinux @@ -2263,12 +2263,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ep93xx/ts72xx.c -ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE -M: Alexander Shiyan -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Odd Fixes -N: clps711x - ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE M: Hartley Sweeten M: Alexander Sverdlin @@ -2865,7 +2859,7 @@ F: Documentation/devicetree/bindings/arm/qcom.yaml F: Documentation/devicetree/bindings/bus/qcom* F: Documentation/devicetree/bindings/cache/qcom,llcc.yaml F: Documentation/devicetree/bindings/firmware/qcom,scm.yaml -F: Documentation/devicetree/bindings/reserved-memory/qcom +F: Documentation/devicetree/bindings/reserved-memory/qcom* F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom/ F: arch/arm/configs/qcom_defconfig @@ -3758,6 +3752,7 @@ F: drivers/spi/spi-axi-spi-engine.c AXI PWM GENERATOR M: Michael Hennerich M: Nuno Sá +R: Trevor Gamblin L: linux-pwm@vger.kernel.org S: Supported W: https://ez.analog.com/linux-software-drivers @@ -3815,14 +3810,6 @@ F: drivers/video/backlight/ F: include/linux/backlight.h F: include/linux/pwm_backlight.h -BAIKAL-T1 PVT HARDWARE MONITOR DRIVER -M: Serge Semin -L: linux-hwmon@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml -F: Documentation/hwmon/bt1-pvt.rst -F: drivers/hwmon/bt1-pvt.[ch] - BARCO P50 GPIO DRIVER M: Santosh Kumar Yadav M: Peter Korsgaard @@ -6476,7 +6463,6 @@ F: drivers/mtd/nand/raw/denali* DESIGNWARE EDMA CORE IP DRIVER M: Manivannan Sadhasivam -R: Serge Semin L: dmaengine@vger.kernel.org S: Maintained F: drivers/dma/dw-edma/ @@ -9745,6 +9731,7 @@ F: include/dt-bindings/gpio/ F: include/linux/gpio.h F: include/linux/gpio/ F: include/linux/of_gpio.h +K: (devm_)?gpio_(request|free|direction|get|set) GPIO UAPI M: Bartosz Golaszewski @@ -9759,14 +9746,6 @@ F: drivers/gpio/gpiolib-cdev.c F: include/uapi/linux/gpio.h F: tools/gpio/ -GRE DEMULTIPLEXER DRIVER -M: 
Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -F: include/net/gre.h -F: net/ipv4/gre_demux.c -F: net/ipv4/gre_offload.c - GRETH 10/100/1G Ethernet MAC device driver M: Andreas Larsson L: netdev@vger.kernel.org @@ -10270,7 +10249,7 @@ F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml F: drivers/bus/hisi_lpc.c HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) -M: Yisen Zhuang +M: Jian Shen M: Salil Mehta M: Jijie Shao L: netdev@vger.kernel.org @@ -10279,7 +10258,7 @@ W: http://www.hisilicon.com F: drivers/net/ethernet/hisilicon/hns3/ HISILICON NETWORK SUBSYSTEM DRIVER -M: Yisen Zhuang +M: Jian Shen M: Salil Mehta L: netdev@vger.kernel.org S: Maintained @@ -10743,14 +10722,12 @@ F: Documentation/i2c/busses/i2c-viapro.rst F: drivers/i2c/busses/i2c-ali1535.c F: drivers/i2c/busses/i2c-ali1563.c F: drivers/i2c/busses/i2c-ali15x3.c -F: drivers/i2c/busses/i2c-amd756-s4882.c F: drivers/i2c/busses/i2c-amd756.c F: drivers/i2c/busses/i2c-amd8111.c F: drivers/i2c/busses/i2c-i801.c F: drivers/i2c/busses/i2c-isch.c -F: drivers/i2c/busses/i2c-nforce2-s4985.c F: drivers/i2c/busses/i2c-nforce2.c -F: drivers/i2c/busses/i2c-piix4.c +F: drivers/i2c/busses/i2c-piix4.* F: drivers/i2c/busses/i2c-sis5595.c F: drivers/i2c/busses/i2c-sis630.c F: drivers/i2c/busses/i2c-sis96x.c @@ -11283,10 +11260,10 @@ F: security/integrity/ F: security/integrity/ima/ INTEGRITY POLICY ENFORCEMENT (IPE) -M: Fan Wu +M: Fan Wu L: linux-security-module@vger.kernel.org S: Supported -T: git https://github.com/microsoft/ipe.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/wufan/ipe.git F: Documentation/admin-guide/LSM/ipe.rst F: Documentation/security/ipe.rst F: scripts/ipe/ @@ -11604,6 +11581,16 @@ F: drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c F: drivers/crypto/intel/keembay/ocs-hcu.c F: drivers/crypto/intel/keembay/ocs-hcu.h +INTEL LA JOLLA COVE ADAPTER (LJCA) USB I/O EXPANDER DRIVERS +M: Wentong Wu +M: Sakari Ailus +S: Maintained +F: drivers/gpio/gpio-ljca.c +F: drivers/i2c/busses/i2c-ljca.c +F: drivers/spi/spi-ljca.c +F: drivers/usb/misc/usb-ljca.c +F: include/linux/usb/ljca.h + INTEL MANAGEMENT ENGINE (mei) M: Tomas Winkler L: linux-kernel@vger.kernel.org @@ -12242,6 +12229,7 @@ R: Dmitry Vyukov R: Vincenzo Frascino L: kasan-dev@googlegroups.com S: Maintained +B: https://bugzilla.kernel.org/buglist.cgi?component=Sanitizers&product=Memory%20Management F: Documentation/dev-tools/kasan.rst F: arch/*/include/asm/*kasan.h F: arch/*/mm/kasan_init* @@ -12265,6 +12253,7 @@ R: Dmitry Vyukov R: Andrey Konovalov L: kasan-dev@googlegroups.com S: Maintained +B: https://bugzilla.kernel.org/buglist.cgi?component=Sanitizers&product=Memory%20Management F: Documentation/dev-tools/kcov.rst F: include/linux/kcov.h F: include/uapi/linux/kcov.h @@ -12944,49 +12933,29 @@ LIBATA PATA ARASAN COMPACT FLASH CONTROLLER M: Viresh Kumar L: linux-ide@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: drivers/ata/pata_arasan_cf.c F: include/linux/pata_arasan_cf_data.h -LIBATA PATA DRIVERS -R: Sergey Shtylyov -L: linux-ide@vger.kernel.org -F: drivers/ata/ata_*.c -F: drivers/ata/pata_*.c - LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS M: Linus Walleij L: linux-ide@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: drivers/ata/pata_ftide010.c F: drivers/ata/sata_gemini.c F: drivers/ata/sata_gemini.h LIBATA SATA AHCI PLATFORM devices support M: Hans de Goede -M: Jens Axboe L: linux-ide@vger.kernel.org 
S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: drivers/ata/ahci_platform.c F: drivers/ata/libahci_platform.c F: include/linux/ahci_platform.h -LIBATA SATA AHCI SYNOPSYS DWC CONTROLLER DRIVER -M: Serge Semin -L: linux-ide@vger.kernel.org -S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git -F: Documentation/devicetree/bindings/ata/baikal,bt1-ahci.yaml -F: Documentation/devicetree/bindings/ata/snps,dwc-ahci.yaml -F: drivers/ata/ahci_dwc.c - LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER M: Mikael Pettersson L: linux-ide@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: drivers/ata/sata_promise.* LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) @@ -14179,8 +14148,7 @@ T: git git://linuxtv.org/media_tree.git F: drivers/media/platform/nxp/imx-pxp.[ch] MEDIA DRIVERS FOR ASCOT2E -M: Sergey Kozlov -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14197,8 +14165,7 @@ T: git git://linuxtv.org/media_tree.git F: drivers/media/dvb-frontends/cxd2099* MEDIA DRIVERS FOR CXD2841ER -M: Sergey Kozlov -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14251,7 +14218,7 @@ F: drivers/media/platform/nxp/imx7-media-csi.c F: drivers/media/platform/nxp/imx8mq-mipi-csi2.c MEDIA DRIVERS FOR HELENE -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14260,8 +14227,7 @@ T: git git://linuxtv.org/media_tree.git F: drivers/media/dvb-frontends/helene* MEDIA DRIVERS FOR HORUS3A -M: Sergey Kozlov -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14270,8 +14236,7 @@ T: git git://linuxtv.org/media_tree.git F: drivers/media/dvb-frontends/horus3a* MEDIA DRIVERS FOR LNBH25 -M: Sergey Kozlov -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14287,8 +14252,7 @@ T: git git://linuxtv.org/media_tree.git F: drivers/media/dvb-frontends/mxl5xx* MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices -M: Sergey Kozlov -M: Abylay Ospan +M: Abylay Ospan L: linux-media@vger.kernel.org S: Supported W: https://linuxtv.org @@ -14913,9 +14877,10 @@ N: include/linux/page[-_]* MEMORY MAPPING M: Andrew Morton -R: Liam R. Howlett +M: Liam R. 
Howlett +M: Lorenzo Stoakes R: Vlastimil Babka -R: Lorenzo Stoakes +R: Jann Horn L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org @@ -14938,13 +14903,6 @@ F: drivers/mtd/ F: include/linux/mtd/ F: include/uapi/mtd/ -MEMSENSING MICROSYSTEMS MSA311 DRIVER -M: Dmitry Rokosov -L: linux-iio@vger.kernel.org -S: Maintained -F: Documentation/devicetree/bindings/iio/accel/memsensing,msa311.yaml -F: drivers/iio/accel/msa311.c - MEN A21 WATCHDOG DRIVER M: Johannes Thumshirn L: linux-watchdog@vger.kernel.org @@ -15089,6 +15047,7 @@ F: drivers/spi/spi-at91-usart.c MICROCHIP AUDIO ASOC DRIVERS M: Claudiu Beznea +M: Andrei Simion L: linux-sound@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/sound/atmel* @@ -15197,6 +15156,7 @@ F: include/video/atmel_lcdc.h MICROCHIP MCP16502 PMIC DRIVER M: Claudiu Beznea +M: Andrei Simion L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml @@ -15278,7 +15238,6 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c MICROCHIP POLARFIRE FPGA DRIVERS M: Conor Dooley -R: Vladimir Georgiev L: linux-fpga@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml @@ -15328,6 +15287,7 @@ F: drivers/spi/spi-atmel.* MICROCHIP SSC DRIVER M: Claudiu Beznea +M: Andrei Simion L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/misc/atmel-ssc.txt @@ -15533,17 +15493,6 @@ F: arch/mips/ F: drivers/platform/mips/ F: include/dt-bindings/mips/ -MIPS BAIKAL-T1 PLATFORM -M: Serge Semin -L: linux-mips@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/bus/baikal,bt1-*.yaml -F: Documentation/devicetree/bindings/clock/baikal,bt1-*.yaml -F: drivers/bus/bt1-*.c -F: drivers/clk/baikal-t1/ -F: drivers/memory/bt1-l2-ctl.c -F: drivers/mtd/maps/physmap-bt1-rom.[ch] - MIPS BOSTON DEVELOPMENT BOARD M: Paul Burton L: linux-mips@vger.kernel.org @@ -15556,7 +15505,6 @@ F: include/dt-bindings/clock/boston-clock.h MIPS CORE DRIVERS M: Thomas Bogendoerfer -M: Serge Semin L: linux-mips@vger.kernel.org S: Supported F: drivers/bus/mips_cdmm.c @@ -16092,6 +16040,7 @@ F: include/uapi/linux/net_dropmon.h F: net/core/drop_monitor.c NETWORKING DRIVERS +M: Andrew Lunn M: "David S. Miller" M: Eric Dumazet M: Jakub Kicinski @@ -16139,7 +16088,6 @@ F: drivers/net/wireless/ NETWORKING [DSA] M: Andrew Lunn -M: Florian Fainelli M: Vladimir Oltean S: Maintained F: Documentation/devicetree/bindings/net/dsa/ @@ -16157,6 +16105,7 @@ M: "David S. 
Miller" M: Eric Dumazet M: Jakub Kicinski M: Paolo Abeni +R: Simon Horman L: netdev@vger.kernel.org S: Maintained P: Documentation/process/maintainer-netdev.rst @@ -16199,10 +16148,22 @@ F: include/uapi/linux/rtnetlink.h F: lib/net_utils.c F: lib/random32.c F: net/ +F: samples/pktgen/ F: tools/net/ F: tools/testing/selftests/net/ +X: Documentation/networking/mac80211-injection.rst +X: Documentation/networking/mac80211_hwsim/ +X: Documentation/networking/regulatory.rst +X: include/net/cfg80211.h +X: include/net/ieee80211_radiotap.h +X: include/net/iw_handler.h +X: include/net/mac80211.h +X: include/net/wext.h X: net/9p/ X: net/bluetooth/ +X: net/mac80211/ +X: net/rfkill/ +X: net/wireless/ NETWORKING [IPSEC] M: Steffen Klassert @@ -16512,12 +16473,6 @@ F: include/linux/ntb.h F: include/linux/ntb_transport.h F: tools/testing/selftests/ntb/ -NTB IDT DRIVER -M: Serge Semin -L: ntb@lists.linux.dev -S: Supported -F: drivers/ntb/hw/idt/ - NTB INTEL DRIVER M: Dave Jiang L: ntb@lists.linux.dev @@ -18538,13 +18493,6 @@ F: drivers/pps/ F: include/linux/pps*.h F: include/uapi/linux/pps.h -PPTP DRIVER -M: Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -W: http://sourceforge.net/projects/accel-pptp -F: drivers/net/ppp/pptp.c - PRESSURE STALL INFORMATION (PSI) M: Johannes Weiner M: Suren Baghdasaryan @@ -19518,6 +19466,14 @@ S: Maintained F: Documentation/tools/rtla/ F: tools/tracing/rtla/ +Real-time Linux (PREEMPT_RT) +M: Sebastian Andrzej Siewior +M: Clark Williams +M: Steven Rostedt +L: linux-rt-devel@lists.linux.dev +S: Supported +K: PREEMPT_RT + REALTEK AUDIO CODECS M: Oder Chiou S: Maintained @@ -19627,15 +19583,6 @@ S: Supported F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c -RENESAS ETHERNET AVB DRIVER -R: Sergey Shtylyov -L: netdev@vger.kernel.org -L: linux-renesas-soc@vger.kernel.org -F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml -F: drivers/net/ethernet/renesas/Kconfig -F: drivers/net/ethernet/renesas/Makefile -F: drivers/net/ethernet/renesas/ravb* - RENESAS ETHERNET SWITCH DRIVER R: Yoshihiro Shimoda L: netdev@vger.kernel.org @@ -19685,14 +19632,6 @@ F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml F: drivers/i2c/busses/i2c-rcar.c F: drivers/i2c/busses/i2c-sh_mobile.c -RENESAS R-CAR SATA DRIVER -R: Sergey Shtylyov -L: linux-ide@vger.kernel.org -L: linux-renesas-soc@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml -F: drivers/ata/sata_rcar.c - RENESAS R-CAR THERMAL DRIVERS M: Niklas Söderlund L: linux-renesas-soc@vger.kernel.org @@ -19768,16 +19707,6 @@ S: Supported F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml F: drivers/i2c/busses/i2c-rzv2m.c -RENESAS SUPERH ETHERNET DRIVER -R: Sergey Shtylyov -L: netdev@vger.kernel.org -L: linux-renesas-soc@vger.kernel.org -F: Documentation/devicetree/bindings/net/renesas,ether.yaml -F: drivers/net/ethernet/renesas/Kconfig -F: drivers/net/ethernet/renesas/Makefile -F: drivers/net/ethernet/renesas/sh_eth* -F: include/linux/sh_eth.h - RENESAS USB PHY DRIVER M: Yoshihiro Shimoda L: linux-renesas-soc@vger.kernel.org @@ -19922,12 +19851,10 @@ L: linux-riscv@lists.infradead.org S: Maintained Q: https://patchwork.kernel.org/project/linux-riscv/list/ T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/ -F: Documentation/devicetree/bindings/riscv/ -F: arch/riscv/boot/dts/ -X: arch/riscv/boot/dts/allwinner/ -X: arch/riscv/boot/dts/renesas/ -X: arch/riscv/boot/dts/sophgo/ -X: 
arch/riscv/boot/dts/thead/ +F: arch/riscv/boot/dts/canaan/ +F: arch/riscv/boot/dts/microchip/ +F: arch/riscv/boot/dts/sifive/ +F: arch/riscv/boot/dts/starfive/ RISC-V PMU DRIVERS M: Atish Patra @@ -20188,6 +20115,13 @@ S: Maintained T: git https://github.com/pkshih/rtw.git F: drivers/net/wireless/realtek/rtl8xxxu/ +RTL9300 I2C DRIVER (rtl9300-i2c) +M: Chris Packham +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/i2c/realtek,rtl9301-i2c.yaml +F: drivers/i2c/busses/i2c-rtl9300.c + RTRS TRANSPORT DRIVERS M: Md. Haris Iqbal M: Jack Wang @@ -21694,6 +21628,15 @@ S: Supported W: https://github.com/thesofproject/linux/ F: sound/soc/sof/ +SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card) +M: Kuninori Morimoto +S: Supported +L: linux-sound@vger.kernel.org +F: sound/soc/generic/ +F: include/sound/simple_card* +F: Documentation/devicetree/bindings/sound/simple-card.yaml +F: Documentation/devicetree/bindings/sound/audio-graph*.yaml + SOUNDWIRE SUBSYSTEM M: Vinod Koul M: Bard Liao @@ -21772,8 +21715,8 @@ F: drivers/accessibility/speakup/ SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT M: Viresh Kumar M: Shiraz Hashim -M: soc@kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: soc@lists.linux.dev S: Maintained W: http://www.st.com/spear F: arch/arm/boot/dts/st/spear* @@ -22431,19 +22374,11 @@ F: drivers/tty/serial/8250/8250_lpss.c SYNOPSYS DESIGNWARE APB GPIO DRIVER M: Hoan Tran -M: Serge Semin L: linux-gpio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml F: drivers/gpio/gpio-dwapb.c -SYNOPSYS DESIGNWARE APB SSI DRIVER -M: Serge Semin -L: linux-spi@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml -F: drivers/spi/spi-dw* - SYNOPSYS DESIGNWARE AXI DMAC DRIVER M: Eugeniy Paltsev S: Maintained @@ -23287,7 +23222,7 @@ F: Documentation/devicetree/bindings/iio/adc/ti,lmp92064.yaml F: drivers/iio/adc/ti-lmp92064.c TI PCM3060 ASoC CODEC DRIVER -M: Kirill Marinushkin +M: Kirill Marinushkin L: linux-sound@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/sound/pcm3060.txt @@ -23753,12 +23688,6 @@ L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/hid-udraw-ps3.c -UFS FILESYSTEM -M: Evgeniy Dushistov -S: Maintained -F: Documentation/admin-guide/ufs.rst -F: fs/ufs/ - UHID USERSPACE HID IO DRIVER M: David Rheinsberg L: linux-input@vger.kernel.org @@ -24061,6 +23990,7 @@ USB RAW GADGET DRIVER R: Andrey Konovalov L: linux-usb@vger.kernel.org S: Maintained +B: https://github.com/xairy/raw-gadget/issues F: Documentation/usb/raw-gadget.rst F: drivers/usb/gadget/legacy/raw_gadget.c F: include/uapi/linux/usb/raw_gadget.h @@ -24177,8 +24107,12 @@ F: drivers/usb/host/xhci* USER DATAGRAM PROTOCOL (UDP) M: Willem de Bruijn +L: netdev@vger.kernel.org S: Maintained F: include/linux/udp.h +F: include/net/udp.h +F: include/trace/events/udp.h +F: include/uapi/linux/udp.h F: net/ipv4/udp.c F: net/ipv6/udp.c @@ -24728,9 +24662,10 @@ F: tools/testing/vsock/ VMA M: Andrew Morton -R: Liam R. Howlett +M: Liam R. Howlett +M: Lorenzo Stoakes R: Vlastimil Babka -R: Lorenzo Stoakes +R: Jann Horn L: linux-mm@kvack.org S: Maintained W: https://www.linux-mm.org @@ -25404,7 +25339,7 @@ F: include/xen/arm/swiotlb-xen.h F: include/xen/swiotlb-xen.h XFS FILESYSTEM -M: Chandan Babu R +M: Carlos Maiolino R: Darrick J. 
Wong L: linux-xfs@vger.kernel.org S: Supported diff --git a/Makefile b/Makefile index c5493c0c0ca1..79192a3024bf 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 8af374ea1adc..bd9f095d69fa 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -838,7 +838,7 @@ config CFI_CLANG config CFI_ICALL_NORMALIZE_INTEGERS bool "Normalize CFI tags for integers" depends on CFI_CLANG - depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS + depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG help This option normalizes the CFI tags for integer types so that all integer types of the same size and signedness receive the same CFI @@ -851,21 +851,19 @@ config CFI_ICALL_NORMALIZE_INTEGERS This option is necessary for using CFI with Rust. If unsure, say N. -config HAVE_CFI_ICALL_NORMALIZE_INTEGERS - def_bool !GCOV_KERNEL && !KASAN - depends on CFI_CLANG +config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG + def_bool y depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers) - help - Is CFI_ICALL_NORMALIZE_INTEGERS supported with the set of compilers - currently in use? + # With GCOV/KASAN we need this fix: https://github.com/llvm/llvm-project/pull/104826 + depends on CLANG_VERSION >= 190103 || (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS) - This option defaults to false if GCOV or KASAN is enabled, as there is - an LLVM bug that makes normalized integers tags incompatible with - KASAN and GCOV. Kconfig currently does not have the infrastructure to - detect whether your rustc compiler contains the fix for this bug, so - it is assumed that it doesn't. If your compiler has the fix, you can - explicitly enable this option in your config file. The Kconfig logic - needed to detect this will be added in a future kernel release. 
+config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC + def_bool y + depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG + depends on RUSTC_VERSION >= 107900 + # With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373 + depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \ + (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS) config CFI_PERMISSIVE bool "Use CFI in permissive mode" diff --git a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts index 72d26d130efa..85f54fa595aa 100644 --- a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts +++ b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts @@ -77,7 +77,7 @@ &gpio { }; &hdmi { - hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>; + hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>; power-domains = <&power RPI_POWER_DOMAIN_HDMI>; status = "okay"; }; diff --git a/arch/arm/boot/dts/rockchip/rk3036-kylin.dts b/arch/arm/boot/dts/rockchip/rk3036-kylin.dts index e32c73d32f0a..2f84e2805712 100644 --- a/arch/arm/boot/dts/rockchip/rk3036-kylin.dts +++ b/arch/arm/boot/dts/rockchip/rk3036-kylin.dts @@ -325,8 +325,8 @@ regulator-state-mem { &i2c2 { status = "okay"; - rt5616: rt5616@1b { - compatible = "rt5616"; + rt5616: audio-codec@1b { + compatible = "realtek,rt5616"; reg = <0x1b>; clocks = <&cru SCLK_I2S_OUT>; clock-names = "mclk"; diff --git a/arch/arm/boot/dts/rockchip/rk3036.dtsi b/arch/arm/boot/dts/rockchip/rk3036.dtsi index 96279d1e02fe..63b9912be06a 100644 --- a/arch/arm/boot/dts/rockchip/rk3036.dtsi +++ b/arch/arm/boot/dts/rockchip/rk3036.dtsi @@ -384,12 +384,13 @@ reboot-mode { }; }; - acodec: acodec-ana@20030000 { - compatible = "rk3036-codec"; + acodec: audio-codec@20030000 { + compatible = "rockchip,rk3036-codec"; reg = <0x20030000 0x4000>; - rockchip,grf = <&grf>; clock-names = "acodec_pclk"; clocks = <&cru PCLK_ACODEC>; + rockchip,grf = <&grf>; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -399,7 +400,6 @@ hdmi: hdmi@20034000 { interrupts = ; clocks = <&cru PCLK_HDMI>; clock-names = "pclk"; - rockchip,grf = <&grf>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_ctl>; #sound-dai-cells = <0>; @@ -553,11 +553,11 @@ i2c0: i2c@20072000 { }; spi: spi@20074000 { - compatible = "rockchip,rockchip-spi"; + compatible = "rockchip,rk3036-spi"; reg = <0x20074000 0x1000>; interrupts = ; - clocks = <&cru PCLK_SPI>, <&cru SCLK_SPI>; - clock-names = "apb-pclk","spi_pclk"; + clocks = <&cru SCLK_SPI>, <&cru PCLK_SPI>; + clock-names = "spiclk", "apb_pclk"; dmas = <&pdma 8>, <&pdma 9>; dma-names = "tx", "rx"; pinctrl-names = "default"; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd9df6dcc593..70d7f4f20225 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2214,6 +2214,7 @@ config ARM64_SME bool "ARM Scalable Matrix Extension support" default y depends on ARM64_SVE + depends on BROKEN help The Scalable Matrix Extension (SME) is an extension to the AArch64 execution state which utilises a substantial subset of the SVE diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi index d00036204a8c..dad0dc8fb431 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-lvds0.dtsi @@ -14,7 +14,7 @@ qm_lvds0_lis_lpcg: qxp_mipi1_lis_lpcg: clock-controller@56243000 { compatible = "fsl,imx8qxp-lpcg"; reg = <0x56243000 0x4>; #clock-cells = <1>; - clock-output-names = "mipi1_lis_lpcg_ipg_clk"; + clock-output-names = "lvds0_lis_lpcg_ipg_clk"; power-domains = <&pd 
IMX_SC_R_MIPI_1>; }; @@ -22,9 +22,9 @@ qm_lvds0_pwm_lpcg: qxp_mipi1_pwm_lpcg: clock-controller@5624300c { compatible = "fsl,imx8qxp-lpcg"; reg = <0x5624300c 0x4>; #clock-cells = <1>; - clock-output-names = "mipi1_pwm_lpcg_clk", - "mipi1_pwm_lpcg_ipg_clk", - "mipi1_pwm_lpcg_32k_clk"; + clock-output-names = "lvds0_pwm_lpcg_clk", + "lvds0_pwm_lpcg_ipg_clk", + "lvds0_pwm_lpcg_32k_clk"; power-domains = <&pd IMX_SC_R_MIPI_1_PWM_0>; }; @@ -32,8 +32,8 @@ qm_lvds0_i2c0_lpcg: qxp_mipi1_i2c0_lpcg: clock-controller@56243010 { compatible = "fsl,imx8qxp-lpcg"; reg = <0x56243010 0x4>; #clock-cells = <1>; - clock-output-names = "mipi1_i2c0_lpcg_clk", - "mipi1_i2c0_lpcg_ipg_clk"; + clock-output-names = "lvds0_i2c0_lpcg_clk", + "lvds0_i2c0_lpcg_ipg_clk"; power-domains = <&pd IMX_SC_R_MIPI_1_I2C_0>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-vpu.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-vpu.dtsi index c6540768bdb9..87211c18d65a 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-vpu.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-vpu.dtsi @@ -15,7 +15,7 @@ vpu: vpu@2c000000 { mu_m0: mailbox@2d000000 { compatible = "fsl,imx6sx-mu"; reg = <0x2d000000 0x20000>; - interrupts = ; + interrupts = ; #mbox-cells = <2>; power-domains = <&pd IMX_SC_R_VPU_MU_0>; status = "disabled"; @@ -24,7 +24,7 @@ mu_m0: mailbox@2d000000 { mu1_m0: mailbox@2d020000 { compatible = "fsl,imx6sx-mu"; reg = <0x2d020000 0x20000>; - interrupts = ; + interrupts = ; #mbox-cells = <2>; power-domains = <&pd IMX_SC_R_VPU_MU_1>; status = "disabled"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts index 50debe821c42..9c102acb8052 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts @@ -218,6 +218,18 @@ ldb_lvds_ch1: endpoint { }; }; +&media_blk_ctrl { + /* + * The LVDS panel on this device uses 72.4 MHz pixel clock, + * set IMX8MP_VIDEO_PLL1 to 72.4 * 7 = 506.8 MHz so the LDB + * serializer and LCDIFv3 scanout engine can reach accurate + * pixel clock of exactly 72.4 MHz. 
+ */ + assigned-clock-rates = <500000000>, <200000000>, + <0>, <0>, <500000000>, + <506800000>; +}; + &snvs_pwrkey { status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts index 3c2efdc59bfa..30962922b361 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts @@ -71,6 +71,7 @@ &media_blk_ctrl { assigned-clock-rates = <500000000>, <200000000>, <0>, /* IMX8MP_CLK_MEDIA_DISP2_PIX = pixelclk of lvds panel */ <68900000>, + <500000000>, /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_LDB * 2 */ <964600000>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index f3531cfb0d79..40e847bc0b7f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -1261,7 +1261,7 @@ usdhc1: mmc@30b40000 { compatible = "fsl,imx8mp-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b40000 0x10000>; interrupts = ; - clocks = <&clk IMX8MP_CLK_DUMMY>, + clocks = <&clk IMX8MP_CLK_IPG_ROOT>, <&clk IMX8MP_CLK_NAND_USDHC_BUS>, <&clk IMX8MP_CLK_USDHC1_ROOT>; clock-names = "ipg", "ahb", "per"; @@ -1275,7 +1275,7 @@ usdhc2: mmc@30b50000 { compatible = "fsl,imx8mp-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b50000 0x10000>; interrupts = ; - clocks = <&clk IMX8MP_CLK_DUMMY>, + clocks = <&clk IMX8MP_CLK_IPG_ROOT>, <&clk IMX8MP_CLK_NAND_USDHC_BUS>, <&clk IMX8MP_CLK_USDHC2_ROOT>; clock-names = "ipg", "ahb", "per"; @@ -1289,7 +1289,7 @@ usdhc3: mmc@30b60000 { compatible = "fsl,imx8mp-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b60000 0x10000>; interrupts = ; - clocks = <&clk IMX8MP_CLK_DUMMY>, + clocks = <&clk IMX8MP_CLK_IPG_ROOT>, <&clk IMX8MP_CLK_NAND_USDHC_BUS>, <&clk IMX8MP_CLK_USDHC3_ROOT>; clock-names = "ipg", "ahb", "per"; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-vpu.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-vpu.dtsi index 7894a3ab26d6..f81937b5fb72 100644 --- a/arch/arm64/boot/dts/freescale/imx8qxp-ss-vpu.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-vpu.dtsi @@ -5,6 +5,14 @@ * Author: Alexander Stein */ +&mu_m0 { + interrupts = ; +}; + +&mu1_m0 { + interrupts = ; +}; + &vpu_core0 { reg = <0x2d040000 0x10000>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi index e32d5afcf4a9..43f543768444 100644 --- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi @@ -384,7 +384,7 @@ pcc4: clock-controller@29800000 { }; flexspi2: spi@29810000 { - compatible = "nxp,imx8mm-fspi"; + compatible = "nxp,imx8ulp-fspi"; reg = <0x29810000 0x10000>, <0x60000000 0x10000000>; reg-names = "fspi_base", "fspi_mmap"; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi index 4676e3488f54..cb8d54895a77 100644 --- a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi @@ -136,7 +136,7 @@ cp0_i2c0_pins: cp0-i2c0-pins { }; cp0_mdio_pins: cp0-mdio-pins { - marvell,pins = "mpp40", "mpp41"; + marvell,pins = "mpp0", "mpp1"; marvell,function = "ge"; }; diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi index 28634789a8a9..7af210789879 100644 --- a/arch/arm64/boot/dts/qcom/msm8939.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi @@ -248,7 +248,7 @@ 
rpm: remoteproc { smd-edge { interrupts = ; - mboxes = <&apcs1_mbox 0>; + qcom,ipc = <&apcs1_mbox 8 0>; qcom,smd-edge = <15>; rpm_requests: rpm-requests { diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index 9bafb3b350ff..38cb524cc568 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -1973,7 +1973,7 @@ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, clocks = <&gcc GCC_PCIE_1_PIPE_CLK>, <&gcc GCC_PCIE_1_PIPE_CLK_SRC>, - <&pcie1_phy>, + <&pcie1_phy QMP_PCIE_PIPE_CLK>, <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_PCIE_1_AUX_CLK>, <&gcc GCC_PCIE_1_CFG_AHB_CLK>, diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts index 941dfddd6713..fdde988ae01e 100644 --- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts +++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts @@ -139,6 +139,8 @@ vreg_nvme: regulator-nvme { pinctrl-0 = <&nvme_reg_en>; pinctrl-names = "default"; + + regulator-boot-on; }; vph_pwr: regulator-vph-pwr { diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts index 20616bd4aa6c..fb4a48a1e2a8 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts @@ -134,6 +134,8 @@ vreg_nvme: regulator-nvme { pinctrl-0 = <&nvme_reg_en>; pinctrl-names = "default"; + + regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts index 10b28d870f08..c6e0356ed9a2 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts @@ -177,9 +177,9 @@ sound { compatible = "qcom,x1e80100-sndcard"; model = "X1E80100-CRD"; audio-routing = "WooferLeft IN", "WSA WSA_SPK1 OUT", - "TwitterLeft IN", "WSA WSA_SPK2 OUT", + "TweeterLeft IN", "WSA WSA_SPK2 OUT", "WooferRight IN", "WSA2 WSA_SPK2 OUT", - "TwitterRight IN", "WSA2 WSA_SPK2 OUT", + "TweeterRight IN", "WSA2 WSA_SPK2 OUT", "IN1_HPHL", "HPHL_OUT", "IN2_HPHR", "HPHR_OUT", "AMIC2", "MIC BIAS2", @@ -300,6 +300,8 @@ vreg_nvme: regulator-nvme { pinctrl-names = "default"; pinctrl-0 = <&nvme_reg_en>; + + regulator-boot-on; }; vreg_wwan: regulator-wwan { @@ -933,7 +935,7 @@ left_tweeter: speaker@0,1 { reg = <0 1>; reset-gpios = <&lpass_tlmm 12 GPIO_ACTIVE_LOW>; #sound-dai-cells = <0>; - sound-name-prefix = "TwitterLeft"; + sound-name-prefix = "TweeterLeft"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; qcom,port-mapping = <4 5 6 7 11 13>; @@ -986,7 +988,7 @@ right_tweeter: speaker@0,1 { reg = <0 1>; reset-gpios = <&lpass_tlmm 13 GPIO_ACTIVE_LOW>; #sound-dai-cells = <0>; - sound-name-prefix = "TwitterRight"; + sound-name-prefix = "TweeterRight"; vdd-1p8-supply = <&vreg_l15b_1p8>; vdd-io-supply = <&vreg_l12b_1p2>; qcom,port-mapping = <4 5 6 7 11 13>; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts index 3c13331a9ef4..0cdaff9c8cf0 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts @@ -205,6 +205,8 @@ vreg_nvme: regulator-nvme { pinctrl-0 = <&nvme_reg_en>; pinctrl-names = "default"; + + regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi index 42e02ad6a9c3..cdb401767c42 100644 
--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi @@ -164,6 +164,8 @@ vreg_nvme: regulator-nvme { pinctrl-0 = <&nvme_reg_en>; pinctrl-names = "default"; + + regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts index 1c3a6a7b3ed6..5ef030c60abe 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts @@ -253,6 +253,8 @@ vreg_nvme: regulator-nvme { pinctrl-names = "default"; pinctrl-0 = <&nvme_reg_en>; + + regulator-boot-on; }; }; diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi index a36076e3c56b..0510abc0edf0 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi @@ -2924,14 +2924,14 @@ pcie6a: pci@1bf8000 { "mhi"; #address-cells = <3>; #size-cells = <2>; - ranges = <0x01000000 0 0x00000000 0 0x70200000 0 0x100000>, - <0x02000000 0 0x70300000 0 0x70300000 0 0x3d00000>; - bus-range = <0 0xff>; + ranges = <0x01000000 0x0 0x00000000 0x0 0x70200000 0x0 0x100000>, + <0x02000000 0x0 0x70300000 0x0 0x70300000 0x0 0x1d00000>; + bus-range = <0x00 0xff>; dma-coherent; linux,pci-domain = <6>; - num-lanes = <2>; + num-lanes = <4>; interrupts = , , @@ -2997,19 +2997,22 @@ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, }; pcie6a_phy: phy@1bfc000 { - compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy"; - reg = <0 0x01bfc000 0 0x2000>; + compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy"; + reg = <0 0x01bfc000 0 0x2000>, + <0 0x01bfe000 0 0x2000>; clocks = <&gcc GCC_PCIE_6A_PHY_AUX_CLK>, <&gcc GCC_PCIE_6A_CFG_AHB_CLK>, - <&rpmhcc RPMH_CXO_CLK>, + <&tcsr TCSR_PCIE_4L_CLKREF_EN>, <&gcc GCC_PCIE_6A_PHY_RCHNG_CLK>, - <&gcc GCC_PCIE_6A_PIPE_CLK>; + <&gcc GCC_PCIE_6A_PIPE_CLK>, + <&gcc GCC_PCIE_6A_PIPEDIV2_CLK>; clock-names = "aux", "cfg_ahb", "ref", "rchng", - "pipe"; + "pipe", + "pipediv2"; resets = <&gcc GCC_PCIE_6A_PHY_BCR>, <&gcc GCC_PCIE_6A_NOCSR_COM_PHY_BCR>; @@ -3021,6 +3024,8 @@ pcie6a_phy: phy@1bfc000 { power-domains = <&gcc GCC_PCIE_6_PHY_GDSC>; + qcom,4ln-config-sel = <&tcsr 0x1a000 0>; + #clock-cells = <0>; clock-output-names = "pcie6a_pipe_clk"; @@ -3097,7 +3102,7 @@ pcie5: pci@1c00000 { assigned-clocks = <&gcc GCC_PCIE_5_AUX_CLK>; assigned-clock-rates = <19200000>; - interconnects = <&pcie_south_anoc MASTER_PCIE_5 QCOM_ICC_TAG_ALWAYS + interconnects = <&pcie_north_anoc MASTER_PCIE_5 QCOM_ICC_TAG_ALWAYS &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS &cnoc_main SLAVE_PCIE_5 QCOM_ICC_TAG_ALWAYS>; @@ -3124,14 +3129,16 @@ pcie5_phy: phy@1c06000 { clocks = <&gcc GCC_PCIE_5_AUX_CLK>, <&gcc GCC_PCIE_5_CFG_AHB_CLK>, - <&rpmhcc RPMH_CXO_CLK>, + <&tcsr TCSR_PCIE_2L_5_CLKREF_EN>, <&gcc GCC_PCIE_5_PHY_RCHNG_CLK>, - <&gcc GCC_PCIE_5_PIPE_CLK>; + <&gcc GCC_PCIE_5_PIPE_CLK>, + <&gcc GCC_PCIE_5_PIPEDIV2_CLK>; clock-names = "aux", "cfg_ahb", "ref", "rchng", - "pipe"; + "pipe", + "pipediv2"; resets = <&gcc GCC_PCIE_5_PHY_BCR>; reset-names = "phy"; @@ -3166,8 +3173,8 @@ pcie4: pci@1c08000 { "mhi"; #address-cells = <3>; #size-cells = <2>; - ranges = <0x01000000 0 0x00000000 0 0x7c200000 0 0x100000>, - <0x02000000 0 0x7c300000 0 0x7c300000 0 0x3d00000>; + ranges = <0x01000000 0x0 0x00000000 0x0 0x7c200000 0x0 0x100000>, + <0x02000000 0x0 0x7c300000 0x0 0x7c300000 0x0 0x1d00000>; bus-range = <0x00 0xff>; dma-coherent; @@ -3217,7 +3224,7 @@ pcie4: pci@1c08000 { assigned-clocks = <&gcc GCC_PCIE_4_AUX_CLK>; 
assigned-clock-rates = <19200000>; - interconnects = <&pcie_south_anoc MASTER_PCIE_4 QCOM_ICC_TAG_ALWAYS + interconnects = <&pcie_north_anoc MASTER_PCIE_4 QCOM_ICC_TAG_ALWAYS &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS &cnoc_main SLAVE_PCIE_4 QCOM_ICC_TAG_ALWAYS>; @@ -3254,14 +3261,16 @@ pcie4_phy: phy@1c0e000 { clocks = <&gcc GCC_PCIE_4_AUX_CLK>, <&gcc GCC_PCIE_4_CFG_AHB_CLK>, - <&rpmhcc RPMH_CXO_CLK>, + <&tcsr TCSR_PCIE_2L_4_CLKREF_EN>, <&gcc GCC_PCIE_4_PHY_RCHNG_CLK>, - <&gcc GCC_PCIE_4_PIPE_CLK>; + <&gcc GCC_PCIE_4_PIPE_CLK>, + <&gcc GCC_PCIE_4_PIPEDIV2_CLK>; clock-names = "aux", "cfg_ahb", "ref", "rchng", - "pipe"; + "pipe", + "pipediv2"; resets = <&gcc GCC_PCIE_4_PHY_BCR>; reset-names = "phy"; @@ -6084,7 +6093,8 @@ system-cache-controller@25000000 { <0 0x25a00000 0 0x200000>, <0 0x25c00000 0 0x200000>, <0 0x25e00000 0 0x200000>, - <0 0x26000000 0 0x200000>; + <0 0x26000000 0 0x200000>, + <0 0x26200000 0 0x200000>; reg-names = "llcc0_base", "llcc1_base", "llcc2_base", @@ -6093,7 +6103,8 @@ system-cache-controller@25000000 { "llcc5_base", "llcc6_base", "llcc7_base", - "llcc_broadcast_base"; + "llcc_broadcast_base", + "llcc_broadcast_and_base"; interrupts = ; }; diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi index bb1aea82e666..b7163ed74232 100644 --- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi @@ -66,7 +66,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; mmc-hs200-1_8v; - supports-emmc; mmc-pwrseq = <&emmc_pwrseq>; non-removable; vmmc-supply = <&vcc_3v3>; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index 9232357f4fec..d9e191ad1d77 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -36,14 +36,14 @@ leds { power_led: led-0 { label = "firefly:red:power"; - linux,default-trigger = "ir-power-click"; + linux,default-trigger = "default-on"; default-state = "on"; gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>; }; user_led: led-1 { label = "firefly:blue:user"; - linux,default-trigger = "ir-user-click"; + linux,default-trigger = "rc-feedback"; default-state = "off"; gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_HIGH>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts index cb81ba3f23ff..4b9ced67742d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s-plus.dts @@ -24,9 +24,7 @@ &emmc { disable-wp; mmc-hs200-1_8v; non-removable; - num-slots = <1>; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; - supports-emmc; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 16b4faa22e4f..c01a4cad48f3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -754,8 +754,7 @@ hdmi: hdmi@ff3c0000 { compatible = "rockchip,rk3328-dw-hdmi"; reg = <0x0 0xff3c0000 0x0 0x20000>; reg-io-width = <4>; - interrupts = , - ; + interrupts = ; clocks = <&cru PCLK_HDMI>, <&cru SCLK_HDMI_SFC>, <&cru SCLK_RTC32K>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index 8ac8acf4082d..ab3fda69a1fb 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ 
-61,7 +61,6 @@ i2c_lvds_blc: i2c@0 { fan: fan@18 { compatible = "ti,amc6821"; reg = <0x18>; - #cooling-cells = <2>; }; rtc_twi: rtc@6f { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts index 1489eb32e266..4feb78797982 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts @@ -541,7 +541,7 @@ &i2c1 { status = "okay"; rt5651: audio-codec@1a { - compatible = "rockchip,rt5651"; + compatible = "realtek,rt5651"; reg = <0x1a>; clocks = <&cru SCLK_I2S_8CH_OUT>; clock-names = "mclk"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts index 1a44582a49fb..09a016ea8c76 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts @@ -166,7 +166,6 @@ vcc1v8_lcd: vcc1v8-lcd { regulator-max-microvolt = <1800000>; vin-supply = <&vcc3v3_sys>; gpio = <&gpio3 RK_PA5 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; }; /* MIPI DSI panel 2.8v supply */ @@ -178,7 +177,6 @@ vcc2v8_lcd: vcc2v8-lcd { regulator-max-microvolt = <2800000>; vin-supply = <&vcc3v3_sys>; gpio = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; }; vibrator { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts index 7ba1c28f70a9..2f06bfdd70bf 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts @@ -114,7 +114,6 @@ &i2c1 { es8388: es8388@11 { compatible = "everest,es8388"; reg = <0x11>; - clock-names = "mclk"; clocks = <&cru SCLK_I2S_8CH_OUT>; #sound-dai-cells = <0>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi index 8146f870d2bd..ab890e7b6c59 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi @@ -576,7 +576,7 @@ &uart0 { bluetooth { compatible = "brcm,bcm43438-bt"; clocks = <&rk808 1>; - clock-names = "ext_clock"; + clock-names = "txco"; device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>; host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>; shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index dbec2b7173a0..31ea3d0182c0 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -163,7 +163,7 @@ &i2c1 { status = "okay"; rt5651: rt5651@1a { - compatible = "rockchip,rt5651"; + compatible = "realtek,rt5651"; reg = <0x1a>; clocks = <&cru SCLK_I2S_8CH_OUT>; clock-names = "mclk"; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts index a73cf30801ec..9816a4ed4599 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353p.dts @@ -92,7 +92,7 @@ button-r2 { }; &i2c2 { - pintctrl-names = "default"; + pinctrl-names = "default"; pinctrl-0 = <&i2c2m1_xfer>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts index e9954a33e8cd..a79a5614bcc8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts +++ 
b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353v.dts @@ -79,7 +79,7 @@ button-r2 { }; &i2c2 { - pintctrl-names = "default"; + pinctrl-names = "default"; pinctrl-0 = <&i2c2m1_xfer>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts index 0c18406e4c59..7d4680933823 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts @@ -449,9 +449,9 @@ &uart1 { bluetooth { compatible = "brcm,bcm43438-bt"; clocks = <&pmucru CLK_RTC_32K>; - clock-names = "ext_clock"; - device-wake-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>; - host-wake-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; + clock-names = "txco"; + device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts index c1194d1e438d..9a2f59a351de 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts @@ -507,7 +507,6 @@ &sdhci { non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>; - supports-emmc; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi index ae2536c65a83..0131f2cdd312 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi @@ -684,11 +684,11 @@ bluetooth { compatible = "brcm,bcm43438-bt"; clocks = <&rk817 1>; clock-names = "lpo"; - device-wake-gpios = <&gpio0 RK_PC2 GPIO_ACTIVE_HIGH>; - host-wake-gpios = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_LOW>; + device-wakeup-gpios = <&gpio0 RK_PC2 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>; pinctrl-0 = <&bt_enable_h>, <&bt_host_wake_l>, <&bt_wake_h>; pinctrl-names = "default"; + shutdown-gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>; vbat-supply = <&vcc_wl>; vddio-supply = <&vcca_1v8_pmu>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3.dtsi index 45de2630bb50..1e36f73840da 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3.dtsi @@ -402,9 +402,9 @@ bluetooth { clock-names = "lpo"; device-wakeup-gpios = <&gpio2 RK_PB2 GPIO_ACTIVE_HIGH>; host-wakeup-gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&bt_host_wake_h &bt_reg_on_h &bt_wake_host_h>; + shutdown-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>; vbat-supply = <&vcc_3v3>; vddio-supply = <&vcc_1v8>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts index a3112d5df200..b505a4537ee8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts @@ -589,7 +589,6 @@ &sdhci { non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>; - supports-emmc; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3568-roc-pc.dts index e333449ead04..2fa89a0eeafc 100644 --- 
a/arch/arm64/boot/dts/rockchip/rk3568-roc-pc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-roc-pc.dts @@ -272,7 +272,6 @@ vdd_logic: DCDC_REG1 { regulator-name = "vdd_logic"; regulator-always-on; regulator-boot-on; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -285,7 +284,6 @@ regulator-state-mem { vdd_gpu: DCDC_REG2 { regulator-name = "vdd_gpu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; @@ -309,7 +307,6 @@ regulator-state-mem { vdd_npu: DCDC_REG4 { regulator-name = "vdd_npu"; - regulator-init-microvolt = <900000>; regulator-initial-mode = <0x2>; regulator-min-microvolt = <500000>; regulator-max-microvolt = <1350000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi index d97d84b88837..fc67585b64b7 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi @@ -337,15 +337,19 @@ l2_cache_b3: l2-cache-b3 { cache-unified; next-level-cache = <&l3_cache>; }; + }; - l3_cache: l3-cache { - compatible = "cache"; - cache-size = <3145728>; - cache-line-size = <64>; - cache-sets = <4096>; - cache-level = <3>; - cache-unified; - }; + /* + * The L3 cache belongs to the DynamIQ Shared Unit (DSU), + * so it's represented here, outside the "cpus" node + */ + l3_cache: l3-cache { + compatible = "cache"; + cache-size = <3145728>; + cache-line-size = <64>; + cache-sets = <4096>; + cache-level = <3>; + cache-unified; }; display_subsystem: display-subsystem { diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts index c3a6812cc93a..dd4c79bcad87 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts @@ -328,7 +328,6 @@ es8388: audio-codec@11 { compatible = "everest,es8388"; reg = <0x11>; clocks = <&cru I2S0_8CH_MCLKOUT>; - clock-names = "mclk"; AVDD-supply = <&vcc_1v8_s0>; DVDD-supply = <&vcc_1v8_s0>; HPVDD-supply = <&vcc_3v3_s0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts index e4a20cda65ed..b38dab009ccc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts @@ -316,7 +316,6 @@ es8388: audio-codec@11 { assigned-clocks = <&cru I2S0_8CH_MCLKOUT>; assigned-clock-rates = <12288000>; clocks = <&cru I2S0_8CH_MCLKOUT>; - clock-names = "mclk"; AVDD-supply = <&avcc_1v8_codec_s0>; DVDD-supply = <&avcc_1v8_codec_s0>; HPVDD-supply = <&vcc_3v3_s0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts index 966bbc582d89..6bd06e46a101 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts @@ -304,12 +304,12 @@ package_fan1: package-fan1 { }; cooling-maps { - map1 { + map0 { trip = <&package_fan0>; cooling-device = <&fan THERMAL_NO_LIMIT 1>; }; - map2 { + map1 { trip = <&package_fan1>; cooling-device = <&fan 2 THERMAL_NO_LIMIT>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts index d0021524e7f9..328dcb894ccb 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts @@ -428,7 
+428,6 @@ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 { regulator-boot-on; regulator-min-microvolt = <550000>; regulator-max-microvolt = <950000>; - regulator-init-microvolt = <750000>; regulator-ramp-delay = <12500>; regulator-state-mem { diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi index dbaa94ca69f4..432133251e31 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi @@ -296,6 +296,7 @@ pmic@0 { pinctrl-names = "default"; pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>, <&rk806_dvs2_null>, <&rk806_dvs3_null>; + system-power-controller; vcc1-supply = <&vcc5v0_sys>; vcc2-supply = <&vcc5v0_sys>; diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts index d8c50fdcca3b..8ba111d9283f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts @@ -377,7 +377,6 @@ es8388: audio-codec@11 { assigned-clock-rates = <12288000>; assigned-clocks = <&cru I2S0_8CH_MCLKOUT>; AVDD-supply = <&vcc_3v3_s3>; - clock-names = "mclk"; clocks = <&cru I2S0_8CH_MCLKOUT>; DVDD-supply = <&vcc_1v8_s3>; HPVDD-supply = <&vcc_3v3_s3>; diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index b36a3b6cc011..67afac659231 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -178,6 +178,7 @@ struct kvm_nvhe_init_params { unsigned long hcr_el2; unsigned long vttbr; unsigned long vtcr; + unsigned long tmp; }; /* diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 94cff508874b..bf64fed9820e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -51,6 +51,7 @@ #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) +#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8) #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) @@ -211,6 +212,12 @@ struct kvm_s2_mmu { */ bool nested_stage2_enabled; + /* + * true when this MMU needs to be unmapped before being used for a new + * purpose. + */ + bool pending_unmap; + /* * 0: Nobody is currently using this, check vttbr for validity * >0: Somebody is actively using this. 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index cd4087fbda9a..66d93e320ec8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -166,7 +166,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr); void __init free_hyp_pgds(void); -void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size); +void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, + u64 size, bool may_block); void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end); void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end); diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index e8bc6d67aba2..233e65522716 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -78,6 +78,8 @@ extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid, extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu); extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu); +extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu); + struct kvm_s2_trans { phys_addr_t output; unsigned long block_size; @@ -124,7 +126,7 @@ extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans); extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2); extern void kvm_nested_s2_wp(struct kvm *kvm); -extern void kvm_nested_s2_unmap(struct kvm *kvm); +extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block); extern void kvm_nested_s2_flush(struct kvm *kvm); unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val); diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 9e39217b4afb..798d965760d4 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -6,6 +6,8 @@ #ifndef BUILD_VDSO #include +#include +#include #include static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, @@ -31,19 +33,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) -static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, + unsigned long flags) { /* * Only allow MTE on anonymous mappings as these are guaranteed to be * backed by tags-capable memory. The vm_flags may be overridden by a * filesystem supporting MTE (RAM-based). 
*/ - if (system_supports_mte() && (flags & MAP_ANONYMOUS)) + if (system_supports_mte() && + ((flags & MAP_ANONYMOUS) || shmem_file(file))) return VM_MTE_ALLOWED; return 0; } -#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) static inline bool arch_validate_prot(unsigned long prot, unsigned long addr __always_unused) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 5fc3af9f8f29..341174bf9106 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -26,10 +26,6 @@ void update_freq_counters_refs(void); #define arch_scale_freq_invariant topology_scale_freq_invariant #define arch_scale_freq_ref topology_get_freq_ref -#ifdef CONFIG_ACPI_CPPC_LIB -#define arch_init_invariance_cppc topology_init_cpu_capacity_cppc -#endif - /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h index 2b09495499c6..014b02897f8e 100644 --- a/arch/arm64/include/asm/uprobes.h +++ b/arch/arm64/include/asm/uprobes.h @@ -10,11 +10,9 @@ #include #include -#define MAX_UINSN_BYTES AARCH64_INSN_SIZE - #define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES) #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE -#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES +#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE typedef __le32 uprobe_opcode_t; @@ -23,8 +21,8 @@ struct arch_uprobe_task { struct arch_uprobe { union { - u8 insn[MAX_UINSN_BYTES]; - u8 ixol[MAX_UINSN_BYTES]; + __le32 insn; + __le32 ixol; }; struct arch_probe_insn api; bool simulate; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 27de1dddb0ab..b21dd24b8efc 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -146,6 +146,7 @@ int main(void) DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2)); DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr)); DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr)); + DEFINE(NVHE_INIT_TMP, offsetof(struct kvm_nvhe_init_params, tmp)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 77006df20a75..6d21971ae559 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1367,6 +1367,7 @@ static void sve_init_regs(void) } else { fpsimd_to_sve(current); current->thread.fp_type = FP_STATE_SVE; + fpsimd_flush_task_state(current); } } diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c index 968d5fffe233..3496d6169e59 100644 --- a/arch/arm64/kernel/probes/decode-insn.c +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api) aarch64_insn_is_blr(insn) || aarch64_insn_is_ret(insn)) { api->handler = simulate_br_blr_ret; - } else if (aarch64_insn_is_ldr_lit(insn)) { - api->handler = simulate_ldr_literal; - } else if (aarch64_insn_is_ldrsw_lit(insn)) { - api->handler = simulate_ldrsw_literal; } else { /* * Instruction cannot be stepped out-of-line and we don't @@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) probe_opcode_t insn = le32_to_cpu(*addr); probe_opcode_t *scan_end = NULL; unsigned long size = 0, offset = 0; + struct 
arch_probe_insn *api = &asi->api; + + if (aarch64_insn_is_ldr_lit(insn)) { + api->handler = simulate_ldr_literal; + decoded = INSN_GOOD_NO_SLOT; + } else if (aarch64_insn_is_ldrsw_lit(insn)) { + api->handler = simulate_ldrsw_literal; + decoded = INSN_GOOD_NO_SLOT; + } else { + decoded = arm_probe_decode_insn(insn, &asi->api); + } /* * If there's a symbol defined in front of and near enough to @@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) else scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; } - decoded = arm_probe_decode_insn(insn, &asi->api); if (decoded != INSN_REJECTED && scan_end) if (is_probed_address_atomic(addr - 1, scan_end)) diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c index 22d0b3252476..b65334ab79d2 100644 --- a/arch/arm64/kernel/probes/simulate-insn.c +++ b/arch/arm64/kernel/probes/simulate-insn.c @@ -171,17 +171,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs) void __kprobes simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs) { - u64 *load_addr; + unsigned long load_addr; int xn = opcode & 0x1f; - int disp; - disp = ldr_displacement(opcode); - load_addr = (u64 *) (addr + disp); + load_addr = addr + ldr_displacement(opcode); if (opcode & (1 << 30)) /* x0-x30 */ - set_x_reg(regs, xn, *load_addr); + set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr)); else /* w0-w30 */ - set_w_reg(regs, xn, *load_addr); + set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr)); instruction_pointer_set(regs, instruction_pointer(regs) + 4); } @@ -189,14 +187,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs) void __kprobes simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs) { - s32 *load_addr; + unsigned long load_addr; int xn = opcode & 0x1f; - int disp; - disp = ldr_displacement(opcode); - load_addr = (s32 *) (addr + disp); + load_addr = addr + ldr_displacement(opcode); - set_x_reg(regs, xn, *load_addr); + set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr)); instruction_pointer_set(regs, instruction_pointer(regs) + 4); } diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index d49aef2657cd..a2f137a595fc 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE)) return -EINVAL; - insn = *(probe_opcode_t *)(&auprobe->insn[0]); + insn = le32_to_cpu(auprobe->insn); switch (arm_probe_decode_insn(insn, &auprobe->api)) { case INSN_REJECTED: @@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) if (!auprobe->simulate) return false; - insn = *(probe_opcode_t *)(&auprobe->insn[0]); + insn = le32_to_cpu(auprobe->insn); addr = instruction_pointer(regs); if (auprobe->api.handler) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 0540653fbf38..3e7c8c8195c3 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -412,6 +412,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.cpu_context.x19 = (unsigned long)args->fn; p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; + + if (system_supports_poe()) + p->thread.por_el0 = POR_EL0_INIT; } p->thread.cpu_context.pc = (unsigned long)ret_from_fork; p->thread.cpu_context.sp = (unsigned long)childregs; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c 
index 561986947530..c7d311d8b92a 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -66,10 +67,63 @@ struct rt_sigframe_user_layout { unsigned long end_offset; }; +/* + * Holds any EL0-controlled state that influences unprivileged memory accesses. + * This includes both accesses done in userspace and uaccess done in the kernel. + * + * This state needs to be carefully managed to ensure that it doesn't cause + * uaccess to fail when setting up the signal frame, and the signal handler + * itself also expects a well-defined state when entered. + */ +struct user_access_state { + u64 por_el0; +}; + #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16) #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) +/* + * Save the user access state into ua_state and reset it to disable any + * restrictions. + */ +static void save_reset_user_access_state(struct user_access_state *ua_state) +{ + if (system_supports_poe()) { + u64 por_enable_all = 0; + + for (int pkey = 0; pkey < arch_max_pkey(); pkey++) + por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY); + + ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0); + write_sysreg_s(por_enable_all, SYS_POR_EL0); + /* Ensure that any subsequent uaccess observes the updated value */ + isb(); + } +} + +/* + * Set the user access state for invoking the signal handler. + * + * No uaccess should be done after that function is called. + */ +static void set_handler_user_access_state(void) +{ + if (system_supports_poe()) + write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); +} + +/* + * Restore the user access state to the values saved in ua_state. + * + * No uaccess should be done after that function is called. 
+ */ +static void restore_user_access_state(const struct user_access_state *ua_state) +{ + if (system_supports_poe()) + write_sysreg_s(ua_state->por_el0, SYS_POR_EL0); +} + static void init_user_layout(struct rt_sigframe_user_layout *user) { const size_t reserved_size = @@ -261,18 +315,20 @@ static int restore_fpmr_context(struct user_ctxs *user) return err; } -static int preserve_poe_context(struct poe_context __user *ctx) +static int preserve_poe_context(struct poe_context __user *ctx, + const struct user_access_state *ua_state) { int err = 0; __put_user_error(POE_MAGIC, &ctx->head.magic, err); __put_user_error(sizeof(*ctx), &ctx->head.size, err); - __put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err); + __put_user_error(ua_state->por_el0, &ctx->por_el0, err); return err; } -static int restore_poe_context(struct user_ctxs *user) +static int restore_poe_context(struct user_ctxs *user, + struct user_access_state *ua_state) { u64 por_el0; int err = 0; @@ -282,7 +338,7 @@ static int restore_poe_context(struct user_ctxs *user) __get_user_error(por_el0, &(user->poe->por_el0), err); if (!err) - write_sysreg_s(por_el0, SYS_POR_EL0); + ua_state->por_el0 = por_el0; return err; } @@ -850,7 +906,8 @@ static int parse_user_sigframe(struct user_ctxs *user, } static int restore_sigframe(struct pt_regs *regs, - struct rt_sigframe __user *sf) + struct rt_sigframe __user *sf, + struct user_access_state *ua_state) { sigset_t set; int i, err; @@ -899,7 +956,7 @@ static int restore_sigframe(struct pt_regs *regs, err = restore_zt_context(&user); if (err == 0 && system_supports_poe() && user.poe) - err = restore_poe_context(&user); + err = restore_poe_context(&user, ua_state); return err; } @@ -908,6 +965,7 @@ SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; + struct user_access_state ua_state; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; @@ -924,12 +982,14 @@ SYSCALL_DEFINE0(rt_sigreturn) if (!access_ok(frame, sizeof (*frame))) goto badframe; - if (restore_sigframe(regs, frame)) + if (restore_sigframe(regs, frame, &ua_state)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + restore_user_access_state(&ua_state); + return regs->regs[0]; badframe: @@ -1035,7 +1095,8 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, } static int setup_sigframe(struct rt_sigframe_user_layout *user, - struct pt_regs *regs, sigset_t *set) + struct pt_regs *regs, sigset_t *set, + const struct user_access_state *ua_state) { int i, err = 0; struct rt_sigframe __user *sf = user->sigframe; @@ -1097,10 +1158,9 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user, struct poe_context __user *poe_ctx = apply_user_offset(user, user->poe_offset); - err |= preserve_poe_context(poe_ctx); + err |= preserve_poe_context(poe_ctx, ua_state); } - /* ZA state if present */ if (system_supports_sme() && err == 0 && user->za_offset) { struct za_context __user *za_ctx = @@ -1237,9 +1297,6 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, sme_smstop(); } - if (system_supports_poe()) - write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); - if (ka->sa.sa_flags & SA_RESTORER) sigtramp = ka->sa.sa_restorer; else @@ -1253,6 +1310,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, { struct rt_sigframe_user_layout user; struct rt_sigframe __user *frame; + struct user_access_state ua_state; int err = 0; 
fpsimd_signal_preserve_current_state(); @@ -1260,13 +1318,14 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, if (get_sigframe(&user, ksig, regs)) return 1; + save_reset_user_access_state(&ua_state); frame = user.sigframe; __put_user_error(0, &frame->uc.uc_flags, err); __put_user_error(NULL, &frame->uc.uc_link, err); err |= __save_altstack(&frame->uc.uc_stack, regs->sp); - err |= setup_sigframe(&user, regs, set); + err |= setup_sigframe(&user, regs, set, &ua_state); if (err == 0) { setup_return(regs, &ksig->ka, &user, usig); if (ksig->ka.sa.sa_flags & SA_SIGINFO) { @@ -1276,6 +1335,11 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, } } + if (err == 0) + set_handler_user_access_state(); + else + restore_user_access_state(&ua_state); + return err; } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index 487381164ff6..2def9d0dd3dd 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -7,48 +7,19 @@ #include #include -#include - -/* - * If we have SMCCC v1.3 and (as is likely) no SVE state in - * the registers then set the SMCCC hint bit to say there's no - * need to preserve it. Do this by directly adjusting the SMCCC - * function value which is already stored in x0 ready to be called. - */ -SYM_FUNC_START(__arm_smccc_sve_check) - - ldr_l x16, smccc_has_sve_hint - cbz x16, 2f - - get_current_task x16 - ldr x16, [x16, #TSK_TI_FLAGS] - tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? - tbnz x16, #TIF_SVE, 2f // Does that state include SVE? - -1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT - -2: ret -SYM_FUNC_END(__arm_smccc_sve_check) -EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr - stp x29, x30, [sp, #-16]! - mov x29, sp -alternative_if ARM64_SVE - bl __arm_smccc_sve_check -alternative_else_nop_endif \instr #0 - ldr x4, [sp, #16] + ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] - ldr x4, [sp, #24] + ldr x4, [sp, #8] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] -1: ldp x29, x30, [sp], #16 - ret +1: ret .endm /* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a0d01c46e408..48cafb65d6ac 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -997,6 +997,9 @@ static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu) static int check_vcpu_requests(struct kvm_vcpu *vcpu) { if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) + return -EIO; + if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) kvm_vcpu_sleep(vcpu); @@ -1031,6 +1034,8 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_dirty_ring_check_request(vcpu)) return 0; + + check_nested_vcpu_requests(vcpu); } return 1; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index 401af1835be6..fc1866226067 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -24,28 +24,25 @@ .align 11 SYM_CODE_START(__kvm_hyp_init) - ventry __invalid // Synchronous EL2t - ventry __invalid // IRQ EL2t - ventry __invalid // FIQ EL2t - ventry __invalid // Error EL2t + ventry . // Synchronous EL2t + ventry . // IRQ EL2t + ventry . // FIQ EL2t + ventry . // Error EL2t - ventry __invalid // Synchronous EL2h - ventry __invalid // IRQ EL2h - ventry __invalid // FIQ EL2h - ventry __invalid // Error EL2h + ventry . // Synchronous EL2h + ventry . // IRQ EL2h + ventry . 
// FIQ EL2h + ventry . // Error EL2h ventry __do_hyp_init // Synchronous 64-bit EL1 - ventry __invalid // IRQ 64-bit EL1 - ventry __invalid // FIQ 64-bit EL1 - ventry __invalid // Error 64-bit EL1 + ventry . // IRQ 64-bit EL1 + ventry . // FIQ 64-bit EL1 + ventry . // Error 64-bit EL1 - ventry __invalid // Synchronous 32-bit EL1 - ventry __invalid // IRQ 32-bit EL1 - ventry __invalid // FIQ 32-bit EL1 - ventry __invalid // Error 32-bit EL1 - -__invalid: - b . + ventry . // Synchronous 32-bit EL1 + ventry . // IRQ 32-bit EL1 + ventry . // FIQ 32-bit EL1 + ventry . // Error 32-bit EL1 /* * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers. @@ -76,6 +73,13 @@ __do_hyp_init: eret SYM_CODE_END(__kvm_hyp_init) +SYM_CODE_START_LOCAL(__kvm_init_el2_state) + /* Initialize EL2 CPU state to sane values. */ + init_el2_state // Clobbers x0..x2 + finalise_el2_state + ret +SYM_CODE_END(__kvm_init_el2_state) + /* * Initialize the hypervisor in EL2. * @@ -102,9 +106,12 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) // TPIDR_EL2 is used to preserve x0 across the macro maze... isb msr tpidr_el2, x0 - init_el2_state - finalise_el2_state + str lr, [x0, #NVHE_INIT_TMP] + + bl __kvm_init_el2_state + mrs x0, tpidr_el2 + ldr lr, [x0, #NVHE_INIT_TMP] 1: ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] @@ -199,9 +206,8 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) 2: msr SPsel, #1 // We want to use SP_EL{1,2} - /* Initialize EL2 CPU state to sane values. */ - init_el2_state // Clobbers x0..x2 - finalise_el2_state + bl __kvm_init_el2_state + __init_el2_nvhe_prepare_eret /* Enable MMU, set vectors and stack. */ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 5763d979d8ca..ee6573befb81 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -317,7 +317,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) * to the guest, and hide SSBS so that the * guest stays protected. */ - if (cpus_have_final_cap(ARM64_SSBS)) + if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP)) break; fallthrough; case SPECTRE_UNAFFECTED: @@ -428,7 +428,7 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) * Convert the workaround level into an easy-to-compare number, where higher * values mean better protection. */ -static int get_kernel_wa_level(u64 regid) +static int get_kernel_wa_level(struct kvm_vcpu *vcpu, u64 regid) { switch (regid) { case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: @@ -449,7 +449,7 @@ static int get_kernel_wa_level(u64 regid) * don't have any FW mitigation if SSBS is there at * all times. 
*/ - if (cpus_have_final_cap(ARM64_SSBS)) + if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP)) return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; fallthrough; case SPECTRE_UNAFFECTED: @@ -486,7 +486,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: - val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; + val = get_kernel_wa_level(vcpu, reg->id) & KVM_REG_FEATURE_LEVEL_MASK; break; case KVM_REG_ARM_STD_BMAP: val = READ_ONCE(smccc_feat->std_bmap); @@ -588,7 +588,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (val & ~KVM_REG_FEATURE_LEVEL_MASK) return -EINVAL; - if (get_kernel_wa_level(reg->id) < val) + if (get_kernel_wa_level(vcpu, reg->id) < val) return -EINVAL; return 0; @@ -624,7 +624,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the * other way around. */ - if (get_kernel_wa_level(reg->id) < wa_level) + if (get_kernel_wa_level(vcpu, reg->id) < wa_level) return -EINVAL; return 0; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index a509b63bd4dd..0f7658aefa1a 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -328,9 +328,10 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 may_block)); } -void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) +void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, + u64 size, bool may_block) { - __unmap_stage2_range(mmu, start, size, true); + __unmap_stage2_range(mmu, start, size, may_block); } void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) @@ -1015,7 +1016,7 @@ static void stage2_unmap_memslot(struct kvm *kvm, if (!(vma->vm_flags & VM_PFNMAP)) { gpa_t gpa = addr + (vm_start - memslot->userspace_addr); - kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start); + kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, vm_end - vm_start, true); } hva = vm_end; } while (hva < reg_end); @@ -1042,7 +1043,7 @@ void stage2_unmap_vm(struct kvm *kvm) kvm_for_each_memslot(memslot, bkt, slots) stage2_unmap_memslot(kvm, memslot); - kvm_nested_s2_unmap(kvm); + kvm_nested_s2_unmap(kvm, true); write_unlock(&kvm->mmu_lock); mmap_read_unlock(current->mm); @@ -1912,7 +1913,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) (range->end - range->start) << PAGE_SHIFT, range->may_block); - kvm_nested_s2_unmap(kvm); + kvm_nested_s2_unmap(kvm, range->may_block); return false; } @@ -2179,8 +2180,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, phys_addr_t size = slot->npages << PAGE_SHIFT; write_lock(&kvm->mmu_lock); - kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size); - kvm_nested_s2_unmap(kvm); + kvm_stage2_unmap_range(&kvm->arch.mmu, gpa, size, true); + kvm_nested_s2_unmap(kvm, true); write_unlock(&kvm->mmu_lock); } diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index f9e30dd34c7a..c4b17d90fc49 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -632,9 +632,9 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu) /* Set the scene for the next search */ kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size; - /* Clear the old state */ + /* Make sure we don't forget to do the laundry */ if (kvm_s2_mmu_valid(s2_mmu)) - kvm_stage2_unmap_range(s2_mmu, 0, 
kvm_phys_size(s2_mmu)); + s2_mmu->pending_unmap = true; /* * The virtual VMID (modulo CnP) will be used as a key when matching @@ -650,6 +650,16 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu) out: atomic_inc(&s2_mmu->refcnt); + + /* + * Set the vCPU request to perform an unmap, even if the pending unmap + * originates from another vCPU. This guarantees that the MMU has been + * completely unmapped before any vCPU actually uses it, and allows + * multiple vCPUs to lend a hand with completing the unmap. + */ + if (s2_mmu->pending_unmap) + kvm_make_request(KVM_REQ_NESTED_S2_UNMAP, vcpu); + return s2_mmu; } @@ -663,6 +673,13 @@ void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu) void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu) { + /* + * The vCPU kept its reference on the MMU after the last put, keep + * rolling with it. + */ + if (vcpu->arch.hw_mmu) + return; + if (is_hyp_ctxt(vcpu)) { vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; } else { @@ -674,10 +691,18 @@ void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu) void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu) { - if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu)) { + /* + * Keep a reference on the associated stage-2 MMU if the vCPU is + * scheduling out and not in WFI emulation, suggesting it is likely to + * reuse the MMU sometime soon. + */ + if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI)) + return; + + if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu)) atomic_dec(&vcpu->arch.hw_mmu->refcnt); - vcpu->arch.hw_mmu = NULL; - } + + vcpu->arch.hw_mmu = NULL; } /* @@ -730,7 +755,7 @@ void kvm_nested_s2_wp(struct kvm *kvm) } } -void kvm_nested_s2_unmap(struct kvm *kvm) +void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block) { int i; @@ -740,7 +765,7 @@ void kvm_nested_s2_unmap(struct kvm *kvm) struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i]; if (kvm_s2_mmu_valid(mmu)) - kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu)); + kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block); } } @@ -1184,3 +1209,17 @@ int kvm_init_nv_sysregs(struct kvm *kvm) return 0; } + +void check_nested_vcpu_requests(struct kvm_vcpu *vcpu) +{ + if (kvm_check_request(KVM_REQ_NESTED_S2_UNMAP, vcpu)) { + struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; + + write_lock(&vcpu->kvm->mmu_lock); + if (mmu->pending_unmap) { + kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), true); + mmu->pending_unmap = false; + } + write_unlock(&vcpu->kvm->mmu_lock); + } +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index dad88e31f953..ff8c4e1b847e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1527,6 +1527,14 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2); + val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR); break; case SYS_ID_AA64PFR2_EL1: /* We only expose FPMR */ @@ -1550,7 +1558,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; break; case SYS_ID_AA64MMFR3_EL1: - val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE; + val &= ID_AA64MMFR3_EL1_TCRX | 
ID_AA64MMFR3_EL1_S1POE | + ID_AA64MMFR3_EL1_S1PIE; break; case SYS_ID_MMFR4_EL1: val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); @@ -1985,7 +1994,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) * one cache line. */ if (kvm_has_mte(vcpu->kvm)) - clidr |= 2 << CLIDR_TTYPE_SHIFT(loc); + clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc); __vcpu_sys_reg(vcpu, r->reg) = clidr; @@ -2376,7 +2385,19 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64PFR0_EL1_RAS | ID_AA64PFR0_EL1_AdvSIMD | ID_AA64PFR0_EL1_FP), }, - ID_SANITISED(ID_AA64PFR1_EL1), + ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR | + ID_AA64PFR1_EL1_DF2 | + ID_AA64PFR1_EL1_MTEX | + ID_AA64PFR1_EL1_THE | + ID_AA64PFR1_EL1_GCS | + ID_AA64PFR1_EL1_MTE_frac | + ID_AA64PFR1_EL1_NMI | + ID_AA64PFR1_EL1_RNDR_trap | + ID_AA64PFR1_EL1_SME | + ID_AA64PFR1_EL1_RES0 | + ID_AA64PFR1_EL1_MPAM_frac | + ID_AA64PFR1_EL1_RAS_frac | + ID_AA64PFR1_EL1_MTE)), ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR), ID_UNALLOCATED(4,3), ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), @@ -2390,7 +2411,21 @@ static const struct sys_reg_desc sys_reg_descs[] = { .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, .reset = read_sanitised_id_aa64dfr0_el1, - .val = ID_AA64DFR0_EL1_PMUVer_MASK | + /* + * Prior to FEAT_Debugv8.9, the architecture defines context-aware + * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs). + * KVM does not trap + emulate the breakpoint registers, and as such + * cannot support a layout that misaligns with the underlying hardware. + * While it may be possible to describe a subset that aligns with + * hardware, just prevent changes to BRPs and CTX_CMPs altogether for + * simplicity. + * + * See DDI0487K.a, section D2.8.3 Breakpoint types and linking + * of breakpoints for more details. + */ + .val = ID_AA64DFR0_EL1_DoubleLock_MASK | + ID_AA64DFR0_EL1_WRPs_MASK | + ID_AA64DFR0_EL1_PMUVer_MASK | ID_AA64DFR0_EL1_DebugVer_MASK, }, ID_SANITISED(ID_AA64DFR1_EL1), ID_UNALLOCATED(5,2), @@ -2433,6 +2468,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64MMFR2_EL1_NV | ID_AA64MMFR2_EL1_CCIDX)), ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | + ID_AA64MMFR3_EL1_S1PIE | ID_AA64MMFR3_EL1_S1POE)), ID_SANITISED(ID_AA64MMFR4_EL1), ID_UNALLOCATED(7,5), @@ -2903,7 +2939,7 @@ static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the * corresponding VMIDs. */ - kvm_nested_s2_unmap(vcpu->kvm); + kvm_nested_s2_unmap(vcpu->kvm, true); write_unlock(&vcpu->kvm->mmu_lock); @@ -2955,7 +2991,30 @@ union tlbi_info { static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu, const union tlbi_info *info) { - kvm_stage2_unmap_range(mmu, info->range.start, info->range.size); + /* + * The unmap operation is allowed to drop the MMU lock and block, which + * means that @mmu could be used for a different context than the one + * currently being invalidated. + * + * This behavior is still safe, as: + * + * 1) The vCPU(s) that recycled the MMU are responsible for invalidating + * the entire MMU before reusing it, which still honors the intent + * of a TLBI. + * + * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC + * and ERET to the guest), other vCPUs are allowed to use stale + * translations. + * + * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and + * at worst may cause more aborts for shadow stage-2 fills. 
+ * + * Dropping the MMU lock also implies that shadow stage-2 fills could + * happen behind the back of the TLBI. This is still safe, though, as + * the L1 needs to put its stage-2 in a consistent state before doing + * the TLBI. + */ + kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); } static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, @@ -3050,7 +3109,11 @@ static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu, max_size = compute_tlb_inval_range(mmu, info->ipa.addr); base_addr &= ~(max_size - 1); - kvm_stage2_unmap_range(mmu, base_addr, max_size); + /* + * See comment in s2_mmu_unmap_range() for why this is allowed to + * reschedule. + */ + kvm_stage2_unmap_range(mmu, base_addr, max_size, true); } static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index e7c53e8af3d1..48c952563e85 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -417,8 +417,28 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) kfree(vgic_cpu->private_irqs); vgic_cpu->private_irqs = NULL; - if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { + /* + * If this vCPU is being destroyed because of a failed creation + * then unregister the redistributor to avoid leaving behind a + * dangling pointer to the vCPU struct. + * + * vCPUs that have been successfully created (i.e. added to + * kvm->vcpu_array) get unregistered in kvm_vgic_destroy(), as + * this function gets called while holding kvm->arch.config_lock + * in the VM teardown path and would otherwise introduce a lock + * inversion w.r.t. kvm->srcu. + * + * vCPUs that failed creation are torn down outside of the + * kvm->arch.config_lock and do not get unregistered in + * kvm_vgic_destroy(), meaning it is both safe and necessary to + * do so here. + */ + if (kvm_get_vcpu_by_id(vcpu->kvm, vcpu->vcpu_id) != vcpu) + vgic_unregister_redist_iodev(vcpu); + vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; + } } void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -524,22 +544,31 @@ int kvm_vgic_map_resources(struct kvm *kvm) if (ret) goto out; - dist->ready = true; dist_base = dist->vgic_dist_base; mutex_unlock(&kvm->arch.config_lock); ret = vgic_register_dist_iodev(kvm, dist_base, type); - if (ret) + if (ret) { kvm_err("Unable to register VGIC dist MMIO regions\n"); + goto out_slots; + } + /* + * kvm_io_bus_register_dev() guarantees all readers see the new MMIO + * registration before returning through synchronize_srcu(), which also + * implies a full memory barrier. As such, marking the distributor as + * 'ready' here is guaranteed to be ordered after all vCPUs having seen + * a completely configured distributor. 
+ */ + dist->ready = true; goto out_slots; out: mutex_unlock(&kvm->arch.config_lock); out_slots: - mutex_unlock(&kvm->slots_lock); - if (ret) - kvm_vgic_destroy(kvm); + kvm_vm_dead(kvm); + + mutex_unlock(&kvm->slots_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 1d26bb5b02f4..5f4f57aaa23e 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -236,7 +236,12 @@ static int vgic_set_common_attr(struct kvm_device *dev, mutex_lock(&dev->kvm->arch.config_lock); - if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis) + /* + * Either userspace has already configured NR_IRQS or + * the vgic has already been initialized and vgic_init() + * supplied a default amount of SPIs. + */ + if (dev->kvm->arch.vgic.nr_spis) ret = -EBUSY; else dev->kvm->arch.vgic.nr_spis = diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 8bbd0b20136a..5db82bfc9dc1 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2220,7 +2220,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); + /* for the first pass, assume the worst case */ + if (!ctx->image) + ctx->idx += 4; + else + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_enter, ctx); } @@ -2264,7 +2268,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->ro_image + ctx->idx; - emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); + /* for the first pass, assume the worst case */ + if (!ctx->image) + ctx->idx += 4; + else + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_exit, ctx); } diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h index 6d5846dd075c..7657e016233f 100644 --- a/arch/loongarch/include/asm/bootinfo.h +++ b/arch/loongarch/include/asm/bootinfo.h @@ -26,6 +26,10 @@ struct loongson_board_info { #define NR_WORDS DIV_ROUND_UP(NR_CPUS, BITS_PER_LONG) +/* + * The "core" of cores_per_node and cores_per_package stands for a + * logical core, which means in a SMT system it stands for a thread. 
+ */ struct loongson_system_configuration { int nr_cpus; int nr_nodes; diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h index cd6084f4e153..c6bce5fbff57 100644 --- a/arch/loongarch/include/asm/kasan.h +++ b/arch/loongarch/include/asm/kasan.h @@ -16,7 +16,7 @@ #define XRANGE_SHIFT (48) /* Valid address length */ -#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) +#define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS) /* Used for taking out the valid address */ #define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0) /* One segment whole address space size */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 26542413a5b0..64ad277e096e 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -250,7 +250,7 @@ #define CSR_ESTAT_IS_WIDTH 15 #define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT) -#define LOONGARCH_CSR_ERA 0x6 /* ERA */ +#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */ #define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */ diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 4e2d6b7ca2ee..a7b9c9e73593 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -10,6 +10,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL #include static inline void pmd_populate_kernel(struct mm_struct *mm, @@ -44,6 +45,16 @@ extern void pagetable_init(void); extern pgd_t *pgd_alloc(struct mm_struct *mm); +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + pte_t *pte = __pte_alloc_one_kernel(mm); + + if (pte) + kernel_pte_init(pte); + + return pte; +} + #define __pte_free_tlb(tlb, pte, address) \ do { \ pagetable_pte_dtor(page_ptdesc(pte)); \ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 9965f52ef65b..20714b73f14c 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -269,6 +269,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm extern void pgd_init(void *addr); extern void pud_init(void *addr); extern void pmd_init(void *addr); +extern void kernel_pte_init(void *addr); /* * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that @@ -325,39 +326,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) { WRITE_ONCE(*ptep, pteval); - if (pte_val(pteval) & _PAGE_GLOBAL) { - pte_t *buddy = ptep_buddy(ptep); - /* - * Make sure the buddy is global too (if it's !none, - * it better already be global) - */ - if (pte_none(ptep_get(buddy))) { #ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need - * to do this atomically. 
- */ - __asm__ __volatile__( - __AMOR "$zero, %[global], %[buddy] \n" - : [buddy] "+ZB" (buddy->pte) - : [global] "r" (_PAGE_GLOBAL) - : "memory"); - - DBAR(0b11000); /* o_wrw = 0b11000 */ -#else /* !CONFIG_SMP */ - WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); -#endif /* CONFIG_SMP */ - } - } + if (pte_val(pteval) & _PAGE_GLOBAL) + DBAR(0b11000); /* o_wrw = 0b11000 */ +#endif } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - /* Preserve global status for the pair */ - if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); - else - set_pte(ptep, __pte(0)); + pte_t pte = ptep_get(ptep); + pte_val(pte) &= _PAGE_GLOBAL; + set_pte(ptep, pte); } #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index f2ff8b5d591e..6e58f65455c7 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -293,13 +293,15 @@ unsigned long stack_top(void) { unsigned long top = TASK_SIZE & PAGE_MASK; - /* Space for the VDSO & data page */ - top -= PAGE_ALIGN(current->thread.vdso->size); - top -= VVAR_SIZE; + if (current->thread.vdso) { + /* Space for the VDSO & data page */ + top -= PAGE_ALIGN(current->thread.vdso->size); + top -= VVAR_SIZE; - /* Space to randomize the VDSO base */ - if (current->flags & PF_RANDOMIZE) - top -= VDSO_RANDOMIZE_SIZE; + /* Space to randomize the VDSO base */ + if (current->flags & PF_RANDOMIZE) + top -= VDSO_RANDOMIZE_SIZE; + } return top; } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 00e307203ddb..cbd3c09a93c1 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -55,6 +55,7 @@ #define SMBIOS_FREQHIGH_OFFSET 0x17 #define SMBIOS_FREQLOW_MASK 0xFF #define SMBIOS_CORE_PACKAGE_OFFSET 0x23 +#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25 #define LOONGSON_EFI_ENABLE (1 << 3) unsigned long fw_arg0, fw_arg1, fw_arg2; @@ -125,7 +126,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm) cpu_clock_freq = freq_temp * 1000000; loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]); - loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET); + loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); pr_info("CpuClock = %llu\n", cpu_clock_freq); } diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index f9f4eb00c92e..c57b4134f3e8 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -555,6 +555,9 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs) #else unsigned int *pc; + if (regs->csr_prmd & CSR_PRMD_PIE) + local_irq_enable(); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); /* @@ -579,6 +582,8 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs) die_if_kernel("Kernel ale access", regs); force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); out: + if (regs->csr_prmd & CSR_PRMD_PIE) + local_irq_disable(); #endif irqentry_exit(regs, state); } diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index f6fcc52aefae..2c0d852ca536 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -34,7 +34,6 @@ static union { struct loongarch_vdso_data vdata; } loongarch_vdso_data __page_aligned_data; -static struct page *vdso_pages[] = { NULL }; struct vdso_data *vdso_data = generic_vdso_data.data; struct vdso_pcpu_data 
*vdso_pdata = loongarch_vdso_data.vdata.pdata; struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data; @@ -85,10 +84,8 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, struct loongarch_vdso_info vdso_info = { .vdso = vdso_start, - .size = PAGE_SIZE, .code_mapping = { .name = "[vdso]", - .pages = vdso_pages, .mremap = vdso_mremap, }, .data_mapping = { @@ -103,11 +100,14 @@ static int __init init_vdso(void) unsigned long i, cpu, pfn; BUG_ON(!PAGE_ALIGNED(vdso_info.vdso)); - BUG_ON(!PAGE_ALIGNED(vdso_info.size)); for_each_possible_cpu(cpu) vdso_pdata[cpu].node = cpu_to_node(cpu); + vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start); + vdso_info.code_mapping.pages = + kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL); + pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso)); for (i = 0; i < vdso_info.size / PAGE_SIZE; i++) vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 74a4b5c272d6..32dc213374be 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -161,10 +161,11 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) if (kvm_vcpu_is_blocking(vcpu)) { /* - * HRTIMER_MODE_PINNED is suggested since vcpu may run in - * the same physical cpu in next time + * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in + * the same physical cpu in next time, and the timer should run + * in hardirq context even in the PREEMPT_RT case. */ - hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD); } } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 0697b1064251..174734a23d0a 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1457,7 +1457,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.vpid = 0; vcpu->arch.flush_gpa = INVALID_GPA; - hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); vcpu->arch.swtimer.function = kvm_swtimer_wakeup; vcpu->arch.handle_exit = kvm_handle_exit; diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 8a87a482c8f4..188b52bbb254 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -201,7 +201,9 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pte) panic("%s: Failed to allocate memory\n", __func__); + pmd_populate_kernel(&init_mm, pmd, pte); + kernel_pte_init(pte); } return pte_offset_kernel(pmd, addr); diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index eb6a29b491a7..3fa69b23ff84 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -116,6 +116,26 @@ void pud_init(void *addr) EXPORT_SYMBOL_GPL(pud_init); #endif +void kernel_pte_init(void *addr) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + + do { + p[0] = _PAGE_GLOBAL; + p[1] = _PAGE_GLOBAL; + p[2] = _PAGE_GLOBAL; + p[3] = _PAGE_GLOBAL; + p[4] = _PAGE_GLOBAL; + p += 8; + p[-3] = _PAGE_GLOBAL; + p[-2] = _PAGE_GLOBAL; + p[-1] = _PAGE_GLOBAL; + } while (p != end); +} + pmd_t mk_pmd(struct page *page, pgprot_t prot) { pmd_t pmd; diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index e974a4954df8..c371def2302d 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -102,3 +102,4 @@ unsigned long 
__cmpxchg_small(volatile void *ptr, unsigned long old, return old; } } +EXPORT_SYMBOL(__cmpxchg_small); diff --git a/arch/parisc/include/asm/mman.h index 89b6beeda0b8..663f587dc789 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -2,6 +2,7 @@ #ifndef __ASM_MMAN_H__ #define __ASM_MMAN_H__ +#include #include /* PARISC cannot allow mdwe as it needs writable stacks */ @@ -11,7 +12,7 @@ static inline bool arch_memory_deny_write_exec_supported(void) } #define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported -static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags) { /* * The stack on parisc grows upwards, so if userspace requests memory @@ -23,6 +24,6 @@ return 0; } -#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) #endif /* __ASM_MMAN_H__ */ diff --git a/arch/powerpc/kernel/head_8xx.S index 811a7130505c..56c5ebe21b99 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -494,6 +494,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ bctr /* jump into table */ 152: mfdar r11 + mtdar r10 mtctr r11 /* restore ctr reg from DAR */ mfspr r11, SPRN_SPRG_THREAD stw r10, DAR(r11) diff --git a/arch/powerpc/kvm/book3s_hv.c index ba0492f9de65..ad8dc4ccdaab 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4898,6 +4898,18 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, BOOK3S_INTERRUPT_EXTERNAL, 0); else lpcr |= LPCR_MER; + } else { + /* + * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit + * unexpectedly set - e.g. during NMI handling when all register + * states are synchronized from L0 to L1. L1 needs to inform L0 about + * MER=1 only when there are pending external interrupts. + * In the above if check, the MER bit is set if there are pending + * external interrupts. Hence, explicitly mask off the MER bit + * here as otherwise it may generate spurious interrupts in L2 KVM + * causing an endless loop, which results in the L2 guest hanging. 
+ */ + lpcr &= ~LPCR_MER; } } else if (vcpu->arch.pending_exceptions || vcpu->arch.doorbell_request || diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 56a1f7ce78d2..d92759c21fae 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -282,6 +282,7 @@ int __init opal_event_init(void) name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); + kfree(name); continue; } } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 62545946ecf4..f4c570538d55 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -177,7 +177,7 @@ config RISCV select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RETHOOK if !XIP_KERNEL select HAVE_RSEQ - select HAVE_RUST if RUSTC_SUPPORTS_RISCV + select HAVE_RUST if RUSTC_SUPPORTS_RISCV && CC_IS_CLANG select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_STACKPROTECTOR diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi index 4e5fa6591623..e62ac51ac55a 100644 --- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi +++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi @@ -112,7 +112,7 @@ port0a: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; - snps,nr-gpios = <32>; + ngpios = <32>; reg = <0>; interrupt-controller; #interrupt-cells = <2>; @@ -134,7 +134,7 @@ port1a: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; - snps,nr-gpios = <32>; + ngpios = <32>; reg = <0>; interrupt-controller; #interrupt-cells = <2>; @@ -156,7 +156,7 @@ port2a: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; - snps,nr-gpios = <32>; + ngpios = <32>; reg = <0>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi index c7771b3b6475..d6c55f1cc96a 100644 --- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi +++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi @@ -128,7 +128,6 @@ &camss { assigned-clocks = <&ispcrg JH7110_ISPCLK_DOM4_APB_FUNC>, <&ispcrg JH7110_ISPCLK_MIPI_RX0_PXL>; assigned-clock-rates = <49500000>, <198000000>; - status = "okay"; ports { #address-cells = <1>; @@ -151,7 +150,6 @@ camss_from_csi2rx: endpoint { &csi2rx { assigned-clocks = <&ispcrg JH7110_ISPCLK_VIN_SYS>; assigned-clock-rates = <297000000>; - status = "okay"; ports { #address-cells = <1>; diff --git a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts index b720cdd15ed6..8e39fdc73ecb 100644 --- a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts +++ b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts @@ -44,8 +44,7 @@ &pcie1 { }; &phy0 { - rx-internal-delay-ps = <1900>; - tx-internal-delay-ps = <1500>; + rx-internal-delay-ps = <1500>; motorcomm,rx-clk-drv-microamp = <2910>; motorcomm,rx-data-drv-microamp = <2910>; motorcomm,tx-clk-adj-enabled; diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index 8a2739485123..f0da9d7b39c3 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -2,6 +2,12 @@ ifdef CONFIG_RELOCATABLE KBUILD_CFLAGS += -fno-pie endif +ifdef CONFIG_RISCV_ALTERNATIVE_EARLY +ifdef CONFIG_FORTIFY_SOURCE +KBUILD_CFLAGS += -D__NO_FORTIFY +endif +endif + obj-$(CONFIG_ERRATA_ANDES) += andes/ obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ obj-$(CONFIG_ERRATA_THEAD) += 
thead/ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7f88cc4931f5..69dc8aaab3fb 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -36,6 +36,11 @@ KASAN_SANITIZE_alternative.o := n KASAN_SANITIZE_cpufeature.o := n KASAN_SANITIZE_sbi_ecall.o := n endif +ifdef CONFIG_FORTIFY_SOURCE +CFLAGS_alternative.o += -D__NO_FORTIFY +CFLAGS_cpufeature.o += -D__NO_FORTIFY +CFLAGS_sbi_ecall.o += -D__NO_FORTIFY +endif endif extra-y += vmlinux.lds diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 6e0d333f57e5..2fd29695a788 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -210,7 +210,7 @@ void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) if (!size) return NULL; - return early_ioremap(phys, size); + return early_memremap(phys, size); } void __init __acpi_unmap_table(void __iomem *map, unsigned long size) @@ -218,7 +218,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) if (!map || !size) return; - early_iounmap(map, size); + early_memunmap(map, size); } void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index e94180ba432f..c2f3129a8e5c 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -4,8 +4,6 @@ * Copyright (C) 2017 SiFive */ -#define GENERATING_ASM_OFFSETS - #include #include #include diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index b320b1d9aa01..2d40736fc37c 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -80,8 +80,7 @@ int populate_cache_leaves(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; + struct device_node *np, *prev; int levels = 1, level = 1; if (!acpi_disabled) { @@ -105,6 +104,10 @@ int populate_cache_leaves(unsigned int cpu) return 0; } + np = of_cpu_device_node_get(cpu); + if (!np) + return -ENOENT; + if (of_property_read_bool(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); if (of_property_read_bool(np, "i-cache-size")) diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c index 28b58fc5ad19..a1e38ecfc8be 100644 --- a/arch/riscv/kernel/cpu-hotplug.c +++ b/arch/riscv/kernel/cpu-hotplug.c @@ -58,7 +58,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) if (cpu_ops->cpu_is_stopped) ret = cpu_ops->cpu_is_stopped(cpu); if (ret) - pr_warn("CPU%d may not have stopped: %d\n", cpu, ret); + pr_warn("CPU%u may not have stopped: %d\n", cpu, ret); } /* diff --git a/arch/riscv/kernel/efi-header.S b/arch/riscv/kernel/efi-header.S index 515b2dfbca75..c5f17c2710b5 100644 --- a/arch/riscv/kernel/efi-header.S +++ b/arch/riscv/kernel/efi-header.S @@ -64,7 +64,7 @@ extra_header_fields: .long efi_header_end - _start // SizeOfHeaders .long 0 // CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem - .short 0 // DllCharacteristics + .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics .quad 0 // SizeOfStackReserve .quad 0 // SizeOfStackCommit .quad 0 // SizeOfHeapReserve diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile index d5bf1bc7de62..81d69d45c06c 100644 --- a/arch/riscv/kernel/pi/Makefile +++ b/arch/riscv/kernel/pi/Makefile @@ -16,8 +16,12 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), 
$(KBUILD_CFLAGS)) KBUILD_CFLAGS += -mcmodel=medany CFLAGS_cmdline_early.o += -D__NO_FORTIFY -CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY CFLAGS_fdt_early.o += -D__NO_FORTIFY +# lib/string.c already defines __NO_FORTIFY +CFLAGS_ctype.o += -D__NO_FORTIFY +CFLAGS_lib-fdt.o += -D__NO_FORTIFY +CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY +CFLAGS_archrandom_early.o += -D__NO_FORTIFY $(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \ --remove-section=.note.gnu.property \ diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d4fd8af7aaf5..1b9867136b61 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -136,8 +136,6 @@ #define REG_PTR(insn, pos, regs) \ (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)) -#define GET_RM(insn) (((insn) >> 12) & 7) - #define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs)) #define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs)) #define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs)) diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 960feb1526ca..3f1c4b2d0b06 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -18,6 +18,7 @@ obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o ccflags-y := -fno-stack-protector ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -fno-builtin ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c index 0a1e859323b4..a8085cd8215e 100644 --- a/arch/riscv/kvm/aia_imsic.c +++ b/arch/riscv/kvm/aia_imsic.c @@ -55,7 +55,7 @@ struct imsic { /* IMSIC SW-file */ struct imsic_mrif *swfile; phys_addr_t swfile_pa; - spinlock_t swfile_extirq_lock; + raw_spinlock_t swfile_extirq_lock; }; #define imsic_vs_csr_read(__c) \ @@ -622,7 +622,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) * interruptions between reading topei and updating pending status. */ - spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); + raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) && imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) @@ -630,7 +630,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) else kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); - spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); + raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); } static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear, @@ -1051,7 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu) } imsic->swfile = page_to_virt(swfile_page); imsic->swfile_pa = page_to_phys(swfile_page); - spin_lock_init(&imsic->swfile_extirq_lock); + raw_spin_lock_init(&imsic->swfile_extirq_lock); /* Setup IO device */ kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 99f34409fb60..4cc631fa7039 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -18,6 +18,7 @@ #define RV_MAX_REG_ARGS 8 #define RV_FENTRY_NINSNS 2 #define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4) +#define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI_CLANG) ? 
1 : 0) /* imm that allows emit_imm to emit max count insns */ #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF @@ -271,7 +272,8 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) if (!is_tail_call) emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx); emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, - is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */ + /* kcfi, fentry and TCC init insns will be skipped on tailcall */ + is_tail_call ? (RV_KCFI_NINSNS + RV_FENTRY_NINSNS + 1) * 4 : 0, ctx); } @@ -548,8 +550,8 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, rv_lr_w(r0, 0, rd, 0, 0), ctx); jmp_offset = ninsns_rvoff(8); emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx); - emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) : - rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx); + emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) : + rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx); jmp_offset = ninsns_rvoff(-6); emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx); emit(rv_fence(0x3, 0x3), ctx); diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 9b57add02cd5..fb0e9a1d9be2 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -50,7 +50,6 @@ CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_CERT_STORE=y CONFIG_EXPOLINE=y -# CONFIG_EXPOLINE_EXTERN is not set CONFIG_EXPOLINE_AUTO=y CONFIG_CHSC_SCH=y CONFIG_VFIO_CCW=m @@ -95,6 +94,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_BUCKETS=y CONFIG_SLUB_STATS=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y @@ -426,6 +426,13 @@ CONFIG_DEVTMPFS_SAFE=y # CONFIG_FW_LOADER is not set CONFIG_CONNECTOR=y CONFIG_ZRAM=y +CONFIG_ZRAM_BACKEND_LZ4=y +CONFIG_ZRAM_BACKEND_LZ4HC=y +CONFIG_ZRAM_BACKEND_ZSTD=y +CONFIG_ZRAM_BACKEND_DEFLATE=y +CONFIG_ZRAM_BACKEND_842=y +CONFIG_ZRAM_BACKEND_LZO=y +CONFIG_ZRAM_DEF_COMP_DEFLATE=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m @@ -486,6 +493,7 @@ CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING=y CONFIG_DM_SWITCH=m CONFIG_DM_INTEGRITY=m CONFIG_DM_VDO=m @@ -535,6 +543,7 @@ CONFIG_NLMON=m CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y +# CONFIG_NET_VENDOR_META is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROSEMI is not set @@ -695,6 +704,7 @@ CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_LEGACY_CLIENT_TRACKING is not set CONFIG_CIFS=m CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y @@ -740,7 +750,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index df4addd1834a..88be0a734b60 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -48,7 +48,6 @@ CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_CERT_STORE=y CONFIG_EXPOLINE=y -# CONFIG_EXPOLINE_EXTERN is not set CONFIG_EXPOLINE_AUTO=y CONFIG_CHSC_SCH=y CONFIG_VFIO_CCW=m @@ -89,6 +88,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_BUCKETS=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y @@ -416,6 +416,13 @@ CONFIG_DEVTMPFS_SAFE=y # CONFIG_FW_LOADER is not set 
CONFIG_CONNECTOR=y CONFIG_ZRAM=y +CONFIG_ZRAM_BACKEND_LZ4=y +CONFIG_ZRAM_BACKEND_LZ4HC=y +CONFIG_ZRAM_BACKEND_ZSTD=y +CONFIG_ZRAM_BACKEND_DEFLATE=y +CONFIG_ZRAM_BACKEND_842=y +CONFIG_ZRAM_BACKEND_LZO=y +CONFIG_ZRAM_DEF_COMP_DEFLATE=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m @@ -476,6 +483,7 @@ CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING=y CONFIG_DM_SWITCH=m CONFIG_DM_INTEGRITY=m CONFIG_DM_VDO=m @@ -525,6 +533,7 @@ CONFIG_NLMON=m CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m CONFIG_MLX5_CORE_EN=y +# CONFIG_NET_VENDOR_META is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROSEMI is not set @@ -682,6 +691,7 @@ CONFIG_NFSD=m CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y +# CONFIG_NFSD_LEGACY_CLIENT_TRACKING is not set CONFIG_CIFS=m CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y @@ -726,7 +736,6 @@ CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_ECRDSA=m -CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m @@ -767,6 +776,7 @@ CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_ZSTD=m CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_JITTERENTROPY_OSR=1 CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 8c2b61363bab..bcbaa069de96 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -49,6 +49,7 @@ CONFIG_ZFCP=y # CONFIG_HVC_IUCV is not set # CONFIG_HW_RANDOM_S390 is not set # CONFIG_HMC_DRV is not set +# CONFIG_S390_UV_UAPI is not set # CONFIG_S390_TAPE is not set # CONFIG_VMCP is not set # CONFIG_MONWRITER is not set diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 0fbc992d7a5e..fc9933a743d6 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -16,8 +16,10 @@ #include #define xlate_dev_mem_ptr xlate_dev_mem_ptr +#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr void *xlate_dev_mem_ptr(phys_addr_t phys); #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define IO_SPACE_LIMIT 0 diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 66200d4a2134..29ee289108c5 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -49,6 +49,7 @@ struct perf_sf_sde_regs { }; #define perf_arch_fetch_caller_regs(regs, __ip) do { \ + (regs)->psw.mask = 0; \ (regs)->psw.addr = (__ip); \ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ offsetof(struct stack_frame, back_chain); \ diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 2a32438e09ce..74f73141f9b9 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu) vcpu->stat.instruction_diagnose_258++; if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); + rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) diff --git a/arch/s390/kvm/gaccess.c 
b/arch/s390/kvm/gaccess.c index e65f597e3044..a688351f4ab5 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -828,6 +828,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, const gfn_t gfn = gpa_to_gfn(gpa); int rc; + if (!gfn_to_memslot(kvm, gfn)) + return PGM_ADDRESSING; if (mode == GACC_STORE) rc = kvm_write_guest_page(kvm, gfn, data, offset, len); else @@ -985,6 +987,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, gra += fragment_len; data += fragment_len; } + if (rc > 0) + vcpu->arch.pgm.code = rc; return rc; } diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index b320d12aa049..3fde45a151f2 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data, * @len: number of bytes to copy * * Copy @len bytes from @data (kernel space) to @gra (guest real address). - * It is up to the caller to ensure that the entire guest memory range is - * valid memory before calling this function. * Guest low address and key protection are not checked. * - * Returns zero on success or -EFAULT on error. + * Returns zero on success, -EFAULT when copying from @data failed, or + * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info + * is also stored to allow injecting into the guest (if applicable) using + * kvm_s390_inject_prog_cond(). * * If an error occurs data may have been copied partially to guest memory. */ @@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, * @len: number of bytes to copy * * Copy @len bytes from @gra (guest real address) to @data (kernel space). - * It is up to the caller to ensure that the entire guest memory range is - * valid memory before calling this function. * Guest key protection is not checked. * - * Returns zero on success or -EFAULT on error. + * Returns zero on success, -EFAULT when copying to @data failed, or + * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info + * is also stored to allow injecting into the guest (if applicable) using + * kvm_s390_inject_prog_cond(). * * If an error occurs data may have been copied partially to kernel space. */ diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index dbe95ec5917e..d4f19d33914c 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -280,18 +280,19 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) goto no_pdev; switch (ccdf->pec) { - case 0x003a: /* Service Action or Error Recovery Successful */ + case 0x002a: /* Error event concerns FMB */ + case 0x002b: + case 0x002c: + break; + case 0x0040: /* Service Action or Error Recovery Failed */ + case 0x003b: + zpci_event_io_failure(pdev, pci_channel_io_perm_failure); + break; + default: /* PCI function left in the error state attempt to recover */ ers_res = zpci_event_attempt_error_recovery(pdev); if (ers_res != PCI_ERS_RESULT_RECOVERED) zpci_event_io_failure(pdev, pci_channel_io_perm_failure); break; - default: - /* - * Mark as frozen not permanently failed because the device - * could be subsequently recovered by the platform.
- */ - zpci_event_io_failure(pdev, pci_channel_io_frozen); - break; - } pci_dev_put(pdev); no_pdev: diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2852fcd82cbd..16354dfa6d96 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2257,6 +2257,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING config ADDRESS_MASKING bool "Linear Address Masking support" depends on X86_64 + depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS help Linear Address Masking (LAM) modifies the checking that is applied to 64-bit linear addresses, allowing software to use of the diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index d9feadffa972..324686bca368 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include "calling.h" @@ -19,6 +21,9 @@ SYM_FUNC_START(entry_ibpb) movl $PRED_CMD_IBPB, %eax xorl %edx, %edx wrmsr + + /* Make sure IBPB clears return stack predictions too. */ + FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET RET SYM_FUNC_END(entry_ibpb) /* For KVM */ diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index d3a814efbff6..20be5758c2d2 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -871,6 +871,8 @@ SYM_FUNC_START(entry_SYSENTER_32) /* Now ready to switch the cr3 */ SWITCH_TO_USER_CR3 scratch_reg=%eax + /* Clobbers ZF */ + CLEAR_CPU_BUFFERS /* * Restore all flags except IF. (We restore IF separately because @@ -881,7 +883,6 @@ SYM_FUNC_START(entry_SYSENTER_32) BUG_IF_WRONG_CR3 no_user_check=1 popfl popl %eax - CLEAR_CPU_BUFFERS /* * Return back to the vDSO, which will pop ecx and edx. @@ -1144,7 +1145,6 @@ SYM_CODE_START(asm_exc_nmi) /* Not on SYSENTER stack. */ call exc_nmi - CLEAR_CPU_BUFFERS jmp .Lnmi_return .Lnmi_from_sysenter_stack: @@ -1165,6 +1165,7 @@ SYM_CODE_START(asm_exc_nmi) CHECK_AND_APPLY_ESPFIX RESTORE_ALL_NMI cr3_reg=%edi pop=4 + CLEAR_CPU_BUFFERS jmp .Lirq_return #ifdef CONFIG_X86_ESPFIX32 @@ -1206,6 +1207,7 @@ SYM_CODE_START(asm_exc_nmi) * 1 - orig_ax */ lss (1+5+6)*4(%esp), %esp # back to espfix stack + CLEAR_CPU_BUFFERS jmp .Lirq_return #endif SYM_CODE_END(asm_exc_nmi) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 6f3b6aef47ba..d0caac26533f 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -116,7 +116,10 @@ static inline bool amd_gart_present(void) #define amd_nb_num(x) 0 #define amd_nb_has_feature(x) false -#define node_to_amd_nb(x) NULL +static inline struct amd_northbridge *node_to_amd_nb(int node) +{ + return NULL; +} #define amd_gart_present(x) false #endif diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index dd4682857c12..913fd3a7bac6 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -215,7 +215,7 @@ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass.
*/ #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */ #define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */ -#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */ +#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */ #define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */ #define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */ #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */ @@ -348,6 +348,7 @@ #define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */ #define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ #define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */ +#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* IBPB clears return address predictor */ #define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ @@ -523,4 +524,5 @@ #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */ #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ #define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ +#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index ff5f1ecc7d1e..96b410b1d4e8 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -323,7 +323,16 @@ * Note: Only the memory operand variant of VERW clears the CPU buffers. */ .macro CLEAR_CPU_BUFFERS - ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF +#ifdef CONFIG_X86_64 + ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF +#else + /* + * In 32bit mode, the memory operand must be a %cs reference. The data + * segments may not be usable (vm86 mode), and the stack segment may not + * be flat (ESPFIX32). 
+ */ + ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF +#endif .endm #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h index 24e3a53ca255..6652ebddfd02 100644 --- a/arch/x86/include/asm/runtime-const.h +++ b/arch/x86/include/asm/runtime-const.h @@ -6,7 +6,7 @@ typeof(sym) __ret; \ asm_inline("mov %1,%0\n1:\n" \ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \ - ".long 1b - %c2 - .\n\t" \ + ".long 1b - %c2 - .\n" \ ".popsection" \ :"=r" (__ret) \ :"i" ((unsigned long)0x0123456789abcdefull), \ @@ -20,7 +20,7 @@ typeof(0u+(val)) __ret = (val); \ asm_inline("shrl $12,%k0\n1:\n" \ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \ - ".long 1b - 1 - .\n\t" \ + ".long 1b - 1 - .\n" \ ".popsection" \ :"+r" (__ret)); \ __ret; }) diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index aef70336d624..92f3664dd933 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -305,9 +305,4 @@ static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled extern void arch_scale_freq_tick(void); #define arch_scale_freq_tick arch_scale_freq_tick -#ifdef CONFIG_ACPI_CPPC_LIB -void init_freq_invariance_cppc(void); -#define arch_init_invariance_cppc init_freq_invariance_cppc -#endif - #endif /* _ASM_X86_TOPOLOGY_H */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index afce8ee5d7b7..b0a887209400 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -12,6 +12,13 @@ #include #include #include +#include + +/* + * Virtual variable: there's no actual backing store for this, + * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)' + */ +extern unsigned long USER_PTR_MAX; #ifdef CONFIG_ADDRESS_MASKING /* @@ -46,19 +53,24 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, #endif -/* - * The virtual address space space is logically divided into a kernel - * half and a user half. When cast to a signed type, user pointers - * are positive and kernel pointers are negative. - */ -#define valid_user_address(x) ((__force long)(x) >= 0) +#define valid_user_address(x) \ + ((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX)) /* * Masking the user address is an alternative to a conditional * user_access_begin that can avoid the fencing. This only works * for dense accesses starting at the address. */ -#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63))) +static inline void __user *mask_user_address(const void __user *ptr) +{ + unsigned long mask; + asm("cmp %1,%0\n\t" + "sbb %0,%0" + :"=r" (mask) + :"r" (ptr), + "0" (runtime_const_ptr(USER_PTR_MAX))); + return (__force void __user *)(mask | (__force unsigned long)ptr); +} #define masked_user_access_begin(x) ({ \ __auto_type __masked_ptr = (x); \ __masked_ptr = mask_user_address(__masked_ptr); \ @@ -69,23 +81,16 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, * arbitrary values in those bits rather then masking them off. * * Enforce two rules: - * 1. 'ptr' must be in the user half of the address space + * 1. 'ptr' must be in the user part of the address space * 2. 'ptr+size' must not overflow into kernel addresses * - * Note that addresses around the sign change are not valid addresses, - * and will GP-fault even with LAM enabled if the sign bit is set (see - * "CR3.LAM_SUP" that can narrow the canonicality check if we ever - * enable it, but not remove it entirely). 
- * - * So the "overflow into kernel addresses" does not imply some sudden - * exact boundary at the sign bit, and we can allow a lot of slop on the - * size check. + * Note that we always have at least one guard page between the + * max user address and the non-canonical gap, allowing us to + * ignore small sizes entirely. * * In fact, we could probably remove the size check entirely, since * any kernel accesses will be in increasing address order starting - * at 'ptr', and even if the end might be in kernel space, we'll - * hit the GP faults for non-canonical accesses before we ever get - * there. + * at 'ptr'. * * That's a separate optimization, for now just handle the small * constant case. diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c index 956984054bf3..aab9d0570841 100644 --- a/arch/x86/kernel/acpi/cppc.c +++ b/arch/x86/kernel/acpi/cppc.c @@ -110,7 +110,7 @@ static void amd_set_max_freq_ratio(void) static DEFINE_MUTEX(freq_invariance_lock); -void init_freq_invariance_cppc(void) +static inline void init_freq_invariance_cppc(void) { static bool init_done; @@ -127,6 +127,11 @@ void init_freq_invariance_cppc(void) mutex_unlock(&freq_invariance_lock); } +void acpi_processor_init_invariance_cppc(void) +{ + init_freq_invariance_cppc(); +} + /* * Get the highest performance register value. * @cpu: CPU from which to get highest performance. diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index dc5d3216af24..9fe9972d2071 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -44,6 +44,7 @@ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 +#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4 0x16fc #define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4 0x124c #define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4 0x12bc #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 @@ -127,6 +128,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) }, diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 6513c53c9459..c5fb28e6451a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -440,7 +440,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt) v = apic_read(APIC_LVTT); v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, v); - apic_write(APIC_TMICT, 0); + + /* + * Setting APIC_LVT_MASKED (above) should be enough to tell + * the hardware that this timer will never fire. But AMD + * erratum 411 and some Intel CPU behavior circa 2024 say + * otherwise. 
Time for belt and suspenders programming: mask + * the timer _and_ zero the counter registers: + */ + if (v & APIC_LVT_TIMER_TSCDEADLINE) + wrmsrl(MSR_IA32_TSC_DEADLINE, 0); + else + apic_write(APIC_TMICT, 0); + return 0; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 015971adadfc..fab5caec0b72 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1202,5 +1202,6 @@ void amd_check_microcode(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return; - on_each_cpu(zenbleed_check_cpu, NULL, 1); + if (cpu_feature_enabled(X86_FEATURE_ZEN2)) + on_each_cpu(zenbleed_check_cpu, NULL, 1); } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d1915427b4ff..47a01d4028f6 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1115,8 +1115,25 @@ static void __init retbleed_select_mitigation(void) case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + + /* + * IBPB on entry already obviates the need for + * software-based untraining so clear those in case some + * other mitigation like SRSO has selected them. + */ + setup_clear_cpu_cap(X86_FEATURE_UNRET); + setup_clear_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; + + /* + * There is no need for RSB filling: entry_ibpb() ensures + * all predictions, including the RSB, are invalidated, + * regardless of IBPB implementation. + */ + setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); + break; case RETBLEED_MITIGATION_STUFF: @@ -2627,6 +2644,14 @@ static void __init srso_select_mitigation(void) if (has_microcode) { setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); srso_mitigation = SRSO_MITIGATION_IBPB; + + /* + * IBPB on entry already obviates the need for + * software-based untraining so clear those in case some + * other mitigation like Retbleed has selected them. + */ + setup_clear_cpu_cap(X86_FEATURE_UNRET); + setup_clear_cpu_cap(X86_FEATURE_RETHUNK); } } else { pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); @@ -2638,6 +2663,13 @@ static void __init srso_select_mitigation(void) if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; + + /* + * There is no need for RSB filling: entry_ibpb() ensures + * all predictions, including the RSB, are invalidated, + * regardless of IBPB implementation. 
+ */ + setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); } } else { pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 07a34d723505..a5f221ea5688 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -69,6 +69,7 @@ #include #include #include +#include #include "cpu.h" @@ -1443,6 +1444,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) boot_cpu_has(X86_FEATURE_HYPERVISOR))) setup_force_cpu_bug(X86_BUG_BHI); + if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET)) + setup_force_cpu_bug(X86_BUG_IBPB_NO_RET); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -2386,6 +2390,15 @@ void __init arch_cpu_finalize_init(void) alternative_instructions(); if (IS_ENABLED(CONFIG_X86_64)) { + unsigned long USER_PTR_MAX = TASK_SIZE_MAX-1; + + /* + * Enable this when LAM is gated on LASS support + if (cpu_feature_enabled(X86_FEATURE_LAM)) + USER_PTR_MAX = (1ul << 63) - PAGE_SIZE - 1; + */ + runtime_const_init(ptr, USER_PTR_MAX); + /* * Make sure the first 2MB area is not mapped by huge pages * There are typically fixed size MTRRs in there and overlapping diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index f63b051f25a0..31a73715d755 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -584,7 +584,7 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_ native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); } -static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); +static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size); static int __init save_microcode_in_initrd(void) { @@ -605,7 +605,7 @@ static int __init save_microcode_in_initrd(void) if (!desc.mc) return -EINVAL; - ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); + ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); if (ret > UCODE_UPDATED) return -EINVAL; @@ -613,16 +613,19 @@ static int __init save_microcode_in_initrd(void) } early_initcall(save_microcode_in_initrd); -static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n) +static inline bool patch_cpus_equivalent(struct ucode_patch *p, + struct ucode_patch *n, + bool ignore_stepping) { /* Zen and newer hardcode the f/m/s in the patch ID */ if (x86_family(bsp_cpuid_1_eax) >= 0x17) { union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id); union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id); - /* Zap stepping */ - p_cid.stepping = 0; - n_cid.stepping = 0; + if (ignore_stepping) { + p_cid.stepping = 0; + n_cid.stepping = 0; + } return p_cid.full == n_cid.full; } else { @@ -644,13 +647,13 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi WARN_ON_ONCE(!n.patch_id); list_for_each_entry(p, µcode_cache, plist) - if (patch_cpus_equivalent(p, &n)) + if (patch_cpus_equivalent(p, &n, false)) return p; return NULL; } -static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n) +static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n) { /* Zen and newer hardcode the f/m/s in the patch ID */ if (x86_family(bsp_cpuid_1_eax) >= 0x17) { @@ -659,6 +662,9 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n) zp.ucode_rev = p->patch_id; zn.ucode_rev = n->patch_id; + if (zn.stepping != zp.stepping) + return -1; + return 
zn.rev > zp.rev; } else { return n->patch_id > p->patch_id; @@ -668,10 +674,14 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n) static void update_cache(struct ucode_patch *new_patch) { struct ucode_patch *p; + int ret; list_for_each_entry(p, µcode_cache, plist) { - if (patch_cpus_equivalent(p, new_patch)) { - if (!patch_newer(p, new_patch)) { + if (patch_cpus_equivalent(p, new_patch, true)) { + ret = patch_newer(p, new_patch); + if (ret < 0) + continue; + else if (!ret) { /* we already have the latest patch */ kfree(new_patch->data); kfree(new_patch); @@ -944,6 +954,20 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, return UCODE_OK; } +static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size) +{ + enum ucode_state ret; + + /* free old equiv table */ + free_equiv_cpu_table(); + + ret = __load_microcode_amd(family, data, size); + if (ret != UCODE_OK) + cleanup(); + + return ret; +} + static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { struct cpuinfo_x86 *c; @@ -951,14 +975,9 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz struct ucode_patch *p; enum ucode_state ret; - /* free old equiv table */ - free_equiv_cpu_table(); - - ret = __load_microcode_amd(family, data, size); - if (ret != UCODE_OK) { - cleanup(); + ret = _load_microcode_amd(family, data, size); + if (ret != UCODE_OK) return ret; - } for_each_node(nid) { cpu = cpumask_first(cpumask_of_node(nid)); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 8591d53c144b..b681c2e07dbf 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -207,7 +207,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r) return false; } -static bool __get_mem_config_intel(struct rdt_resource *r) +static __init bool __get_mem_config_intel(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_3_eax eax; @@ -241,7 +241,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r) return true; } -static bool __rdt_get_mem_config_amd(struct rdt_resource *r) +static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); u32 eax, ebx, ecx, edx, subleaf; diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 50fa1fe9a073..200d89a64027 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -29,10 +29,10 @@ * hardware. The allocated bandwidth percentage is rounded to the next * control step available on the hardware. */ -static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) +static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) { - unsigned long bw; int ret; + u32 bw; /* * Only linear delay values is supported for current Intel SKUs. 
@@ -42,16 +42,21 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return false; } - ret = kstrtoul(buf, 10, &bw); + ret = kstrtou32(buf, 10, &bw); if (ret) { - rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); + rdt_last_cmd_printf("Invalid MB value %s\n", buf); return false; } - if ((bw < r->membw.min_bw || bw > r->default_ctrl) && - !is_mba_sc(r)) { - rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, - r->membw.min_bw, r->default_ctrl); + /* Nothing else to do if software controller is enabled. */ + if (is_mba_sc(r)) { + *data = bw; + return true; + } + + if (bw < r->membw.min_bw || bw > r->default_ctrl) { + rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n", + bw, r->membw.min_bw, r->default_ctrl); return false; } @@ -65,7 +70,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, struct resctrl_staged_config *cfg; u32 closid = data->rdtgrp->closid; struct rdt_resource *r = s->res; - unsigned long bw_val; + u32 bw_val; cfg = &d->staged_config[s->conf_type]; if (cfg->have_new_ctrl) { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 263f8aed4e2c..21e9e4845354 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -980,6 +981,9 @@ static void __init kvm_init_platform(void) } kvmclock_init(); x86_platform.apic_post_init = kvm_apic_init; + + /* Set WB as the default cache mode for SEV-SNP and TDX */ + mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK); } #if defined(CONFIG_AMD_MEM_ENCRYPT) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d05392db5d0f..2dbadf347b5f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -261,12 +261,6 @@ static noinstr bool handle_bug(struct pt_regs *regs) int ud_type; u32 imm; - /* - * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug() - * is a rare case that uses @regs without passing them to - * irqentry_enter(). - */ - kmsan_unpoison_entry_regs(regs); ud_type = decode_bug(regs->ip, &imm); if (ud_type == BUG_NONE) return handled; @@ -275,6 +269,12 @@ static noinstr bool handle_bug(struct pt_regs *regs) * All lies, just get the WARN/BUG out. */ instrumentation_begin(); + /* + * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug() + * is a rare case that uses @regs without passing them to + * irqentry_enter(). + */ + kmsan_unpoison_entry_regs(regs); /* * Since we're emulating a CALL with exceptions, restore the interrupt * state to what it was at the exception site. diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 6726be89b7a6..b8c5741d2fb4 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -358,6 +358,7 @@ SECTIONS #endif RUNTIME_CONST_VARIABLES + RUNTIME_CONST(ptr, USER_PTR_MAX) . = ALIGN(PAGE_SIZE); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index a9a23e058555..8e853a5fc867 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1556,6 +1556,17 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { bool flush = false; + /* + * To prevent races with vCPUs faulting in a gfn using stale data, + * zapping a gfn range must be protected by mmu_invalidate_in_progress + * (and mmu_invalidate_seq). The only exception is memslot deletion; + * in that case, SRCU synchronization ensures that SPTEs are zapped + * after all vCPUs have unlocked SRCU, guaranteeing that vCPUs see the + * invalid slot. 
+ */ + lockdep_assert_once(kvm->mmu_invalidate_in_progress || + lockdep_is_held(&kvm->slots_lock)); + if (kvm_memslots_have_rmaps(kvm)) flush = __kvm_rmap_zap_gfn_range(kvm, range->slot, range->start, range->end, @@ -1884,14 +1895,10 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp) if (is_obsolete_sp((_kvm), (_sp))) { \ } else -#define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ +#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \ for_each_valid_sp(_kvm, _sp, \ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ - if ((_sp)->gfn != (_gfn)) {} else - -#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \ - for_each_gfn_valid_sp(_kvm, _sp, _gfn) \ - if (!sp_has_gptes(_sp)) {} else + if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -7063,15 +7070,15 @@ static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm, /* * Since accounting information is stored in struct kvm_arch_memory_slot, - * shadow pages deletion (e.g. unaccount_shadowed()) requires that all - * gfns with a shadow page have a corresponding memslot. Do so before - * the memslot goes away. + * all MMU pages that are shadowing guest PTEs must be zapped before the + * memslot is deleted, as freeing such pages after the memslot is freed + * will result in use-after-free, e.g. in unaccount_shadowed(). */ for (i = 0; i < slot->npages; i++) { struct kvm_mmu_page *sp; gfn_t gfn = slot->base_gfn + i; - for_each_gfn_valid_sp(kvm, sp, gfn) + for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index d5314cb7dff4..cf84103ce38b 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -63,8 +63,12 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) u64 pdpte; int ret; + /* + * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores + * nCR3[4:0] when loading PDPTEs from memory. + */ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, - offset_in_page(cr3) + index * 8, 8); + (cr3 & GENMASK(11, 5)) + index * 8, 8); if (ret) return 0; return pdpte; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1a4438358c5e..81ed596e4454 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4888,9 +4888,6 @@ void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx->hv_deadline_tsc = -1; kvm_set_cr8(vcpu, 0); - vmx_segment_cache_clear(vmx); - kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); - seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); @@ -4917,6 +4914,9 @@ void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); + vmx_segment_cache_clear(vmx); + kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS); + vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index d066aecf8aeb..4357ec2a0bfc 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -39,8 +39,13 @@ .macro check_range size:req .if IS_ENABLED(CONFIG_X86_64) - mov %rax, %rdx - sar $63, %rdx + movq $0x0123456789abcdef,%rdx + 1: + .pushsection runtime_ptr_USER_PTR_MAX,"a" + .long 1b - 8 - . 
+ .popsection + cmp %rax, %rdx + sbb %rdx, %rdx or %rdx, %rax .else cmp $TASK_SIZE_MAX-\size+1, %eax diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c index 0ce17766c0e5..9a6a943d8e41 100644 --- a/arch/x86/virt/svm/sev.c +++ b/arch/x86/virt/svm/sev.c @@ -173,6 +173,8 @@ static void __init __snp_fixup_e820_tables(u64 pa) e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); + if (!memblock_is_region_reserved(pa, PMD_SIZE)) + memblock_reserve(pa, PMD_SIZE); } } diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2c12ae42dc8b..d6818c6cafda 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1032,6 +1032,10 @@ static u64 xen_do_read_msr(unsigned int msr, int *err) switch (msr) { case MSR_IA32_APICBASE: val &= ~X2APIC_ENABLE; + if (smp_processor_id() == 0) + val |= MSR_IA32_APICBASE_BSP; + else + val &= ~MSR_IA32_APICBASE_BSP; break; } return val; diff --git a/block/blk-map.c b/block/blk-map.c index 0e1167b23934..b5fd1d857461 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -561,57 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio); /* Prepare bio for passthrough IO given ITER_BVEC iter */ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter) { - struct request_queue *q = rq->q; - size_t nr_iter = iov_iter_count(iter); - size_t nr_segs = iter->nr_segs; - struct bio_vec *bvecs, *bvprvp = NULL; - const struct queue_limits *lim = &q->limits; - unsigned int nsegs = 0, bytes = 0; + const struct queue_limits *lim = &rq->q->limits; + unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT; + unsigned int nsegs; struct bio *bio; - size_t i; + int ret; - if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q)) - return -EINVAL; - if (nr_segs > queue_max_segments(q)) + if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes) return -EINVAL; - /* no iovecs to alloc, as we already have a BVEC iterator */ + /* reuse the bvecs from the iterator instead of allocating new ones */ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); - if (bio == NULL) + if (!bio) return -ENOMEM; - bio_iov_bvec_set(bio, (struct iov_iter *)iter); - blk_rq_bio_prep(rq, bio, nr_segs); - /* loop to perform a bunch of sanity checks */ - bvecs = (struct bio_vec *)iter->bvec; - for (i = 0; i < nr_segs; i++) { - struct bio_vec *bv = &bvecs[i]; - - /* - * If the queue doesn't support SG gaps and adding this - * offset would create a gap, fallback to copy. 
- */ - if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) { - blk_mq_map_bio_put(bio); - return -EREMOTEIO; - } - /* check full condition */ - if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) - goto put_bio; - if (bytes + bv->bv_len > nr_iter) - goto put_bio; - if (bv->bv_offset + bv->bv_len > PAGE_SIZE) - goto put_bio; - - nsegs++; - bytes += bv->bv_len; - bvprvp = bv; + /* check that the data layout matches the hardware restrictions */ + ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes); + if (ret) { + /* if we would have to split the bio, copy instead */ + if (ret > 0) + ret = -EREMOTEIO; + blk_mq_map_bio_put(bio); + return ret; } + + blk_rq_bio_prep(rq, bio, nsegs); return 0; -put_bio: - blk_mq_map_bio_put(bio); - return -EINVAL; } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index 4b2c8e940f59..cf626e061dd7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4310,6 +4310,12 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, /* mark the queue as mq asap */ q->mq_ops = set->ops; + /* + * ->tag_set has to be setup before initialize hctx, which cpuphp + * handler needs it for checking queue mapping + */ + q->tag_set = set; + if (blk_mq_alloc_ctxs(q)) goto err_exit; @@ -4328,8 +4334,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, INIT_WORK(&q->timeout_work, blk_mq_timeout_work); blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); - q->tag_set = set; - q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 2cfb297d9a62..058f92c4f9d5 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -219,8 +219,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr, data->got_token = true; smp_wmb(); - list_del_init(&curr->entry); wake_up_process(data->task); + list_del_init_careful(&curr->entry); return 1; } diff --git a/block/elevator.c b/block/elevator.c index 4122026b11f1..9430cde13d1a 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -106,8 +106,7 @@ static struct elevator_type *__elevator_find(const char *name) return NULL; } -static struct elevator_type *elevator_find_get(struct request_queue *q, - const char *name) +static struct elevator_type *elevator_find_get(const char *name) { struct elevator_type *e; @@ -551,7 +550,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); static inline bool elv_support_iosched(struct request_queue *q) { if (!queue_is_mq(q) || - (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))) + (q->tag_set->flags & BLK_MQ_F_NO_SCHED)) return false; return true; } @@ -562,14 +561,14 @@ static inline bool elv_support_iosched(struct request_queue *q) */ static struct elevator_type *elevator_get_default(struct request_queue *q) { - if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) + if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) return NULL; if (q->nr_hw_queues != 1 && !blk_mq_is_shared_tags(q->tag_set->flags)) return NULL; - return elevator_find_get(q, "mq-deadline"); + return elevator_find_get("mq-deadline"); } /* @@ -697,7 +696,7 @@ static int elevator_change(struct request_queue *q, const char *elevator_name) if (q->elevator && elevator_match(q->elevator->type, elevator_name)) return 0; - e = elevator_find_get(q, elevator_name); + e = elevator_find_get(elevator_name); if (!e) return -EINVAL; ret = elevator_switch(q, e); @@ -709,13 +708,21 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf, size_t count) { char elevator_name[ELV_NAME_MAX]; + struct 
elevator_type *found; + const char *name; if (!elv_support_iosched(disk->queue)) return -EOPNOTSUPP; strscpy(elevator_name, buf, sizeof(elevator_name)); + name = strstrip(elevator_name); - request_module("%s-iosched", strstrip(elevator_name)); + spin_lock(&elv_list_lock); + found = __elevator_find(name); + spin_unlock(&elv_list_lock); + + if (!found) + request_module("%s-iosched", name); return 0; } diff --git a/crypto/algapi.c b/crypto/algapi.c index 74e2261c184c..004d27e41315 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -373,7 +373,7 @@ void crypto_alg_tested(const char *name, int err) q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (list_empty(&alg->cra_list)) + if (crypto_is_dead(alg)) goto complete; if (err == -ECANCELED) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ee8da628e9da..2f5f6b52b2d4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1940,7 +1940,7 @@ static int __alg_test_hash(const struct hash_testvec *vecs, atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { if (PTR_ERR(atfm) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: hash: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(atfm)); return PTR_ERR(atfm); @@ -2706,7 +2706,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_aead(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: aead: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3285,7 +3285,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_skcipher(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3700,7 +3700,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc, tfm = crypto_alloc_cipher(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) - return -ENOENT; + return 0; printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -3726,7 +3726,7 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, acomp = crypto_alloc_acomp(driver, type, mask); if (IS_ERR(acomp)) { if (PTR_ERR(acomp) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: acomp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(acomp)); return PTR_ERR(acomp); @@ -3740,7 +3740,7 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, comp = crypto_alloc_comp(driver, type, mask); if (IS_ERR(comp)) { if (PTR_ERR(comp) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: comp: Failed to load transform for %s: %ld\n", driver, PTR_ERR(comp)); return PTR_ERR(comp); @@ -3818,7 +3818,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, rng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(rng)) { if (PTR_ERR(rng) == -ENOENT) - return -ENOENT; + return 0; printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); return PTR_ERR(rng); @@ -3846,12 +3846,11 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, drng = crypto_alloc_rng(driver, type, mask); if (IS_ERR(drng)) { + kfree_sensitive(buf); if (PTR_ERR(drng) == -ENOENT) - goto out_no_rng; + return 0; printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); -out_no_rng: - 
kfree_sensitive(buf); return PTR_ERR(drng); } @@ -4095,7 +4094,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, tfm = crypto_alloc_kpp(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); @@ -4325,7 +4324,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, tfm = crypto_alloc_akcipher(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) - return -ENOENT; + return 0; pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); return PTR_ERR(tfm); diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 6f86f8df30db..8d50981594d1 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -108,6 +108,14 @@ static int reset_pending_show(struct seq_file *s, void *v) return 0; } +static int firewall_irq_counter_show(struct seq_file *s, void *v) +{ + struct ivpu_device *vdev = seq_to_ivpu(s); + + seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter)); + return 0; +} + static const struct drm_debugfs_info vdev_debugfs_list[] = { {"bo_list", bo_list_show, 0}, {"fw_name", fw_name_show, 0}, @@ -116,6 +124,7 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = { {"last_bootmode", last_bootmode_show, 0}, {"reset_counter", reset_counter_show, 0}, {"reset_pending", reset_pending_show, 0}, + {"firewall_irq_counter", firewall_irq_counter_show, 0}, }; static ssize_t diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index 27f0fe4d54e0..e69c0613513f 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -249,6 +249,7 @@ int ivpu_hw_init(struct ivpu_device *vdev) platform_init(vdev); wa_init(vdev); timeouts_init(vdev); + atomic_set(&vdev->hw->firewall_irq_counter, 0); return 0; } diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index 1c0c98e3afb8..a96a05b2acda 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -52,6 +52,7 @@ struct ivpu_hw_info { int dma_bits; ktime_t d0i3_entry_host_ts; u64 d0i3_entry_vpu_ts; + atomic_t firewall_irq_counter; }; int ivpu_hw_init(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c index dfd2f4a5b526..60b33fc59d96 100644 --- a/drivers/accel/ivpu/ivpu_hw_ip.c +++ b/drivers/accel/ivpu/ivpu_hw_ip.c @@ -1062,7 +1062,10 @@ static void irq_wdt_mss_handler(struct ivpu_device *vdev) static void irq_noc_firewall_handler(struct ivpu_device *vdev) { - ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ"); + atomic_inc(&vdev->hw->firewall_irq_counter); + + ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n", + atomic_read(&vdev->hw->firewall_irq_counter)); } /* Handler for IRQs from NPU core */ diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index 9e8a8cbadf6b..d8bdab69f800 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr nents = sgt->nents; nents_dma = nents; *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); - for_each_sgtable_sg(sgt, sg, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { *size -= sizeof(*asp); /* Save 1K for possible follow-up transactions. 
*/ if (*size < SZ_1K) { diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index e86e71c1cdd8..c20eb63750f5 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -184,7 +184,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl nents = 0; size = size ? size : PAGE_SIZE; - for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) { + for_each_sgtable_dma_sg(sgt_in, sg, j) { len = sg_dma_len(sg); if (!len) @@ -221,7 +221,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl /* copy relevant sg node and fix page and length */ sgn = sgf; - for_each_sgtable_sg(sgt, sg, j) { + for_each_sgtable_dma_sg(sgt, sg, j) { memcpy(sg, sgn, sizeof(*sg)); if (sgn == sgf) { sg_dma_address(sg) += offf; @@ -301,7 +301,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, * fence. */ dev_addr = req->dev_addr; - for_each_sgtable_sg(slice->sgt, sg, i) { + for_each_sgtable_dma_sg(slice->sgt, sg, i) { slice->reqs[i].cmd = cmd; slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? sg_dma_address(sg) : dev_addr); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 51470208e6da..7773e6b860e7 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = { }, .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, + { + /* + * Samsung Galaxy Book2, initial _LID device notification returns + * lid closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "750XED"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index b73b3aa92f3f..5c0cc7aae872 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -671,10 +671,6 @@ static int pcc_data_alloc(int pcc_ss_id) * ) */ -#ifndef arch_init_invariance_cppc -static inline void arch_init_invariance_cppc(void) { } -#endif - /** * acpi_cppc_processor_probe - Search for per CPU _CPC objects. * @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@ -867,7 +863,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; - spin_lock_init(&cpc_ptr->rmw_lock); + raw_spin_lock_init(&cpc_ptr->rmw_lock); /* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); @@ -905,8 +901,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } - arch_init_invariance_cppc(); - kfree(output.pointer); return 0; @@ -1087,6 +1081,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; struct cpc_desc *cpc_desc; + unsigned long flags; size = GET_BIT_WIDTH(reg); @@ -1126,7 +1121,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return -ENODEV; } - spin_lock(&cpc_desc->rmw_lock); + raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags); switch (size) { case 8: prev_val = readb_relaxed(vaddr); @@ -1141,7 +1136,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) prev_val = readq_relaxed(vaddr); break; default: - spin_unlock(&cpc_desc->rmw_lock); + raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags); return -EFAULT; } val = MASK_VAL_WRITE(reg, prev_val, val); @@ -1174,7 +1169,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) } if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - spin_unlock(&cpc_desc->rmw_lock); + raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags); return ret_val; } @@ -1916,9 +1911,15 @@ unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf) u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { - mul = caps->nominal_freq - caps->lowest_freq; + /* Avoid special case when nominal_freq is equal to lowest_freq */ + if (caps->lowest_freq == caps->nominal_freq) { + mul = caps->nominal_freq; + div = caps->nominal_perf; + } else { + mul = caps->nominal_freq - caps->lowest_freq; + div = caps->nominal_perf - caps->lowest_perf; + } mul *= KHZ_PER_MHZ; - div = caps->nominal_perf - caps->lowest_perf; offset = caps->nominal_freq * KHZ_PER_MHZ - div64_u64(caps->nominal_perf * mul, div); } else { @@ -1939,11 +1940,17 @@ unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq) { s64 retval, offset = 0; static u64 max_khz; - u64 mul, div; + u64 mul, div; if (caps->lowest_freq && caps->nominal_freq) { - mul = caps->nominal_perf - caps->lowest_perf; - div = caps->nominal_freq - caps->lowest_freq; + /* Avoid special case when nominal_freq is equal to lowest_freq */ + if (caps->lowest_freq == caps->nominal_freq) { + mul = caps->nominal_perf; + div = caps->nominal_freq; + } else { + mul = caps->nominal_perf - caps->lowest_perf; + div = caps->nominal_freq - caps->lowest_freq; + } /* * We don't need to convert to kHz for computing offset and can * directly use nominal_freq and lowest_freq as the div64_u64 diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index 1cfaa5957ac4..747f83f7114d 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -52,7 +52,7 @@ struct prm_context_buffer { static LIST_HEAD(prm_module_list); struct prm_handler_info { - guid_t guid; + efi_guid_t guid; efi_status_t (__efiapi *handler_addr)(u64, void *); u64 static_data_buffer_addr; u64 acpi_param_buffer_addr; @@ -72,17 +72,21 @@ struct prm_module_info { struct prm_handler_info handlers[] __counted_by(handler_count); }; -static u64 efi_pa_va_lookup(u64 pa) +static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa) { 
efi_memory_desc_t *md; u64 pa_offset = pa & ~PAGE_MASK; u64 page = pa & PAGE_MASK; for_each_efi_memory_desc(md) { - if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages) + if ((md->attribute & EFI_MEMORY_RUNTIME) && + (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) { return pa_offset + md->virt_addr + page - md->phys_addr; + } } + pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa); + return 0; } @@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end) th = &tm->handlers[cur_handler]; guid_copy(&th->guid, (guid_t *)handler_info->handler_guid); - th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address); - th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address); - th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address); + th->handler_addr = + (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address); + + th->static_data_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address); + + th->acpi_param_buffer_addr = + efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address); + } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info))); return 0; @@ -277,6 +287,13 @@ static acpi_status acpi_platformrt_space_handler(u32 function, if (!handler || !module) goto invalid_guid; + if (!handler->handler_addr || + !handler->static_data_buffer_addr || + !handler->acpi_param_buffer_addr) { + buffer->prm_status = PRM_HANDLER_ERROR; + return AE_OK; + } + ACPI_COPY_NAMESEG(context.signature, "PRMC"); context.revision = 0x0; context.reserved = 0x0; diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index cb52dd000b95..3b281bc1e73c 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -237,6 +237,9 @@ static struct notifier_block acpi_processor_notifier_block = { .notifier_call = acpi_processor_notifier, }; +void __weak acpi_processor_init_invariance_cppc(void) +{ } + /* * We keep the driver loaded even when ACPI is not running. 
* This is needed for the powernow-k8 driver, that works even without @@ -270,6 +273,12 @@ static int __init acpi_processor_driver_init(void) NULL, acpi_soft_cpu_dead); acpi_processor_throttling_init(); + + /* + * Frequency invariance calculations on AMD platforms can't be run until + * after acpi_cppc_processor_probe() has been called for all online CPUs + */ + acpi_processor_init_invariance_cppc(); return 0; err: driver_unregister(&acpi_processor_driver); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 3d74ebe9dbd8..7fe842dae1ec 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -448,73 +448,31 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { - /* Asus ExpertBook B1402CBA */ + /* Asus ExpertBook B1402C* */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"), + DMI_MATCH(DMI_BOARD_NAME, "B1402C"), }, }, { - /* Asus ExpertBook B1402CVA */ + /* Asus ExpertBook B1502C* */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"), + DMI_MATCH(DMI_BOARD_NAME, "B1502C"), }, }, { - /* Asus ExpertBook B1502CBA */ + /* Asus ExpertBook B2402 (B2402CBA / B2402FBA / B2402CVA / B2402FVA) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"), + DMI_MATCH(DMI_BOARD_NAME, "B2402"), }, }, { - /* Asus ExpertBook B1502CGA */ + /* Asus ExpertBook B2502 (B2502CBA / B2502FBA / B2502CVA / B2502FVA) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"), - }, - }, - { - /* Asus ExpertBook B1502CVA */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B1502CVA"), - }, - }, - { - /* Asus ExpertBook B2402CBA */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"), - }, - }, - { - /* Asus ExpertBook B2402FBA */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"), - }, - }, - { - /* Asus ExpertBook B2502 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"), - }, - }, - { - /* Asus ExpertBook B2502FBA */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"), - }, - }, - { - /* Asus ExpertBook B2502CVA */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"), + DMI_MATCH(DMI_BOARD_NAME, "B2502"), }, }, { @@ -532,24 +490,10 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { - /* Asus Vivobook Pro N6506MV */ + /* Asus Vivobook Pro N6506M* */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "N6506MV"), - }, - }, - { - /* Asus Vivobook Pro N6506MU */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "N6506MU"), - }, - }, - { - /* Asus Vivobook Pro N6506MJ */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"), + DMI_MATCH(DMI_BOARD_NAME, "N6506M"), }, }, { @@ -559,6 +503,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { DMI_MATCH(DMI_BOARD_NAME, "17U70P"), }, }, + { + /* LG Electronics 16T90SP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "16T90SP"), + }, + }, { } }; diff --git 
a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3f0144e7dc80..3b303d4ae37a 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -651,6 +651,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, /* the scmd has an associated qc */ if (!(qc->flags & ATA_QCFLAG_EH)) { /* which hasn't failed yet, timeout */ + set_host_byte(scmd, DID_TIME_OUT); qc->err_mask |= AC_ERR_TIMEOUT; qc->flags |= ATA_QCFLAG_EH; nr_timedout++; @@ -4099,10 +4100,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); - /* Set all devices attached to the port in standby mode */ - ata_for_each_link(link, ap, HOST_FIRST) { - ata_for_each_dev(dev, link, ENABLED) - ata_dev_power_set_standby(dev); + /* + * We will reach this point for all of the PM events: + * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set) + * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE. + * + * We do not want to perform disk spin down for PM_EVENT_FREEZE. + * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.) + */ + if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) { + /* Set all devices attached to the port in standby mode */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ENABLED) + ata_dev_power_set_standby(dev); + } } /* diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 75fcb75d5515..3ebe77566788 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -366,7 +366,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) #ifdef CONFIG_ACPI_CPPC_LIB #include -void topology_init_cpu_capacity_cppc(void) +static inline void topology_init_cpu_capacity_cppc(void) { u64 capacity, capacity_scale = 0; struct cppc_perf_caps perf_caps; @@ -417,6 +417,10 @@ void topology_init_cpu_capacity_cppc(void) exit: free_raw_capacity(); } +void acpi_processor_init_invariance_cppc(void) +{ + topology_init_cpu_capacity_cppc(); +} #endif #ifdef CONFIG_CPU_FREQ diff --git a/drivers/base/core.c b/drivers/base/core.c index a4c853411a6b..048ff98dbdfd 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -2634,7 +2633,6 @@ static const char *dev_uevent_name(const struct kobject *kobj) static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); - struct device_driver *driver; int retval = 0; /* add device node properties if present */ @@ -2663,12 +2661,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - /* Synchronize with module_remove_driver() */ - rcu_read_lock(); - driver = READ_ONCE(dev->driver); - if (driver) - add_uevent_var(env, "DRIVER=%s", driver->name); - rcu_read_unlock(); + if (dev->driver) + add_uevent_var(env, "DRIVER=%s", dev->driver->name); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2738,8 +2732,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; @@ -4037,6 +4034,41 @@ int device_for_each_child_reverse(struct device *parent, void *data, } EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 
+/** + * device_for_each_child_reverse_from - device child iterator in reversed order. + * @parent: parent struct device. + * @from: optional starting point in child list + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, starting at @from, and call @fn + * for each, passing it @data. This helper is identical to + * device_for_each_child_reverse() when @from is NULL. + * + * @fn is checked each iteration. If it returns anything other than 0, + * iteration stops and that value is returned to the caller of + * device_for_each_child_reverse_from(). + */ +int device_for_each_child_reverse_from(struct device *parent, + struct device *from, const void *data, + int (*fn)(struct device *, const void *)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init_node(&parent->p->klist_children, &i, + (from ? &from->p->knode_parent : NULL)); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); + /** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device diff --git a/drivers/base/module.c b/drivers/base/module.c index c4eaa1158d54..5bc71bea883a 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -7,7 +7,6 @@ #include #include #include -#include #include "base.h" static char *make_driver_name(const struct device_driver *drv) @@ -102,9 +101,6 @@ void module_remove_driver(const struct device_driver *drv) if (!drv) return; - /* Synchronize with dev_uevent() */ - synchronize_rcu(); - sysfs_remove_link(&drv->p->kobj, "module"); if (drv->owner) diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 8c34ae1cd8d5..cca2fd0a1aed 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -195,6 +195,7 @@ int dev_pm_domain_attach_list(struct device *dev, struct device *pd_dev = NULL; int ret, i, num_pds = 0; bool by_id = true; + size_t size; u32 pd_flags = data ? data->pd_flags : 0; u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ?
0 : DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; @@ -217,19 +218,17 @@ int dev_pm_domain_attach_list(struct device *dev, if (num_pds <= 0) return 0; - pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL); + pds = kzalloc(sizeof(*pds), GFP_KERNEL); if (!pds) return -ENOMEM; - pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs), - GFP_KERNEL); - if (!pds->pd_devs) - return -ENOMEM; - - pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links), - GFP_KERNEL); - if (!pds->pd_links) - return -ENOMEM; + size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links); + pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL); + if (!pds->pd_devs) { + ret = -ENOMEM; + goto free_pds; + } + pds->pd_links = (void *)(pds->pd_devs + num_pds); if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON) link_flags |= DL_FLAG_RPM_ACTIVE; @@ -272,6 +271,9 @@ int dev_pm_domain_attach_list(struct device *dev, device_link_del(pds->pd_links[i]); dev_pm_domain_detach(pds->pd_devs[i], true); } + kfree(pds->pd_devs); +free_pds: + kfree(pds); return ret; } EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list); @@ -363,6 +365,9 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) device_link_del(list->pd_links[i]); dev_pm_domain_detach(list->pd_devs[i], true); } + + kfree(list->pd_devs); + kfree(list); } EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 2a05d955e30b..e21492981f7d 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1364,7 +1364,6 @@ extern struct bio_set drbd_io_bio_set; extern struct mutex resources_mutex; -extern int conn_lowest_minor(struct drbd_connection *connection); extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); extern void drbd_destroy_device(struct kref *kref); extern void drbd_delete_device(struct drbd_device *device); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 0d74d75260ef..5bbd312c3e14 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -471,20 +471,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait) wait_for_completion(&thi->stop); } -int conn_lowest_minor(struct drbd_connection *connection) -{ - struct drbd_peer_device *peer_device; - int vnr = 0, minor = -1; - - rcu_read_lock(); - peer_device = idr_get_next(&connection->peer_devices, &vnr); - if (peer_device) - minor = device_to_minor(peer_device->device); - rcu_read_unlock(); - - return minor; -} - #ifdef CONFIG_SMP /* * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index a6c8e5cc6051..6ba2c1dd1d87 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2380,10 +2380,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) * TODO: provide forward progress for RECOVERY handler, so that * unprivileged device can benefit from it */ - if (info.flags & UBLK_F_UNPRIVILEGED_DEV) + if (info.flags & UBLK_F_UNPRIVILEGED_DEV) { info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE | UBLK_F_USER_RECOVERY); + /* + * For USER_COPY, we depend on userspace to fill the request + * buffer by pwrite() to the ublk char device, which can't be + * used for unprivileged devices + */ + if (info.flags & UBLK_F_USER_COPY) + return -EINVAL; + } + /* the created device is always owned by current user */ ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f23c8801ad5c..e9534fbc92e3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1345,10 +1345,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) if (!urb) return -ENOMEM; - /* Use maximum HCI Event size so the USB stack handles - * ZPL/short-transfer automatically. - */ - size = HCI_MAX_EVENT_SIZE; + if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 && + le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001) + /* Fake CSR devices don't seem to support short-transfer */ + size = le16_to_cpu(data->intr_ep->wMaxPacketSize); + else + /* Use maximum HCI Event size so the USB stack handles + * ZPL/short-transfer automatically. + */ + size = HCI_MAX_EVENT_SIZE; buf = kmalloc(size, mem_flags); if (!buf) { @@ -4041,8 +4046,10 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) BT_DBG("intf %p", intf); - /* Don't suspend if there are connections */ - if (hci_conn_count(data->hdev)) + /* Don't auto-suspend if there are connections; external suspend calls + * shall never fail. + */ + if (PMSG_IS_AUTO(message) && hci_conn_count(data->hdev)) return -EBUSY; if (data->suspend_count++) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 9b0f37d4b9d4..6a99a459b80b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, return -EINVAL; /* Prevent arg from speculatively bypassing the length check */ - barrier_nospec(); + arg = array_index_nospec(arg, cdi->capacity); info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 854546000c92..7df7abaf3e52 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -525,10 +525,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); - /* Give back zero bytes, as TPM chip has not yet fully resumed: */ - if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) - return 0; - return tpm_get_random(chip, data, max); } @@ -674,6 +670,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); */ void tpm_chip_unregister(struct tpm_chip *chip) { +#ifdef CONFIG_TCG_TPM2_HMAC + int rc; + + rc = tpm_try_get_ops(chip); + if (!rc) { + tpm2_end_auth_session(chip); + tpm_put_ops(chip); + } +#endif + tpm_del_legacy_sysfs(chip); if (tpm_is_hwrng_enabled(chip)) hwrng_unregister(&chip->hwrng); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index c3fbbf4d3db7..48ff87444f85 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, struct tpm_header *header = (void *)buf; ssize_t ret, len; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + tpm2_end_auth_session(chip); + ret = tpm2_prepare_space(chip, space, buf, bufsiz); /* If the command is not implemented by the TPM, synthesize a * response with a TPM2_RC_COMMAND_CODE return for user-space.
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 5da134f12c9a..b1daa0d7b341 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -370,6 +370,13 @@ int tpm_pm_suspend(struct device *dev) if (!chip) return -ENODEV; + rc = tpm_try_get_ops(chip); + if (rc) { + /* Can be safely set out of locks, as no action can race: */ + chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + goto out; + } + if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) goto suspended; @@ -377,19 +384,19 @@ int tpm_pm_suspend(struct device *dev) !pm_suspend_via_firmware()) goto suspended; - rc = tpm_try_get_ops(chip); - if (!rc) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_STATE); - else - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - - tpm_put_ops(chip); + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + tpm2_end_auth_session(chip); + tpm2_shutdown(chip, TPM2_SU_STATE); + goto suspended; } + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + suspended: chip->flags |= TPM_CHIP_FLAG_SUSPENDED; + tpm_put_ops(chip); +out: if (rc) dev_err(dev, "Ignoring error %d while suspending\n", rc); return 0; @@ -438,11 +445,18 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) if (!chip) return -ENODEV; + /* Give back zero bytes, as TPM chip has not yet fully resumed: */ + if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) { + rc = 0; + goto out; + } + if (chip->flags & TPM_CHIP_FLAG_TPM2) rc = tpm2_get_random(chip, out, max); else rc = tpm1_get_random(chip, out, max); +out: tpm_put_ops(chip); return rc; } diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index 511c67061728..0739830904b2 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -333,6 +333,9 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, } #ifdef CONFIG_TCG_TPM2_HMAC + /* The first write to /dev/tpm{rm0} will flush the session.
*/ + attributes |= TPM2_SA_CONTINUE_SESSION; + /* * The Architecture Guide requires us to strip trailing zeros * before computing the HMAC @@ -484,7 +487,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v, sha256_final(&sctx, out); } -static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip) +static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip, + struct tpm2_auth *auth) { struct crypto_kpp *kpp; struct kpp_request *req; @@ -543,7 +547,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip) sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ); sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ); kpp_request_set_input(req, s, EC_PT_SZ*2); - sg_init_one(d, chip->auth->salt, EC_PT_SZ); + sg_init_one(d, auth->salt, EC_PT_SZ); kpp_request_set_output(req, d, EC_PT_SZ); crypto_kpp_compute_shared_secret(req); kpp_request_free(req); @@ -554,8 +558,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip) * This works because KDFe fully consumes the secret before it * writes the salt */ - tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x, - chip->auth->salt); + tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt); out: crypto_free_kpp(kpp); @@ -853,7 +856,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, if (rc) /* manually close the session if it wasn't consumed */ tpm2_flush_context(chip, auth->handle); - memzero_explicit(auth, sizeof(*auth)); + + kfree_sensitive(auth); + chip->auth = NULL; } else { /* reset for next use */ auth->session = TPM_HEADER_SIZE; @@ -881,7 +886,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip) return; tpm2_flush_context(chip, auth->handle); - memzero_explicit(auth, sizeof(*auth)); + kfree_sensitive(auth); + chip->auth = NULL; } EXPORT_SYMBOL(tpm2_end_auth_session); @@ -915,33 +921,37 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth, static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key) { - int rc; unsigned int offset = 0; /* dummy offset for null seed context */ u8 name[SHA256_DIGEST_SIZE + 2]; + u32 tmp_null_key; + int rc; rc = tpm2_load_context(chip, chip->null_key_context, &offset, - null_key); - if (rc != -EINVAL) - return rc; + &tmp_null_key); + if (rc != -EINVAL) { + if (!rc) + *null_key = tmp_null_key; + goto err; + } - /* an integrity failure may mean the TPM has been reset */ - dev_err(&chip->dev, "NULL key integrity failure!\n"); - /* check the null name against what we know */ - tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name); - if (memcmp(name, chip->null_key_name, sizeof(name)) == 0) - /* name unchanged, assume transient integrity failure */ - return rc; - /* - * Fatal TPM failure: the NULL seed has actually changed, so - * the TPM must have been illegally reset. All in-kernel TPM - * operations will fail because the NULL primary can't be - * loaded to salt the sessions, but disable the TPM anyway so - * userspace programmes can't be compromised by it. 
- */ - dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n"); + /* Try to re-create null key, given the integrity failure: */ + rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name); + if (rc) + goto err; + + /* Return null key if the name has not been changed: */ + if (!memcmp(name, chip->null_key_name, sizeof(name))) { + *null_key = tmp_null_key; + return 0; + } + + /* Deduce from the name change TPM interference: */ + dev_err(&chip->dev, "null key integrity check failed\n"); + tpm2_flush_context(chip, tmp_null_key); chip->flags |= TPM_CHIP_FLAG_DISABLE; - return rc; +err: + return rc ? -ENODEV : 0; } /** @@ -958,16 +968,20 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key) */ int tpm2_start_auth_session(struct tpm_chip *chip) { + struct tpm2_auth *auth; struct tpm_buf buf; - struct tpm2_auth *auth = chip->auth; - int rc; u32 null_key; + int rc; - if (!auth) { - dev_warn_once(&chip->dev, "auth session is not active\n"); + if (chip->auth) { + dev_warn_once(&chip->dev, "auth session is active\n"); return 0; } + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + rc = tpm2_load_null(chip, &null_key); if (rc) goto out; @@ -988,7 +1002,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip) tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce)); /* append encrypted salt and squirrel away unencrypted in auth */ - tpm_buf_append_salt(&buf, chip); + tpm_buf_append_salt(&buf, chip, auth); /* session type (HMAC, audit or policy) */ tpm_buf_append_u8(&buf, TPM2_SE_HMAC); @@ -1010,10 +1024,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip) tpm_buf_destroy(&buf); - if (rc) - goto out; + if (rc == TPM2_RC_SUCCESS) { + chip->auth = auth; + return 0; + } - out: +out: + kfree_sensitive(auth); return rc; } EXPORT_SYMBOL(tpm2_start_auth_session); @@ -1347,18 +1364,21 @@ static int tpm2_create_null_primary(struct tpm_chip *chip) * * Derive and context save the null primary and allocate memory in the * struct tpm_chip for the authorizations. 
+ * + * Return: + * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error */ int tpm2_sessions_init(struct tpm_chip *chip) { int rc; rc = tpm2_create_null_primary(chip); - if (rc) - dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc); - - chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL); - if (!chip->auth) - return -ENOMEM; + if (rc) { + dev_err(&chip->dev, "null key creation failed with %d\n", rc); + return rc; + } return rc; } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 99a7f2441e70..c62b208b42f1 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2006,25 +2006,27 @@ static int virtcons_probe(struct virtio_device *vdev) multiport = true; } - err = init_vqs(portdev); - if (err < 0) { - dev_err(&vdev->dev, "Error %d initializing vqs\n", err); - goto free_chrdev; - } - spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); INIT_LIST_HEAD(&portdev->list); - virtio_device_ready(portdev->vdev); - INIT_WORK(&portdev->config_work, &config_work_handler); INIT_WORK(&portdev->control_work, &control_work_handler); if (multiport) { spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); + } + err = init_vqs(portdev); + if (err < 0) { + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); + goto free_chrdev; + } + + virtio_device_ready(portdev->vdev); + + if (multiport) { err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); if (err < 0) { dev_err(&vdev->dev, diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c index 41fc8eba3418..aa3ddcfc00eb 100644 --- a/drivers/clk/clk_test.c +++ b/drivers/clk/clk_test.c @@ -473,7 +473,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test) &clk_dummy_rate_ops, 0); ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1; - ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw); if (ret) return ret; @@ -481,7 +481,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test) &clk_dummy_rate_ops, 0); ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2; - ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw); if (ret) return ret; @@ -489,23 +489,13 @@ clk_multiple_parents_mux_test_init(struct kunit *test) ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents, &clk_multiple_parents_mux_ops, CLK_SET_RATE_PARENT); - ret = clk_hw_register(NULL, &ctx->hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->hw); if (ret) return ret; return 0; } -static void -clk_multiple_parents_mux_test_exit(struct kunit *test) -{ - struct clk_multiple_parent_ctx *ctx = test->priv; - - clk_hw_unregister(&ctx->hw); - clk_hw_unregister(&ctx->parents_ctx[0].hw); - clk_hw_unregister(&ctx->parents_ctx[1].hw); -} - /* * Test that for a clock with multiple parents, clk_get_parent() * actually returns the current one. 
@@ -561,18 +551,18 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test) { struct clk_multiple_parent_ctx *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL); struct clk *parent1, *parent2; unsigned long rate; int ret; kunit_skip(test, "This needs to be fixed in the core."); - parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL); + parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1); KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1)); - parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2); ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1); @@ -593,10 +583,6 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); - - clk_put(parent2); - clk_put(parent1); - clk_put(clk); } static struct kunit_case clk_multiple_parents_mux_test_cases[] = { @@ -617,7 +603,6 @@ static struct kunit_suite clk_multiple_parents_mux_test_suite = { .name = "clk-multiple-parents-mux-test", .init = clk_multiple_parents_mux_test_init, - .exit = clk_multiple_parents_mux_test_exit, .test_cases = clk_multiple_parents_mux_test_cases, }; @@ -637,29 +622,20 @@ clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test) &clk_dummy_rate_ops, 0); ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE; - ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw); if (ret) return ret; ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents, &clk_multiple_parents_mux_ops, CLK_SET_RATE_PARENT); - ret = clk_hw_register(NULL, &ctx->hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->hw); if (ret) return ret; return 0; } -static void -clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test) -{ - struct clk_multiple_parent_ctx *ctx = test->priv; - - clk_hw_unregister(&ctx->hw); - clk_hw_unregister(&ctx->parents_ctx[1].hw); -} - /* * Test that, for a mux whose current parent hasn't been registered yet and is * thus orphan, clk_get_parent() will return NULL. 
@@ -912,7 +888,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st { struct clk_multiple_parent_ctx *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL); struct clk *parent; unsigned long rate; int ret; @@ -921,7 +897,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); - parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); ret = clk_set_parent(clk, parent); @@ -931,9 +907,6 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); - - clk_put(parent); - clk_put(clk); } static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = { @@ -961,7 +934,6 @@ static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = { .name = "clk-orphan-transparent-multiple-parent-mux-test", .init = clk_orphan_transparent_multiple_parent_mux_test_init, - .exit = clk_orphan_transparent_multiple_parent_mux_test_exit, .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases, }; @@ -986,7 +958,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test) &clk_dummy_rate_ops, 0); - ret = clk_hw_register(NULL, &ctx->parent_ctx.hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw); if (ret) return ret; @@ -994,7 +966,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test) &clk_dummy_single_parent_ops, CLK_SET_RATE_PARENT); - ret = clk_hw_register(NULL, &ctx->hw); + ret = clk_hw_register_kunit(test, NULL, &ctx->hw); if (ret) return ret; @@ -1060,7 +1032,7 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test) { struct clk_single_parent_ctx *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL); struct clk *parent; int ret; @@ -1074,8 +1046,6 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test) ret = clk_set_rate_range(clk, 3000, 4000); KUNIT_EXPECT_LT(test, ret, 0); - - clk_put(clk); } /* @@ -1092,7 +1062,7 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test) { struct clk_single_parent_ctx *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL); struct clk *parent; int ret; @@ -1106,8 +1076,6 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test) ret = clk_set_rate_range(parent, 3000, 4000); KUNIT_EXPECT_LT(test, ret, 0); - - clk_put(clk); } /* @@ -1238,7 +1206,6 @@ static struct kunit_suite clk_single_parent_mux_test_suite = { .name = "clk-single-parent-mux-test", .init = clk_single_parent_mux_test_init, - .exit = clk_single_parent_mux_test_exit, .test_cases = clk_single_parent_mux_test_cases, }; diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index f9105443d7db..be9bee6ab65f 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -40,7 +40,7 @@ #define PLL_USER_CTL(p) ((p)->offset + 
(p)->regs[PLL_OFF_USER_CTL]) # define PLL_POST_DIV_SHIFT 8 -# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0) +# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0) # define PLL_ALPHA_MSB BIT(15) # define PLL_ALPHA_EN BIT(24) # define PLL_ALPHA_MODE BIT(25) diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 0f578771071f..8ea25aa25dff 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -3123,7 +3123,7 @@ static struct clk_branch gcc_pcie_3_pipe_clk = { static struct clk_branch gcc_pcie_3_pipediv2_clk = { .halt_reg = 0x58060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(5), @@ -3248,7 +3248,7 @@ static struct clk_branch gcc_pcie_4_pipe_clk = { static struct clk_branch gcc_pcie_4_pipediv2_clk = { .halt_reg = 0x6b054, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(27), @@ -3373,7 +3373,7 @@ static struct clk_branch gcc_pcie_5_pipe_clk = { static struct clk_branch gcc_pcie_5_pipediv2_clk = { .halt_reg = 0x2f054, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(19), @@ -3511,7 +3511,7 @@ static struct clk_branch gcc_pcie_6a_pipe_clk = { static struct clk_branch gcc_pcie_6a_pipediv2_clk = { .halt_reg = 0x31060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(28), @@ -3649,7 +3649,7 @@ static struct clk_branch gcc_pcie_6b_pipe_clk = { static struct clk_branch gcc_pcie_6b_pipediv2_clk = { .halt_reg = 0x8d060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(28), @@ -6155,7 +6155,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = { .pd = { .name = "gcc_usb3_mp_ss1_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c index 5bd6fe3e1298..874d4da95ff8 100644 --- a/drivers/clk/qcom/videocc-sm8350.c +++ b/drivers/clk/qcom/videocc-sm8350.c @@ -452,7 +452,7 @@ static struct gdsc mvs0_gdsc = { .pd = { .name = "mvs0_gdsc", }, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE, .pwrsts = PWRSTS_OFF_ON, }; @@ -461,7 +461,7 @@ static struct gdsc mvs1_gdsc = { .pd = { .name = "mvs1_gdsc", }, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE, .pwrsts = PWRSTS_OFF_ON, }; diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 2fa7253c73b2..88629a9abc9c 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -439,7 +439,7 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list, if (list->id > max) max = list->id; if (list->child && list->child->id > max) - max = list->id; + max = list->child->id; } return max; diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c index 7ba9748c0526..f60f0a0c598d 100644 --- a/drivers/clk/samsung/clk-exynosautov920.c +++ b/drivers/clk/samsung/clk-exynosautov920.c @@ -1155,6 +1155,7 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = { .compatible = "samsung,exynosautov920-cmu-peric0", .data = &peric0_cmu_info, }, + { } }; static struct platform_driver exynosautov920_cmu_driver __refdata = { diff --git 
a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 15e201d5e911..b63863f77c67 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -536,11 +536,16 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy) static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) { - u32 max_limit_perf, min_limit_perf, lowest_perf; + u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf; struct amd_cpudata *cpudata = policy->driver_data; - max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); - min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); + if (cpudata->boost_supported && !policy->boost_enabled) + max_perf = READ_ONCE(cpudata->nominal_perf); + else + max_perf = READ_ONCE(cpudata->highest_perf); + + max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq); + min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq); lowest_perf = READ_ONCE(cpudata->lowest_perf); if (min_limit_perf < lowest_perf) @@ -1201,11 +1206,21 @@ static int amd_pstate_register_driver(int mode) return -EINVAL; cppc_state = mode; + + ret = amd_pstate_enable(true); + if (ret) { + pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n", + ret); + amd_pstate_driver_cleanup(); + return ret; + } + ret = cpufreq_register_driver(current_pstate_driver); if (ret) { amd_pstate_driver_cleanup(); return ret; } + return 0; } @@ -1496,10 +1511,13 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) u64 value; s16 epp; - max_perf = READ_ONCE(cpudata->highest_perf); + if (cpudata->boost_supported && !policy->boost_enabled) + max_perf = READ_ONCE(cpudata->nominal_perf); + else + max_perf = READ_ONCE(cpudata->highest_perf); min_perf = READ_ONCE(cpudata->lowest_perf); - max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); - min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); + max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq); + min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq); if (min_limit_perf < min_perf) min_limit_perf = min_perf; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b0018f371ea3..cd2ac1ba53d2 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1034,7 +1034,7 @@ static void __hybrid_init_cpu_capacity_scaling(void) hybrid_update_cpu_capacity_scaling(); } -static void hybrid_init_cpu_capacity_scaling(void) +static void hybrid_init_cpu_capacity_scaling(bool refresh) { bool disable_itmt = false; @@ -1045,7 +1045,7 @@ static void hybrid_init_cpu_capacity_scaling(void) * scaling has been enabled already and the driver is just changing the * operation mode. 
*/ - if (hybrid_max_perf_cpu) { + if (refresh) { __hybrid_init_cpu_capacity_scaling(); goto unlock; } @@ -1071,6 +1071,18 @@ static void hybrid_init_cpu_capacity_scaling(void) sched_clear_itmt_support(); } +static bool hybrid_clear_max_perf_cpu(void) +{ + bool ret; + + guard(mutex)(&hybrid_capacity_lock); + + ret = !!hybrid_max_perf_cpu; + hybrid_max_perf_cpu = NULL; + + return ret; +} + static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; @@ -2263,6 +2275,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) } else { cpu->pstate.scaling = perf_ctl_scaling; } + /* + * If the CPU is going online for the first time and it was + * offline initially, asym capacity scaling needs to be updated. + */ + hybrid_update_capacity(cpu); } else { cpu->pstate.scaling = perf_ctl_scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); @@ -3352,6 +3369,7 @@ static void intel_pstate_driver_cleanup(void) static int intel_pstate_register_driver(struct cpufreq_driver *driver) { + bool refresh_cpu_cap_scaling; int ret; if (driver == &intel_pstate) @@ -3364,6 +3382,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) arch_set_max_freq_ratio(global.turbo_disabled); + refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu(); + intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { @@ -3373,7 +3393,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); - hybrid_init_cpu_capacity_scaling(); + hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling); return 0; } diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index 8d84ad45571c..f150861ceaf6 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = { .base = { .cra_name = "md5", .cra_driver_name = "mv-md5", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = { .base = { .cra_name = "sha1", .cra_driver_name = "mv-sha1", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = { .base = { .cra_name = "sha256", .cra_driver_name = "mv-sha256", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = { .base = { .cra_name = "hmac(md5)", .cra_driver_name = "mv-hmac-md5", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = { .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "mv-hmac-sha1", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = { .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "mv-hmac-sha256", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index 29c192f20082..876469e23f7a 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -60,6 +60,7 @@ 
config CXL_ACPI default CXL_BUS select ACPI_TABLE_LIB select ACPI_HMAT + select CXL_PORT help Enable support for host managed device memory (HDM) resources published by a platform's ACPI CXL memory layout description. See diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index db321f48ba52..2caa90fa4bf2 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -1,13 +1,21 @@ # SPDX-License-Identifier: GPL-2.0 + +# Order is important here for the built-in case: +# - 'core' first for fundamental init +# - 'port' before platform root drivers like 'acpi' so that CXL-root ports +# are immediately enabled +# - 'mem' and 'pmem' before endpoint drivers so that memdevs are +# immediately enabled +# - 'pci' last, also mirrors the hardware enumeration hierarchy obj-y += core/ -obj-$(CONFIG_CXL_PCI) += cxl_pci.o -obj-$(CONFIG_CXL_MEM) += cxl_mem.o +obj-$(CONFIG_CXL_PORT) += cxl_port.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o -obj-$(CONFIG_CXL_PORT) += cxl_port.o +obj-$(CONFIG_CXL_MEM) += cxl_mem.o +obj-$(CONFIG_CXL_PCI) += cxl_pci.o -cxl_mem-y := mem.o -cxl_pci-y := pci.o +cxl_port-y := port.o cxl_acpi-y := acpi.o cxl_pmem-y := pmem.o security.o -cxl_port-y := port.o +cxl_mem-y := mem.o +cxl_pci-y := pci.o diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 82b78e331d8e..432b7cfd12a8 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -924,6 +924,13 @@ static void __exit cxl_acpi_exit(void) /* load before dax_hmem sees 'Soft Reserved' CXL ranges */ subsys_initcall(cxl_acpi_init); + +/* + * Arrange for host-bridge ports to be active synchronous with + * cxl_acpi_probe() exit. + */ +MODULE_SOFTDEP("pre: cxl_port"); + module_exit(cxl_acpi_exit); MODULE_DESCRIPTION("CXL ACPI: Platform Support"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index ef1621d40f05..e9cd7939c407 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -641,6 +641,9 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr, void *ptr; int rc; + if (!dev_is_pci(cxlds->dev)) + return -ENODEV; + if (cxlds->rcd) return -ENODEV; diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 3df10517a327..223c273c0cd1 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -712,7 +712,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) return 0; } -static int cxl_decoder_reset(struct cxl_decoder *cxld) +static int commit_reap(struct device *dev, const void *data) +{ + struct cxl_port *port = to_cxl_port(dev->parent); + struct cxl_decoder *cxld; + + if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if (port->commit_end == cxld->id && + ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { + port->commit_end--; + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", + dev_name(&cxld->dev), port->commit_end); + } + + return 0; +} + +void cxl_port_commit_reap(struct cxl_decoder *cxld) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + + lockdep_assert_held_write(&cxl_region_rwsem); + + /* + * Once the highest committed decoder is disabled, free any other + * decoders that were pinned allocated by out-of-order release. 
+ */ + port->commit_end--; + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev), + port->commit_end); + device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL, + commit_reap); +} +EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL); + +static void cxl_decoder_reset(struct cxl_decoder *cxld) { struct cxl_port *port = to_cxl_port(cxld->dev.parent); struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); @@ -721,14 +758,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) u32 ctrl; if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) - return 0; + return; - if (port->commit_end != id) { + if (port->commit_end == id) + cxl_port_commit_reap(cxld); + else dev_dbg(&port->dev, "%s: out of order reset, expected decoder%d.%d\n", dev_name(&cxld->dev), port->id, port->commit_end); - return -EBUSY; - } down_read(&cxl_dpa_rwsem); ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); @@ -741,7 +778,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); up_read(&cxl_dpa_rwsem); - port->commit_end--; cxld->flags &= ~CXL_DECODER_F_ENABLE; /* Userspace is now responsible for reconfiguring this decoder */ @@ -751,8 +787,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) cxled = to_cxl_endpoint_decoder(&cxld->dev); cxled->state = CXL_DECODER_STATE_MANUAL; } - - return 0; } static int cxl_setup_hdm_decoder_from_dvsec( diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index e666ec6a9085..af92c67bc954 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -2084,11 +2084,18 @@ static void cxl_bus_remove(struct device *dev) static struct workqueue_struct *cxl_bus_wq; +static int cxl_rescan_attach(struct device *dev, void *data) +{ + int rc = device_attach(dev); + + dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached"); + + return 0; +} + static void cxl_bus_rescan_queue(struct work_struct *w) { - int rc = bus_rescan_devices(&cxl_bus_type); - - pr_debug("CXL bus rescan result: %d\n", rc); + bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach); } void cxl_bus_rescan(void) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index e701e4b04032..dff618c708dc 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -232,8 +232,8 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) "Bypassing cpu_cache_invalidate_memregion() for testing!\n"); return 0; } else { - dev_err(&cxlr->dev, - "Failed to synchronize CPU cache state\n"); + dev_WARN(&cxlr->dev, + "Failed to synchronize CPU cache state\n"); return -ENXIO; } } @@ -242,19 +242,17 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) return 0; } -static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) +static void cxl_region_decode_reset(struct cxl_region *cxlr, int count) { struct cxl_region_params *p = &cxlr->params; - int i, rc = 0; + int i; /* - * Before region teardown attempt to flush, and if the flush - * fails cancel the region teardown for data consistency - * concerns + * Before region teardown attempt to flush, evict any data cached for + * this region, or scream loudly about missing arch / platform support + * for CXL teardown. 
*/ - rc = cxl_region_invalidate_memregion(cxlr); - if (rc) - return rc; + cxl_region_invalidate_memregion(cxlr); for (i = count - 1; i >= 0; i--) { struct cxl_endpoint_decoder *cxled = p->targets[i]; @@ -277,23 +275,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) cxl_rr = cxl_rr_load(iter, cxlr); cxld = cxl_rr->decoder; if (cxld->reset) - rc = cxld->reset(cxld); - if (rc) - return rc; + cxld->reset(cxld); set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); } endpoint_reset: - rc = cxled->cxld.reset(&cxled->cxld); - if (rc) - return rc; + cxled->cxld.reset(&cxled->cxld); set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); } /* all decoders associated with this region have been torn down */ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); - - return 0; } static int commit_decoder(struct cxl_decoder *cxld) @@ -409,16 +401,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr, * still pending. */ if (p->state == CXL_CONFIG_RESET_PENDING) { - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); - /* - * Revert to committed since there may still be active - * decoders associated with this region, or move forward - * to active to mark the reset successful - */ - if (rc) - p->state = CXL_CONFIG_COMMIT; - else - p->state = CXL_CONFIG_ACTIVE; + cxl_region_decode_reset(cxlr, p->interleave_ways); + p->state = CXL_CONFIG_ACTIVE; } } @@ -794,26 +778,50 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos) return rc; } +static int check_commit_order(struct device *dev, const void *data) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + /* + * if port->commit_end is not the only free decoder, then out of + * order shutdown has occurred, block further allocations until + * that is resolved + */ + if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) + return -EBUSY; + return 0; +} + static int match_free_decoder(struct device *dev, void *data) { + struct cxl_port *port = to_cxl_port(dev->parent); struct cxl_decoder *cxld; - int *id = data; + int rc; if (!is_switch_decoder(dev)) return 0; cxld = to_cxl_decoder(dev); - /* enforce ordered allocation */ - if (cxld->id != *id) + if (cxld->id != port->commit_end + 1) return 0; - if (!cxld->region) - return 1; + if (cxld->region) { + dev_dbg(dev->parent, + "next decoder to commit (%s) is already reserved (%s)\n", + dev_name(dev), dev_name(&cxld->region->dev)); + return 0; + } - (*id)++; - - return 0; + rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, + check_commit_order); + if (rc) { + dev_dbg(dev->parent, + "unable to allocate %s due to out of order shutdown\n", + dev_name(dev)); + return 0; + } + return 1; } static int match_auto_decoder(struct device *dev, void *data) @@ -840,7 +848,6 @@ cxl_region_find_decoder(struct cxl_port *port, struct cxl_region *cxlr) { struct device *dev; - int id = 0; if (port == cxled_to_port(cxled)) return &cxled->cxld; @@ -849,7 +856,7 @@ cxl_region_find_decoder(struct cxl_port *port, dev = device_find_child(&port->dev, &cxlr->params, match_auto_decoder); else - dev = device_find_child(&port->dev, &id, match_free_decoder); + dev = device_find_child(&port->dev, NULL, match_free_decoder); if (!dev) return NULL; /* @@ -2054,13 +2061,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) get_device(&cxlr->dev); if (p->state > CXL_CONFIG_ACTIVE) { - /* - * TODO: tear down all impacted regions if a device is - * removed out of order - */ - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); - if (rc) - goto out; + 
cxl_region_decode_reset(cxlr, p->interleave_ways); p->state = CXL_CONFIG_ACTIVE; } diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 8672b42ee4d1..8389a94adb1a 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -279,7 +279,7 @@ TRACE_EVENT(cxl_generic_event, #define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00 #define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01 #define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02 -#define show_mem_event_type(type) __print_symbolic(type, \ +#define show_gmer_mem_event_type(type) __print_symbolic(type, \ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \ @@ -373,7 +373,7 @@ TRACE_EVENT(cxl_general_media, "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), - show_mem_event_type(__entry->type), + show_gmer_mem_event_type(__entry->type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->device, __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), @@ -391,6 +391,17 @@ TRACE_EVENT(cxl_general_media, * DRAM Event Record defines many fields the same as the General Media Event * Record. Reuse those definitions as appropriate. */ +#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00 +#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01 +#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02 +#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03 +#define show_dram_mem_event_type(type) __print_symbolic(type, \ + { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ + { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \ + { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ + { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \ +) + #define CXL_DER_VALID_CHANNEL BIT(0) #define CXL_DER_VALID_RANK BIT(1) #define CXL_DER_VALID_NIBBLE BIT(2) @@ -477,7 +488,7 @@ TRACE_EVENT(cxl_dram, "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), - show_mem_event_type(__entry->type), + show_dram_mem_event_type(__entry->type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->nibble_mask, __entry->bank_group, __entry->bank, diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 0d8b810a51f0..5406e3ab3d4a 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -359,7 +359,7 @@ struct cxl_decoder { struct cxl_region *region; unsigned long flags; int (*commit)(struct cxl_decoder *cxld); - int (*reset)(struct cxl_decoder *cxld); + void (*reset)(struct cxl_decoder *cxld); }; /* @@ -730,6 +730,7 @@ static inline bool is_cxl_root(struct cxl_port *port) int cxl_num_decoders_committed(struct cxl_port *port); bool is_cxl_port(const struct device *dev); struct cxl_port *to_cxl_port(const struct device *dev); +void cxl_port_commit_reap(struct cxl_decoder *cxld); struct pci_bus; int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, struct pci_bus *bus); diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index 861dde65768f..9dc394295e1f 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -208,7 +208,22 @@ static struct cxl_driver cxl_port_driver = { }, }; -module_cxl_driver(cxl_port_driver); +static int __init cxl_port_init(void) +{ + return cxl_driver_register(&cxl_port_driver); +} +/* + * Be ready to immediately enable ports emitted by the platform CXL root + * (e.g. 
cxl_acpi) when CONFIG_CXL_PORT=y. + */ +subsys_initcall(cxl_port_init); + +static void __exit cxl_port_exit(void) +{ + cxl_driver_unregister(&cxl_port_driver); +} +module_exit(cxl_port_exit); + MODULE_DESCRIPTION("CXL: Port enumeration and services"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 9c1a729cd77e..6d74e62bbee0 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn, nr_pages = 1; pgoff = linear_page_index(vmf->vma, - ALIGN(vmf->address, fault_size)); + ALIGN_DOWN(vmf->address, fault_size)); for (i = 0; i < nr_pages; i++) { struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 995427afe077..6b98a23e3332 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1391,11 +1391,12 @@ static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pde INIT_LIST_HEAD(&dma_dev->channels); for (i = 0; i < edma->num_channels; i++) { struct ep93xx_dma_chan *edmac = &edma->channels[i]; + int len; edmac->chan.device = dma_dev; edmac->regs = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(edmac->regs)) - return edmac->regs; + return ERR_CAST(edmac->regs); edmac->irq = fwnode_irq_get(dev_fwnode(dev), i); if (edmac->irq < 0) @@ -1404,9 +1405,11 @@ static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pde edmac->edma = edma; if (edma->m2m) - snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i); + len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i); else - snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i); + len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i); + if (len >= sizeof(dma_clk_name)) + return ERR_PTR(-ENOBUFS); edmac->clk = devm_clk_get(dev, dma_clk_name); if (IS_ERR(edmac->clk)) { diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c index 65a27c5a7bce..811389fc9cb8 100644 --- a/drivers/dma/sh/rz-dmac.c +++ b/drivers/dma/sh/rz-dmac.c @@ -601,22 +601,25 @@ static int rz_dmac_config(struct dma_chan *chan, struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); u32 val; - channel->src_per_address = config->src_addr; channel->dst_per_address = config->dst_addr; - - val = rz_dmac_ds_to_val_mapping(config->dst_addr_width); - if (val == CHCFG_DS_INVALID) - return -EINVAL; - channel->chcfg &= ~CHCFG_FILL_DDS_MASK; - channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val); + if (channel->dst_per_address) { + val = rz_dmac_ds_to_val_mapping(config->dst_addr_width); + if (val == CHCFG_DS_INVALID) + return -EINVAL; - val = rz_dmac_ds_to_val_mapping(config->src_addr_width); - if (val == CHCFG_DS_INVALID) - return -EINVAL; + channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val); + } + channel->src_per_address = config->src_addr; channel->chcfg &= ~CHCFG_FILL_SDS_MASK; - channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val); + if (channel->src_per_address) { + val = rz_dmac_ds_to_val_mapping(config->src_addr_width); + if (val == CHCFG_DS_INVALID) + return -EINVAL; + + channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val); + } return 0; } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 406ee199c2ac..b3f27b3f9209 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, d->static_tr.elcnt = elcnt; - /* - * PDMA must to close the packet when the 
channel is in packet mode. - * For TR mode when the channel is not cyclic we also need PDMA to close - * the packet otherwise the transfer will stall because PDMA holds on - * the data it has received from the peripheral. - */ if (uc->config.pkt_mode || !uc->cyclic) { + /* + * PDMA must close the packet when the channel is in packet mode. + * For TR mode when the channel is not cyclic we also need PDMA + * to close the packet otherwise the transfer will stall because + * PDMA holds on the data it has received from the peripheral. + */ unsigned int div = dev_width * elcnt; if (uc->cyclic) d->static_tr.bstcnt = d->residue / d->sglen / div; else d->static_tr.bstcnt = d->residue / div; + } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA && + uc->config.dir == DMA_DEV_TO_MEM && + uc->cyclic) { + /* + * For cyclic mode with BCDMA we have to set EOP in each TR to + * prevent short packet errors seen on channel teardown. So the + * PDMA must close the packet after every TR transfer by setting + * burst count equal to the number of bytes transferred. + */ + struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base; - if (uc->config.dir == DMA_DEV_TO_MEM && - d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) - return -EINVAL; + d->static_tr.bstcnt = + (tr_req->icnt0 * tr_req->icnt1) / dev_width; } else { d->static_tr.bstcnt = 0; } + if (uc->config.dir == DMA_DEV_TO_MEM && + d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) + return -EINVAL; + return 0; } @@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* static TR for remote PDMA */ if (udma_configure_statictr(uc, d, dev_width, burst)) { dev_err(uc->ud->dev, - "%s: StaticTR Z is limited to maximum 4095 (%u)\n", - __func__, d->static_tr.bstcnt); + "%s: StaticTR Z is limited to maximum %u (%u)\n", + __func__, uc->ud->match_data->statictr_z_mask, + d->static_tr.bstcnt); udma_free_hwdesc(uc, d); kfree(d); @@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; unsigned int i; int num_tr; + u32 period_csf = 0; num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); @@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, period_addr = buf_addr | ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); + /* + * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the + * last TR of a descriptor, to mark the packet as complete. + * This is required for getting the teardown completion message in case + * of TX, and to avoid short-packet error in case of RX. + * + * As we are in cyclic mode, we do not know which period might be the + * last one, so set the flag for each period. 
+ */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY && + uc->ud->match_data->type == DMA_TYPE_BCDMA) { + period_csf = CPPI5_TR_CSF_EOP; + } + for (i = 0; i < periods; i++) { int tr_idx = i * num_tr; @@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, } if (!(flags & DMA_PREP_INTERRUPT)) - cppi5_tr_csf_set(&tr_req[tr_idx].flags, - CPPI5_TR_CSF_SUPR_EVT); + period_csf |= CPPI5_TR_CSF_SUPR_EVT; + + if (period_csf) + cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf); period_addr += period_len; } @@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* static TR for remote PDMA */ if (udma_configure_statictr(uc, d, dev_width, burst)) { dev_err(uc->ud->dev, - "%s: StaticTR Z is limited to maximum 4095 (%u)\n", - __func__, d->static_tr.bstcnt); + "%s: StaticTR Z is limited to maximum %u (%u)\n", + __func__, uc->ud->match_data->statictr_z_mask, + d->static_tr.bstcnt); udma_free_hwdesc(uc, d); kfree(d); diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index d3cd4cc54ace..a9a8ba067007 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -342,9 +342,11 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) int ecc_irq; int rc; - rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap); - if (rc) - return rc; + if (!llcc_driv_data->ecc_irq_configured) { + rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap); + if (rc) + return rc; + } /* Allocate edac control info */ edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank", diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 6adadb11962e..892b94cfd626 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -204,7 +204,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self // the node->ports array where the parent node should be. Later, // when we handle the parent node, we fix up the reference. 
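(Editorial aside on the core-topology.c change just below: the one-line fix relies on the placeholder stashed in node->color being an index into the node's ports[] array rather than the position in the self-ID stream. A minimal, hypothetical sketch of this record-then-fix-up pattern follows; toy_node, record_parent_slot() and connect_parent() are illustrative names, not the driver's API.)

struct toy_node {
	struct toy_node *ports[16];
	int color;	/* which ports[] slot still needs the parent pointer */
};

/* While parsing a child: the parent is not known yet, so remember the slot. */
static void record_parent_slot(struct toy_node *child, int port_index)
{
	child->ports[port_index] = NULL;
	child->color = port_index;	/* must be the port index, not a loop counter */
}

/* Later, when the parent node is reached, patch the remembered slot. */
static void connect_parent(struct toy_node *child, struct toy_node *parent)
{
	child->ports[child->color] = parent;
}

If a loop counter were stored instead, connect_parent() would write the parent into the wrong slot whenever skipped port states make the counter diverge from the port index, which is what the change below corrects.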
++parent_count; - node->color = i; + node->color = port_index; break; case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD: diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 4d231bc375e0..b14cbdae94e8 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -481,11 +481,16 @@ static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid, struct ffa_send_direct_data2 *data) { u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); + union { + uuid_t uuid; + __le64 regs[2]; + } uuid_regs = { .uuid = *uuid }; ffa_value_t ret, args = { - .a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids, + .a0 = FFA_MSG_SEND_DIRECT_REQ2, + .a1 = src_dst_ids, + .a2 = le64_to_cpu(uuid_regs.regs[0]), + .a3 = le64_to_cpu(uuid_regs.regs[1]), }; - - export_uuid((u8 *)&args.a2, uuid); memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data)); invoke_ffa_fn(args, &ret); @@ -496,7 +501,7 @@ static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid, return ffa_to_linux_errno((int)ret.a2); if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) { - memcpy(data, &ret.a4, sizeof(*data)); + memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data)); return 0; } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 96b2e5f9a8ef..157172a5f2b5 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -325,7 +325,10 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister); static void scmi_device_release(struct device *dev) { - kfree(to_scmi_dev(dev)); + struct scmi_device *scmi_dev = to_scmi_dev(dev); + + kfree_const(scmi_dev->name); + kfree(scmi_dev); } static void __scmi_device_destroy(struct scmi_device *scmi_dev) @@ -338,7 +341,6 @@ static void __scmi_device_destroy(struct scmi_device *scmi_dev) if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM) atomic_set(&scmi_syspower_registered, 0); - kfree_const(scmi_dev->name); ida_free(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); } @@ -410,7 +412,6 @@ __scmi_device_create(struct device_node *np, struct device *parent, return scmi_dev; put_dev: - kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); ida_free(&scmi_bus_id, id); return NULL; diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index c4b8e7ff88aa..cdec50a698a1 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -163,6 +163,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * used to initialize this channel * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel + * @is_p2a: A flag to identify a channel as P2A (RX) * @rx_timeout_ms: The configured RX timeout in milliseconds. 
* @handle: Pointer to SCMI entity handle * @no_completion_irq: Flag to indicate that this channel has no completion @@ -174,6 +175,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); struct scmi_chan_info { int id; struct device *dev; + bool is_p2a; unsigned int rx_timeout_ms; struct scmi_handle *handle; bool no_completion_irq; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 88c5c4ff4bb6..f8934d049d68 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1048,6 +1048,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info, static inline void scmi_clear_channel(struct scmi_info *info, struct scmi_chan_info *cinfo) { + if (!cinfo->is_p2a) { + dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n"); + return; + } + if (info->desc->ops->clear_channel) info->desc->ops->clear_channel(cinfo); } @@ -2638,6 +2643,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node, if (!cinfo) return -ENOMEM; + cinfo->is_p2a = !tx; cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; /* Create a unique name for this transport device */ @@ -2976,10 +2982,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info) dbg->top_dentry = top_dentry; if (devm_add_action_or_reset(info->dev, - scmi_debugfs_common_cleanup, dbg)) { - scmi_debugfs_common_cleanup(dbg); + scmi_debugfs_common_cleanup, dbg)) return NULL; - } return dbg; } @@ -3044,10 +3048,10 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev) dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); - ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms", + ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms", &trans->desc->max_rx_timeout_ms); if (ret && ret != -EINVAL) - dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n"); + dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); dev_info(dev, "SCMI max-rx-timeout: %dms\n", trans->desc->max_rx_timeout_ms); diff --git a/drivers/firmware/arm_scmi/transports/Makefile b/drivers/firmware/arm_scmi/transports/Makefile index 362a406f08e6..3ba3d3bee151 100644 --- a/drivers/firmware/arm_scmi/transports/Makefile +++ b/drivers/firmware/arm_scmi/transports/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only -scmi_transport_mailbox-objs := mailbox.o -obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o +# Keep before scmi_transport_mailbox.o to allow precedence +# while matching the compatible. 
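(Side note on the scmi_debugfs_common_setup() hunk above: devm_add_action_or_reset() already invokes the action itself when it fails to register it, so the caller must only propagate the error and never run the cleanup a second time. A hedged kernel-style sketch of the idiom; my_cleanup, register_cleanup and dbg are placeholder names, not the SCMI code.)

#include <linux/device.h>

static void my_cleanup(void *data)
{
	/* undo whatever was set up for @data */
}

static int register_cleanup(struct device *dev, void *dbg)
{
	int ret;

	ret = devm_add_action_or_reset(dev, my_cleanup, dbg);
	if (ret)
		return ret;	/* my_cleanup(dbg) already ran; do not call it again */

	return 0;
}

This is why the patch drops the explicit scmi_debugfs_common_cleanup() call from the error path rather than keeping it next to the helper.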
scmi_transport_smc-objs := smc.o obj-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += scmi_transport_smc.o +scmi_transport_mailbox-objs := mailbox.o +obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o scmi_transport_optee-objs := optee.o obj-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += scmi_transport_optee.o scmi_transport_virtio-objs := virtio.o diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c index 1a754dee24f7..e3d5f7560990 100644 --- a/drivers/firmware/arm_scmi/transports/mailbox.c +++ b/drivers/firmware/arm_scmi/transports/mailbox.c @@ -25,6 +25,7 @@ * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel * @cinfo: SCMI channel info * @shmem: Transmit/Receive shared memory area + * @chan_lock: Lock that prevents multiple xfers from being queued */ struct scmi_mailbox { struct mbox_client cl; @@ -33,6 +34,7 @@ struct scmi_mailbox { struct mbox_chan *chan_platform_receiver; struct scmi_chan_info *cinfo; struct scmi_shared_mem __iomem *shmem; + struct mutex chan_lock; }; #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl) @@ -238,6 +240,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, cinfo->transport_info = smbox; smbox->cinfo = cinfo; + mutex_init(&smbox->chan_lock); return 0; } @@ -267,13 +270,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo, struct scmi_mailbox *smbox = cinfo->transport_info; int ret; + /* + * The mailbox layer has its own queue. However the mailbox queue + * confuses the per message SCMI timeouts since the clock starts when + * the message is submitted into the mailbox queue. So when multiple + * messages are queued up the clock starts on all messages instead of + * only the one inflight. + */ + mutex_lock(&smbox->chan_lock); + ret = mbox_send_message(smbox->chan, xfer); + /* mbox_send_message returns non-negative value on success */ + if (ret < 0) { + mutex_unlock(&smbox->chan_lock); + return ret; + } - /* mbox_send_message returns non-negative value on success, so reset */ - if (ret > 0) - ret = 0; - - return ret; + return 0; } static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, @@ -281,13 +294,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret, { struct scmi_mailbox *smbox = cinfo->transport_info; - /* - * NOTE: we might prefer not to need the mailbox ticker to manage the - * transfer queueing since the protocol layer queues things by itself. - * Unfortunately, we have to kick the mailbox framework after we have - * received our message. 
- */ mbox_client_txdone(smbox->chan, ret); + + /* Release channel */ + mutex_unlock(&smbox->chan_lock); } static void mailbox_fetch_response(struct scmi_chan_info *cinfo, diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 285fe7ad490d..3e8051fe8296 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev) int err; /* unregister private events */ - cpuhp_remove_state(sdei_entry_point); + cpuhp_remove_state(sdei_hp_state); err = sdei_unregister_shared(); if (err) diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index 9ca5ee58edbd..0f7ec8848202 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -76,14 +76,11 @@ #define AUTO_UPDATE_INFO_SIZE SZ_1M #define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE) -#define AUTO_UPDATE_TIMEOUT_MS 60000 - struct mpfs_auto_update_priv { struct mpfs_sys_controller *sys_controller; struct device *dev; struct mtd_info *flash; struct fw_upload *fw_uploader; - struct completion programming_complete; size_t size_per_bitstream; bool cancel_request; }; @@ -156,19 +153,6 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader) static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader) { - struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - int ret; - - /* - * There is no meaningful way to get the status of the programming while - * it is in progress, so attempting anything other than waiting for it - * to complete would be misplaced. - */ - ret = wait_for_completion_timeout(&priv->programming_complete, - msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS)); - if (!ret) - return FW_UPLOAD_ERR_TIMEOUT; - return FW_UPLOAD_ERR_NONE; } @@ -349,33 +333,23 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, u32 offset, u32 size, u32 *written) { struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle; - enum fw_upload_err err = FW_UPLOAD_ERR_NONE; int ret; - reinit_completion(&priv->programming_complete); - ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written); - if (ret) { - err = FW_UPLOAD_ERR_RW_ERROR; - goto out; - } + if (ret) + return FW_UPLOAD_ERR_RW_ERROR; - if (priv->cancel_request) { - err = FW_UPLOAD_ERR_CANCELED; - goto out; - } + if (priv->cancel_request) + return FW_UPLOAD_ERR_CANCELED; if (mpfs_auto_update_is_bitstream_info(data, size)) - goto out; + return FW_UPLOAD_ERR_NONE; ret = mpfs_auto_update_verify_image(fw_uploader); if (ret) - err = FW_UPLOAD_ERR_FW_INVALID; + return FW_UPLOAD_ERR_FW_INVALID; -out: - complete(&priv->programming_complete); - - return err; + return FW_UPLOAD_ERR_NONE; } static const struct fw_upload_ops mpfs_auto_update_ops = { @@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "The current bitstream does not support auto-update\n"); - init_completion(&priv->programming_complete); - fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update", &mpfs_auto_update_ops, priv); if (IS_ERR(fw_uploader)) diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 10986cb11ec0..2e4260ba5f79 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -112,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info { }; #define QSEECOM_MAX_APP_NAME_SIZE 64 +#define 
SHMBRIDGE_RESULT_NOTSUPP 4 /* Each bit configures cold/warm boot address for one of the 4 CPUs */ static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = { @@ -216,7 +217,7 @@ static DEFINE_SPINLOCK(scm_query_lock); struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void) { - return __scm->mempool; + return __scm ? __scm->mempool : NULL; } static enum qcom_scm_convention __get_convention(void) @@ -545,7 +546,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode) } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SET_DLOAD_MODE)) { ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode); - } else { + } else if (dload_mode) { dev_err(__scm->dev, "No available mechanism for setting download mode\n"); } @@ -1361,6 +1362,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); int qcom_scm_shm_bridge_enable(void) { + int ret; + struct qcom_scm_desc desc = { .svc = QCOM_SCM_SVC_MP, .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE, @@ -1373,7 +1376,15 @@ int qcom_scm_shm_bridge_enable(void) QCOM_SCM_MP_SHM_BRIDGE_ENABLE)) return -EOPNOTSUPP; - return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0]; + ret = qcom_scm_call(__scm->dev, &desc, &res); + + if (ret) + return ret; + + if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP) + return -EOPNOTSUPP; + + return res.result[0]; } EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index d670635914ec..a74600d9f2d7 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0; static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE; bool __ro_after_init smccc_trng_available = false; -u64 __ro_after_init smccc_has_sve_hint = false; s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED; s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED; @@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit) smccc_conduit = conduit; smccc_trng_available = smccc_probe_trng(); - if (IS_ENABLED(CONFIG_ARM64_SVE) && - smccc_version >= ARM_SMCCC_VERSION_1_3) - smccc_has_sve_hint = true; if ((smccc_version >= ARM_SMCCC_VERSION_1_2) && (smccc_conduit != SMCCC_CONDUIT_NONE)) { diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 04c03402db6d..ea40ad43a79b 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -406,6 +406,8 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, gpio->dcache[GPIO_BANK(offset)] = reg; iowrite32(reg, addr); + /* Flush write */ + ioread32(addr); } static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, @@ -1191,7 +1193,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) if (!gpio_id) return -EINVAL; - gpio->clk = of_clk_get(pdev->dev.of_node, 0); + gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(gpio->clk)) { dev_warn(&pdev->dev, "Failed to get clock from devicetree, debouncing disabled\n"); diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c index 07e0d7180579..59a8f3a5c4e4 100644 --- a/drivers/gpio/gpio-sloppy-logic-analyzer.c +++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c @@ -234,7 +234,9 @@ static int gpio_la_poll_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - devm_mutex_init(dev, &priv->blob_lock); + ret = devm_mutex_init(dev, &priv->blob_lock); + if (ret) + return ret; fops_buf_size_set(priv, 
GPIO_LA_DEFAULT_BUF_SIZE); diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c index 2b2dd7e92211..51d2475c05c5 100644 --- a/drivers/gpio/gpiolib-swnode.c +++ b/drivers/gpio/gpiolib-swnode.c @@ -64,7 +64,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode, struct fwnode_reference_args args; struct gpio_desc *desc; char propname[32]; /* 32 is max size of property name */ - int ret; + int ret = 0; swnode = to_software_node(fwnode); if (!swnode) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d5952ab7752c..2b02655abb56 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -4926,6 +4926,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos) return NULL; s->private = priv; + if (*pos > 0) + priv->newline = true; priv->idx = srcu_read_lock(&gpio_devices_srcu); list_for_each_entry_srcu(gdev, &gpio_devices, list, @@ -4969,7 +4971,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) gc = srcu_dereference(gdev->chip, &gdev->srcu); if (!gc) { - seq_printf(s, "%s%s: (dangling chip)", + seq_printf(s, "%s%s: (dangling chip)\n", priv->newline ? "\n" : "", dev_name(&gdev->dev)); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index f85ace0384d2..7dd55ed57c1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, struct acpi_buffer *params) { acpi_status status; + union acpi_object *obj; union acpi_object atif_arg_elements[2]; struct acpi_object_list atif_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, &buffer); + obj = (union acpi_object *)buffer.pointer; - /* Fail only if calling the method fails and ATIF is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + /* Fail if calling the method fails */ + if (ACPI_FAILURE(status)) { DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", acpi_format_exception(status)); - kfree(buffer.pointer); + kfree(obj); return NULL; } - return buffer.pointer; + if (obj->type != ACPI_TYPE_BUFFER) { + DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n", + obj->type); + kfree(obj); + return NULL; + } + + return obj; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index ce5ca304dba9..fa572ba7f9fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, list_add_tail(&vm->vm_list_node, &(vm->process_info->vm_list_head)); vm->process_info->n_vms++; - - *ef = dma_fence_get(&vm->process_info->eviction_fence->base); + if (ef) + *ef = dma_fence_get(&vm->process_info->eviction_fence->base); mutex_unlock(&vm->process_info->lock); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1e475eb01417..d891ab779ca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, /* Only a single BO list is allowed to simplify handling. 
*/ if (p->bo_list) - ret = -EINVAL; + goto free_partial_kdata; ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index cbef720de779..9da4414de617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz int r; uint32_t *data, x; - if (size & 0x3 || *pos & 0x3) + if (size > 4096 || size & 0x3 || *pos & 0x3) return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); @@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | 0444, root, + S_IFREG | 0400, root, adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); @@ -2100,11 +2100,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_securedisplay_debugfs_init(adev); amdgpu_fw_attestation_debugfs_init(adev); - debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, + debugfs_create_file("amdgpu_evict_vram", 0400, root, adev, &amdgpu_evict_vram_fops); - debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, + debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev, &amdgpu_evict_gtt_fops); - debugfs_create_file("amdgpu_test_ib", 0444, root, adev, + debugfs_create_file("amdgpu_test_ib", 0400, root, adev, &amdgpu_debugfs_test_ib_fops); debugfs_create_file("amdgpu_vm_info", 0444, root, adev, &amdgpu_debugfs_vm_info_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 83e54697f0ee..f1ffab5a1eae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1635,11 +1635,9 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) { int r; - if (!amdgpu_sriov_vf(adev)) { - r = device_create_file(adev->dev, &dev_attr_enforce_isolation); - if (r) - return r; - } + r = device_create_file(adev->dev, &dev_attr_enforce_isolation); + if (r) + return r; r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); if (r) @@ -1650,8 +1648,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) { - if (!amdgpu_sriov_vf(adev)) - device_remove_file(adev->dev, &dev_attr_enforce_isolation); + device_remove_file(adev->dev, &dev_attr_enforce_isolation); device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 10b61ff63802..7d4b540340e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) + if (r) { + amdgpu_mes_unlock(&adev->mes); goto clean_up_memory; + } amdgpu_mes_ring_to_queue_props(adev, ring, &qprops); @@ -1237,7 +1239,6 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, amdgpu_ring_fini(ring); clean_up_memory: kfree(ring); - amdgpu_mes_unlock(&adev->mes); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 5e8833e4fed2..ccfd2a4b4acc 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c 
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -482,7 +482,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, case AMDGPU_SPX_PARTITION_MODE: return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: return (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 3) && diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 8d27421689c9..a37a6801c9ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -621,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) if (amdgpu_mes_log_enable) { mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE; } return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, @@ -1336,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle) adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; adev->mes.enable_legacy_queue_map = true; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE; r = amdgpu_mes_init(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index a8763496aed3..9288f37a3cc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin"); #define SDMA0_HYP_DEC_REG_END 0x589a #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +/*define for compression field for sdma7*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16 +#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift) + static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = { SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG), SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG), @@ -1724,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) | + SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 9044bdb38cf4..3e6b4736a7fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) size >>= 1; - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); + atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage); } mutex_unlock(&p->mutex); @@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, 
kfd_process_device_remove_obj_handle( pdd, GET_IDR_HANDLE(args->handle)); - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); + atomic64_sub(size, &pdd->vram_usage); err_unlock: err_pdd: @@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { bo_bucket->restored_offset = offset; /* Update the VRAM usage count */ - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); + atomic64_add(bo_bucket->size, &pdd->vram_usage); } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d6530febabad..26e48fdc8728 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -775,7 +775,7 @@ struct kfd_process_device { enum kfd_pdd_bound bound; /* VRAM usage */ - uint64_t vram_usage; + atomic64_t vram_usage; struct attribute attr_vram; char vram_filename[MAX_SYSFS_FILENAME_LEN]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d07acf1b2f93..d4aa843aacfd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -332,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, } else if (strncmp(attr->name, "vram_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_vram); - return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); + return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage)); } else if (strncmp(attr->name, "sdma_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_sdma); @@ -1625,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; pdd->runtime_inuse = false; - pdd->vram_usage = 0; + atomic64_set(&pdd->vram_usage, 0); pdd->sdma_past_activity_counter = 0; pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); @@ -1702,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, - &ef); + p->ef ? NULL : &ef); if (ret) { dev_err(dev->adev->dev, "Failed to create process VM object\n"); return ret; } - RCU_INIT_POINTER(p->ef, ef); + + if (!p->ef) + RCU_INIT_POINTER(p->ef, ef); + pdd->drm_priv = drm_file->private_data; ret = kfd_process_device_reserve_ib_mem(pdd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 04e746923697..1893c27746a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref) spin_lock(&svm_bo->list_lock); } spin_unlock(&svm_bo->list_lock); + + if (mmget_not_zero(svm_bo->eviction_fence->mm)) { + struct kfd_process_device *pdd; + struct kfd_process *p; + struct mm_struct *mm; + + mm = svm_bo->eviction_fence->mm; + /* + * The forked child process takes svm_bo device pages ref, svm_bo could be + * released after parent process is gone. + */ + p = kfd_lookup_process_by_mm(mm); + if (p) { + pdd = kfd_get_process_device_data(svm_bo->node, p); + if (pdd) + atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage); + kfd_unref_process(p); + } + mmput(mm); + } + if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) /* We're not in the eviction worker. 
Signal the fence. */ dma_fence_signal(&svm_bo->eviction_fence->base); @@ -532,6 +553,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { + struct kfd_process_device *pdd; struct amdgpu_bo_param bp; struct svm_range_bo *svm_bo; struct amdgpu_bo_user *ubo; @@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, list_add(&prange->svm_bo_list, &svm_bo->range_list); spin_unlock(&svm_bo->list_lock); + pdd = svm_range_get_pdd_by_node(prange, node); + if (pdd) + atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage); + return 0; reserve_bo_failed: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 60c617fcc97e..07e9ce99694f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2972,10 +2972,11 @@ static int dm_suspend(void *handle) hpd_rx_irq_work_suspend(dm); - if (adev->dm.dc->caps.ips_support) - dc_allow_idle_optimizations(adev->dm.dc, true); - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + + if (dm->dc->caps.ips_support && adev->in_s0ix) + dc_allow_idle_optimizations(dm->dc, true); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -8373,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0) || acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED) { + DC_PSR_VERSION_UNSUPPORTED || + !(adev->flags & AMD_IS_APU)) { timing = &acrtc_state->stream->timing; /* at least 2 frames */ @@ -9427,6 +9429,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9546,6 +9549,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9597,6 +9601,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. 
(the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069e0195e50a..eea317dcbe8c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -44,6 +44,7 @@ #include "dm_helpers.h" #include "ddc_service_types.h" +#include "clk_mgr.h" static u32 edid_extract_panel_id(struct edid *edid) { @@ -1121,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; struct drm_device *dev = aconnector->base.dev; + struct dc_state *dc_state = ctx->dc->current_state; + struct clk_mgr *clk_mgr = ctx->dc->clk_mgr; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1221,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request( pipe_ctx->stream->test_pattern.type = test_pattern; pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + /* Temp W/A for compliance test failure */ + dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false; + dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ? + clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz; + dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz; + ctx->dc->clk_mgr->funcs->update_clocks( + ctx->dc->clk_mgr, + dc_state, + false); + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 0d8498ab9b23..be8fbb04ad98 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -3127,7 +3127,9 @@ static enum bp_result bios_parser_get_vram_info( struct atom_data_revision revision; // vram info moved to umc_info for DCN4x - if (info && DATA_TABLES(umc_info)) { + if (dcb->ctx->dce_version >= DCN_VERSION_4_01 && + dcb->ctx->dce_version < DCN_VERSION_MAX && + info && DATA_TABLES(umc_info)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(umc_info)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5c39390ecbd5..a88f1b6ea64c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -5065,11 +5065,26 @@ static bool update_planes_and_stream_v3(struct dc *dc, return true; } +static void clear_update_flags(struct dc_surface_update *srf_updates, + int surface_count, struct dc_stream_state *stream) +{ + int i; + + if (stream) + stream->update_flags.raw = 0; + + for (i = 0; i < surface_count; i++) + if (srf_updates[i].surface) + srf_updates[i].surface->update_flags.raw = 0; +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* * update planes and stream version 3 separates FULL and FAST updates @@ -5086,10 +5101,16 @@ bool dc_update_planes_and_stream(struct dc *dc, * features as they 
are now transparent to the new sequence. */ if (dc->ctx->dce_version >= DCN_VERSION_4_01) - return update_planes_and_stream_v3(dc, srf_updates, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return update_planes_and_stream_v2(dc, srf_updates, + else + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); + + return ret; } void dc_commit_updates_for_stream(struct dc *dc, @@ -5099,6 +5120,8 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_stream_update *stream_update, struct dc_state *state) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* TODO: Since change commit sequence can have a huge impact, * we decided to only enable it for DCN3x. However, as soon as @@ -5106,17 +5129,17 @@ void dc_commit_updates_for_stream(struct dc *dc, * the new sequence for all ASICs. */ if (dc->ctx->dce_version >= DCN_VERSION_4_01) { - update_planes_and_stream_v3(dc, srf_updates, surface_count, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return; - } - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - update_planes_and_stream_v2(dc, srf_updates, surface_count, + } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - return; - } - update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, - stream_update, state); + } else + ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); } uint8_t dc_get_current_stream_count(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c index 11c904ae2958..c4c52173ef22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c @@ -303,6 +303,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m if (project == dml_project_dcn35 || project == dml_project_dcn351) { policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false; + policy->EnhancedPrefetchScheduleAccelerationFinal = 0; policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/ policy->UseOnlyMaxPrefetchModes = 1; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 3cd52e7a9c77..95838c7ab054 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link) isPSRSUSupported = false; else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) isPSRSUSupported = false; + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01) + isPSRSUSupported = false; else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) isPSRSUSupported = true; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 9118fcddbf11..227bf0e84a13 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -60,7 +60,7 @@ struct vi_dpm_level { struct vi_dpm_table { uint32_t count; - struct vi_dpm_level dpm_level[] 
__counted_by(count); + struct vi_dpm_level dpm_level[]; }; #define PCIE_PERF_REQ_REMOVE_REGISTRY 0 @@ -91,7 +91,7 @@ struct phm_set_power_state_input { struct phm_clock_array { uint32_t count; - uint32_t values[] __counted_by(count); + uint32_t values[]; }; struct phm_clock_voltage_dependency_record { @@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record { struct phm_clock_voltage_dependency_table { uint32_t count; - struct phm_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_clock_voltage_dependency_record entries[]; }; struct phm_phase_shedding_limits_record { @@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record { struct phm_uvd_clock_voltage_dependency_table { uint8_t count; - struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvd_clock_voltage_dependency_record entries[]; }; struct phm_acp_clock_voltage_dependency_record { @@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record { struct phm_acp_clock_voltage_dependency_table { uint32_t count; - struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_acp_clock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_record { @@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record { struct phm_phase_shedding_limits_table { uint32_t count; - struct phm_phase_shedding_limits_record entries[] __counted_by(count); + struct phm_phase_shedding_limits_record entries[]; }; struct phm_vceclock_voltage_dependency_table { uint8_t count; - struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vceclock_voltage_dependency_record entries[]; }; struct phm_uvdclock_voltage_dependency_table { uint8_t count; - struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvdclock_voltage_dependency_record entries[]; }; struct phm_samuclock_voltage_dependency_table { uint8_t count; - struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samuclock_voltage_dependency_record entries[]; }; struct phm_acpclock_voltage_dependency_table { uint32_t count; - struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_acpclock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_table { uint8_t count; - struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vce_clock_voltage_dependency_record entries[]; }; @@ -393,7 +393,7 @@ union phm_cac_leakage_record { struct phm_cac_leakage_table { uint32_t count; - union phm_cac_leakage_record entries[] __counted_by(count); + union phm_cac_leakage_record entries[]; }; struct phm_samu_clock_voltage_dependency_record { @@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record { struct phm_samu_clock_voltage_dependency_table { uint8_t count; - struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samu_clock_voltage_dependency_record entries[]; }; struct phm_cac_tdp_table { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index bb3bc68dfc39..ee1bcfaae3e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu) } } +static bool smu_is_workload_profile_available(struct smu_context *smu, + u32 profile) +{ + if (profile >= PP_SMC_POWER_PROFILE_COUNT) 
+ return false; + return smu->workload_map && smu->workload_map[profile].valid_mapping; +} + static int smu_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1251,21 +1259,33 @@ static int smu_sw_init(void *handle) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + if (smu->is_apu || + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -2226,7 +2246,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu, static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings, - bool force_update) + bool init) { int ret = 0; int index = 0; @@ -2255,7 +2275,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } } - if (force_update || smu_dpm_ctx->dpm_level != level) { + if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { dev_err(smu->adev->dev, "Failed to set performance level!"); @@ -2272,7 +2292,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; workload[0] = smu->workload_setting[index]; - if (force_update || smu->power_profile_mode != workload[0]) + if (init || smu->power_profile_mode != workload[0]) smu_bump_power_profile_mode(smu, workload, 0); } @@ -2335,17 +2355,20 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); @@ -3036,12 +3059,23 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (smu->user_dpm_profile.user_workload_mask & + (1 << smu->workload_priority[param[param_size]])) + return 0; + + smu->user_dpm_profile.user_workload_mask = + (1 << smu->workload_priority[param[param_size]]); + smu->workload_mask = smu->user_dpm_profile.user_workload_mask | + smu->driver_workload_mask; + ret = smu_bump_power_profile_mode(smu, param, param_size); + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..d60d9a12a47e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,7 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +558,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t default_power_profile_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h index ee457a6f0813..c2fd0a4a13e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -25,7 +25,7 @@ #define SMU14_DRIVER_IF_V14_0_H //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x18 +#define PPTABLE_VERSION 0x1B #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -145,7 +145,7 @@ typedef enum { } FEATURE_BTC_e; // Debug Overrides Bitmask -#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_NOT_USE 0x00000001 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 #define 
DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 @@ -161,6 +161,7 @@ typedef enum { #define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 #define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 #define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 +#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000 // VR Mapping Bit Defines #define VR_MAPPING_VR_SELECT_MASK 0x01 @@ -391,6 +392,21 @@ typedef struct { EccInfo_t EccInfo[24]; } EccInfoTable_t; +#define EPCS_HIGH_POWER 600 +#define EPCS_NORMAL_POWER 450 +#define EPCS_LOW_POWER 300 +#define EPCS_SHORTED_POWER 150 +#define EPCS_NO_BOOTUP 0 + +typedef enum{ + EPCS_SHORTED_LIMIT, + EPCS_LOW_POWER_LIMIT, + EPCS_NORMAL_POWER_LIMIT, + EPCS_HIGH_POWER_LIMIT, + EPCS_NOT_CONFIGURED, + EPCS_STATUS_COUNT, +} EPCS_STATUS_e; + //D3HOT sequences typedef enum { BACO_SEQUENCE, @@ -662,7 +678,7 @@ typedef enum { } PP_GRTAVFS_FW_SEP_FUSE_e; #define PP_NUM_RTAVFS_PWL_ZONES 5 - +#define PP_NUM_PSM_DIDT_PWL_ZONES 3 // VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3 // Slope Q1.7, Offset Q1.2 @@ -746,10 +762,10 @@ typedef struct { uint16_t Padding; //Frequency changes - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz - uint16_t UclkFmin; // MHz - uint16_t UclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding1; + uint16_t UclkFmin; + uint16_t UclkFmax; uint16_t FclkFmin; uint16_t FclkFmax; @@ -770,19 +786,23 @@ typedef struct { uint8_t MaxOpTemp; uint8_t AdvancedOdModeEnabled; - uint8_t Padding1[3]; + uint8_t Padding2[3]; uint16_t GfxVoltageFullCtrlMode; uint16_t SocVoltageFullCtrlMode; uint16_t GfxclkFullCtrlMode; uint16_t UclkFullCtrlMode; uint16_t FclkFullCtrlMode; - uint16_t Padding2; + uint16_t Padding3; int16_t GfxEdc; int16_t GfxPccLimitControl; - uint32_t Spare[10]; + uint16_t GfxclkFmaxVmax; + uint8_t GfxclkFmaxVmaxTemperature; + uint8_t Padding4[1]; + + uint32_t Spare[9]; uint32_t MmHubPadding[8]; // SMU internal use. 
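The EPCS additions above introduce both a set of power values (EPCS_HIGH_POWER down to EPCS_NO_BOOTUP) and an EPCS_STATUS_e enum, but the header does not spell out how the two pair up. A minimal userspace sketch, assuming the obvious status-to-limit mapping and that the values are watts; the units and the pairing are assumptions, not taken from the firmware interface:

    #include <stdio.h>

    /* Enumerators mirrored from smu14_driver_if_v14_0.h. */
    enum epcs_status {
        EPCS_SHORTED_LIMIT,
        EPCS_LOW_POWER_LIMIT,
        EPCS_NORMAL_POWER_LIMIT,
        EPCS_HIGH_POWER_LIMIT,
        EPCS_NOT_CONFIGURED,
        EPCS_STATUS_COUNT,
    };

    /* Assumed status -> limit pairing; the header only lists both sets side by side. */
    static const unsigned int epcs_limit[EPCS_STATUS_COUNT] = {
        [EPCS_SHORTED_LIMIT]      = 150,  /* EPCS_SHORTED_POWER */
        [EPCS_LOW_POWER_LIMIT]    = 300,  /* EPCS_LOW_POWER */
        [EPCS_NORMAL_POWER_LIMIT] = 450,  /* EPCS_NORMAL_POWER */
        [EPCS_HIGH_POWER_LIMIT]   = 600,  /* EPCS_HIGH_POWER */
        [EPCS_NOT_CONFIGURED]     = 0,    /* EPCS_NO_BOOTUP */
    };

    int main(void)
    {
        enum epcs_status s = EPCS_NORMAL_POWER_LIMIT;

        printf("EPCS status %d -> assumed limit %u\n", s, epcs_limit[s]);
        return 0;
    }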
Adding here instead of external as a workaround } OverDriveTable_t; @@ -802,8 +822,8 @@ typedef struct { uint16_t VddSocVmax; //gfxclk - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding; //uclk uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz @@ -828,7 +848,7 @@ typedef struct { uint8_t FanZeroRpmEnable; //temperature uint8_t MaxOpTemp; - uint8_t Padding[2]; + uint8_t Padding1[2]; //Full Ctrl uint16_t GfxVoltageFullCtrlMode; @@ -839,7 +859,7 @@ typedef struct { //EDC int16_t GfxEdc; int16_t GfxPccLimitControl; - int16_t Padding1; + int16_t Padding2; uint32_t Spare[5]; } OverDriveLimits_t; @@ -987,8 +1007,9 @@ typedef struct { uint16_t BaseClockDc; uint16_t GameClockDc; uint16_t BoostClockDc; - - uint32_t Reserved[4]; + uint16_t MaxReportedClock; + uint16_t Padding; + uint32_t Reserved[3]; } DriverReportedClocks_t; typedef struct { @@ -1132,7 +1153,7 @@ typedef struct { uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz uint16_t GfxclkAibFmax; - uint16_t GfxclkFreqCap; + uint16_t GfxDpmPadding; //GFX Idle Power Settings uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz @@ -1172,8 +1193,7 @@ typedef struct { uint32_t DvoFmaxLowScaler; //Unitless float // GFX DCS - uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase - uint16_t PaddingDcs; + uint32_t PaddingDcs; uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. @@ -1205,8 +1225,7 @@ typedef struct { uint16_t DalDcModeMaxUclkFreq; uint8_t PaddingsMem[2]; //FCLK Section - uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. 
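With GfxclkFmin/GfxclkFmax collapsed into a single signed GfxclkFoffset in both OverDriveTable_t and OverDriveLimits_t above, GFX overclocking becomes an offset applied to the stock frequency rather than an absolute min/max pair, and the limits structure now bounds that offset. A small sketch of how such an offset could be range-checked and applied; the limit values, stock clock and helper name are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bounds; the real values come from OverDriveLimitsBasicMin/Max. */
    #define GFXCLK_FOFFSET_MIN (-500)  /* MHz */
    #define GFXCLK_FOFFSET_MAX 250     /* MHz */

    /* Reject out-of-range offsets the way the OD edit path rejects bad input. */
    static int clamp_gfxclk_offset(int requested, int16_t *out)
    {
        if (requested < GFXCLK_FOFFSET_MIN || requested > GFXCLK_FOFFSET_MAX)
            return -1;  /* the driver returns -EINVAL here */
        *out = (int16_t)requested;
        return 0;
    }

    int main(void)
    {
        const uint16_t stock_gfxclk = 2500;  /* MHz, illustrative */
        int16_t off;

        if (clamp_gfxclk_offset(150, &off) == 0)
            printf("effective gfxclk ~ %d MHz (offset %+d)\n",
                   stock_gfxclk + off, off);
        return 0;
    }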
Set to 0 use FW calculated value - uint16_t PaddingFclk; + uint32_t PaddingFclk; // Link DPM Settings uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 @@ -1215,12 +1234,19 @@ typedef struct { // SECTION: VDD_GFX AVFS uint8_t OverrideGfxAvfsFuses; - uint8_t GfxAvfsPadding[3]; + uint8_t GfxAvfsPadding[1]; + uint16_t DroopGBStDev; uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; - uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1]; + uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t spare_HwRtAvfsFuses[19]; uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; @@ -1246,11 +1272,7 @@ typedef struct { uint32_t dGbV_dT_vmin; uint32_t dGbV_dT_vmax; - //Unused: PMFW-9370 - uint32_t V2F_vmin_range_low; - uint32_t V2F_vmin_range_high; - uint32_t V2F_vmax_range_low; - uint32_t V2F_vmax_range_high; + uint32_t PaddingV2F[4]; AvfsDcBtcParams_t DcBtcGfxParams; QuadraticInt_t SSCurve_GFX; @@ -1327,18 +1349,18 @@ typedef struct { uint16_t PsmDidtReleaseTimer; uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog // CAC EDC - uint32_t Leakage_C0; // in IEEE float - uint32_t Leakage_C1; // in IEEE float - uint32_t Leakage_C2; // in IEEE float - uint32_t Leakage_C3; // in IEEE float - uint32_t Leakage_C4; // in IEEE float - uint32_t Leakage_C5; // in IEEE float - uint32_t GFX_CLK_SCALAR; // in IEEE float - uint32_t GFX_CLK_INTERCEPT; // in IEEE float - uint32_t GFX_CAC_M; // in IEEE float - uint32_t GFX_CAC_B; // in IEEE float - uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float - uint32_t DynToTotalCacScalar; // in IEEE + uint32_t CacEdcCacLeakageC0; + uint32_t CacEdcCacLeakageC1; + uint32_t CacEdcCacLeakageC2; + uint32_t CacEdcCacLeakageC3; + uint32_t CacEdcCacLeakageC4; + uint32_t CacEdcCacLeakageC5; + uint32_t CacEdcGfxClkScalar; + uint32_t CacEdcGfxClkIntercept; + uint32_t CacEdcCac_m; + uint32_t CacEdcCac_b; + uint32_t CacEdcCurrLimitGuardband; + uint32_t CacEdcDynToTotalCacRatio; // GFX EDC XVMIN uint32_t XVmin_Gfx_EdcThreshScalar; uint32_t XVmin_Gfx_EdcEnableFreq; @@ -1467,7 +1489,7 @@ typedef struct { uint8_t VddqOffEnabled; uint8_t PaddingUmcFlags[2]; - uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t Paddign1; uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS uint8_t FuseWritePowerMuxPresent; @@ -1530,7 +1552,7 @@ typedef struct { int16_t FuzzyFan_ErrorSetDelta; int16_t FuzzyFan_ErrorRateSetDelta; int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; + uint16_t FanPadding2; uint16_t FwCtfLimit[TEMP_COUNT]; @@ -1547,9 +1569,10 @@ typedef struct { uint16_t FanSpare[1]; uint8_t FanIntakeSensorSupport; uint8_t FanIntakePadding; - uint32_t FanAmbientPerfBoostThreshold; uint32_t FanSpare2[12]; + uint32_t ODFeatureCtrlMask; + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. 
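The PsmDidt_* tables above describe PP_NUM_PSM_DIDT_PWL_ZONES (3) piecewise-linear zones: PsmDidt_Vcross holds the zones-minus-one crossover points and the StaticDroop_A/B arrays hold one coefficient pair per zone. The firmware interface does not define the evaluation, so the sketch below only illustrates the zone lookup and assumes a simple A*v + B form with invented numbers:

    #include <stdio.h>

    #define PWL_ZONES 3  /* PP_NUM_PSM_DIDT_PWL_ZONES */

    /* Zone boundaries: PWL_ZONES - 1 crossover points, as in PsmDidt_Vcross[]. */
    static const unsigned int vcross[PWL_ZONES - 1] = { 700, 900 };  /* mV, illustrative */

    /* Per-zone coefficients, as in PsmDidt_StaticDroop_A/B (values made up). */
    static const double droop_a[PWL_ZONES] = { 0.10, 0.15, 0.25 };
    static const double droop_b[PWL_ZONES] = { 5.0, 1.5, -7.5 };

    static int pwl_zone(unsigned int v_mv)
    {
        int z = 0;

        while (z < PWL_ZONES - 1 && v_mv >= vcross[z])
            z++;
        return z;
    }

    int main(void)
    {
        unsigned int v = 850;  /* mV */
        int z = pwl_zone(v);

        /* Assumed linear per-zone form; the header only stores the tables. */
        printf("zone %d, droop %.2f\n", z, droop_a[z] * v + droop_b[z]);
        return 0;
    }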
Memory temperature limit associated with Hynix uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron uint16_t TemperatureFwCtfLimit_Hynix; @@ -1637,7 +1660,7 @@ typedef struct { uint16_t AverageDclk0Frequency ; uint16_t AverageVclk1Frequency ; uint16_t AverageDclk1Frequency ; - uint16_t PCIeBusy ; + uint16_t AveragePCIeBusy ; uint16_t dGPU_W_MAX ; uint16_t padding ; @@ -1665,12 +1688,12 @@ typedef struct { uint16_t AverageGfxActivity ; uint16_t AverageUclkActivity ; - uint16_t Vcn0ActivityPercentage ; + uint16_t AverageVcn0ActivityPercentage; uint16_t Vcn1ActivityPercentage ; uint32_t EnergyAccumulator; uint16_t AverageSocketPower; - uint16_t MovingAverageTotalBoardPower; + uint16_t AverageTotalBoardPower; uint16_t AvgTemperature[TEMP_COUNT]; uint16_t AvgTemperatureFanIntake; @@ -1684,7 +1707,8 @@ typedef struct { uint8_t ThrottlingPercentage[THROTTLER_COUNT]; - uint8_t padding1[3]; + uint8_t VmaxThrottlingPercentage; + uint8_t padding1[2]; //metrics for D3hot entry/exit and driver ARM msgs uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; @@ -1693,7 +1717,7 @@ typedef struct { uint16_t ApuSTAPMSmartShiftLimit; uint16_t ApuSTAPMLimit; - uint16_t MovingAvgApuSocketPower; + uint16_t AvgApuSocketPower; uint16_t AverageUclkActivity_MAX; @@ -1823,6 +1847,17 @@ typedef struct { #define TABLE_TRANSFER_FAILED 0xFF #define TABLE_TRANSFER_PENDING 0xAB +#define TABLE_PPT_FAILED 0x100 +#define TABLE_TDC_FAILED 0x200 +#define TABLE_TEMP_FAILED 0x400 +#define TABLE_FAN_TARGET_TEMP_FAILED 0x800 +#define TABLE_FAN_STOP_TEMP_FAILED 0x1000 +#define TABLE_FAN_START_TEMP_FAILED 0x2000 +#define TABLE_FAN_PWM_MIN_FAILED 0x4000 +#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000 +#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000 +#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000 + // Table types #define TABLE_PPTABLE 0 #define TABLE_COMBO_PPTABLE 1 @@ -1849,5 +1884,6 @@ typedef struct { #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 #define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 46b456590a08..727d5b405435 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..31fe512028f4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { if (size != 10) @@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; 
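The arcturus hunk above, and the other set_power_profile_mode() hunks that follow, all switch from sending 1 << workload_type to sending the accumulated smu->workload_mask, and then derive the reported profile from that mask rather than assigning it directly. A compact userspace model of the selection performed by smu_cmn_assign_power_profile(), with the priorities from smu_sw_init() and fls() emulated through a compiler builtin; this is a sketch of the selection logic, not driver code:

    #include <stdio.h>

    /* Priority order copied from smu_sw_init(): index == priority here. */
    enum profile {
        PROFILE_BOOTUP_DEFAULT,  /* priority 0 */
        PROFILE_FULLSCREEN3D,    /* priority 1 */
        PROFILE_POWERSAVING,     /* priority 2 */
        PROFILE_VIDEO,           /* priority 3 */
        PROFILE_VR,              /* priority 4 */
        PROFILE_COMPUTE,         /* priority 5 */
        PROFILE_CUSTOM,          /* priority 6 */
        PROFILE_COUNT
    };

    /* Same semantics as the kernel's fls(): fls(0) == 0, fls(1) == 1. */
    static int fls_u32(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        /* Driver-requested and sysfs-requested bits are tracked separately. */
        unsigned int driver_mask = 1u << PROFILE_FULLSCREEN3D;
        unsigned int user_mask   = 1u << PROFILE_COMPUTE;
        unsigned int workload_mask = driver_mask | user_mask;

        /* Highest set bit wins; the driver then indexes workload_setting[]. */
        int index = fls_u32(workload_mask);
        index = (index > 0 && index <= PROFILE_COUNT) ? index - 1 : 0;

        printf("mask 0x%x -> active profile index %d\n", workload_mask, index);
        return 0;
    }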
+ smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..12223f507977 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2081,10 +2081,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..3b7b2ec8319a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1786,10 +1786,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 22737b11b1bf..952ee22cbc90 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -1077,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1085,7 +1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..62316a6707ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + 
smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1d024b122b0c..5dd7ceca64fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2555,26 +2555,34 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask = 1 << workload_type; /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7300))) || - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - smu->adev->pm.fw_version >= 0x00504500)) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; - } + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + workload_mask |= 1 << workload_type; } + smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); - if (!ret) - smu->workload_mask = workload_mask; + if (!ret) { + smu_cmn_assign_power_profile(smu); + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? 
PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b891a5e0a396..9d0b19419de0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2499,13 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); else - smu->workload_mask = (1 << workload_type); + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 5899d01fa73d..1aa13d32ceb2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) return 0; } -#ifndef atom_smc_dpm_info_table_14_0_0 -struct atom_smc_dpm_info_table_14_0_0 { - struct atom_common_table_header table_header; - BoardTable_t BoardTable; -}; -#endif - -static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; - struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; - BoardTable_t *BoardTable = &smc_pptable->BoardTable; - int index, ret; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - smc_dpm_info); - - ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, - (uint8_t **)&smc_dpm_table); - if (ret) - return ret; - - memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); - - return 0; -} - -#if 0 -static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, - void **table, - uint32_t *size) -{ - struct smu_table_context *smu_table = &smu->smu_table; - void *combo_pptable = smu_table->combo_pptable; - int ret = 0; - - ret = smu_cmn_get_combo_pptable(smu); - if (ret) - return ret; - - *table = combo_pptable; - *size = sizeof(struct smu_14_0_powerplay_table); - - return 0; -} -#endif - static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, void **table, uint32_t *size) @@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, static int smu_v14_0_2_setup_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - struct amdgpu_device *adev = smu->adev; int ret = 0; if (amdgpu_sriov_vf(smu->adev)) return 0; - if (!adev->scpm_enabled) - ret = smu_v14_0_setup_pptable(smu); - else - ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, &smu_table->power_play_table, &smu_table->power_play_table_size); if (ret) @@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu) if (ret) return ret; - /* - * With SCPM enabled, the operation below will be handled - * by PSP. Driver involvment is unnecessary and useless. 
- */ - if (!adev->scpm_enabled) { - ret = smu_v14_0_2_append_powerplay_table(smu); - if (ret) - return ret; - } - ret = smu_v14_0_2_check_powerplay_table(smu); if (ret) return ret; @@ -1077,12 +1015,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, switch (od_feature_bit) { case PP_OD_FEATURE_GFXCLK_FMIN: - od_min_setting = overdrive_lowerlimits->GfxclkFmin; - od_max_setting = overdrive_upperlimits->GfxclkFmin; - break; case PP_OD_FEATURE_GFXCLK_FMAX: - od_min_setting = overdrive_lowerlimits->GfxclkFmax; - od_max_setting = overdrive_upperlimits->GfxclkFmax; + od_min_setting = overdrive_lowerlimits->GfxclkFoffset; + od_max_setting = overdrive_upperlimits->GfxclkFoffset; break; case PP_OD_FEATURE_UCLK_FMIN: od_min_setting = overdrive_lowerlimits->UclkFmin; @@ -1269,10 +1204,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_BIT)) break; - size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); - size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", - od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n", + overdrive_lowerlimits->GfxclkFoffset, + overdrive_upperlimits->GfxclkFoffset); break; case SMU_OD_MCLK: @@ -1414,7 +1355,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_FMAX, NULL, &max_value); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", min_value, max_value); } @@ -1796,7 +1737,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; - + uint32_t current_profile_mode = smu->power_profile_mode; smu->power_profile_mode = input[size]; if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { @@ -1854,6 +1795,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, } } + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) + smu_v14_0_deep_sleep_control(smu, false); + else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) + smu_v14_0_deep_sleep_control(smu, true); + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, @@ -1861,12 +1807,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, NULL); + if (!ret) - smu->workload_mask = 1 << workload_type; + smu_cmn_assign_power_profile(smu); return ret; } @@ -2158,7 +2103,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; - gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, + gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage, 
metrics->Vcn1ActivityPercentage); gpu_metrics->average_socket_power = metrics->AverageSocketPower; @@ -2217,8 +2162,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; - dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset); dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, od_table->OverDriveTable.UclkFmax); } @@ -2309,10 +2253,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) memcpy(user_od_table, boot_od_table, sizeof(OverDriveTableExternal_t)); - user_od_table->OverDriveTable.GfxclkFmin = - user_od_table_bak.OverDriveTable.GfxclkFmin; - user_od_table->OverDriveTable.GfxclkFmax = - user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.GfxclkFoffset = + user_od_table_bak.OverDriveTable.GfxclkFoffset; user_od_table->OverDriveTable.UclkFmin = user_od_table_bak.OverDriveTable.UclkFmin; user_od_table->OverDriveTable.UclkFmax = @@ -2441,22 +2383,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } switch (input[i]) { - case 0: - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMIN, - &minimum, - &maximum); - if (input[i + 1] < minimum || - input[i + 1] > maximum) { - dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", - input[i + 1], minimum, maximum); - return -EINVAL; - } - - od_table->OverDriveTable.GfxclkFmin = input[i + 1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; - break; - case 1: smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_GFXCLK_FMAX, @@ -2469,7 +2395,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.GfxclkFoffset = input[i + 1]; od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; break; @@ -2480,13 +2406,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } } - if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { - dev_err(adev->dev, - "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", - (uint32_t)od_table->OverDriveTable.GfxclkFmin, - (uint32_t)od_table->OverDriveTable.GfxclkFmax); - return -EINVAL; - } break; case PP_OD_EDIT_MCLK_VDDC_TABLE: @@ -2817,7 +2736,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .setup_pptable = smu_v14_0_2_setup_pptable, .check_fw_version = smu_v14_0_check_fw_version, - .write_pptable = smu_cmn_write_pptable, .set_driver_table_location = smu_v14_0_set_driver_table_location, .system_features_control = smu_v14_0_system_features_control, .set_allowed_mask = smu_v14_0_set_allowed_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..bdfc5e617333 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1138,6 +1138,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } +void smu_cmn_assign_power_profile(struct smu_context *smu) +{ + uint32_t index; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; +} + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) { struct pci_dev *p = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..8a801e389659 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +void smu_cmn_assign_power_profile(struct smu_context *smu); + /* * Helper function to make sysfs_emit_at() happy. Align buf to * the current page boundary and record the offset. diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c index 496c7120e515..c231389936bd 100644 --- a/drivers/gpu/drm/ast/ast_sil164.c +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. The XGA default resolution diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c index 3e815da43fbd..dd389a0a8f4a 100644 --- a/drivers/gpu/drm/ast/ast_vga.c +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. 
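Several of the DRM fixes in this area are reference-count balances on early-error paths; the tc358767 change below, for instance, adds an of_node_put() before the -EINVAL return so the endpoint node acquired earlier is not leaked. A toy refcount model of that rule; the structure and helpers are stand-ins, not OF APIs:

    #include <stdio.h>

    struct ref_node { int refcount; };

    static void node_get(struct ref_node *n) { n->refcount++; }
    static void node_put(struct ref_node *n) { n->refcount--; }

    /* Every early return after the get must be paired with a put. */
    static int parse_endpoint(struct ref_node *ep, int pre_emphasis)
    {
        node_get(ep);

        if (pre_emphasis < 0 || pre_emphasis > 2) {
            node_put(ep);  /* the added of_node_put() on the error path */
            return -1;     /* -EINVAL */
        }

        node_put(ep);
        return 0;
    }

    int main(void)
    {
        struct ref_node ep = { .refcount = 1 };

        parse_endpoint(&ep, 3);
        printf("refcount after error path: %d (must still be 1)\n", ep.refcount);
        return 0;
    }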
The XGA default resolution diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b29980f95379..295e9d031e2d 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent) adev->id = ret; adev->name = "aux_bridge"; adev->dev.parent = parent; - adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_bridge_release; + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { ida_free(&drm_aux_bridge_ida, adev->id); diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 290e2532fab1..f3afdab55c11 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2391,6 +2391,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc) if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 || tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) { dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n"); + of_node_put(node); return -EINVAL; } } diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index b0602c4f3628..51c2d742d199 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) if (!fb_helper->dev) return; - fb_deferred_io_cleanup(info); + if (info->fbdefio) + fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 0830cae9a4d0..2d84d7ea1ab7 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -403,7 +403,6 @@ static const struct dmi_system_id orientation_data[] = { }, { /* Lenovo Yoga Tab 3 X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index faa253b27664..14ac351fd76d 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -123,9 +123,8 @@ config DRM_I915_USERPTR config DRM_I915_GVT_KVMGT tristate "Enable KVM host support Intel GVT-g graphics virtualization" depends on DRM_I915 - depends on X86 + depends on KVM_X86 depends on 64BIT - depends on KVM depends on VFIO select DRM_I915_GVT select KVM_EXTERNAL_WRITE_TRACKING diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 15541932b809..eeaedd979354 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -89,25 +89,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state, static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, const struct intel_connector *connector, - bool ssc, bool dsc, int bpp_x16) + bool ssc, int dsc_slice_count, int bpp_x16) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; unsigned long flags = DRM_DP_BW_OVERHEAD_MST; - int dsc_slice_count = 0; int overhead; flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0; flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0; flags |= crtc_state->fec_enable ? 
DRM_DP_BW_OVERHEAD_FEC : 0; - if (dsc) { + if (dsc_slice_count) flags |= DRM_DP_BW_OVERHEAD_DSC; - dsc_slice_count = intel_dp_dsc_get_slice_count(connector, - adjusted_mode->clock, - adjusted_mode->hdisplay, - crtc_state->joiner_pipes); - } overhead = drm_dp_bw_overhead(crtc_state->lane_count, adjusted_mode->hdisplay, @@ -153,6 +147,19 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead) return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000); } +static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int num_joined_pipes = crtc_state->joiner_pipes; + + return intel_dp_dsc_get_slice_count(connector, + adjusted_mode->clock, + adjusted_mode->hdisplay, + num_joined_pipes); +} + static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int max_bpp, @@ -172,6 +179,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpp, slots = -EINVAL; + int dsc_slice_count = 0; int max_dpt_bpp; int ret = 0; @@ -203,6 +211,15 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n", min_bpp, max_bpp); + if (dsc) { + dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state); + if (!dsc_slice_count) { + drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n"); + + return -ENOSPC; + } + } + for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { int local_bw_overhead; int remote_bw_overhead; @@ -216,9 +233,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, intel_dp_output_bpp(crtc_state->output_format, bpp)); local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, - false, dsc, link_bpp_x16); + false, dsc_slice_count, link_bpp_x16); remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, - true, dsc, link_bpp_x16); + true, dsc_slice_count, link_bpp_x16); intel_dp_mst_compute_m_n(crtc_state, connector, local_bw_overhead, @@ -449,6 +466,9 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector, if (mode_hblank_period_ns(adjusted_mode) > hblank_limit) return false; + if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state)) + return false; + return true; } diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 5be7bb43e2e0..35557d98d7a7 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -438,6 +438,19 @@ bool intel_fb_needs_64k_phys(u64 modifier) INTEL_PLANE_CAP_NEED64K_PHYS); } +/** + * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a tile4 modifier. 
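In the intel_dp_mst changes above, the DSC slice count is computed once, passed to the bandwidth-overhead helper in place of a bare bool, and a count of zero now aborts the VCPI search with -ENOSPC before any bpp is tried. A minimal sketch of that contract; the slice-count rule of thumb below is invented, only the zero-means-bail flow mirrors the driver:

    #include <stdio.h>

    /* Toy stand-in for the slice-count lookup: 0 means no valid slicing. */
    static int dsc_slice_count(int pixel_clock_khz, int hdisplay)
    {
        if (pixel_clock_khz <= 0 || hdisplay <= 0)
            return 0;
        return hdisplay > 5120 ? 4 : (hdisplay > 2560 ? 2 : 1);  /* illustrative */
    }

    static int compute_link_config(int clock_khz, int hdisplay, int want_dsc)
    {
        int slices = 0;

        if (want_dsc) {
            slices = dsc_slice_count(clock_khz, hdisplay);
            if (!slices)
                return -1;  /* the driver returns -ENOSPC here */
        }
        /* slices, not a bool, now feeds the bandwidth-overhead estimate */
        printf("dsc slices = %d\n", slices);
        return 0;
    }

    int main(void)
    {
        return compute_link_config(594000, 3840, 1) ? 1 : 0;
    }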
+ */ +bool intel_fb_is_tile4_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_TILING_4); +} + static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, u8 display_ver_from, u8 display_ver_until) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 10de437e8ef8..827be3f7934c 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -35,6 +35,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier); bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); bool intel_fb_is_mc_ccs_modifier(u64 modifier); bool intel_fb_needs_64k_phys(u64 modifier); +bool intel_fb_is_tile4_modifier(u64 modifier); bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6980b98792c2..377939de0ff4 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1094,7 +1094,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector, hdcp->value = value; if (update_property) { drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); } } @@ -2524,7 +2525,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, mutex_lock(&hdcp->mutex); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); mutex_unlock(&hdcp->mutex); } @@ -2541,7 +2543,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, */ if (!desired_and_not_enabled && !content_protection_type_changed) { drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); + } } diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 17d4c880ecc4..c8720d31d101 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1591,6 +1591,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return -EINVAL; } + /* + * Display20 onward tile4 hflip is not supported + */ + if (rotation & DRM_MODE_REFLECT_X && + intel_fb_is_tile4_modifier(fb->modifier) && + DISPLAY_VER(dev_priv) >= 20) { + drm_dbg_kms(&dev_priv->drm, + "horizontal flip is not supported with tile4 surface formats\n"); + return -EINVAL; + } + if (drm_rotation_90_or_270(rotation)) { if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) { drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index eded5e955cc0..4cb3494c0bb2 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -17,10 +17,14 @@ #include #include + +#include #include #include +#include #include #include +#include #include #include #include @@ -354,6 +358,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co return err; } + spin_lock(&pvr_dev->ctx_list_lock); + 
list_add_tail(&ctx->file_link, &pvr_file->contexts); + spin_unlock(&pvr_dev->ctx_list_lock); + return 0; err_destroy_fw_obj: @@ -380,6 +388,11 @@ pvr_context_release(struct kref *ref_count) container_of(ref_count, struct pvr_context, ref_count); struct pvr_device *pvr_dev = ctx->pvr_dev; + WARN_ON(in_interrupt()); + spin_lock(&pvr_dev->ctx_list_lock); + list_del(&ctx->file_link); + spin_unlock(&pvr_dev->ctx_list_lock); + xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id); pvr_context_destroy_queues(ctx); pvr_fw_object_destroy(ctx->fw_obj); @@ -437,11 +450,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle) */ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) { + struct pvr_device *pvr_dev = pvr_file->pvr_dev; struct pvr_context *ctx; unsigned long handle; xa_for_each(&pvr_file->ctx_handles, handle, ctx) pvr_context_destroy(pvr_file, handle); + + spin_lock(&pvr_dev->ctx_list_lock); + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + + while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) { + list_del_init(&ctx->file_link); + + if (pvr_context_get_if_referenced(ctx)) { + spin_unlock(&pvr_dev->ctx_list_lock); + + pvr_vm_unmap_all(ctx->vm_ctx); + + pvr_context_put(ctx); + spin_lock(&pvr_dev->ctx_list_lock); + } + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + } + spin_unlock(&pvr_dev->ctx_list_lock); } /** @@ -451,6 +483,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) void pvr_context_device_init(struct pvr_device *pvr_dev) { xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1); + spin_lock_init(&pvr_dev->ctx_list_lock); } /** diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h index 0c7b97dfa6ba..07afa179cdf4 100644 --- a/drivers/gpu/drm/imagination/pvr_context.h +++ b/drivers/gpu/drm/imagination/pvr_context.h @@ -85,6 +85,9 @@ struct pvr_context { /** @compute: Transfer queue. */ struct pvr_queue *transfer; } queues; + + /** @file_link: pvr_file PVR context list link. */ + struct list_head file_link; }; static __always_inline struct pvr_queue * @@ -123,6 +126,24 @@ pvr_context_get(struct pvr_context *ctx) return ctx; } +/** + * pvr_context_get_if_referenced() - Take an additional reference on a still + * referenced context. + * @ctx: Context pointer. + * + * Call pvr_context_put() to release. + * + * Returns: + * * True on success, or + * * false if no context pointer passed, or the context wasn't still + * * referenced. + */ +static __always_inline bool +pvr_context_get_if_referenced(struct pvr_context *ctx) +{ + return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0; +} + /** * pvr_context_lookup() - Lookup context pointer from handle and file. * @pvr_file: Pointer to pvr_file structure. diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index b574e23d484b..6d0dfacb677b 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -293,6 +294,12 @@ struct pvr_device { /** @sched_wq: Workqueue for schedulers. */ struct workqueue_struct *sched_wq; + + /** + * @ctx_list_lock: Lock to be held when accessing the context list in + * struct pvr_file. + */ + spinlock_t ctx_list_lock; }; /** @@ -344,6 +351,9 @@ struct pvr_file { * This array is used to allocate handles returned to userspace. */ struct xarray vm_ctx_handles; + + /** @contexts: PVR context list. 
*/ + struct list_head contexts; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index 1a0cb7aa9cea..fb17196e05f4 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file) */ pvr_file->pvr_dev = pvr_dev; + INIT_LIST_HEAD(&pvr_file->contexts); + xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1); diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 97c0f772ed65..7bd6ba4c6e8a 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -597,12 +598,26 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context) } /** - * pvr_vm_context_release() - Teardown a VM context. - * @ref_count: Pointer to reference counter of the VM context. + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. * * This function ensures that no mappings are left dangling by unmapping them * all in order of ascending device-virtual address. */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range)); +} + +/** + * pvr_vm_context_release() - Teardown a VM context. + * @ref_count: Pointer to reference counter of the VM context. + * + * This function also ensures that no mappings are left dangling by calling + * pvr_vm_unmap_all. 
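The powervr context-list changes above let file teardown walk every context still known to the file, pin each one only if it is still referenced (kref_get_unless_zero semantics), unmap its VM outside the list lock, then drop the extra reference. A simplified userspace model of that pin-if-live loop, with plain integers standing in for the kref and the VM mappings:

    #include <stdio.h>

    struct ctx {
        int refcount;  /* stands in for the context kref */
        int mapped;    /* stands in for live VM mappings */
    };

    /* Same idea as kref_get_unless_zero(): pin the context only if still live. */
    static int get_if_referenced(struct ctx *c)
    {
        if (c->refcount == 0)
            return 0;  /* already being released; its release path unmaps */
        c->refcount++;
        return 1;
    }

    static void put_ctx(struct ctx *c)
    {
        --c->refcount;
    }

    int main(void)
    {
        struct ctx live  = { .refcount = 1, .mapped = 1 };
        struct ctx dying = { .refcount = 0, .mapped = 1 };
        struct ctx *file_contexts[] = { &live, &dying };

        for (unsigned int i = 0; i < 2; i++) {
            struct ctx *c = file_contexts[i];

            if (!get_if_referenced(c))
                continue;      /* left to its ongoing release */
            c->mapped = 0;     /* pvr_vm_unmap_all() equivalent */
            put_ctx(c);
        }

        printf("live.mapped=%d (unmapped here), dying.mapped=%d (release handles it)\n",
               live.mapped, dying.mapped);
        return 0;
    }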
+ */ static void pvr_vm_context_release(struct kref *ref_count) { @@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count) if (vm_ctx->fw_mem_ctx_obj) pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); + pvr_vm_unmap_all(vm_ctx); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index f2a6463f2b05..79406243617c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); +void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx); struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 175b00e5a253..eb0e1233ad04 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc) mtk_mutex_put(mtk_crtc->mutex); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); - if (mtk_crtc->cmdq_client.chan) { + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); mbox_free_channel(mtk_crtc->cmdq_client.chan); mtk_crtc->cmdq_client.chan = NULL; } @@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, BIT(pipe), mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), mtk_ddp_comp_supported_rotations(comp), + mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), mtk_ddp_comp_get_num_formats(comp), i); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index be66d94be361..edc6417639e6 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, .bgclr_in_off = mtk_ovl_bgclr_in_off, + .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, }; @@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { .disconnect = mtk_ovl_adaptor_disconnect, .add = mtk_ovl_adaptor_add_comp, .remove = mtk_ovl_adaptor_remove_comp, + .get_blend_modes = mtk_ovl_adaptor_get_blend_modes, .get_formats = mtk_ovl_adaptor_get_formats, .get_num_formats = mtk_ovl_adaptor_get_num_formats, .mode_valid = mtk_ovl_adaptor_mode_valid, diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index ecf6dc283cd7..39720b27f4e9 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs { void (*ctm_set)(struct device *dev, struct drm_crtc_state *state); struct device * (*dma_dev_get)(struct device *dev); + u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); @@ -266,6 +267,15 @@ static inline struct device 
*mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) return comp->dev; } +static inline +u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_blend_modes) + return comp->funcs->get_blend_modes(comp->dev); + + return 0; +} + static inline const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 082ac18fe04a..04154db9085c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -103,6 +103,7 @@ void mtk_ovl_register_vblank_cb(struct device *dev, void mtk_ovl_unregister_vblank_cb(struct device *dev); void mtk_ovl_enable_vblank(struct device *dev); void mtk_ovl_disable_vblank(struct device *dev); +u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); @@ -131,6 +132,7 @@ void mtk_ovl_adaptor_start(struct device *dev); void mtk_ovl_adaptor_stop(struct device *dev); unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev); const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 89b439dcf3a6..e0c0bb01f65a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -65,8 +65,8 @@ #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_ARGB8888 (2 << 12) #define OVL_CON_CLRFMT_RGBA8888 (3 << 12) -#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) -#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) #define OVL_CON_CLRFMT_UYVY (4 << 12) #define OVL_CON_CLRFMT_YUYV (5 << 12) #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) @@ -146,6 +146,7 @@ struct mtk_disp_ovl_data { bool fmt_rgb565_is_0; bool smi_id_en; bool supports_afbc; + const u32 blend_modes; const u32 *formats; size_t num_formats; bool supports_clrfmt_ext; @@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev) writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN); } +u32 mtk_ovl_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->blend_modes; +} + const u32 *mtk_ovl_get_formats(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); @@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx, DISP_REG_OVL_RDMA_CTRL(idx)); } -static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt, - unsigned int blend_mode) +static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl, + struct mtk_plane_state *state) { - /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" - * is defined in mediatek HW data sheet. - * The alphabet order in XXX is no relation to data - * arrangement in memory. 
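The mediatek OVL changes in this area plumb a per-SoC blend_modes bitmask from the driver data up to plane creation: the blend-mode property is only exposed when the mask is non-zero, and the format conversion falls back to coverage blending when premultiplied alpha is not advertised. A small sketch of that gating and fallback, using local constants rather than the DRM_MODE_BLEND_* values:

    #include <stdio.h>

    enum blend_mode { BLEND_PREMULTI, BLEND_COVERAGE, BLEND_PIXEL_NONE };

    /* Per-SoC capability mask, mirroring the new .blend_modes driver data. */
    #define CAP(mode) (1u << (mode))

    static enum blend_mode pick_blend_mode(unsigned int soc_caps,
                                           enum blend_mode requested)
    {
        /* If the OVL cannot do premultiplied alpha, treat the plane as coverage. */
        if (!(soc_caps & CAP(BLEND_PREMULTI)))
            return BLEND_COVERAGE;
        return requested;
    }

    int main(void)
    {
        unsigned int mt8195_caps = CAP(BLEND_PREMULTI) | CAP(BLEND_COVERAGE) |
                                   CAP(BLEND_PIXEL_NONE);
        unsigned int old_soc_caps = 0;  /* driver data without .blend_modes */

        /* The property is only created when the capability mask is non-zero. */
        printf("mt8195 exposes blend_mode property: %s\n",
               mt8195_caps ? "yes" : "no");
        printf("old SoC effective mode: %d (coverage fallback)\n",
               pick_blend_mode(old_soc_caps, BLEND_PREMULTI));
        return 0;
    }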
+ unsigned int fmt = state->pending.format; + unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE; + + /* + * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet + * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888. + * + * Check blend_modes in the driver data to see if premultiplied mode is supported. + * If not, use coverage mode instead to set it to the supported color formats. + * + * Current DRM assumption is that alpha is default premultiplied, so the bitmask of + * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init() + * will get an error return from drm_plane_create_blend_mode_property() and + * state->base.pixel_blend_mode should not be used. */ + if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI)) + blend_mode = state->base.pixel_blend_mode; + switch (fmt) { default: case DRM_FORMAT_RGB565: @@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, return; } - con = ovl_fmt_convert(ovl, fmt, blend_mode); + con = mtk_ovl_fmt_convert(ovl, state); if (state->base.fb) { - con |= OVL_CON_AEN; con |= state->base.alpha & OVL_CON_ALPHA; - } - /* CONST_BLD must be enabled for XRGB formats although the alpha channel - * can be ignored, or OVL will still read the value from memory. - * For RGB888 related formats, whether CONST_BLD is enabled or not won't - * affect the result. Therefore we use !has_alpha as the condition. - */ - if ((state->base.fb && !state->base.fb->format->has_alpha) || - blend_mode == DRM_MODE_BLEND_PIXEL_NONE) - ignore_pixel_alpha = OVL_CONST_BLEND; + /* + * For blend_modes supported SoCs, always enable alpha blending. + * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set. + */ + if (blend_mode || state->base.fb->format->has_alpha) + con |= OVL_CON_AEN; + + /* + * Although the alpha channel can be ignored, CONST_BLD must be enabled + * for XRGB format, otherwise OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition. 
+ */ + if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + } if (pending->rotation & DRM_MODE_REFLECT_Y) { con |= OVL_CON_VIRT_FLIP; @@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { .layer_nr = 4, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { .layer_nr = 2, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = { .fmt_rgb565_is_0 = true, .smi_id_en = true, .supports_afbc = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8195_formats, .num_formats = ARRAY_SIZE(mt8195_formats), .supports_clrfmt_ext = true, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index c6768210b08b..bf2546c4681a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev) mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); } +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + const u32 *mtk_ovl_adaptor_get_formats(struct device *dev) { struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index d8796a904eca..f2bee617f063 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -145,6 +145,89 @@ struct mtk_dp_data { u16 audio_m_div2_bit; }; +static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 0, + .shift = 10, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 15, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 1, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 1, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 1, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 1, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 1, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 1, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 1, + .shift = 20, + .mask = 0xf, + 
.min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 1, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = { [MTK_DP_CAL_GLB_BIAS_TRIM] = { .idx = 3, @@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume); static const struct mtk_dp_data mt8188_dp_data = { .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, - .efuse_fmt = mt8195_dp_efuse_fmt, + .efuse_fmt = mt8188_dp_efuse_fmt, .audio_supported = true, .audio_pkt_in_hblank_area = true, .audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index d1d9cf8b10e1..0f22e7d337cb 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +u32 mtk_ethdr_get_blend_modes(struct device *dev) +{ + return BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE); +} + void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt) diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h index 81af9edea3f7..a72aeee46829 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.h +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h @@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev); void mtk_ethdr_config(struct device *dev, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +u32 mtk_ethdr_get_blend_modes(struct device *dev); void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 7d2cb4e0fafa..8a48b3b0a956 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx) + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx) { int err; @@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, if (err) DRM_ERROR("failed to create property: alpha\n"); - err = drm_plane_create_blend_mode_property(plane, - BIT(DRM_MODE_BLEND_PREMULTI) | - BIT(DRM_MODE_BLEND_COVERAGE) | - BIT(DRM_MODE_BLEND_PIXEL_NONE)); - if (err) - DRM_ERROR("failed to create property: blend_mode\n"); + if (blend_modes) { + err = drm_plane_create_blend_mode_property(plane, blend_modes); + if (err) + DRM_ERROR("failed to create property: blend_mode\n"); + } drm_plane_helper_add(plane, &mtk_plane_helper_funcs); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 5b177eac67b7..3b13b89989c7 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct 
drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx); + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 6623ee4e3277..9f5925693686 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -85,34 +84,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size) return offset - 65536; } -static irqreturn_t mgag200_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = arg; - struct mga_device *mdev = to_mga_device(dev); - struct drm_crtc *crtc; - u32 status, ien; - - status = RREG32(MGAREG_STATUS); - - if (status & MGAREG_STATUS_VLINEPEN) { - ien = RREG32(MGAREG_IEN); - if (!(ien & MGAREG_IEN_VLINEIEN)) - goto out; - - crtc = drm_crtc_from_index(dev, 0); - if (WARN_ON_ONCE(!crtc)) - goto out; - drm_crtc_handle_vblank(crtc); - - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - return IRQ_HANDLED; - } - -out: - return IRQ_NONE; -} - /* * DRM driver */ @@ -196,7 +167,6 @@ int mgag200_device_init(struct mga_device *mdev, const struct mgag200_device_funcs *funcs) { struct drm_device *dev = &mdev->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); u8 crtcext3, misc; int ret; @@ -223,14 +193,6 @@ int mgag200_device_init(struct mga_device *mdev, mutex_unlock(&mdev->rmmio_lock); WREG32(MGAREG_IEN, 0); - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED, - dev->driver->name, dev); - if (ret) { - drm_err(dev, "Failed to acquire interrupt, error %d\n", ret); - return ret; - } return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 4760ba92871b..988967eafbf2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -391,24 +391,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); #define MGAG200_CRTC_HELPER_FUNCS \ .mode_valid = mgag200_crtc_helper_mode_valid, \ .atomic_check = mgag200_crtc_helper_atomic_check, \ .atomic_flush = mgag200_crtc_helper_atomic_flush, \ .atomic_enable = mgag200_crtc_helper_atomic_enable, \ - .atomic_disable = mgag200_crtc_helper_atomic_disable, \ - .get_scanout_position = mgag200_crtc_helper_get_scanout_position + .atomic_disable = mgag200_crtc_helper_atomic_disable void mgag200_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc); void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state); -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc); -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); #define MGAG200_CRTC_FUNCS \ .reset = 
mgag200_crtc_reset, \ @@ -416,10 +409,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); .set_config = drm_atomic_helper_set_config, \ .page_flip = drm_atomic_helper_page_flip, \ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \ - .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \ - .enable_vblank = mgag200_crtc_enable_vblank, \ - .disable_vblank = mgag200_crtc_disable_vblank, \ - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp + .atomic_destroy_state = mgag200_crtc_atomic_destroy_state void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode, bool set_vidrst); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c index 77ce8d36cef0..f874e2949840 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c index 09ced65c1d2f..e2305f8e00f8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c index 5daa469137bd..11ae76eb081d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c @@ -7,7 +7,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index 09cfffafe130..c20ed0ab50ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { @@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = { @@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); 
drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index 3d48baa91d8b..78be964eb97c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { @@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = { @@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c index dabc778e64e8..31624c9ab7b7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c @@ -7,7 +7,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index 9dcbe8304271..7a32d3b1d226 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { @@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = { @@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c index 83a24aedbf2f..a0e7b9ad46cd 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c +++ 
b/drivers/gpu/drm/mgag200/mgag200_g200wb.c @@ -8,7 +8,6 @@ #include #include #include -#include #include "mgag200_drv.h" @@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 7159909aca1e..fb71658c3117 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -22,7 +22,6 @@ #include #include #include -#include #include "mgag200_ddc.h" #include "mgag200_drv.h" @@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod vblkstr = mode->crtc_vblank_start; vblkend = vtotal + 1; - /* - * There's no VBLANK interrupt on Matrox chipsets, so we use - * the VLINE interrupt instead. It triggers when the current - * has been reached. For VBLANK, this is the first - * non-visible line at the bottom of the screen. Therefore, - * keep in sync with . - */ - linecomp = vblkstr; + linecomp = vdispend; misc = RREG8(MGA_MISC_IN); @@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); - struct drm_pending_vblank_event *event; - unsigned long flags; if (crtc_state->enable && crtc_state->color_mgmt_changed) { const struct drm_format_info *format = mgag200_crtc_state->format; @@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s else mgag200_crtc_set_gamma_linear(mdev, format); } - - event = crtc->state->event; - if (event) { - crtc->state->event = NULL; - - spin_lock_irqsave(&dev->event_lock, flags); - if (drm_crtc_vblank_get(crtc) != 0) - drm_crtc_send_vblank_event(crtc, event); - else - drm_crtc_arm_vblank_event(crtc, event); - spin_unlock_irqrestore(&dev->event_lock, flags); - } } void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) @@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) { struct mga_device *mdev = to_mga_device(crtc->dev); - drm_crtc_vblank_off(crtc); - mgag200_disable_display(mdev); } -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 vcount; - - if (stime) - *stime = ktime_get(); - - if (vpos) { - vcount = RREG32(MGAREG_VCOUNT); - *vpos = vcount & GENMASK(11, 0); - } - - if (hpos) - *hpos = mode->htotal >> 1; // near middle of scanline on average - - if (etime) - *etime = ktime_get(); - - return true; -} - void mgag200_crtc_reset(struct drm_crtc *crtc) { struct mgag200_crtc_state *mgag200_crtc_state; @@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st kfree(mgag200_crtc_state); } -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - WREG32(MGAREG_ICLEAR, 
MGAREG_ICLEAR_VLINEICLR); - - ien = RREG32(MGAREG_IEN); - ien |= MGAREG_IEN_VLINEIEN; - WREG32(MGAREG_IEN, ien); - - return 0; -} - -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - ien = RREG32(MGAREG_IEN); - ien &= ~(MGAREG_IEN_VLINEIEN); - WREG32(MGAREG_IEN, ien); -} - /* * Mode config */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 06cab2c6fd66..702b8d4b3497 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -101,9 +101,10 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, } static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, - struct msm_ringbuffer *ring, struct msm_file_private *ctx) + struct msm_ringbuffer *ring, struct msm_gem_submit *submit) { bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + struct msm_file_private *ctx = submit->queue->ctx; struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; phys_addr_t ttbr; u32 asid; @@ -115,6 +116,15 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) return; + if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { + /* Wait for previous submit to complete before continuing: */ + OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4); + OUT_RING(ring, 0); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, submit->seqno - 1); + } + if (!sysprof) { if (!adreno_is_a7xx(adreno_gpu)) { /* Turn off protected mode to write to special registers */ @@ -193,7 +203,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); @@ -283,7 +293,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR); - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4c1be2f0555f..db6c57900781 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -711,12 +711,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc) _dpu_crtc_complete_flip(crtc); } -static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, +static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); struct drm_display_mode *adj_mode = &state->adjusted_mode; u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); int i; for (i = 0; i < cstate->num_mixers; i++) { @@ -727,7 +728,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, r->y2 = adj_mode->vdisplay; trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); + + if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) + return -E2BIG; } + + return 0; } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state 
*state, @@ -803,7 +809,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); + _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state); /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) @@ -1091,9 +1097,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, dpu_core_perf_crtc_update(crtc, 0); - memset(cstate->mixers, 0, sizeof(cstate->mixers)); - cstate->num_mixers = 0; - /* disable clk & bw control until clk & bw properties are set */ cstate->bw_control = false; cstate->bw_split_vote = false; @@ -1192,8 +1195,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, if (crtc_state->active_changed) crtc_state->mode_changed = true; - if (cstate->num_mixers) - _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + if (cstate->num_mixers) { + rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); + if (rc) + return rc; + } /* FIXME: move this to dpu_plane_atomic_check? */ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 3b171bf227d1..bd3698bf0cf7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -624,6 +624,40 @@ static struct msm_display_topology dpu_encoder_get_topology( return topology; } +static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms, + struct drm_encoder *drm_enc, + struct dpu_global_state *global_state, + struct drm_crtc_state *crtc_state) +{ + struct dpu_crtc_state *cstate; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_dspp, i; + + cstate = to_dpu_crtc_state(crtc_state); + + memset(cstate->mixers, 0, sizeof(cstate->mixers)); + + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + cstate->mixers[i].hw_dspp = i < num_dspp ? 
to_dpu_hw_dspp(hw_dspp[i]) : NULL; + } + + cstate->num_mixers = num_lm; +} + static int dpu_encoder_virt_atomic_check( struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, @@ -692,6 +726,9 @@ static int dpu_encoder_virt_atomic_check( if (!crtc_state->active_changed || crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); + if (!ret) + dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc, + global_state, crtc_state); } trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); @@ -1093,14 +1130,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_encoder_virt *dpu_enc; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct dpu_crtc_state *cstate; struct dpu_global_state *global_state; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; - int num_lm, num_ctl, num_pp, num_dsc; + int num_ctl, num_pp, num_dsc; unsigned int dsc_mask = 0; int i; @@ -1129,11 +1163,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, ARRAY_SIZE(hw_pp)); num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); - num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, - ARRAY_SIZE(hw_dspp)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) @@ -1159,36 +1188,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; } - cstate = to_dpu_crtc_state(crtc_state); - - for (i = 0; i < num_lm; i++) { - int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); - - cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); - cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); - cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); - } - - cstate->num_mixers = num_lm; - for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!dpu_enc->hw_pp[i]) { + phys->hw_pp = dpu_enc->hw_pp[i]; + if (!phys->hw_pp) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); return; } - if (!hw_ctl[i]) { + phys->hw_ctl = i < num_ctl ? 
to_dpu_hw_ctl(hw_ctl[i]) : NULL; + if (!phys->hw_ctl) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); return; } - phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); - phys->cached_mode = crtc_state->adjusted_mode; if (phys->ops.atomic_mode_set) phys->ops.atomic_mode_set(phys, crtc_state, conn_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index ba8878d21cf0..d8a2edebfe8c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); - if (phys_enc->hw_pp->merge_3d) + if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); @@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) struct dpu_hw_ctl *ctl; const struct msm_format *fmt; u32 fmt_fourcc; + u32 mode_3d; ctl = phys_enc->hw_ctl; fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc); fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0); + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG_VIDENC(phys_enc, "\n"); @@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) goto skip_flush; ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx); - if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d) + if (mode_3d && ctl->ops.update_pending_flush_merge_3d && + phys_enc->hw_pp->merge_3d) ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx); if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 882c717859ce..07035ab77b79 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) struct dpu_hw_pingpong *hw_pp; struct dpu_hw_cdm *hw_cdm; u32 pending_flush = 0; + u32 mode_3d; if (!phys_enc) return; @@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) hw_pp = phys_enc->hw_pp; hw_ctl = phys_enc->hw_ctl; hw_cdm = phys_enc->hw_cdm; + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); @@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) if (hw_ctl->ops.update_pending_flush_wb) hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx); - if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d) + if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d && + hw_pp && hw_pp->merge_3d) hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, hw_pp->merge_3d->idx); diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index add72bbc28b1..4d55e3cf570f 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b end_addr 
= base_addr + aligned_len; if (!(*reg)) - *reg = kzalloc(len_padded, GFP_KERNEL); + *reg = kvzalloc(len_padded, GFP_KERNEL); if (*reg) dump_addr = *reg; @@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b } } -static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr, - struct drm_printer *p) +static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len, + void __iomem *base_addr, struct drm_printer *p) { int i; - u32 *dump_addr = NULL; void __iomem *addr; u32 num_rows; + if (!dump_addr) { + drm_printf(p, "Registers not stored\n"); + return; + } + addr = base_addr; num_rows = len / REG_DUMP_ALIGN; - if (*reg) - dump_addr = *reg; - for (i = 0; i < num_rows; i++) { drm_printf(p, "0x%lx : %08x %08x %08x %08x\n", (unsigned long)(addr - base_addr), @@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); - msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p); + msm_disp_state_print_regs(block->state, block->size, block->base_addr, p); } drm_printf(p, "===================dpu drm state================\n"); @@ -161,7 +162,7 @@ void msm_disp_state_free(void *data) list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) { list_del(&block->node); - kfree(block->state); + kvfree(block->state); kfree(block); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 185d7de0bf37..a98d24b7cb00 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; - return new_htotal * mode->vtotal * drm_mode_vrefresh(mode); + return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); } static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, @@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, { unsigned long pclk_rate; - pclk_rate = mode->clock * 1000; + pclk_rate = mode->clock * 1000u; if (dsc) pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c index 0e3a2b16a2ce..e6ffaf92d26d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -153,15 +153,6 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) return dividend - 1; } -static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) -{ - u64 fdata = ((u64)pll_cmp) * ref_clk * 10; - - do_div(fdata, HDMI_PLL_CMP_CNT); - - return fdata; -} - #define HDMI_REF_CLOCK_HZ ((u64)19200000) #define HDMI_MHZ_TO_HZ ((u64)1000000) static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 9e6f39912368..a2055f2a014a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -210,7 +210,7 @@ struct nvkm_gsp { } *rm; struct { - struct mutex mutex;; + struct mutex mutex; struct idr idr; } client_id; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1f2d649f4b96..1a072568cef6 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) if (!spage || !(src & MIGRATE_PFN_MIGRATE)) goto done; - dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); + dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address); if (!dpage) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index f6e78dba594f..34985771b2a2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm) return; } - ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan); + ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); } diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c index 6e4b7e4644ce..8b48bba18131 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83102.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c @@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) msleep(60); hx83102_enable_extended_cmds(&dsi_ctx, true); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42, + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52, 0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88, 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12, @@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, - 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, - 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12, - 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a, - 0x41, 0x50, 0x68, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, + 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, + 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48, + 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c, + 0x4d, 0x56, 0x5d, 0x73); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e, 0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02, 0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01); diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 4082c8f2951d..6fbff516c1c1 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct * { u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + switch (offset) { case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: if (vma->vm_end - vma->vm_start != PAGE_SIZE || (vma->vm_flags & (VM_WRITE | VM_EXEC))) return -EINVAL; + vm_flags_clear(vma, VM_MAYWRITE); break; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c 
b/drivers/gpu/drm/panthor/panthor_fw.c index ef232c0c2049..4e2d3a02ea06 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, struct panthor_fw_binary_iter *iter, u32 ehdr) { + ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm); struct panthor_fw_binary_section_entry_hdr hdr; struct panthor_fw_section *section; u32 section_size; @@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, return -EINVAL; } - if ((hdr.va.start & ~PAGE_MASK) != 0 || - (hdr.va.end & ~PAGE_MASK) != 0) { + if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) { drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n", hdr.va.start, hdr.va.end); return -EINVAL; diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 38f560864879..be97d56bc011 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) goto out_free_bo; - ret = panthor_vm_unmap_range(vm, bo->va_node.start, - panthor_kernel_bo_size(bo)); + ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); if (ret) goto out_free_bo; @@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, } bo = to_panthor_bo(&obj->base); - size = obj->base.size; kbo->obj = &obj->base; bo->flags = bo_flags; + /* The system and GPU MMU page size might differ, which becomes a + * problem for FW sections that need to be mapped at explicit address + * since our PAGE_SIZE alignment might cover a VA range that's + * expected to be used for another section. + * Make sure we never map more than we need. 
+ */ + size = ALIGN(size, panthor_vm_page_size(vm)); ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node); if (ret) goto err_put_obj; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 3cd2bce59edc..7db2edb3374c 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm) mutex_unlock(&ptdev->mmu->as.slots_lock); } +u32 panthor_vm_page_size(struct panthor_vm *vm) +{ + const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); + u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1; + + return 1u << pg_shift; +} + static void panthor_vm_stop(struct panthor_vm *vm) { drm_sched_stop(&vm->sched, NULL); @@ -1025,12 +1033,13 @@ int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, struct drm_mm_node *va_node) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); int ret; - if (!size || (size & ~PAGE_MASK)) + if (!size || !IS_ALIGNED(size, vm_pgsz)) return -EINVAL; - if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK)) + if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) return -EINVAL; mutex_lock(&vm->mm_lock); @@ -1571,7 +1580,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle) { struct panthor_vm *vm; + xa_lock(&pool->xa); vm = panthor_vm_get(xa_load(&pool->xa, handle)); + xa_unlock(&pool->xa); return vm; } @@ -2366,11 +2377,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file, const struct drm_panthor_vm_bind_op *op, struct panthor_vm_op_ctx *op_ctx) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); struct drm_gem_object *gem; int ret; /* Aligned on page size. */ - if ((op->va | op->size) & ~PAGE_MASK) + if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) return -EINVAL; switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index 6788771071e3..8d21e83d8aba 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset); int panthor_vm_active(struct panthor_vm *vm); void panthor_vm_idle(struct panthor_vm *vm); +u32 panthor_vm_page_size(struct panthor_vm *vm); int panthor_vm_as(struct panthor_vm *vm); int panthor_vm_flush_all(struct panthor_vm *vm); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index aee362abb710..9929e22f4d8d 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -589,10 +589,11 @@ struct panthor_group { * @timedout: True when a timeout occurred on any of the queues owned by * this group. * - * Timeouts can be reported by drm_sched or by the FW. In any case, any - * timeout situation is unrecoverable, and the group becomes useless. - * We simply wait for all references to be dropped so we can release the - * group object. + * Timeouts can be reported by drm_sched or by the FW. If a reset is required, + * and the group can't be suspended, this also leads to a timeout. In any case, + * any timeout situation is unrecoverable, and the group becomes useless. We + * simply wait for all references to be dropped so we can release the group + * object. 
*/ bool timedout; @@ -2640,6 +2641,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev) csgs_upd_ctx_init(&upd_ctx); while (slot_mask) { u32 csg_id = ffs(slot_mask) - 1; + struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; + + /* We consider group suspension failures as fatal and flag the + * group as unusable by setting timedout=true. + */ + csg_slot->group->timedout = true; csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, CSG_STATE_TERMINATE, @@ -3409,6 +3416,11 @@ panthor_job_create(struct panthor_file *pfile, goto err_put_job; } + if (!group_can_run(job->group)) { + ret = -EINVAL; + goto err_put_job; + } + if (job->queue_idx >= job->group->queue_count || !job->group->queues[job->queue_idx]) { ret = -EINVAL; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fca8b08535a5..6328627b7c34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; - int ret; radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; - radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev; if (ASIC_IS_DCE5(rdev)) { if (radeon_auxch) @@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; } - ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); - if (!ret) - radeon_connector->ddc_bus->has_aux = true; - - WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); + drm_dp_aux_init(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 528a8f3677c2..f9c73c55f04f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1786,6 +1786,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector return MODE_OK; } +static int +radeon_connector_late_register(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int r = 0; + + if (radeon_connector->ddc_bus->has_aux) { + radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; + r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); + } + + return r; +} + static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, @@ -1800,6 +1814,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_edp_connector_funcs = { @@ -1810,6 +1825,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { @@ -1820,6 +1836,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { 
.early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; void diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 0f723292409e..fafed331e0a0 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *clone_encoder; - uint32_t index_mask = 0; + uint32_t index_mask = drm_encoder_mask(encoder); int count; /* DIG routing gets problematic */ diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 9735f4968b86..bf2d4b16dc2a 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -const struct drm_gem_object_funcs radeon_gem_object_funcs; - static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -132,7 +130,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, return r; } *obj = &robj->tbo.base; - (*obj)->funcs = &radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d0e4b43d155c..7672404fdb29 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size); + bo->tbo.base.funcs = &radeon_gem_object_funcs; bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 6f27cab0b76d..e97c6c60bc96 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -87,6 +87,12 @@ #define CREATE_TRACE_POINTS #include "gpu_scheduler_trace.h" +#ifdef CONFIG_LOCKDEP +static struct lockdep_map drm_sched_lockdep_map = { + .name = "drm_sched_lockdep_map" +}; +#endif + #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) @@ -1269,7 +1275,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->submit_wq = submit_wq; sched->own_submit_wq = false; } else { - sched->submit_wq = alloc_ordered_workqueue(name, 0); +#ifdef CONFIG_LOCKDEP + sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, + WQ_MEM_RECLAIM, + &drm_sched_lockdep_map); +#else + sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); +#endif if (!sched->submit_wq) return -ENOMEM; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index c9eb329665ec..34d22ba210b0 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1153,8 +1153,8 @@ static int host1x_drm_probe(struct host1x_device *dev) if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) { tegra->domain = iommu_paging_domain_alloc(dma_dev); - if (!tegra->domain) { - err = -ENOMEM; + if (IS_ERR(tegra->domain)) { + err = 
PTR_ERR(tegra->domain); goto free; } diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 4de1ea0fc7c0..00c8564520e7 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,7 +46,6 @@ struct gr3d { unsigned int nclocks; struct reset_control_bulk_data resets[RST_GR3D_MAX]; unsigned int nresets; - struct dev_pm_domain_list *pd_list; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -370,12 +369,18 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, return 0; } +static void gr3d_del_link(void *link) +{ + device_link_del(link); +} + static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) { - struct dev_pm_domain_attach_data pd_data = { - .pd_names = (const char *[]) { "3d0", "3d1" }, - .num_pd_names = 2, - }; + static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; + const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + struct device **opp_virt_devs, *pd_dev; + struct device_link *link; + unsigned int i; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -409,10 +414,29 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) if (dev->pm_domain) return 0; - err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list); - if (err < 0) + err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); + if (err) return err; + for (i = 0; opp_genpd_names[i]; i++) { + pd_dev = opp_virt_devs[i]; + if (!pd_dev) { + dev_err(dev, "failed to get %s power domain\n", + opp_genpd_names[i]); + return -EINVAL; + } + + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); + return -EINVAL; + } + + err = devm_add_action_or_reset(dev, gr3d_del_link, link); + if (err) + return err; + } + return 0; } @@ -503,13 +527,13 @@ static int gr3d_probe(struct platform_device *pdev) err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (err) - goto err; + return err; err = host1x_client_register(&gr3d->client.base); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); - goto err; + return err; } /* initialize address register map */ @@ -517,9 +541,6 @@ static int gr3d_probe(struct platform_device *pdev) set_bit(gr3d_addr_regs[i], gr3d->addr_regs); return 0; -err: - dev_pm_domain_detach_list(gr3d->pd_list); - return err; } static void gr3d_remove(struct platform_device *pdev) @@ -528,7 +549,6 @@ static void gr3d_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); host1x_client_unregister(&gr3d->client.base); - dev_pm_domain_detach_list(gr3d->pd_list); } static int __maybe_unused gr3d_runtime_suspend(struct device *dev) diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c index 15e36a8db685..6bba97d0be88 100644 --- a/drivers/gpu/drm/tests/drm_connector_test.c +++ b/drivers/gpu/drm/tests/drm_connector_test.c @@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = 
drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB); @@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB); @@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 6); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit unsigned long long rate; unsigned int vic = *(unsigned int *)test->param_value; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & 
DRM_MODE_FLAG_DBLCLK); @@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index 34ee95d41f29..294773342e71 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -555,7 +555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -1263,7 +1263,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); /* diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index aa62719dab0e..04a6b8cc62ac 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test, } EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); +static void kunit_action_drm_mode_destroy(void *ptr) +{ + struct drm_display_mode *mode = ptr; + + drm_mode_destroy(NULL, mode); +} + +/** + * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC + for a KUnit test + * @test: The test context object + * @dev: DRM device + * @video_code: CEA VIC of the mode + * + * Creates a new mode matching the specified CEA VIC for a KUnit test. + * + * Resources will be cleaned up automatically. 
+ * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, + u8 video_code) +{ + struct drm_display_mode *mode; + int ret; + + mode = drm_display_mode_from_cea_vic(dev, video_code); + if (!mode) + return NULL; + + ret = kunit_add_action_or_reset(test, + kunit_action_drm_mode_destroy, + mode); + if (ret) + return NULL; + + return mode; +} +EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic); + MODULE_AUTHOR("Maxime Ripard "); MODULE_DESCRIPTION("KUnit test suite helper functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index cd7f1eedf17f..00cd081d7873 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) static int v3d_perfmon_idr_del(int id, void *elem, void *data) { struct v3d_perfmon *perfmon = elem; + struct v3d_dev *v3d = (struct v3d_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); v3d_perfmon_put(perfmon); @@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data) void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) { + struct v3d_dev *v3d = v3d_priv->v3d; + mutex_lock(&v3d_priv->perfmon.lock); - idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d); idr_destroy(&v3d_priv->perfmon.idr); mutex_unlock(&v3d_priv->perfmon.lock); mutex_destroy(&v3d_priv->perfmon.lock); diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c4ac2c946238..c00a5cc2316d 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) static int vc4_perfmon_idr_del(int id, void *elem, void *data) { struct vc4_perfmon *perfmon = elem; + struct vc4_dev *vc4 = (struct vc4_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == vc4->active_perfmon) + vc4_perfmon_stop(vc4, perfmon, false); vc4_perfmon_put(perfmon); @@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) return; mutex_lock(&vc4file->perfmon.lock); - idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL); + idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4); idr_destroy(&vc4file->perfmon.idr); mutex_unlock(&vc4file->perfmon.lock); mutex_destroy(&vc4file->perfmon.lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 890a66a2361f..64bd7d74854e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -635,10 +635,8 @@ int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst, kunmap_atomic(d.src_addr); if (d.dst_addr) kunmap_atomic(d.dst_addr); - if (src_pages) - kvfree(src_pages); - if (dst_pages) - kvfree(dst_pages); + kvfree(src_pages); + kvfree(dst_pages); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 3f4719b3c268..4e2807f5f94c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -62,7 +62,7 @@ #define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) -#define VMWGFX_MAX_DISPLAYS 16 +#define 
VMWGFX_NUM_DISPLAY_UNITS 8 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 #define VMWGFX_MIN_INITIAL_WIDTH 1280 @@ -82,7 +82,7 @@ #define VMWGFX_NUM_GB_CONTEXT 256 #define VMWGFX_NUM_GB_SHADER 20000 #define VMWGFX_NUM_GB_SURFACE 32768 -#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS #define VMWGFX_NUM_DXCONTEXT 256 #define VMWGFX_NUM_DXQUERY 512 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 288ed0bb75cb..63b8d7591253 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1283,7 +1283,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, { struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_surface *vfbs; - enum SVGA3dSurfaceFormat format; struct vmw_surface *surface; int ret; @@ -1320,34 +1319,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, return -EINVAL; } - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_ARGB8888: - format = SVGA3D_A8R8G8B8; - break; - case DRM_FORMAT_XRGB8888: - format = SVGA3D_X8R8G8B8; - break; - case DRM_FORMAT_RGB565: - format = SVGA3D_R5G6B5; - break; - case DRM_FORMAT_XRGB1555: - format = SVGA3D_A1R5G5B5; - break; - default: - DRM_ERROR("Invalid pixel format: %p4cc\n", - &mode_cmd->pixel_format); - return -EINVAL; - } - - /* - * For DX, surface format validation is done when surface->scanout - * is set. - */ - if (!has_sm4_context(dev_priv) && format != surface->metadata.format) { - DRM_ERROR("Invalid surface format for requested mode.\n"); - return -EINVAL; - } - vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; @@ -1539,6 +1510,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, DRM_ERROR("Surface size cannot exceed %dx%d\n", dev_priv->texture_max_width, dev_priv->texture_max_height); + ret = -EINVAL; goto err_out; } @@ -2225,7 +2197,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; - void __user *user_rects; + const void __user *user_rects; struct drm_vmw_rect *rects; struct drm_rect *drm_rects; unsigned rects_size; @@ -2237,6 +2209,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; + } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) { + return -E2BIG; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 6141fadf81ef..2a6c6d6581e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -199,9 +199,6 @@ struct vmw_kms_dirty { s32 unit_y2; }; -#define VMWGFX_NUM_DISPLAY_UNITS 8 - - #define vmw_framebuffer_to_vfb(x) \ container_of(x, struct vmw_framebuffer, base) #define vmw_framebuffer_to_vfbs(x) \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index fab155a68054..82d18b88f4a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *new_crtc_state; conn_state = drm_atomic_get_connector_state(state, conn); + + if (IS_ERR(conn_state)) + return 
PTR_ERR(conn_state); + du = vmw_connector_to_stdu(conn); if (!conn_state->crtc) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 1625b30d9970..5721c74da3e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv, const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE | SVGA3D_SURFACE_HINT_RENDERTARGET | - SVGA3D_SURFACE_SCREENTARGET | - SVGA3D_SURFACE_BIND_SHADER_RESOURCE | - SVGA3D_SURFACE_BIND_RENDER_TARGET; + SVGA3D_SURFACE_SCREENTARGET; + + if (vmw_surface_is_dx_screen_target_format(format)) { + flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE | + SVGA3D_SURFACE_BIND_RENDER_TARGET; + } /* * Without mob support we're just going to use raw memory buffer diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 75736faf2a80..c6e0c8d77a70 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -309,18 +309,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe) } /* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */ -void xe_display_pm_runtime_suspend(struct xe_device *xe) -{ - if (!xe->info.probe_display) - return; - - if (xe->d3cold.allowed) - xe_display_pm_suspend(xe, true); - - intel_hpd_poll_enable(xe); -} - -void xe_display_pm_suspend(struct xe_device *xe, bool runtime) +static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) { struct intel_display *display = &xe->display; bool s2idle = suspend_to_idle(); @@ -353,6 +342,27 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime) intel_opregion_suspend(display, s2idle ? 
PCI_D1 : PCI_D3cold); intel_dmc_suspend(xe); + + if (runtime && has_display(xe)) + intel_hpd_poll_enable(xe); +} + +void xe_display_pm_suspend(struct xe_device *xe) +{ + __xe_display_pm_suspend(xe, false); +} + +void xe_display_pm_runtime_suspend(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) { + __xe_display_pm_suspend(xe, true); + return; + } + + intel_hpd_poll_enable(xe); } void xe_display_pm_suspend_late(struct xe_device *xe) @@ -366,17 +376,6 @@ void xe_display_pm_suspend_late(struct xe_device *xe) intel_display_power_suspend_late(xe); } -void xe_display_pm_runtime_resume(struct xe_device *xe) -{ - if (!xe->info.probe_display) - return; - - intel_hpd_poll_disable(xe); - - if (xe->d3cold.allowed) - xe_display_pm_resume(xe, true); -} - void xe_display_pm_resume_early(struct xe_device *xe) { if (!xe->info.probe_display) @@ -387,7 +386,7 @@ void xe_display_pm_resume_early(struct xe_device *xe) intel_power_domains_resume(xe); } -void xe_display_pm_resume(struct xe_device *xe, bool runtime) +static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) { struct intel_display *display = &xe->display; @@ -411,9 +410,11 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_display_driver_resume(xe); drm_kms_helper_poll_enable(&xe->drm); intel_display_driver_enable_user_access(xe); - intel_hpd_poll_disable(xe); } + if (has_display(xe)) + intel_hpd_poll_disable(xe); + intel_opregion_resume(display); intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); @@ -421,6 +422,26 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_power_domains_enable(xe); } +void xe_display_pm_resume(struct xe_device *xe) +{ + __xe_display_pm_resume(xe, false); +} + +void xe_display_pm_runtime_resume(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) { + __xe_display_pm_resume(xe, true); + return; + } + + intel_hpd_init(xe); + intel_hpd_poll_disable(xe); +} + + static void display_device_remove(struct drm_device *dev, void *arg) { struct xe_device *xe = arg; diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index 53d727fd792b..bed55fd26f30 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir); void xe_display_irq_reset(struct xe_device *xe); void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); -void xe_display_pm_suspend(struct xe_device *xe, bool runtime); +void xe_display_pm_suspend(struct xe_device *xe); void xe_display_pm_suspend_late(struct xe_device *xe); void xe_display_pm_resume_early(struct xe_device *xe); -void xe_display_pm_resume(struct xe_device *xe, bool runtime); +void xe_display_pm_resume(struct xe_device *xe); void xe_display_pm_runtime_suspend(struct xe_device *xe); void xe_display_pm_runtime_resume(struct xe_device *xe); @@ -65,10 +65,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) static inline void xe_display_irq_reset(struct xe_device *xe) {} static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} -static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {} +static inline void xe_display_pm_suspend(struct xe_device *xe) {} static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} static inline void xe_display_pm_resume_early(struct xe_device *xe) {} -static 
inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {} +static inline void xe_display_pm_resume(struct xe_device *xe) {} static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index ac9c437e103d..bd604b9f08e4 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -393,9 +393,6 @@ #define XE2_GLOBAL_INVAL XE_REG(0xb404) -#define SCRATCH1LPFC XE_REG(0xb474) -#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0) - #define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604) #define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608) @@ -520,7 +517,7 @@ * [4-6] RSVD * [7] Disabled */ -#define CCS_MODE XE_REG(0x14804) +#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED) #define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */ #define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */ #define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1) diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index 668615c6b172..fe4319eb13fd 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -187,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe) debugfs_create_file("forcewake_all", 0400, root, xe, &forcewake_all_fops); - debugfs_create_file("wedged_mode", 0400, root, xe, + debugfs_create_file("wedged_mode", 0600, root, xe, &wedged_mode_fops); for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) { diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 5a63d135ba96..a1987b554a8d 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -87,10 +87,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) mutex_init(&xef->exec_queue.lock); xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); - spin_lock(&xe->clients.lock); - xe->clients.count++; - spin_unlock(&xe->clients.lock); - file->driver_priv = xef; kref_init(&xef->refcount); @@ -107,17 +103,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) static void xe_file_destroy(struct kref *ref) { struct xe_file *xef = container_of(ref, struct xe_file, refcount); - struct xe_device *xe = xef->xe; xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); xa_destroy(&xef->vm.xa); mutex_destroy(&xef->vm.lock); - spin_lock(&xe->clients.lock); - xe->clients.count--; - spin_unlock(&xe->clients.lock); - xe_drm_client_put(xef->client); kfree(xef->process_name); kfree(xef); @@ -333,7 +324,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->info.force_execlist = xe_modparam.force_execlist; spin_lock_init(&xe->irq.lock); - spin_lock_init(&xe->clients.lock); init_waitqueue_head(&xe->ufence_wq); @@ -890,7 +880,7 @@ void xe_device_l2_flush(struct xe_device *xe) spin_lock(>->global_invl_lock); xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) + if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(>->global_invl_lock); @@ -980,13 +970,13 @@ void xe_device_declare_wedged(struct xe_device *xe) return; } + xe_pm_runtime_get_noresume(xe); + if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. 
Although device is wedged.\n"); return; } - xe_pm_runtime_get_noresume(xe); - if (!atomic_xchg(&xe->wedged.flag, 1)) { xe->needs_flr_on_fini = true; drm_err(&xe->drm, diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 894f04770454..34620ef855c0 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -178,4 +178,18 @@ void xe_device_declare_wedged(struct xe_device *xe); struct xe_file *xe_file_get(struct xe_file *xef); void xe_file_put(struct xe_file *xef); +/* + * Occasionally it is seen that the G2H worker starts running after a delay of more than + * a second even after being queued and activated by the Linux workqueue subsystem. This + * leads to G2H timeout error. The root cause of issue lies with scheduling latency of + * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS + * and this is beyond xe kmd. + * + * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU. + */ +#define LNL_FLUSH_WORKQUEUE(wq__) \ + flush_workqueue(wq__) +#define LNL_FLUSH_WORK(wrk__) \ + flush_work(wrk__) + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 09d731a9125c..687f3a9039bb 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -353,15 +353,6 @@ struct xe_device { struct workqueue_struct *wq; } sriov; - /** @clients: drm clients info */ - struct { - /** @clients.lock: Protects drm clients info */ - spinlock_t lock; - - /** @clients.count: number of drm clients */ - u64 count; - } clients; - /** @usm: unified memory state */ struct { /** @usm.asid: convert a ASID to VM */ diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 7b38485817dc..756b492f13b0 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -41,11 +41,6 @@ * user knows an exec writes to a BO and reads from the BO in the next exec, it * is the user's responsibility to pass in / out fence between the two execs). * - * Implicit dependencies for external BOs are handled by using the dma-buf - * implicit dependency uAPI (TODO: add link). To make this works each exec must - * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external - * BO mapped in the VM. - * * We do not allow a user to trigger a bind at exec time rather we have a VM * bind IOCTL which uses the same in / out fence interface as exec. In that * sense, a VM bind is basically the same operation as an exec from the user @@ -59,8 +54,8 @@ * behind any pending kernel operations on any external BOs in VM or any BOs * private to the VM. This is accomplished by the rebinds waiting on BOs * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs - * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and - * in DMA_RESV_USAGE_WRITE for external BOs). + * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and + * for external BOs). * * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute * mode VMs we use preempt fences and a rebind worker (TODO: add link). 
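(A hedged illustration of how the LNL_FLUSH_WORK()/LNL_FLUSH_WORKQUEUE() helpers defined above are meant to be used at wait sites; the names "wq", "worker" and "g2h_done" are placeholders, and the real call sites are the xe_guc_ct.c and xe_wait_user_fence.c hunks further down: on timeout, flush the worker that would deliver the completion and re-check before returning -ETIME.

	ret = wait_event_timeout(wq, g2h_done, HZ);
	if (!ret) {
		LNL_FLUSH_WORK(&worker);	/* or LNL_FLUSH_WORKQUEUE(ordered_wq) */
		if (g2h_done)
			ret = 1;		/* completion raced the timeout */
		else
			ret = -ETIME;
	}
)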
@@ -137,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) - return -EINVAL; + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, args->num_batch_buffer && - q->width != args->num_batch_buffer)) - return -EINVAL; + q->width != args->num_batch_buffer)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) { err = -ECANCELED; @@ -225,6 +224,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); if (IS_ERR(fence)) { err = PTR_ERR(fence); + xe_vm_unlock(vm); goto err_unlock_list; } for (i = 0; i < num_syncs; i++) @@ -304,7 +304,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) xe_sched_job_arm(job); if (!xe_vm_in_lr_mode(vm)) drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, - DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); for (i = 0; i < num_syncs; i++) { xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished); diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index d098d2dd1b2d..fd0f3b3c9101 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) { int i; + /* + * Before releasing our ref to lrc and xef, accumulate our run ticks + */ + xe_exec_queue_update_run_ticks(q); + for (i = 0; i < q->width; ++i) xe_lrc_put(q->lrc[i]); + __xe_exec_queue_free(q); } diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index b263fff15273..7d9fc489dcb8 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC, &value, true); if (ret) - xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", - domain->id, str_wake_sleep(wake), ERR_PTR(ret), - domain->reg_ack.addr, value); + xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", + domain->id, str_wake_sleep(wake), ERR_PTR(ret), + domain->reg_ack.addr, value); + if (value == ~0) { + xe_gt_err(gt, + "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n", + domain->id, str_wake_sleep(wake)); + ret = -EIO; + } return ret; } diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 2895f154654c..ff19eca5d358 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -397,6 +397,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt) static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) { + struct xe_device *xe = tile_to_xe(ggtt->tile); + + /* + * XXX: Barrier for GGTT pages. Unsure exactly why this required but + * without this LNL is having issues with the GuC reading scratch page + * vs. correct GGTT page. Not particularly a hot code path so blindly + * do a mmio read here which results in GuC reading correct GGTT page. 
+ */ + xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + /* Each GT in a tile has its own TLB to cache GGTT lookups */ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); ggtt_invalidate_gt_tlb(ggtt->tile->media_gt); diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h index 5ad5629a6c60..64b2ae6839db 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h @@ -63,7 +63,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold) static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched, struct xe_sched_job *job) { + spin_lock(&sched->base.job_list_lock); list_add(&job->drm.list, &sched->base.pending_list); + spin_unlock(&sched->base.job_list_lock); } static inline diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index f0dc2bf24c7b..d5fd6a089b7c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -108,7 +108,6 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) return; if (!xe_gt_is_media_type(gt)) { - xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH); reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg |= CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); @@ -874,7 +873,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int ret = 0; if ((!xe_uc_fw_is_available(>->uc.gsc.fw) || - xe_uc_fw_is_loaded(>->uc.gsc.fw)) && XE_WA(gt, 22019338487)) + xe_uc_fw_is_loaded(>->uc.gsc.fw) || + xe_uc_fw_is_in_error_state(>->uc.gsc.fw)) && + XE_WA(gt, 22019338487)) ret = xe_guc_pc_restore_stashed_freq(>->uc.guc.pc); return ret; diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index d2e4dc3aaf61..ffcbd05671fc 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) } } + /* + * Mask bits need to be set for the register. Though only Xe2+ + * platforms require setting of mask bits, it won't harm for older + * platforms as these bits are unused there. + */ + mode |= CCS_MODE_CSLICE_0_3_MASK << 16; xe_mmio_write32(gt, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", @@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, } /* CCS mode can only be updated when there are no drm clients */ - spin_lock(&xe->clients.lock); - if (xe->clients.count) { - spin_unlock(&xe->clients.lock); + mutex_lock(&xe->drm.filelist_mutex); + if (!list_empty(&xe->drm.filelist)) { + mutex_unlock(&xe->drm.filelist_mutex); + xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n"); return -EBUSY; } @@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, xe_gt_reset_async(gt); } - spin_unlock(&xe->clients.lock); + mutex_unlock(&xe->drm.filelist_mutex); return count; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 8250ef71e685..afdb477ecf83 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -387,6 +387,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node) * the xe_ggtt_clear() called by below xe_ggtt_remove_node(). 
*/ xe_ggtt_node_remove(node, false); + } else { + xe_ggtt_node_fini(node); } } @@ -442,7 +444,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) config->ggtt_region = node; return 0; err: - xe_ggtt_node_fini(node); + pf_release_ggtt(tile, node); return err; } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index cca9cf536f76..9d82ea30f4df 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -37,6 +37,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt) return hw_tlb_timeout + 2 * delay; } +static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence) +{ + if (WARN_ON_ONCE(!fence->gt)) + return; + + xe_pm_runtime_put(gt_to_xe(fence->gt)); + fence->gt = NULL; /* fini() should be called once */ +} + static void __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) { @@ -63,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) struct xe_device *xe = gt_to_xe(gt); struct xe_gt_tlb_invalidation_fence *fence, *next; + LNL_FLUSH_WORK(>->uc.guc.ct.g2h_worker); + spin_lock_irq(>->tlb_invalidation.pending_lock); list_for_each_entry_safe(fence, next, >->tlb_invalidation.pending_fences, link) { @@ -204,7 +215,7 @@ static int send_tlb_invalidation(struct xe_guc *guc, tlb_timeout_jiffies(gt)); } spin_unlock_irq(>->tlb_invalidation.pending_lock); - } else if (ret < 0) { + } else { __invalidation_fence_signal(xe, fence); } if (!ret) { @@ -267,10 +278,8 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) xe_gt_tlb_invalidation_fence_init(gt, &fence, true); ret = xe_gt_tlb_invalidation_guc(gt, &fence); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence); + if (ret) return ret; - } xe_gt_tlb_invalidation_fence_wait(&fence); } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) { @@ -496,7 +505,8 @@ static const struct dma_fence_ops invalidation_fence_ops = { * @stack: fence is stack variable * * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini - * must be called if fence is not signaled. + * will be automatically called when fence is signalled (all fences must signal), + * even on error. */ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, @@ -516,14 +526,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, dma_fence_get(&fence->base); fence->gt = gt; } - -/** - * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence - * @fence: TLB invalidation fence to finalize - * - * Drop PM ref which fence took durinig init. 
- */ -void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence) -{ - xe_pm_runtime_put(gt_to_xe(fence->gt)); -} diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index a84065fa324c..f430d5797af7 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -28,7 +28,6 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, bool stack); -void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence); static inline void xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index f24dd5223926..9c505d3517cd 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, num_g2h = 1; if (g2h_fence_needs_alloc(g2h_fence)) { - void *ptr; - g2h_fence->seqno = next_ct_seqno(ct, true); - ptr = xa_store(&ct->fence_lookup, - g2h_fence->seqno, - g2h_fence, GFP_ATOMIC); - if (IS_ERR(ptr)) { - ret = PTR_ERR(ptr); + ret = xa_err(xa_store(&ct->fence_lookup, + g2h_fence->seqno, g2h_fence, + GFP_ATOMIC)); + if (ret) goto out; - } } seqno = g2h_fence->seqno; @@ -879,14 +875,11 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, retry_same_fence: ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence); if (unlikely(ret == -ENOMEM)) { - void *ptr; - /* Retry allocation /w GFP_KERNEL */ - ptr = xa_store(&ct->fence_lookup, - g2h_fence.seqno, - &g2h_fence, GFP_KERNEL); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); + ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno, + &g2h_fence, GFP_KERNEL)); + if (ret) + return ret; goto retry_same_fence; } else if (unlikely(ret)) { @@ -903,16 +896,35 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, } ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); + if (!ret) { - xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x", - g2h_fence.seqno, action[0]); + LNL_FLUSH_WORK(&ct->g2h_worker); + if (g2h_fence.done) { + xe_gt_warn(gt, "G2H fence %u, action %04x, done\n", + g2h_fence.seqno, action[0]); + ret = 1; + } + } + + /* + * Ensure we serialize with completion side to prevent UAF with fence going out of scope on + * the stack, since we have no clue if it will fire after the timeout before we can erase + * from the xa. Also we have some dependent loads and stores below for which we need the + * correct ordering, and we lack the needed barriers. + */ + mutex_lock(&ct->lock); + if (!ret) { + xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s", + g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done)); xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); + mutex_unlock(&ct->lock); return -ETIME; } if (g2h_fence.retry) { xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n", action[0], g2h_fence.reason); + mutex_unlock(&ct->lock); goto retry; } if (g2h_fence.fail) { @@ -921,7 +933,12 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, ret = -EIO; } - return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret; + if (ret > 0) + ret = response_buffer ? 
g2h_fence.response_len : g2h_fence.response_data; + + mutex_unlock(&ct->lock); + + return ret; } /** diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 80062e1d3f66..4f5d00aea716 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -224,80 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) EXEC_QUEUE_STATE_BANNED)); } -#ifdef CONFIG_PROVE_LOCKING -static int alloc_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) { - guc->submission_state.submit_wq_pool[i] = - alloc_ordered_workqueue("submit_wq", 0); - if (!guc->submission_state.submit_wq_pool[i]) - goto err_free; - } - - return 0; - -err_free: - while (i) - destroy_workqueue(guc->submission_state.submit_wq_pool[--i]); - - return -ENOMEM; -} - -static void free_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) - destroy_workqueue(guc->submission_state.submit_wq_pool[i]); -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ; - - return guc->submission_state.submit_wq_pool[idx]; -} -#else -static int alloc_submit_wq(struct xe_guc *guc) -{ - return 0; -} - -static void free_submit_wq(struct xe_guc *guc) -{ - -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - return NULL; -} -#endif - -static void xe_guc_submit_fini(struct xe_guc *guc) -{ - struct xe_device *xe = guc_to_xe(guc); - struct xe_gt *gt = guc_to_gt(guc); - int ret; - - ret = wait_event_timeout(guc->submission_state.fini_wq, - xa_empty(&guc->submission_state.exec_queue_lookup), - HZ * 5); - - drain_workqueue(xe->destroy_wq); - - xe_gt_assert(gt, ret); -} - static void guc_submit_fini(struct drm_device *drm, void *arg) { struct xe_guc *guc = arg; - xe_guc_submit_fini(guc); xa_destroy(&guc->submission_state.exec_queue_lookup); - free_submit_wq(guc); } static void guc_submit_wedged_fini(void *arg) @@ -359,10 +290,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) if (err) return err; - err = alloc_submit_wq(guc); - if (err) - return err; - gt->exec_queue_ops = &guc_exec_queue_ops; xa_init(&guc->submission_state.exec_queue_lookup); @@ -393,7 +320,6 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) { int ret; - void *ptr; int i; /* @@ -413,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) q->guc->id = ret; for (i = 0; i < q->width; ++i) { - ptr = xa_store(&guc->submission_state.exec_queue_lookup, - q->guc->id + i, q, GFP_NOWAIT); - if (IS_ERR(ptr)) { - ret = PTR_ERR(ptr); + ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup, + q->guc->id + i, q, GFP_NOWAIT)); + if (ret) goto err_release; - } } return 0; @@ -821,8 +745,6 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) { struct xe_sched_job *job = to_xe_sched_job(drm_job); - xe_exec_queue_update_run_ticks(job->q); - trace_xe_sched_job_free(job); xe_sched_job_put(job); } @@ -992,12 +914,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) { struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); - u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); - u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + u32 ctx_timestamp, ctx_job_timestamp; u32 timeout_ms = 
q->sched_props.job_timeout_ms; u32 diff; u64 running_time_ms; + if (!xe_sched_job_started(job)) { + xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started", + xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), + q->guc->id); + + return xe_sched_invalidate_job(job, 2); + } + + ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); + ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + /* * Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch * possible overflows with a high timeout. @@ -1106,10 +1038,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) /* * TDR has fired before free job worker. Common if exec queue - * immediately closed after last fence signaled. + * immediately closed after last fence signaled. Add back to pending + * list so job can be freed and kick scheduler ensuring free job is not + * lost. */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { - guc_exec_queue_free_job(drm_job); + xe_sched_add_pending_job(sched, job); + xe_sched_submission_start(sched); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -1122,10 +1057,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) exec_queue_killed_or_banned_or_wedged(q) || exec_queue_destroyed(q); - /* Job hasn't started, can't be timed out */ - if (!skip_timeout_check && !xe_sched_job_started(job)) - goto rearm; - /* * XXX: Sampling timeout doesn't work in wedged mode as we have to * modify scheduling state to read timestamp. We could read the @@ -1482,8 +1413,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(q->sched_props.job_timeout_ms); err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, - get_submit_wq(guc), - q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, + NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, timeout, guc_to_gt(guc)->ordered_wq, NULL, q->name, gt_to_xe(q->gt)->drm.dev); if (err) @@ -1800,8 +1730,13 @@ void xe_guc_submit_stop(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_stop(guc, q); + } mutex_unlock(&guc->submission_state.lock); @@ -1839,8 +1774,13 @@ int xe_guc_submit_start(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to start parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_start(q); + } mutex_unlock(&guc->submission_state.lock); wake_up_all(&guc->ct.wq); diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index 69046f698271..ed150fc09ad0 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -72,13 +72,6 @@ struct xe_guc { atomic_t stopped; /** @submission_state.lock: protects submission state */ struct mutex lock; -#ifdef CONFIG_PROVE_LOCKING -#define NUM_SUBMIT_WQ 256 - /** @submission_state.submit_wq_pool: submission ordered workqueues pool */ - struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ]; - /** @submission_state.submit_wq_idx: submission ordered workqueue index */ - int submit_wq_idx; -#endif /** @submission_state.enabled: submission is 
enabled */ bool enabled; /** @submission_state.fini_wq: submit fini wait queue */ diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7cf2160fe040..33eb039053e4 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -123,7 +123,7 @@ int xe_pm_suspend(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); - xe_display_pm_suspend(xe, false); + xe_display_pm_suspend(xe); /* FIXME: Super racey... */ err = xe_bo_evict_all(xe); @@ -133,7 +133,7 @@ int xe_pm_suspend(struct xe_device *xe) for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); if (err) { - xe_display_pm_resume(xe, false); + xe_display_pm_resume(xe); goto err; } } @@ -187,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_resume(gt); - xe_display_pm_resume(xe, false); + xe_display_pm_resume(xe); err = xe_bo_restore_user(xe); if (err) diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 28d9bb3b825d..848da8e68c7a 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -161,7 +161,11 @@ query_engine_cycles(struct xe_device *xe, cpu_clock); xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - resp.width = 36; + + if (GRAPHICS_VER(xe) >= 20) + resp.width = 64; + else + resp.width = 36; /* Only write to the output fields of user query */ if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp)) diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index bb3c2a830362..2e72c06fd40d 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -54,11 +54,12 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, { struct xe_user_fence *ufence; u64 __user *ptr = u64_to_user_ptr(addr); + u64 __maybe_unused prefetch_val; - if (!access_ok(ptr, sizeof(*ptr))) + if (get_user(prefetch_val, ptr)) return ERR_PTR(-EFAULT); - ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); + ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (!ufence) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index ce9dca4d4e87..c99380271de6 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3199,10 +3199,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, &fence[fence_id], vma); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + if (ret) goto wait; - } ++fence_id; if (!tile->media_gt) @@ -3214,10 +3212,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) ret = xe_gt_tlb_invalidation_vma(tile->media_gt, &fence[fence_id], vma); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + if (ret) goto wait; - } ++fence_id; } } diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index d424992514a4..353936a0f877 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -710,6 +710,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_PARTIAL_AUTOSTRIP | DIS_AUTOSTRIP)) }, + { XE_RTP_NAME("15016589081"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) + }, /* Xe2_HPG */ { XE_RTP_NAME("15010599737"), diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index d46fa8374980..5b4264ea38bd 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void 
*data, } if (!timeout) { + LNL_FLUSH_WORKQUEUE(xe->ordered_wq); + err = do_compare(addr, args->value, args->mask, + args->op); + if (err <= 0) { + drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n"); + break; + } err = -ETIME; break; } @@ -169,9 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, args->timeout = 0; } - if (!timeout && !(err < 0)) - err = -ETIME; - if (q) xe_exec_queue_put(q); diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c index 955c971c528d..a6f6779662a3 100644 --- a/drivers/gpu/host1x/context.c +++ b/drivers/gpu/host1x/context.c @@ -58,6 +58,7 @@ int host1x_memory_context_list_init(struct host1x *host1x) ctx->dev.parent = host1x->dev; ctx->dev.release = host1x_memory_context_release; + ctx->dev.dma_parms = &ctx->dma_parms; dma_set_max_seg_size(&ctx->dev, UINT_MAX); err = device_add(&ctx->dev); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index b62e4f0e8130..e98528777faa 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -625,12 +625,6 @@ static int host1x_probe(struct platform_device *pdev) goto free_contexts; } - err = host1x_intr_init(host); - if (err) { - dev_err(&pdev->dev, "failed to initialize interrupts\n"); - goto deinit_syncpt; - } - pm_runtime_enable(&pdev->dev); err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); @@ -642,6 +636,12 @@ static int host1x_probe(struct platform_device *pdev) if (err) goto pm_disable; + err = host1x_intr_init(host); + if (err) { + dev_err(&pdev->dev, "failed to initialize interrupts\n"); + goto pm_put; + } + host1x_debug_init(host); err = host1x_register(host); @@ -658,13 +658,11 @@ static int host1x_probe(struct platform_device *pdev) host1x_unregister(host); deinit_debugfs: host1x_debug_deinit(host); - + host1x_intr_deinit(host); +pm_put: pm_runtime_put_sync_suspend(&pdev->dev); pm_disable: pm_runtime_disable(&pdev->dev); - - host1x_intr_deinit(host); -deinit_syncpt: host1x_syncpt_deinit(host); free_contexts: host1x_memory_context_list_free(&host->context_list); diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index 4b59687ff5d8..3438d392920f 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -236,9 +236,9 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) cl_data->in_data = in_data; for (i = 0; i < cl_data->num_hid_devices; i++) { - in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8, - &cl_data->sensor_dma_addr[i], - GFP_KERNEL); + in_data->sensor_virt_addr[i] = dmam_alloc_coherent(dev, sizeof(int) * 8, + &cl_data->sensor_dma_addr[i], + GFP_KERNEL); if (!in_data->sensor_virt_addr[i]) { rc = -ENOMEM; goto cleanup; @@ -331,7 +331,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata) { struct amdtp_cl_data *cl_data = privdata->cl_data; - struct amd_input_data *in_data = cl_data->in_data; int i, status; for (i = 0; i < cl_data->num_hid_devices; i++) { @@ -351,12 +350,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata) cancel_delayed_work_sync(&cl_data->work_buffer); amdtp_hid_remove(cl_data); - for (i = 0; i < cl_data->num_hid_devices; i++) { - if (in_data->sensor_virt_addr[i]) { - dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int), - in_data->sensor_virt_addr[i], - cl_data->sensor_dma_addr[i]); - } - } return 0; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 612ee6ddfc8d..582fd234eec7 
100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1875,7 +1875,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) u32 len = hid_report_len(report) + 7; - return kmalloc(len, flags); + return kzalloc(len, flags); } EXPORT_SYMBOL_GPL(hid_alloc_report_buf); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 86820a3d9766..92cff3f2658c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -509,6 +509,7 @@ #define I2C_DEVICE_ID_GOODIX_01E8 0x01e8 #define I2C_DEVICE_ID_GOODIX_01E9 0x01e9 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 +#define I2C_DEVICE_ID_GOODIX_0D42 0x0d42 #define USB_VENDOR_ID_GOODTOUCH 0x1aad #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f @@ -868,6 +869,7 @@ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a +#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548 #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 @@ -1036,6 +1038,8 @@ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES 0x430c +#define USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES 0x431e #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 3b0c779ce8f7..f66194fde891 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev, return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); default: return 0; @@ -583,6 +584,7 @@ static ssize_t attr_fn_lock_store(struct device *dev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); if (ret) return ret; @@ -776,6 +778,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, return lenovo_event_cptkbd(hdev, field, usage, value); case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: return 0; @@ -1056,6 +1059,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); break; } @@ -1286,6 +1290,7 @@ static int lenovo_probe(struct hid_device *hdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_probe_tp10ubkbd(hdev); break; default: @@ -1372,6 +1377,7 @@ static void lenovo_remove(struct hid_device *hdev) break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: lenovo_remove_tp10ubkbd(hdev); break; } @@ -1421,6 +1427,8 @@ static const struct hid_device_id lenovo_devices[] = { */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, 
USB_DEVICE_ID_LENOVO_X1_TAB) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, { } }; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 638e36c6d0f1..e936019d21fe 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2026,6 +2026,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x3148) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ELAN, 0x32ae) }, + /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, @@ -2095,6 +2099,11 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 0x347d, 0x7853) }, + /* HONOR MagicBook Art 14 touchpad */ + { .driver_data = MT_CLS_VTL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 0x35cc, 0x0104) }, + /* Ilitek dual touch panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, @@ -2137,6 +2146,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 3d414ae194ac..25cfd964dc25 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -38,8 +38,10 @@ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) #define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) +#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1) #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ +#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */ struct plt_drv_data { unsigned long device_type; @@ -137,6 +139,21 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field, drv_data->last_volume_key_ts = cur_ts; } + if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) { + unsigned long prev_ts, cur_ts; + + /* Usages are filtered in plantronics_usages. */ + + if (!value) /* Handle key presses only. */ + return 0; + + prev_ts = drv_data->last_volume_key_ts; + cur_ts = jiffies; + if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT) + return 1; /* Ignore the followed opposite volume key. 
*/ + + drv_data->last_volume_key_ts = cur_ts; + } return 0; } @@ -210,6 +227,12 @@ static const struct hid_device_id plantronics_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES), .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES), + .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, + USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES), + .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index be5d342d5d13..43664a24176f 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -50,6 +50,7 @@ #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3) #define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4) #define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5) +#define I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME BIT(6) /* Command opcodes */ #define I2C_HID_OPCODE_RESET 0x01 @@ -140,6 +141,8 @@ static const struct i2c_hid_quirks { { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET | I2C_HID_QUIRK_BOGUS_IRQ }, + { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_0D42, + I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME }, { 0, 0 } }; @@ -981,6 +984,13 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid) return -ENXIO; } + /* On Goodix 27c6:0d42 wait extra time before device wakeup. + * It's not clear why but if we send wakeup too early, the device will + * never trigger input interrupts. + */ + if (ihid->quirks & I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME) + msleep(1500); + /* Instead of resetting device, simply powers the device on. This * solves "incomplete reports" on Raydium devices 2386:3118 and * 2386:4B33 and fixes various SIS touchscreens no longer sending diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index e157863a8b25..b3c3cfcd97fc 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -635,7 +635,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, const struct firmware *fw, const struct shim_fw_info fw_info) { - int rv; + int rv = 0; void *dma_buf; dma_addr_t dma_buf_phy; u32 fragment_offset, fragment_size, payload_max_size; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 59a13ad9371c..413606bdf476 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2567,6 +2567,8 @@ static void wacom_wac_pen_report(struct hid_device *hdev, /* Going into range select tool */ if (wacom_wac->hid_data.invert_state) wacom_wac->tool[0] = BTN_TOOL_RUBBER; + else if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN) + wacom_wac->tool[0] = BTN_TOOL_PEN; else if (wacom_wac->id[0]) wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]); else diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 65ea92529406..08a3c863f80a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -162,6 +162,7 @@ config SENSORS_ADM9240 tristate "Analog Devices ADM9240 and compatibles" depends on I2C select HWMON_VID + select REGMAP_I2C help If you say yes here you get support for Analog Devices ADM9240, Dallas DS1780, National Semiconductor LM81 sensor chips. 
@@ -223,6 +224,7 @@ config SENSORS_ADT7462 config SENSORS_ADT7470 tristate "Analog Devices ADT7470" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the Analog Devices ADT7470 temperature monitoring chips. @@ -999,6 +1001,7 @@ config SENSORS_LTC2990 config SENSORS_LTC2991 tristate "Analog Devices LTC2991" depends on I2C + select REGMAP_I2C help If you say yes here you get support for Analog Devices LTC2991 Octal I2C Voltage, Current, and Temperature Monitor. The LTC2991 @@ -1146,6 +1149,7 @@ config SENSORS_MAX1619 config SENSORS_MAX1668 tristate "Maxim MAX1668 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for MAX1668, MAX1989 and MAX1805 chips. @@ -1275,6 +1279,7 @@ config SENSORS_MAX31790 config SENSORS_MC34VR500 tristate "NXP MC34VR500 hardware monitoring driver" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the temperature and input voltage sensors of the NXP MC34VR500. @@ -2312,6 +2317,7 @@ config SENSORS_TMP464 config SENSORS_TMP513 tristate "Texas Instruments TMP513 and compatibles" depends on I2C + select REGMAP_I2C help If you say yes here you get support for Texas Instruments TMP512, and TMP513 temperature and power supply sensor chips. diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index ca466d12475a..5f2541c11fe9 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -1735,11 +1735,10 @@ static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode, static int adt7475_fan_pwm_config(struct i2c_client *client) { struct adt7475_data *data = i2c_get_clientdata(client); - struct fwnode_handle *child; struct adt7475_pwm_config cfg = {}; int ret; - device_for_each_child_node(&client->dev, child) { + device_for_each_child_node_scoped(&client->dev, child) { if (!fwnode_property_present(child, "pwms")) continue; diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c index ca2dff158925..96397ae6ff18 100644 --- a/drivers/hwmon/intel-m10-bmc-hwmon.c +++ b/drivers/hwmon/intel-m10-bmc-hwmon.c @@ -358,7 +358,7 @@ static const struct m10bmc_sdata n6000bmc_temp_tbl[] = { { 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" }, { 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" }, { 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" }, - { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" }, + { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "CVL Die Temperature" }, { 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" }, { 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" }, { 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" }, diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index a260cff750a5..c459dce496a6 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -417,7 +417,7 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info) return -ENODEV; if ((devid & TSE2004_DEVID_MASK) == TSE2004_DEVID && - (cap & 0x00e7) != 0x00e7) + (cap & 0x0062) != 0x0062) return -ENODEV; for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) { diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6b3ba7e5723a..bd9a6bcaaca4 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -62,19 +62,6 @@ config I2C_AMD756 This driver can also be built as a module. If so, the module will be called i2c-amd756. 
-config I2C_AMD756_S4882 - tristate "SMBus multiplexing on the Tyan S4882" - depends on I2C_AMD756 && X86 - help - Enabling this option will add specific SMBus support for the Tyan - S4882 motherboard. On this 4-CPU board, the SMBus is multiplexed - over 8 different channels, where the various memory module EEPROMs - and temperature sensors live. Saying yes here will give you access - to these in addition to the trunk. - - This driver can also be built as a module. If so, the module - will be called i2c-amd756-s4882. - config I2C_AMD8111 tristate "AMD 8111" depends on PCI && HAS_IOPORT @@ -95,6 +82,23 @@ config I2C_AMD_MP2 This driver can also be built as modules. If so, the modules will be called i2c-amd-mp2-pci and i2c-amd-mp2-plat. +config I2C_AMD_ASF + tristate "AMD ASF I2C Controller Support" + depends on I2C_PIIX4 + select I2C_SLAVE + help + This option enables support for the AMD ASF (Alert Standard Format) + I2C controller. The AMD ASF controller is an SMBus controller with + built-in ASF functionality, allowing it to issue generic SMBus + packets and communicate with the DASH controller using MCTP over + ASF. + + If you have an AMD system with ASF support and want to enable this + functionality, say Y or M here. If unsure, say N. + + To compile this driver as a module, choose M here: the module will + be called i2c_amd_asf_plat. + config I2C_HIX5HD2 tristate "Hix5hd2 high-speed I2C driver" depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST @@ -160,6 +164,7 @@ config I2C_I801 Meteor Lake (SOC and PCH) Birch Stream (SOC) Arrow Lake (SOC) + Panther Lake (SOC) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -250,19 +255,6 @@ config I2C_NFORCE2 This driver can also be built as a module. If so, the module will be called i2c-nforce2. -config I2C_NFORCE2_S4985 - tristate "SMBus multiplexing on the Tyan S4985" - depends on I2C_NFORCE2 && X86 - help - Enabling this option will add specific SMBus support for the Tyan - S4985 motherboard. On this 4-CPU board, the SMBus is multiplexed - over 4 different channels, where the various memory module EEPROMs - live. Saying yes here will give you access to these in addition - to the trunk. - - This driver can also be built as a module. If so, the module - will be called i2c-nforce2-s4985. - config I2C_NVIDIA_GPU tristate "NVIDIA GPU I2C controller" depends on PCI @@ -439,7 +431,7 @@ config I2C_AT91 are facing this situation, use the i2c-gpio driver. config I2C_AT91_SLAVE_EXPERIMENTAL - tristate "Microchip AT91 I2C experimental slave mode" + bool "Microchip AT91 I2C experimental slave mode" depends on I2C_AT91 select I2C_SLAVE help @@ -448,7 +440,7 @@ config I2C_AT91_SLAVE_EXPERIMENTAL been tested in a heavy way, help wanted. There are known bugs: - It can hang, on a SAMA5D4, after several transfers. - - There are some mismtaches with a SAMA5D4 as slave and a SAMA5D2 as + - There are some mismatches with a SAMA5D4 as slave and a SAMA5D2 as master. config I2C_AU1550 @@ -741,13 +733,14 @@ config I2C_IMG config I2C_IMX tristate "IMX I2C interface" - depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST + depends on ARCH_MXC || ARCH_LAYERSCAPE || ARCH_S32 || COLDFIRE \ + || COMPILE_TEST select I2C_SLAVE help Say Y here if you want to use the IIC bus controller on - the Freescale i.MX/MXC, Layerscape or ColdFire processors. + the Freescale i.MX/MXC/S32G, Layerscape or ColdFire processors. - This driver can also be built as a module. If so, the module + This driver can also be built as a module. 
If so, the module will be called i2c-imx. config I2C_IMX_LPI2C @@ -1060,6 +1053,16 @@ config I2C_RK3X This driver can also be built as a module. If so, the module will be called i2c-rk3x. +config I2C_RTL9300 + tristate "Realtek RTL9300 I2C controller" + depends on MACH_REALTEK_RTL || COMPILE_TEST + help + Say Y here to include support for the I2C controller in Realtek + RTL9300 SoCs. + + This driver can also be built as a module. If so, the module will + be called i2c-rtl9300. + config I2C_RZV2M tristate "Renesas RZ/V2M adapter" depends on ARCH_RENESAS || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index ecc07c50f2a0..49154c7bceee 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -14,14 +14,12 @@ obj-$(CONFIG_I2C_ALI1535) += i2c-ali1535.o obj-$(CONFIG_I2C_ALI1563) += i2c-ali1563.o obj-$(CONFIG_I2C_ALI15X3) += i2c-ali15x3.o obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o -obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o obj-$(CONFIG_I2C_CHT_WC) += i2c-cht-wc.o obj-$(CONFIG_I2C_I801) += i2c-i801.o obj-$(CONFIG_I2C_ISCH) += i2c-isch.o obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o -obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o @@ -38,12 +36,11 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o +obj-$(CONFIG_I2C_AMD_ASF) += i2c-amd-asf-plat.o obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o -i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o -ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y) - i2c-at91-objs += i2c-at91-slave.o -endif +i2c-at91-y := i2c-at91-core.o i2c-at91-master.o +i2c-at91-$(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL) += i2c-at91-slave.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o @@ -103,6 +100,7 @@ obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_RIIC) += i2c-riic.o obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o +obj-$(CONFIG_I2C_RTL9300) += i2c-rtl9300.o obj-$(CONFIG_I2C_RZV2M) += i2c-rzv2m.o obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o @@ -111,8 +109,8 @@ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o -i2c-stm32f7-drv-objs := i2c-stm32f7.o i2c-stm32.o obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7-drv.o +i2c-stm32f7-drv-y := i2c-stm32f7.o i2c-stm32.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_SYNQUACER) += i2c-synquacer.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o @@ -121,10 +119,10 @@ obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o -i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o -i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o +i2c-octeon-y := i2c-octeon-core.o i2c-octeon-platdrv.o obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o +i2c-thunderx-y := i2c-octeon-core.o i2c-thunderx-pcidrv.o obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o 
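/*
 * [Editorial aside, not part of the patch] Many hunks that follow rename
 * ".remove_new" to ".remove" in platform drivers; as the converted drivers
 * here show, the platform core targeted by this series takes a
 * void-returning remove callback under either name, so the rename is
 * mechanical. A hypothetical skeleton of the resulting shape ("foo" names
 * are illustrative only):
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* teardown only; a remove callback cannot report failure */
}

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,	/* formerly spelled .remove_new */
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);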
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index f4dde08a3b92..2da73173ce24 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -482,7 +482,7 @@ MODULE_DEVICE_TABLE(of, altr_i2c_of_match); static struct platform_driver altr_i2c_driver = { .probe = altr_i2c_probe, - .remove_new = altr_i2c_remove, + .remove = altr_i2c_remove, .driver = { .name = "altera-i2c", .of_match_table = altr_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c new file mode 100644 index 000000000000..ba47df5370c7 --- /dev/null +++ b/drivers/i2c/busses/i2c-amd-asf-plat.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD Alert Standard Format Platform Driver + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Authors: Shyam Sundar S K + * Sanket Goswami + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-piix4.h" + +/* ASF register bits */ +#define ASF_SLV_LISTN 0 +#define ASF_SLV_INTR 1 +#define ASF_SLV_RST 4 +#define ASF_PEC_SP 5 +#define ASF_DATA_EN 7 +#define ASF_MSTR_EN 16 +#define ASF_CLK_EN 17 + +/* ASF address offsets */ +#define ASFINDEX (0x07 + piix4_smba) +#define ASFLISADDR (0x09 + piix4_smba) +#define ASFSTA (0x0A + piix4_smba) +#define ASFSLVSTA (0x0D + piix4_smba) +#define ASFDATARWPTR (0x11 + piix4_smba) +#define ASFSETDATARDPTR (0x12 + piix4_smba) +#define ASFDATABNKSEL (0x13 + piix4_smba) +#define ASFSLVEN (0x15 + piix4_smba) + +#define ASF_BLOCK_MAX_BYTES 72 +#define ASF_ERROR_STATUS GENMASK(3, 1) + +struct amd_asf_dev { + struct i2c_adapter adap; + void __iomem *eoi_base; + struct i2c_client *target; + struct delayed_work work_buf; + struct sb800_mmio_cfg mmio_cfg; + struct resource *port_addr; +}; + +static void amd_asf_process_target(struct work_struct *work) +{ + struct amd_asf_dev *dev = container_of(work, struct amd_asf_dev, work_buf.work); + unsigned short piix4_smba = dev->port_addr->start; + u8 data[ASF_BLOCK_MAX_BYTES]; + u8 bank, reg, cmd; + u8 len = 0, idx, val; + + /* Read target status register */ + reg = inb_p(ASFSLVSTA); + + /* Check if no error bits are set in target status register */ + if (reg & ASF_ERROR_STATUS) { + /* Set bank as full */ + cmd = 0; + reg |= GENMASK(3, 2); + outb_p(reg, ASFDATABNKSEL); + } else { + /* Read data bank */ + reg = inb_p(ASFDATABNKSEL); + bank = (reg & BIT(3)) ? 1 : 0; + + /* Set read data bank */ + if (bank) { + reg |= BIT(4); + reg &= ~BIT(3); + } else { + reg &= ~BIT(4); + reg &= ~BIT(2); + } + + /* Read command register */ + outb_p(reg, ASFDATABNKSEL); + cmd = inb_p(ASFINDEX); + len = inb_p(ASFDATARWPTR); + for (idx = 0; idx < len; idx++) + data[idx] = inb_p(ASFINDEX); + + /* Clear data bank status */ + if (bank) { + reg |= BIT(3); + outb_p(reg, ASFDATABNKSEL); + } else { + reg |= BIT(2); + outb_p(reg, ASFDATABNKSEL); + } + } + + outb_p(0, ASFSETDATARDPTR); + if (cmd & BIT(0)) + return; + + /* + * Although i2c_slave_event() returns an appropriate error code, we + * don't check it here because we're operating in the workqueue context. 
+ */ + i2c_slave_event(dev->target, I2C_SLAVE_WRITE_REQUESTED, &val); + for (idx = 0; idx < len; idx++) { + val = data[idx]; + i2c_slave_event(dev->target, I2C_SLAVE_WRITE_RECEIVED, &val); + } + i2c_slave_event(dev->target, I2C_SLAVE_STOP, &val); +} + +static void amd_asf_update_ioport_target(unsigned short piix4_smba, u8 bit, + unsigned long offset, bool set) +{ + unsigned long reg; + + reg = inb_p(offset); + __assign_bit(bit, ®, set); + outb_p(reg, offset); +} + +static void amd_asf_update_mmio_target(struct amd_asf_dev *dev, u8 bit, bool set) +{ + unsigned long reg; + + reg = ioread32(dev->mmio_cfg.addr); + __assign_bit(bit, ®, set); + iowrite32(reg, dev->mmio_cfg.addr); +} + +static void amd_asf_setup_target(struct amd_asf_dev *dev) +{ + unsigned short piix4_smba = dev->port_addr->start; + + /* Reset both host and target before setting up */ + outb_p(0, SMBHSTSTS); + outb_p(0, ASFSLVSTA); + outb_p(0, ASFSTA); + + /* Update target address */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, true); + /* Enable target and set the clock */ + amd_asf_update_mmio_target(dev, ASF_MSTR_EN, false); + amd_asf_update_mmio_target(dev, ASF_CLK_EN, true); + /* Enable target interrupt */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, true); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, false); + /* Enable PEC and PEC append */ + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); + amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); +} + +static int amd_asf_access(struct i2c_adapter *adap, u16 addr, u8 command, u8 *data) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(adap); + unsigned short piix4_smba = dev->port_addr->start; + u8 i, len; + + outb_p((addr << 1), SMBHSTADD); + outb_p(command, SMBHSTCMD); + len = data[0]; + if (len == 0 || len > ASF_BLOCK_MAX_BYTES) + return -EINVAL; + + outb_p(len, SMBHSTDAT0); + /* Reset SMBBLKDAT */ + inb_p(SMBHSTCNT); + for (i = 1; i <= len; i++) + outb_p(data[i], SMBBLKDAT); + + outb_p(PIIX4_BLOCK_DATA, SMBHSTCNT); + /* Enable PEC and PEC append */ + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true); + amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true); + + return piix4_transaction(adap, piix4_smba); +} + +static int amd_asf_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(adap); + unsigned short piix4_smba = dev->port_addr->start; + u8 asf_data[ASF_BLOCK_MAX_BYTES]; + struct i2c_msg *dev_msgs = msgs; + u8 prev_port; + int ret; + + if (msgs->flags & I2C_M_RD) { + dev_err(&adap->dev, "ASF: Read not supported\n"); + return -EOPNOTSUPP; + } + + /* Exclude the receive header and PEC */ + if (msgs->len > ASF_BLOCK_MAX_BYTES - 3) { + dev_warn(&adap->dev, "ASF: max message length exceeded\n"); + return -EOPNOTSUPP; + } + + asf_data[0] = dev_msgs->len; + memcpy(asf_data + 1, dev_msgs[0].buf, dev_msgs->len); + + ret = piix4_sb800_region_request(&adap->dev, &dev->mmio_cfg); + if (ret) + return ret; + + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, false); + /* Clear ASF target status */ + outb_p(0, ASFSLVSTA); + + /* Enable ASF SMBus controller function */ + amd_asf_update_mmio_target(dev, ASF_MSTR_EN, true); + prev_port = piix4_sb800_port_sel(0, &dev->mmio_cfg); + ret = amd_asf_access(adap, msgs->addr, msgs[0].buf[0], asf_data); + piix4_sb800_port_sel(prev_port, &dev->mmio_cfg); + 
amd_asf_setup_target(dev); + piix4_sb800_region_release(&adap->dev, &dev->mmio_cfg); + return ret; +} + +static int amd_asf_reg_target(struct i2c_client *target) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); + unsigned short piix4_smba = dev->port_addr->start; + int ret; + u8 reg; + + if (dev->target) + return -EBUSY; + + ret = piix4_sb800_region_request(&target->dev, &dev->mmio_cfg); + if (ret) + return ret; + + reg = (target->addr << 1) | I2C_M_RD; + outb_p(reg, ASFLISADDR); + + amd_asf_setup_target(dev); + dev->target = target; + amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, ASFDATABNKSEL, false); + piix4_sb800_region_release(&target->dev, &dev->mmio_cfg); + + return 0; +} + +static int amd_asf_unreg_target(struct i2c_client *target) +{ + struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter); + unsigned short piix4_smba = dev->port_addr->start; + + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, false); + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true); + dev->target = NULL; + + return 0; +} + +static u32 amd_asf_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SLAVE; +} + +static const struct i2c_algorithm amd_asf_smbus_algorithm = { + .master_xfer = amd_asf_xfer, + .reg_slave = amd_asf_reg_target, + .unreg_slave = amd_asf_unreg_target, + .functionality = amd_asf_func, +}; + +static irqreturn_t amd_asf_irq_handler(int irq, void *ptr) +{ + struct amd_asf_dev *dev = ptr; + unsigned short piix4_smba = dev->port_addr->start; + u8 target_int = inb_p(ASFSTA); + + if (target_int & BIT(6)) { + /* Target Interrupt */ + outb_p(target_int | BIT(6), ASFSTA); + schedule_delayed_work(&dev->work_buf, HZ); + } else { + /* Controller Interrupt */ + amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true); + } + + return IRQ_HANDLED; +} + +static int amd_asf_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct amd_asf_dev *asf_dev; + struct resource *eoi_addr; + int ret, irq; + + asf_dev = devm_kzalloc(dev, sizeof(*asf_dev), GFP_KERNEL); + if (!asf_dev) + return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); + + asf_dev->mmio_cfg.use_mmio = true; + asf_dev->port_addr = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!asf_dev->port_addr) + return dev_err_probe(dev, -EINVAL, "missing IO resources\n"); + + /* + * The resource obtained via ACPI might not belong to the ASF device address space. Instead, + * it could be within other IP blocks of the ASIC, which are crucial for generating + * subsequent interrupts. Therefore, we avoid using devm_platform_ioremap_resource() and + * use platform_get_resource() and devm_ioremap() separately to prevent any address space + * conflicts. 
+ */ + eoi_addr = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!eoi_addr) + return dev_err_probe(dev, -EINVAL, "missing MEM resources\n"); + + asf_dev->eoi_base = devm_ioremap(dev, eoi_addr->start, resource_size(eoi_addr)); + if (!asf_dev->eoi_base) + return dev_err_probe(dev, -EBUSY, "failed mapping IO region\n"); + + ret = devm_delayed_work_autocancel(dev, &asf_dev->work_buf, amd_asf_process_target); + if (ret) + return dev_err_probe(dev, ret, "failed to create work queue\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "missing IRQ resources\n"); + + ret = devm_request_irq(dev, irq, amd_asf_irq_handler, IRQF_SHARED, "amd_asf", asf_dev); + if (ret) + return dev_err_probe(dev, ret, "Unable to request irq: %d for use\n", irq); + + asf_dev->adap.owner = THIS_MODULE; + asf_dev->adap.algo = &amd_asf_smbus_algorithm; + asf_dev->adap.dev.parent = dev; + + i2c_set_adapdata(&asf_dev->adap, asf_dev); + snprintf(asf_dev->adap.name, sizeof(asf_dev->adap.name), "AMD ASF adapter"); + + return devm_i2c_add_adapter(dev, &asf_dev->adap); +} + +static const struct acpi_device_id amd_asf_acpi_ids[] = { + { "AMDI001A" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, amd_asf_acpi_ids); + +static struct platform_driver amd_asf_driver = { + .driver = { + .name = "i2c-amd-asf", + .acpi_match_table = amd_asf_acpi_ids, + }, + .probe = amd_asf_probe, +}; +module_platform_driver(amd_asf_driver); + +MODULE_IMPORT_NS(PIIX4_SMBUS); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD Alert Standard Format Driver"); diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c index 6f0ef587e76d..d9dd0e475d1a 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-plat.c +++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c @@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match); static struct platform_driver i2c_amd_plat_driver = { .probe = i2c_amd_probe, - .remove_new = i2c_amd_remove, + .remove = i2c_amd_remove, .driver = { .name = "i2c_amd_mp2", .acpi_match_table = ACPI_PTR(i2c_amd_acpi_match), diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c deleted file mode 100644 index 063274388a75..000000000000 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard - * - * Copyright (C) 2004, 2008 Jean Delvare - */ - -/* - * We select the channels by sending commands to the Philips - * PCA9556 chip at I2C address 0x18. The main adapter is used for - * the non-multiplexed part of the bus, and 4 virtual adapters - * are defined for the multiplexed addresses: 0x50-0x53 (memory - * module EEPROM) located on channels 1-4, and 0x4c (LM63) - * located on multiplexed channels 0 and 5-7. 
We define one - * virtual adapter per CPU, which corresponds to two multiplexed - * channels: - * CPU0: virtual adapter 1, channels 1 and 0 - * CPU1: virtual adapter 2, channels 2 and 5 - * CPU2: virtual adapter 3, channels 3 and 6 - * CPU3: virtual adapter 4, channels 4 and 7 - */ - -#include -#include -#include -#include -#include -#include - -extern struct i2c_adapter amd756_smbus; - -static struct i2c_adapter *s4882_adapter; -static struct i2c_algorithm *s4882_algo; - -/* Wrapper access functions for multiplexed SMBus */ -static DEFINE_MUTEX(amd756_lock); - -static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - int error; - - /* We exclude the multiplexed addresses */ - if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 - || addr == 0x18) - return -ENXIO; - - mutex_lock(&amd756_lock); - - error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - - mutex_unlock(&amd756_lock); - - return error; -} - -/* We remember the last used channels combination so as to only switch - channels when it is really needed. This greatly reduces the SMBus - overhead, but also assumes that nobody will be writing to the PCA9556 - in our back. */ -static u8 last_channels; - -static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data, - u8 channels) -{ - int error; - - /* We exclude the non-multiplexed addresses */ - if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) - return -ENXIO; - - mutex_lock(&amd756_lock); - - if (last_channels != channels) { - union i2c_smbus_data mplxdata; - mplxdata.byte = channels; - - error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0, - I2C_SMBUS_WRITE, 0x01, - I2C_SMBUS_BYTE_DATA, - &mplxdata); - if (error) - goto UNLOCK; - last_channels = channels; - } - error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - -UNLOCK: - mutex_unlock(&amd756_lock); - return error; -} - -static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU0: channels 1 and 0 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x03); -} - -static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU1: channels 2 and 5 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x24); -} - -static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU2: channels 3 and 6 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x48); -} - -static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data * data) -{ - /* CPU3: channels 4 and 7 enabled */ - return amd756_access_channel(adap, addr, flags, read_write, command, - size, data, 0x90); -} - -static int __init amd756_s4882_init(void) -{ - int i, error; - union i2c_smbus_data ioconfig; - - if (!amd756_smbus.dev.parent) - return -ENODEV; - - /* Configure the PCA9556 multiplexer */ 
- ioconfig.byte = 0x00; /* All I/O to output mode */ - error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR0; - } - - /* Unregister physical bus */ - i2c_del_adapter(&amd756_smbus); - - printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); - /* Define the 5 virtual adapters and algorithms structures */ - if (!(s4882_adapter = kcalloc(5, sizeof(struct i2c_adapter), - GFP_KERNEL))) { - error = -ENOMEM; - goto ERROR1; - } - if (!(s4882_algo = kcalloc(5, sizeof(struct i2c_algorithm), - GFP_KERNEL))) { - error = -ENOMEM; - goto ERROR2; - } - - /* Fill in the new structures */ - s4882_algo[0] = *(amd756_smbus.algo); - s4882_algo[0].smbus_xfer = amd756_access_virt0; - s4882_adapter[0] = amd756_smbus; - s4882_adapter[0].algo = s4882_algo; - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent; - for (i = 1; i < 5; i++) { - s4882_algo[i] = *(amd756_smbus.algo); - s4882_adapter[i] = amd756_smbus; - snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name), - "SMBus 8111 adapter (CPU%d)", i-1); - s4882_adapter[i].algo = s4882_algo+i; - s4882_adapter[i].dev.parent = amd756_smbus.dev.parent; - } - s4882_algo[1].smbus_xfer = amd756_access_virt1; - s4882_algo[2].smbus_xfer = amd756_access_virt2; - s4882_algo[3].smbus_xfer = amd756_access_virt3; - s4882_algo[4].smbus_xfer = amd756_access_virt4; - - /* Register virtual adapters */ - for (i = 0; i < 5; i++) { - error = i2c_add_adapter(s4882_adapter+i); - if (error) { - printk(KERN_ERR "i2c-amd756-s4882: " - "Virtual adapter %d registration " - "failed, module not inserted\n", i); - for (i--; i >= 0; i--) - i2c_del_adapter(s4882_adapter+i); - goto ERROR3; - } - } - - return 0; - -ERROR3: - kfree(s4882_algo); - s4882_algo = NULL; -ERROR2: - kfree(s4882_adapter); - s4882_adapter = NULL; -ERROR1: - /* Restore physical bus */ - i2c_add_adapter(&amd756_smbus); -ERROR0: - return error; -} - -static void __exit amd756_s4882_exit(void) -{ - if (s4882_adapter) { - int i; - - for (i = 0; i < 5; i++) - i2c_del_adapter(s4882_adapter+i); - kfree(s4882_adapter); - s4882_adapter = NULL; - } - kfree(s4882_algo); - s4882_algo = NULL; - - /* Restore physical bus */ - if (i2c_add_adapter(&amd756_smbus)) - printk(KERN_ERR "i2c-amd756-s4882: " - "Physical bus restoration failed\n"); -} - -MODULE_AUTHOR("Jean Delvare "); -MODULE_DESCRIPTION("S4882 SMBus multiplexing"); -MODULE_LICENSE("GPL"); - -module_init(amd756_s4882_init); -module_exit(amd756_s4882_exit); diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index 208310db906d..fa0d5a2c3732 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c @@ -283,7 +283,7 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = amd756_func, }; -struct i2c_adapter amd756_smbus = { +static struct i2c_adapter amd756_smbus = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON, .algo = &smbus_algorithm, @@ -398,5 +398,3 @@ module_pci_driver(amd756_driver); MODULE_AUTHOR("Merlin Hughes "); MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver"); MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(amd756_smbus); diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index cc5a26637fd5..1550d3d552ae 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -1102,7 +1102,7 @@ static void aspeed_i2c_remove_bus(struct platform_device *pdev) static 
struct platform_driver aspeed_i2c_bus_driver = { .probe = aspeed_i2c_probe_bus, - .remove_new = aspeed_i2c_remove_bus, + .remove = aspeed_i2c_remove_bus, .driver = { .name = "aspeed-i2c-bus", .of_match_table = aspeed_i2c_bus_of_table, diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index dc52b3530725..edc047e3e535 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -330,7 +330,7 @@ static const struct dev_pm_ops __maybe_unused at91_twi_pm = { static struct platform_driver at91_twi_driver = { .probe = at91_twi_probe, - .remove_new = at91_twi_remove, + .remove = at91_twi_remove, .id_table = at91_twi_devtypes, .driver = { .name = "at91_i2c", diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c index 902e420e761e..b78b38ddac46 100644 --- a/drivers/i2c/busses/i2c-au1550.c +++ b/drivers/i2c/busses/i2c-au1550.c @@ -368,7 +368,7 @@ static struct platform_driver au1xpsc_smbus_driver = { .pm = pm_sleep_ptr(&i2c_au1550_pmops), }, .probe = i2c_au1550_probe, - .remove_new = i2c_au1550_remove, + .remove = i2c_au1550_remove, }; module_platform_driver(au1xpsc_smbus_driver); diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c index a66f7f67b3b8..48916cf45ff7 100644 --- a/drivers/i2c/busses/i2c-axxia.c +++ b/drivers/i2c/busses/i2c-axxia.c @@ -824,7 +824,7 @@ MODULE_DEVICE_TABLE(of, axxia_i2c_of_match); static struct platform_driver axxia_i2c_driver = { .probe = axxia_i2c_probe, - .remove_new = axxia_i2c_remove, + .remove = axxia_i2c_remove, .driver = { .name = "axxia-i2c", .of_match_table = axxia_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 133d02899c6b..15b632a146e1 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -1264,7 +1264,7 @@ static struct platform_driver bcm_iproc_i2c_driver = { .pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops), }, .probe = bcm_iproc_i2c_probe, - .remove_new = bcm_iproc_i2c_remove, + .remove = bcm_iproc_i2c_remove, }; module_platform_driver(bcm_iproc_i2c_driver); diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index eb5c46a8f824..340fe1305dd9 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -877,7 +877,7 @@ static struct platform_driver bcm_kona_i2c_driver = { .of_match_table = bcm_kona_i2c_of_match, }, .probe = bcm_kona_i2c_probe, - .remove_new = bcm_kona_i2c_remove, + .remove = bcm_kona_i2c_remove, }; module_platform_driver(bcm_kona_i2c_driver); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ae42e37052a8..8554e790f8e3 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match); static struct platform_driver bcm2835_i2c_driver = { .probe = bcm2835_i2c_probe, - .remove_new = bcm2835_i2c_remove, + .remove = bcm2835_i2c_remove, .driver = { .name = "i2c-bcm2835", .of_match_table = bcm2835_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 83b85011e377..00f1a046e985 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -744,7 +744,7 @@ static struct platform_driver brcmstb_i2c_driver = { .pm = pm_sleep_ptr(&brcmstb_i2c_pm), }, .probe = brcmstb_i2c_probe, - .remove_new = brcmstb_i2c_remove, + .remove = brcmstb_i2c_remove, }; module_platform_driver(brcmstb_i2c_driver); diff --git 
a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 87b9ba95b2e1..b64026fbca66 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -129,6 +129,7 @@ #define CDNS_I2C_BROKEN_HOLD_BIT BIT(0) #define CDNS_I2C_POLL_US 100000 +#define CDNS_I2C_POLL_US_ATOMIC 10 #define CDNS_I2C_TIMEOUT_US 500000 #define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) @@ -189,6 +190,8 @@ enum cdns_i2c_slave_state { * @slave_state: I2C Slave state(idle/read/write). * @fifo_depth: The depth of the transfer FIFO * @transfer_size: The maximum number of bytes in one transfer + * @atomic: Mode of transfer + * @err_status_atomic: Error status in atomic mode */ struct cdns_i2c { struct device *dev; @@ -219,6 +222,8 @@ struct cdns_i2c { #endif u32 fifo_depth; unsigned int transfer_size; + bool atomic; + int err_status_atomic; }; struct cdns_platform_data { @@ -228,6 +233,66 @@ struct cdns_platform_data { #define to_cdns_i2c(_nb) container_of(_nb, struct cdns_i2c, \ clk_rate_change_nb) +/** + * cdns_i2c_init - Controller initialisation + * @id: Device private data structure + * + * Initialise the i2c controller. + * + */ +static void cdns_i2c_init(struct cdns_i2c *id) +{ + cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); + /* + * Cadence I2C controller has a bug wherein it generates + * invalid read transaction after HW timeout in master receiver mode. + * HW timeout is not used by this driver and the interrupt is disabled. + * But the feature itself cannot be disabled. Hence maximum value + * is written to this register to reduce the chances of error. + */ + cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); +} + +/** + * cdns_i2c_runtime_suspend - Runtime suspend method for the driver + * @dev: Address of the platform_device structure + * + * Put the driver into low power mode. + * + * Return: 0 always + */ +static int cdns_i2c_runtime_suspend(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + + clk_disable(xi2c->clk); + + return 0; +} + +/** + * cdns_i2c_runtime_resume - Runtime resume + * @dev: Address of the platform_device structure + * + * Runtime resume callback. + * + * Return: 0 on success and error value on error + */ +static int cdns_i2c_runtime_resume(struct device *dev) +{ + struct cdns_i2c *xi2c = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(xi2c->clk); + if (ret) { + dev_err(dev, "Cannot enable clock.\n"); + return ret; + } + cdns_i2c_init(xi2c); + + return 0; +} + /** * cdns_i2c_clear_bus_hold - Clear bus hold bit * @id: Pointer to driver data struct @@ -561,6 +626,89 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) return cdns_i2c_master_isr(ptr); } +static bool cdns_i2c_error_check(struct cdns_i2c *id) +{ + unsigned int isr_status; + + id->err_status = 0; + + isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status & CDNS_I2C_IXR_ERR_INTR_MASK, CDNS_I2C_ISR_OFFSET); + + id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK; + + return !!id->err_status; +} + +static void cdns_i2c_mrecv_atomic(struct cdns_i2c *id) +{ + while (id->recv_count > 0) { + bool updatetx; + + /* + * Check if transfer size register needs to be updated again for a + * large data receive operation. 
+ */ + updatetx = id->recv_count > id->curr_recv_count; + + while (id->curr_recv_count > 0) { + if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) { + *id->p_recv_buf = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + id->p_recv_buf++; + id->recv_count--; + id->curr_recv_count--; + + /* + * Clear the hold bit that was set for FIFO control, + * if the remaining RX data is less than or equal to + * the FIFO depth, unless a repeated start is selected. + */ + if (id->recv_count <= id->fifo_depth && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + } + if (cdns_i2c_error_check(id)) + return; + if (cdns_is_holdquirk(id, updatetx)) + break; + } + + /* + * The controller sends NACK to the slave/target when transfer size + * register reaches zero without considering the HOLD bit. + * This workaround is implemented for large data transfers to + * maintain transfer size non-zero while performing a large + * receive operation. + */ + if (cdns_is_holdquirk(id, updatetx)) { + /* wait while fifo is full */ + while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != + (id->curr_recv_count - id->fifo_depth)) + ; + + /* + * Check number of bytes to be received against maximum + * transfer size and update register accordingly. + */ + if ((id->recv_count - id->fifo_depth) > + id->transfer_size) { + cdns_i2c_writereg(id->transfer_size, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->transfer_size + + id->fifo_depth; + } else { + cdns_i2c_writereg(id->recv_count - + id->fifo_depth, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->recv_count; + } + } + } + + /* Clear hold (if not repeated start) */ + if (!id->recv_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); +} + /** * cdns_i2c_mrecv - Prepare and start a master receive operation * @id: pointer to the i2c device structure @@ -655,7 +803,34 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET); } - cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + if (!id->atomic) + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + else + cdns_i2c_mrecv_atomic(id); +} + +static void cdns_i2c_msend_rem_atomic(struct cdns_i2c *id) +{ + while (id->send_count) { + unsigned int avail_bytes; + unsigned int bytes_to_send; + + avail_bytes = id->fifo_depth - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + if (id->send_count > avail_bytes) + bytes_to_send = avail_bytes; + else + bytes_to_send = id->send_count; + + while (bytes_to_send--) { + cdns_i2c_writereg((*id->p_send_buf++), CDNS_I2C_DATA_OFFSET); + id->send_count--; + } + if (cdns_i2c_error_check(id)) + return; + } + + if (!id->send_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); } /** @@ -718,7 +893,10 @@ static void cdns_i2c_msend(struct cdns_i2c *id) cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, CDNS_I2C_ADDR_OFFSET); - cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + if (!id->atomic) + cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); + else if (id->send_count > 0) + cdns_i2c_msend_rem_atomic(id); } /** @@ -758,7 +936,8 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, id->p_msg = msg; id->err_status = 0; - reinit_completion(&id->xfer_done); + if (!id->atomic) + reinit_completion(&id->xfer_done); /* Check for the TEN Bit mode on each msg */ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); @@ -780,14 +959,31 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, /* Minimal time to execute this message */ 
msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); - /* Plus some wiggle room */ - msg_timeout += msecs_to_jiffies(500); + + /* + * Plus some wiggle room. + * For non-atomic contexts, 500 ms is added to the timeout. + * For atomic contexts, 2000 ms is added because transfers happen in polled + * mode, requiring more time to account for the polling overhead. + */ + if (!id->atomic) + msg_timeout += msecs_to_jiffies(500); + else + msg_timeout += msecs_to_jiffies(2000); if (msg_timeout < adap->timeout) msg_timeout = adap->timeout; - /* Wait for the signal of completion */ - time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); + if (!id->atomic) { + /* Wait for the signal of completion */ + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); + } else { + /* 0 is success, -ETIMEDOUT is error */ + time_left = !readl_poll_timeout_atomic(id->membase + CDNS_I2C_ISR_OFFSET, + reg, (reg & CDNS_I2C_IXR_COMP), + CDNS_I2C_POLL_US_ATOMIC, msg_timeout); + } + if (time_left == 0) { cdns_i2c_master_reset(adap); return -ETIMEDOUT; @@ -806,6 +1002,83 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, return 0; } +static int cdns_i2c_master_common_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int num) +{ + int ret, count; + u32 reg; + struct cdns_i2c *id = adap->algo_data; + bool hold_quirk; + + /* Check if the bus is free */ + if (!id->atomic) + ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, + reg, + !(reg & CDNS_I2C_SR_BA), + CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); + else + ret = readl_poll_timeout_atomic(id->membase + CDNS_I2C_SR_OFFSET, + reg, + !(reg & CDNS_I2C_SR_BA), + CDNS_I2C_POLL_US_ATOMIC, CDNS_I2C_TIMEOUT_US); + if (ret) { + ret = -EAGAIN; + if (id->adap.bus_recovery_info) + i2c_recover_bus(adap); + return ret; + } + + hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT); + /* + * Set the flag to one when multiple messages are to be + * processed with a repeated start. + */ + if (num > 1) { + /* + * This controller does not give completion interrupt after a + * master receive message if HOLD bit is set (repeated start), + * resulting in SW timeout. Hence, if a receive message is + * followed by any other message, an error is returned + * indicating that this sequence is not supported. 
+ */ + for (count = 0; (count < num - 1 && hold_quirk); count++) { + if (msgs[count].flags & I2C_M_RD) { + dev_warn(adap->dev.parent, + "Can't do repeated start after a receive message\n"); + return -EOPNOTSUPP; + } + } + id->bus_hold_flag = 1; + reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); + reg |= CDNS_I2C_CR_HOLD; + cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET); + } else { + id->bus_hold_flag = 0; + } + + /* Process the msg one by one */ + for (count = 0; count < num; count++, msgs++) { + if (count == (num - 1)) + id->bus_hold_flag = 0; + + ret = cdns_i2c_process_msg(id, msgs, adap); + if (ret) + return ret; + + /* Report the other error interrupts to application */ + if (id->err_status || id->err_status_atomic) { + cdns_i2c_master_reset(adap); + + if (id->err_status & CDNS_I2C_IXR_NACK) + return -ENXIO; + + return -EIO; + } + } + return 0; +} + /** * cdns_i2c_master_xfer - The main i2c transfer function * @adap: pointer to the i2c adapter driver instance @@ -819,10 +1092,8 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { - int ret, count; - u32 reg; + int ret; struct cdns_i2c *id = adap->algo_data; - bool hold_quirk; #if IS_ENABLED(CONFIG_I2C_SLAVE) bool change_role = false; #endif @@ -847,75 +1118,11 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } #endif - /* Check if the bus is free */ - - ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET, - reg, - !(reg & CDNS_I2C_SR_BA), - CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US); - if (ret) { - ret = -EAGAIN; - if (id->adap.bus_recovery_info) - i2c_recover_bus(adap); - goto out; - } - - hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT); - /* - * Set the flag to one when multiple messages are to be - * processed with a repeated start. - */ - if (num > 1) { - /* - * This controller does not give completion interrupt after a - * master receive message if HOLD bit is set (repeated start), - * resulting in SW timeout. Hence, if a receive message is - * followed by any other message, an error is returned - * indicating that this sequence is not supported. 
- */ - for (count = 0; (count < num - 1 && hold_quirk); count++) { - if (msgs[count].flags & I2C_M_RD) { - dev_warn(adap->dev.parent, - "Can't do repeated start after a receive message\n"); - ret = -EOPNOTSUPP; - goto out; - } - } - id->bus_hold_flag = 1; - reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); - reg |= CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET); - } else { - id->bus_hold_flag = 0; - } - - /* Process the msg one by one */ - for (count = 0; count < num; count++, msgs++) { - if (count == (num - 1)) - id->bus_hold_flag = 0; - - ret = cdns_i2c_process_msg(id, msgs, adap); - if (ret) - goto out; - - /* Report the other error interrupts to application */ - if (id->err_status) { - cdns_i2c_master_reset(adap); - - if (id->err_status & CDNS_I2C_IXR_NACK) { - ret = -ENXIO; - goto out; - } - ret = -EIO; - goto out; - } - } - - ret = num; - -out: - + ret = cdns_i2c_master_common_xfer(adap, msgs, num); + if (!ret) + ret = num; #if IS_ENABLED(CONFIG_I2C_SLAVE) +out: /* Switch i2c mode to slave */ if (change_role) cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id); @@ -926,6 +1133,41 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, return ret; } +/** + * cdns_i2c_master_xfer_atomic - The i2c transfer function in atomic mode + * @adap: pointer to the i2c adapter driver instance + * @msgs: pointer to the i2c message structure + * @num: the number of messages to transfer + * + * Return: number of msgs processed on success, negative error otherwise + */ +static int cdns_i2c_master_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + int ret; + struct cdns_i2c *id = adap->algo_data; + + ret = cdns_i2c_runtime_resume(id->dev); + if (ret) + return ret; + + if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) { + dev_warn(id->adap.dev.parent, + "Atomic xfer not supported for version 1.0\n"); + return 0; + } + + id->atomic = true; + ret = cdns_i2c_master_common_xfer(adap, msgs, num); + if (!ret) + ret = num; + + id->atomic = false; + cdns_i2c_runtime_suspend(id->dev); + + return ret; +} + /** * cdns_i2c_func - Returns the supported features of the I2C driver * @adap: pointer to the i2c adapter structure @@ -990,6 +1232,7 @@ static int cdns_unreg_slave(struct i2c_client *slave) static const struct i2c_algorithm cdns_i2c_algo = { .master_xfer = cdns_i2c_master_xfer, + .master_xfer_atomic = cdns_i2c_master_xfer_atomic, .functionality = cdns_i2c_func, #if IS_ENABLED(CONFIG_I2C_SLAVE) .reg_slave = cdns_reg_slave, @@ -1158,23 +1401,6 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long } } -/** - * cdns_i2c_runtime_suspend - Runtime suspend method for the driver - * @dev: Address of the platform_device structure - * - * Put the driver into low power mode. - * - * Return: 0 always - */ -static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev) -{ - struct cdns_i2c *xi2c = dev_get_drvdata(dev); - - clk_disable(xi2c->clk); - - return 0; -} - static int __maybe_unused cdns_i2c_suspend(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); @@ -1187,49 +1413,6 @@ static int __maybe_unused cdns_i2c_suspend(struct device *dev) return 0; } -/** - * cdns_i2c_init - Controller initialisation - * @id: Device private data structure - * - * Initialise the i2c controller. - * - */ -static void cdns_i2c_init(struct cdns_i2c *id) -{ - cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET); - /* - * Cadence I2C controller has a bug wherein it generates - * invalid read transaction after HW timeout in master receiver mode. 
- * HW timeout is not used by this driver and the interrupt is disabled. - * But the feature itself cannot be disabled. Hence maximum value - * is written to this register to reduce the chances of error. - */ - cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); -} - -/** - * cdns_i2c_runtime_resume - Runtime resume - * @dev: Address of the platform_device structure - * - * Runtime resume callback. - * - * Return: 0 on success and error value on error - */ -static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev) -{ - struct cdns_i2c *xi2c = dev_get_drvdata(dev); - int ret; - - ret = clk_enable(xi2c->clk); - if (ret) { - dev_err(dev, "Cannot enable clock.\n"); - return ret; - } - cdns_i2c_init(xi2c); - - return 0; -} - static int __maybe_unused cdns_i2c_resume(struct device *dev) { struct cdns_i2c *xi2c = dev_get_drvdata(dev); @@ -1469,7 +1652,7 @@ static struct platform_driver cdns_i2c_drv = { .pm = &cdns_i2c_dev_pm_ops, }, .probe = cdns_i2c_probe, - .remove_new = cdns_i2c_remove, + .remove = cdns_i2c_remove, }; module_platform_driver(cdns_i2c_drv); diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index fdc1758a3275..8065c7e4462e 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids); static struct platform_driver cbus_i2c_driver = { .probe = cbus_i2c_probe, - .remove_new = cbus_i2c_remove, + .remove = cbus_i2c_remove, .driver = { .name = "i2c-cbus-gpio", .of_match_table = of_match_ptr(i2c_cbus_dt_ids), diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 52e3000626c5..26a36a65521e 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -546,7 +546,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table); static struct platform_driver cht_wc_i2c_adap_driver = { .probe = cht_wc_i2c_adap_i2c_probe, - .remove_new = cht_wc_i2c_adap_i2c_remove, + .remove = cht_wc_i2c_adap_i2c_remove, .driver = { .name = "cht_wcove_ext_chgr", }, diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 4794ec066eb0..260e1643c2cc 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -701,7 +701,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match); static struct platform_driver cpm_i2c_driver = { .probe = cpm_i2c_probe, - .remove_new = cpm_i2c_remove, + .remove = cpm_i2c_remove, .driver = { .name = "fsl-i2c-cpm", .of_match_table = cpm_i2c_match, diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index ab2688bd4d33..43bf90d90eeb 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id); static struct platform_driver ec_i2c_tunnel_driver = { .probe = ec_i2c_probe, - .remove_new = ec_i2c_remove, + .remove = ec_i2c_remove, .driver = { .name = "cros-ec-i2c-tunnel", .acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id), diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index c4fb5e9ab506..71dc0a6688b7 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -935,7 +935,7 @@ MODULE_DEVICE_TABLE(platform, davinci_i2c_driver_ids); static struct platform_driver davinci_i2c_driver = { .probe = davinci_i2c_probe, - .remove_new = davinci_i2c_remove, + .remove = davinci_i2c_remove, .id_table = davinci_i2c_driver_ids, .driver = { .name = 
"i2c_davinci", diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c index 63454b06e5da..8fbd2a10c31a 100644 --- a/drivers/i2c/busses/i2c-designware-amdpsp.c +++ b/drivers/i2c/busses/i2c-designware-amdpsp.c @@ -155,7 +155,7 @@ static void psp_release_i2c_bus_deferred(struct work_struct *work) /* * If there is any pending transaction, cannot release the bus here. - * psp_release_i2c_bus will take care of this later. + * psp_release_i2c_bus() will take care of this later. */ if (psp_i2c_access_count) goto cleanup; @@ -210,12 +210,12 @@ static void psp_release_i2c_bus(void) { mutex_lock(&psp_i2c_access_mutex); - /* Return early if mailbox was malfunctional */ + /* Return early if mailbox was malfunctioned */ if (psp_i2c_mbox_fail) goto cleanup; /* - * If we are last owner of PSP semaphore, need to release aribtration + * If we are last owner of PSP semaphore, need to release arbitration * via mailbox. */ psp_i2c_access_count--; @@ -235,9 +235,9 @@ static void psp_release_i2c_bus(void) /* * Locking methods are based on the default implementation from - * drivers/i2c/i2c-core-base.c, but with psp acquire and release operations + * drivers/i2c/i2c-core-base.c, but with PSP acquire and release operations * added. With this in place we can ensure that i2c clients on the bus shared - * with psp are able to lock HW access to the bus for arbitrary number of + * with PSP are able to lock HW access to the bus for arbitrary number of * operations - that is e.g. write-wait-read. */ static void i2c_adapter_dw_psp_lock_bus(struct i2c_adapter *adapter, diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index f31d352d98b5..443f946acf36 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -33,7 +33,7 @@ #include "i2c-designware-core.h" -static char *abort_sources[] = { +static const char *const abort_sources[] = { [ABRT_7B_ADDR_NOACK] = "slave address not acknowledged (7bit mode)", [ABRT_10ADDR1_NOACK] = @@ -127,6 +127,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val) * Autodetects needed register access mode and creates the regmap with * corresponding read/write callbacks. This must be called before doing any * other register access. + * + * Return: 0 on success, or negative errno otherwise. */ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) { @@ -174,7 +176,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) /* * Note we'll check the return value of the regmap IO accessors only * at the probe stage. The rest of the code won't do this because - * basically we have MMIO-based regmap so non of the read/write methods + * basically we have MMIO-based regmap, so none of the read/write methods * can fail. */ dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg); @@ -336,7 +338,7 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device) acpi_speed = i2c_acpi_find_bus_speed(device); /* - * Some DSTDs use a non standard speed, round down to the lowest + * Some DSDTs use a non standard speed, round down to the lowest * standard speed. 
*/ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { @@ -407,47 +409,26 @@ static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg) } u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, - u32 tSYMBOL, u32 tf, int cond, int offset) + u32 tSYMBOL, u32 tf, int offset) { if (!ic_clk) return i2c_dw_read_scl_reg(dev, reg); /* - * DesignWare I2C core doesn't seem to have solid strategy to meet - * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec - * will result in violation of the tHD;STA spec. + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) + * + * This is just experimental rule; the tHD;STA period turned + * out to be proportinal to (_HCNT + 3). With this setting, + * we could meet both tHIGH and tHD;STA timing specs. + * + * If unsure, you'd better to take this alternative. + * + * The reason why we need to take into account "tf" here, + * is the same as described in i2c_dw_scl_lcnt(). */ - if (cond) - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH - * - * This is based on the DW manuals, and represents an ideal - * configuration. The resulting I2C bus speed will be - * faster than any of the others. - * - * If your hardware is free from tHD;STA issue, try this one. - */ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) - - 8 + offset; - else - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) - * - * This is just experimental rule; the tHD;STA period turned - * out to be proportinal to (_HCNT + 3). With this setting, - * we could meet both tHIGH and tHD;STA timing specs. - * - * If unsure, you'd better to take this alternative. - * - * The reason why we need to take into account "tf" here, - * is the same as described in i2c_dw_scl_lcnt(). - */ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - - 3 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset; } u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, @@ -467,8 +448,7 @@ u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. 
*/ - return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - - 1 + offset; + return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - 1 + offset; } int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) @@ -524,7 +504,7 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev) void __i2c_dw_disable(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; - unsigned int raw_intr_stats; + unsigned int raw_intr_stats, ic_stats; unsigned int enable; int timeout = 100; bool abort_needed; @@ -532,9 +512,11 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) int ret; regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats); + regmap_read(dev->map, DW_IC_STATUS, &ic_stats); regmap_read(dev->map, DW_IC_ENABLE, &enable); - abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD; + abort_needed = (raw_intr_stats & DW_IC_INTR_MST_ON_HOLD) || + (ic_stats & DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY); if (abort_needed) { if (!(enable & DW_IC_ENABLE_ENABLE)) { regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE); @@ -569,7 +551,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) /* * Wait 10 times the signaling period of the highest I2C - * transfer supported by the driver (for 400KHz this is + * transfer supported by the driver (for 400kHz this is * 25us) as described in the DesignWare I2C databook. */ usleep_range(25, 250); @@ -676,10 +658,10 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) if (abort_source & DW_IC_TX_ARB_LOST) return -EAGAIN; - else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) + if (abort_source & DW_IC_TX_ABRT_GCALL_READ) return -EINVAL; /* wrong msgs[] data */ - else - return -EIO; + + return -EIO; } int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 8e8854ec9882..520f9699e3bd 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -116,6 +116,7 @@ #define DW_IC_STATUS_RFNE BIT(3) #define DW_IC_STATUS_MASTER_ACTIVITY BIT(5) #define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6) +#define DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY BIT(7) #define DW_IC_SDA_HOLD_RX_SHIFT 16 #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16) @@ -142,10 +143,10 @@ #define DW_IC_SLAVE 1 /* - * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register + * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register. * - * Only expected abort codes are listed here - * refer to the datasheet for the full list + * Only expected abort codes are listed here, + * refer to the datasheet for the full list. */ #define ABRT_7B_ADDR_NOACK 0 #define ABRT_10ADDR1_NOACK 1 @@ -200,7 +201,7 @@ struct reset_control; * @rst: optional reset for the controller * @slave: represent an I2C slave device * @get_clk_rate_khz: callback to retrieve IP specific bus speed - * @cmd_err: run time hadware error code + * @cmd_err: run time hardware error code * @msgs: points to an array of messages currently being transferred * @msgs_num: the number of elements in msgs * @msg_write_idx: the element index of the current tx message in the msgs array @@ -236,7 +237,7 @@ struct reset_control; * @release_lock: function to release a hardware lock on the bus * @semaphore_idx: Index of table with semaphore type attached to the bus. It's * -1 if there is no semaphore. 
- * @shared_with_punit: true if this bus is shared with the SoCs PUNIT + * @shared_with_punit: true if this bus is shared with the SoC's PUNIT * @init: function to initialize the I2C hardware * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE @@ -328,7 +329,7 @@ struct i2c_dw_semaphore_callbacks { int i2c_dw_init_regmap(struct dw_i2c_dev *dev); u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, - u32 tSYMBOL, u32 tf, int cond, int offset); + u32 tSYMBOL, u32 tf, int offset); u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk, u32 tLOW, u32 tf, int offset); int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e8ac9a7bf0b3..528b969253a7 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -71,7 +71,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 4000, /* tHD;STA = tHIGH = 4.0 us */ sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->ss_lcnt = i2c_dw_scl_lcnt(dev, @@ -105,7 +104,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 260, /* tHIGH = 260 ns */ sda_falling_time, - 0, /* DW default */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(dev, @@ -129,7 +127,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 600, /* tHD;STA = tHIGH = 0.6 us */ sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ dev->fs_lcnt = i2c_dw_scl_lcnt(dev, @@ -161,7 +158,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) ic_clk, 160, /* tHIGH = 160 ns */ sda_falling_time, - 0, /* DW default */ 0); /* No offset */ dev->hs_lcnt = i2c_dw_scl_lcnt(dev, @@ -184,12 +180,14 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) } /** - * i2c_dw_init_master() - Initialize the designware I2C master hardware + * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware * @dev: device private data * * This functions configures and enables the I2C master. * This function is called during I2C init function, and in case of timeout at * run time. + * + * Return: 0 on success, or negative errno otherwise. */ static int i2c_dw_init_master(struct dw_i2c_dev *dev) { @@ -357,7 +355,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, /* * Initiate the i2c read/write transaction of buffer length, * and poll for bus busy status. For the last message transfer, - * update the command with stopbit enable. + * update the command with stop bit enable. */ for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) { if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1) @@ -402,7 +400,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, /* * Initiate (and continue) low level master read/write transaction. - * This function is only called from i2c_dw_isr, and pumping i2c_msg + * This function is only called from i2c_dw_isr(), and pumping i2c_msg * messages into the tx buffer. Even if the size of i2c_msg data is * longer than the size of the tx buffer, it handles everything. 
*/ @@ -440,7 +438,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) buf = msgs[dev->msg_write_idx].buf; buf_len = msgs[dev->msg_write_idx].len; - /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and + /* + * If both IC_EMPTYFIFO_HOLD_MASTER_EN and * IC_RESTART_EN are set, we must manually * set restart bit between messages. */ @@ -971,7 +970,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) rinfo->unprepare_recovery = i2c_dw_unprepare_recovery; adap->bus_recovery_info = rinfo; - dev_info(dev->dev, "running with gpio recovery mode! scl%s", + dev_info(dev->dev, "running with GPIO recovery mode! scl%s", rinfo->sda_gpiod ? ",sda" : ""); return 0; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 7b2c5d71a7fc..38265c3dc454 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -51,7 +51,7 @@ struct dw_scl_sda_cfg { u16 fs_hcnt; u16 ss_lcnt; u16 fs_lcnt; - u32 sda_hold; + u32 sda_hold_time; }; struct dw_pci_controller { @@ -76,7 +76,7 @@ static struct dw_scl_sda_cfg byt_config = { .fs_hcnt = 0x55, .ss_lcnt = 0x200, .fs_lcnt = 0x99, - .sda_hold = 0x6, + .sda_hold_time = 0x6, }; /* Haswell HCNT/LCNT/SDA hold time */ @@ -85,14 +85,14 @@ static struct dw_scl_sda_cfg hsw_config = { .fs_hcnt = 0x48, .ss_lcnt = 0x01fb, .fs_lcnt = 0xa0, - .sda_hold = 0x9, + .sda_hold_time = 0x9, }; /* NAVI-AMD HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg navi_amd_config = { .ss_hcnt = 0x1ae, .ss_lcnt = 0x23a, - .sda_hold = 0x9, + .sda_hold_time = 0x9, }; static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev) @@ -207,6 +207,7 @@ static const struct software_node dgpu_node = { static int i2c_dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct device *device = &pdev->dev; struct dw_i2c_dev *dev; struct i2c_adapter *adap; int r; @@ -214,25 +215,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, struct dw_scl_sda_cfg *cfg; if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) - return dev_err_probe(&pdev->dev, -EINVAL, - "Invalid driver data %ld\n", + return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n", id->driver_data); controller = &dw_pci_controllers[id->driver_data]; r = pcim_enable_device(pdev); if (r) - return dev_err_probe(&pdev->dev, r, - "Failed to enable I2C PCI device\n"); + return dev_err_probe(device, r, "Failed to enable I2C PCI device\n"); pci_set_master(pdev); r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (r) - return dev_err_probe(&pdev->dev, r, - "I/O memory remapping failed\n"); + return dev_err_probe(device, r, "I/O memory remapping failed\n"); - dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -242,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->get_clk_rate_khz = controller->get_clk_rate_khz; dev->base = pcim_iomap_table(pdev)[0]; - dev->dev = &pdev->dev; + dev->dev = device; dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags; @@ -266,7 +264,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->fs_hcnt = cfg->fs_hcnt; dev->ss_lcnt = cfg->ss_lcnt; dev->fs_lcnt = cfg->fs_lcnt; - dev->sda_hold_time = cfg->sda_hold; + dev->sda_hold_time = cfg->sda_hold_time; } adap = &dev->adapter; @@ -281,14 +279,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); if 
(IS_ERR(dev->slave)) - return dev_err_probe(dev->dev, PTR_ERR(dev->slave), + return dev_err_probe(device, PTR_ERR(dev->slave), "register UCSI failed\n"); } - pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - pm_runtime_allow(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_put_autosuspend(device); + pm_runtime_allow(device); return 0; } @@ -296,11 +294,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); + struct device *device = &pdev->dev; i2c_dw_disable(dev); - pm_runtime_forbid(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_forbid(device); + pm_runtime_get_noresume(device); i2c_del_adapter(&dev->adapter); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2d0c7348e491..1643cad6489c 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -72,7 +72,7 @@ static int bt1_i2c_write(void *context, unsigned int reg, unsigned int val) return ret; return regmap_write(dev->sysmap, BT1_I2C_CTL, - BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); + BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK)); } static const struct regmap_config bt1_i2c_cfg = { @@ -205,6 +205,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct device *device = &pdev->dev; struct i2c_adapter *adap; struct dw_i2c_dev *dev; int irq, ret; @@ -213,15 +214,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (irq < 0) return irq; - dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; - dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); - if (device_property_present(&pdev->dev, "wx,i2c-snps-model")) + dev->flags = (uintptr_t)device_get_match_data(device); + if (device_property_present(device, "wx,i2c-snps-model")) dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING; - dev->dev = &pdev->dev; + dev->dev = device; dev->irq = irq; platform_set_drvdata(pdev, dev); @@ -229,7 +230,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (ret) return ret; - dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + dev->rst = devm_reset_control_get_optional_exclusive(device, NULL); if (IS_ERR(dev->rst)) return PTR_ERR(dev->rst); @@ -246,13 +247,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) i2c_dw_configure(dev); /* Optional interface clock */ - dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); + dev->pclk = devm_clk_get_optional(device, "pclk"); if (IS_ERR(dev->pclk)) { ret = PTR_ERR(dev->pclk); goto exit_reset; } - dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + dev->clk = devm_clk_get_optional(device, NULL); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); goto exit_reset; @@ -277,31 +278,27 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) adap = &dev->adapter; adap->owner = THIS_MODULE; adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ? 
- I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; + I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; adap->nr = -1; - if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE); - } else { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE | - DPM_FLAG_SMART_SUSPEND); - } + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE); + else + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND); - device_enable_async_suspend(&pdev->dev); + device_enable_async_suspend(device); /* The code below assumes runtime PM to be disabled. */ - WARN_ON(pm_runtime_enabled(&pdev->dev)); + WARN_ON(pm_runtime_enabled(device)); - pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_set_active(device); if (dev->shared_with_punit) - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_get_noresume(device); - pm_runtime_enable(&pdev->dev); + pm_runtime_enable(device); ret = i2c_dw_probe(dev); if (ret) @@ -319,15 +316,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) static void dw_i2c_plat_remove(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); + struct device *device = &pdev->dev; - pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_sync(device); i2c_del_adapter(&dev->adapter); i2c_dw_disable(dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(device); + pm_runtime_put_sync(device); dw_i2c_plat_pm_cleanup(dev); i2c_dw_remove_lock_support(dev); @@ -354,6 +352,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "HISI02A1", 0 }, { "HISI02A2", 0 }, { "HISI02A3", 0 }, + { "HJMC3001", 0 }, { "HYGO0010", ACCESS_INTR_MASK }, { "INT33C2", 0 }, { "INT33C3", 0 }, @@ -372,7 +371,7 @@ MODULE_DEVICE_TABLE(platform, dw_i2c_platform_ids); static struct platform_driver dw_i2c_driver = { .probe = dw_i2c_plat_probe, - .remove_new = dw_i2c_plat_remove, + .remove = dw_i2c_plat_remove, .driver = { .name = "i2c_designware", .of_match_table = dw_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 7035296aa24c..fad568e3523b 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -32,12 +32,14 @@ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) } /** - * i2c_dw_init_slave() - Initialize the designware i2c slave hardware + * i2c_dw_init_slave() - Initialize the DesignWare i2c slave hardware * @dev: device private data * * This function configures and enables the I2C in slave mode. * This function is called during I2C init function, and in case of timeout at * run time. + * + * Return: 0 on success, or negative errno otherwise. 
*/ static int i2c_dw_init_slave(struct dw_i2c_dev *dev) { @@ -264,7 +266,7 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave, IRQF_SHARED, dev_name(dev->dev), dev); if (ret) { - dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev_err(dev->dev, "failure requesting IRQ %i: %d\n", dev->irq, ret); return ret; } diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 3dc5a46698fc..38d7f31aee79 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -363,7 +363,7 @@ MODULE_DEVICE_TABLE(of, dc_i2c_match); static struct platform_driver dc_i2c_driver = { .probe = dc_i2c_probe, - .remove_new = dc_i2c_remove, + .remove = dc_i2c_remove, .driver = { .name = "digicolor-i2c", .of_match_table = dc_i2c_match, diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c index 11ed055143d3..bde2ef098862 100644 --- a/drivers/i2c/busses/i2c-dln2.c +++ b/drivers/i2c/busses/i2c-dln2.c @@ -245,7 +245,7 @@ static void dln2_i2c_remove(struct platform_device *pdev) static struct platform_driver dln2_i2c_driver = { .driver.name = "dln2-i2c", .probe = dln2_i2c_probe, - .remove_new = dln2_i2c_remove, + .remove = dln2_i2c_remove, }; module_platform_driver(dln2_i2c_driver); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index d08be3f3cede..2512cef8e2a2 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -425,7 +425,7 @@ static const struct of_device_id em_i2c_ids[] = { static struct platform_driver em_i2c_driver = { .probe = em_i2c_probe, - .remove_new = em_i2c_remove, + .remove = em_i2c_remove, .driver = { .name = "em-i2c", .of_match_table = em_i2c_ids, diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index d8baca9b610c..e330015087ab 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c @@ -1009,7 +1009,7 @@ static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = { static struct platform_driver exynos5_i2c_driver = { .probe = exynos5_i2c_probe, - .remove_new = exynos5_i2c_remove, + .remove = exynos5_i2c_remove, .driver = { .name = "exynos5-hsi2c", .pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index e0bd218e2f14..f4355b17bfbf 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -481,7 +481,7 @@ static struct platform_driver i2c_gpio_driver = { .acpi_match_table = i2c_gpio_acpi_match, }, .probe = i2c_gpio_probe, - .remove_new = i2c_gpio_remove, + .remove = i2c_gpio_remove, }; static int __init i2c_gpio_init(void) diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c index efafc0528c44..0fc39caa6c87 100644 --- a/drivers/i2c/busses/i2c-gxp.c +++ b/drivers/i2c/busses/i2c-gxp.c @@ -595,7 +595,7 @@ MODULE_DEVICE_TABLE(of, gxp_i2c_of_match); static struct platform_driver gxp_i2c_driver = { .probe = gxp_i2c_probe, - .remove_new = gxp_i2c_remove, + .remove = gxp_i2c_remove, .driver = { .name = "gxp-i2c", .of_match_table = gxp_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index ec1ebacb9aa8..78c5845e0877 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -454,7 +454,7 @@ static struct platform_driver highlander_i2c_driver = { }, .probe = highlander_i2c_probe, - .remove_new = highlander_i2c_remove, + .remove = highlander_i2c_remove, }; 
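The ".remove_new" to ".remove" hunks here and in the drivers that follow are all the same treewide conversion: struct platform_driver now has a void-returning .remove callback, so the transitional .remove_new name can be dropped. A minimal sketch of the resulting shape, using a made-up "foo-i2c" driver purely for illustration (probe omitted for brevity):

	static void foo_i2c_remove(struct platform_device *pdev)
	{
		struct i2c_adapter *adap = platform_get_drvdata(pdev);

		i2c_del_adapter(adap);
	}

	static struct platform_driver foo_i2c_driver = {
		.probe	= foo_i2c_probe,
		.remove	= foo_i2c_remove,	/* returns void; nothing to propagate */
		.driver	= {
			.name	= "foo-i2c",
		},
	};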
module_platform_driver(highlander_i2c_driver); diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 64cade6ba923..370f32974763 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -508,7 +508,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_i2c_match); static struct platform_driver hix5hd2_i2c_driver = { .probe = hix5hd2_i2c_probe, - .remove_new = hix5hd2_i2c_remove, + .remove = hix5hd2_i2c_remove, .driver = { .name = "hix5hd2-i2c", .pm = pm_ptr(&hix5hd2_i2c_pm_ops), diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 299fe9d3afab..75dab01d43a7 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -81,6 +81,8 @@ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes * Birch Stream (SOC) 0x5796 32 hard yes yes yes * Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes + * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes + * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -261,6 +263,8 @@ #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 #define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3 #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22 +#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322 +#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422 struct i801_mux_config { char *gpio_chip; @@ -1055,6 +1059,8 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, + { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 82dedb1bb5be..c76c4116ddc7 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -790,7 +790,7 @@ static struct platform_driver ibm_iic_driver = { .of_match_table = ibm_iic_match, }, .probe = iic_probe, - .remove_new = iic_remove, + .remove = iic_remove, }; module_platform_driver(ibm_iic_driver); diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index e0e87185f6bb..02f75cf310aa 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c @@ -1497,7 +1497,7 @@ static struct platform_driver img_scb_i2c_driver = { .pm = pm_ptr(&img_i2c_pm), }, .probe = img_i2c_probe, - .remove_new = img_i2c_remove, + .remove = img_i2c_remove, }; module_platform_driver(img_scb_i2c_driver); diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 976d43f73f38..8adf2963d764 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -703,7 +703,7 @@ static const struct dev_pm_ops lpi2c_pm_ops = { static struct platform_driver lpi2c_imx_driver = { .probe = lpi2c_imx_probe, - .remove_new = lpi2c_imx_remove, + .remove = lpi2c_imx_remove, .driver = { .name = DRIVER_NAME, .of_match_table = lpi2c_imx_of_match, diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 98539313cbc9..f751d231ded8 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -17,7 +17,7 @@ * Copyright (C) 2008 Darius Augulis * * Copyright 2013 Freescale Semiconductor, Inc. 
- * Copyright 2020 NXP + * Copyright 2020, 2024 NXP * */ @@ -84,6 +84,7 @@ #define IMX_I2C_REGSHIFT 2 #define VF610_I2C_REGSHIFT 0 +#define S32G_I2C_REGSHIFT 0 /* Bits of IMX I2C registers */ #define I2SR_RXAK 0x01 @@ -165,9 +166,34 @@ static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = { { 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E }, }; +/* S32G2/S32G3 clock divider, register value pairs */ +static struct imx_i2c_clk_pair s32g2_i2c_clk_div[] = { + { 34, 0x00 }, { 36, 0x01 }, { 38, 0x02 }, { 40, 0x03 }, + { 42, 0x04 }, { 44, 0x05 }, { 46, 0x06 }, { 48, 0x09 }, + { 52, 0x0A }, { 54, 0x07 }, { 56, 0x0B }, { 60, 0x0C }, + { 64, 0x0D }, { 68, 0x40 }, { 72, 0x0E }, { 76, 0x42 }, + { 80, 0x12 }, { 84, 0x0F }, { 88, 0x13 }, { 96, 0x14 }, + { 104, 0x15 }, { 108, 0x47 }, { 112, 0x19 }, { 120, 0x16 }, + { 128, 0x1A }, { 136, 0x80 }, { 144, 0x17 }, { 152, 0x82 }, + { 160, 0x1C }, { 168, 0x84 }, { 176, 0x1D }, { 192, 0x21 }, + { 208, 0x1E }, { 216, 0x87 }, { 224, 0x22 }, { 240, 0x56 }, + { 256, 0x1F }, { 288, 0x24 }, { 320, 0x25 }, { 336, 0x8F }, + { 352, 0x93 }, { 356, 0x5D }, { 358, 0x98 }, { 384, 0x26 }, + { 416, 0x56 }, { 448, 0x2A }, { 480, 0x27 }, { 512, 0x2B }, + { 576, 0x2C }, { 640, 0x2D }, { 704, 0x9D }, { 768, 0x2E }, + { 832, 0x9D }, { 896, 0x32 }, { 960, 0x2F }, { 1024, 0x33 }, + { 1152, 0x34 }, { 1280, 0x35 }, { 1536, 0x36 }, { 1792, 0x3A }, + { 1920, 0x37 }, { 2048, 0x3B }, { 2304, 0x74 }, { 2560, 0x3D }, + { 3072, 0x3E }, { 3584, 0x7A }, { 3840, 0x3F }, { 4096, 0x7B }, + { 4608, 0x7C }, { 5120, 0x7D }, { 6144, 0x7E }, { 7168, 0xBA }, + { 7680, 0x7F }, { 8192, 0xBB }, { 9216, 0xBC }, { 10240, 0xBD }, + { 12288, 0xBE }, { 15360, 0xBF }, +}; + enum imx_i2c_type { IMX1_I2C, IMX21_I2C, + S32G_I2C, VF610_I2C, }; @@ -197,6 +223,17 @@ struct imx_i2c_dma { enum dma_data_direction dma_data_dir; }; +enum imx_i2c_state { + IMX_I2C_STATE_DONE, + IMX_I2C_STATE_FAILED, + IMX_I2C_STATE_WRITE, + IMX_I2C_STATE_DMA, + IMX_I2C_STATE_READ, + IMX_I2C_STATE_READ_CONTINUE, + IMX_I2C_STATE_READ_BLOCK_DATA, + IMX_I2C_STATE_READ_BLOCK_DATA_LEN, +}; + struct imx_i2c_struct { struct i2c_adapter adapter; struct clk *clk; @@ -216,6 +253,14 @@ struct imx_i2c_struct { struct i2c_client *slave; enum i2c_slave_event last_slave_event; + struct i2c_msg *msg; + unsigned int msg_buf_idx; + int isr_result; + bool is_lastmsg; + enum imx_i2c_state state; + + bool multi_master; + /* For checking slave events. 
*/ spinlock_t slave_lock; struct hrtimer slave_timer; @@ -258,7 +303,15 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = { .ndivs = ARRAY_SIZE(vf610_i2c_clk_div), .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C, .i2cr_ien_opcode = I2CR_IEN_OPCODE_0, +}; +static const struct imx_i2c_hwdata s32g2_i2c_hwdata = { + .devtype = S32G_I2C, + .regshift = S32G_I2C_REGSHIFT, + .clk_div = s32g2_i2c_clk_div, + .ndivs = ARRAY_SIZE(s32g2_i2c_clk_div), + .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C, + .i2cr_ien_opcode = I2CR_IEN_OPCODE_0, }; static const struct platform_device_id imx_i2c_devtype[] = { @@ -288,6 +341,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, }, + { .compatible = "nxp,s32g2-i2c", .data = &s32g2_i2c_hwdata, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids); @@ -481,6 +535,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a unsigned long orig_jiffies = jiffies; unsigned int temp; + if (!i2c_imx->multi_master) + return 0; + while (1) { temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); @@ -540,8 +597,8 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic) return -ETIMEDOUT; } - /* check for arbitration lost */ - if (i2c_imx->i2csr & I2SR_IAL) { + /* In multi-master mode check for arbitration lost */ + if (i2c_imx->multi_master && (i2c_imx->i2csr & I2SR_IAL)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); i2c_imx_clear_irq(i2c_imx, I2SR_IAL); @@ -903,11 +960,156 @@ static int i2c_imx_unreg_slave(struct i2c_client *client) return ret; } +static inline int i2c_imx_isr_acked(struct imx_i2c_struct *i2c_imx) +{ + i2c_imx->isr_result = 0; + + if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) { + i2c_imx->state = IMX_I2C_STATE_FAILED; + i2c_imx->isr_result = -ENXIO; + wake_up(&i2c_imx->queue); + } + + return i2c_imx->isr_result; +} + +static inline int i2c_imx_isr_write(struct imx_i2c_struct *i2c_imx) +{ + int result; + + result = i2c_imx_isr_acked(i2c_imx); + if (result) + return result; + + if (i2c_imx->msg->len == i2c_imx->msg_buf_idx) + return 0; + + imx_i2c_write_reg(i2c_imx->msg->buf[i2c_imx->msg_buf_idx++], i2c_imx, IMX_I2C_I2DR); + + return 1; +} + +static inline int i2c_imx_isr_read(struct imx_i2c_struct *i2c_imx) +{ + int result; + unsigned int temp; + + result = i2c_imx_isr_acked(i2c_imx); + if (result) + return result; + + /* setup bus to read data */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~I2CR_MTX; + if (i2c_imx->msg->len - 1) + temp &= ~I2CR_TXAK; + + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ + + return 0; +} + +static inline void i2c_imx_isr_read_continue(struct imx_i2c_struct *i2c_imx) +{ + unsigned int temp; + + if ((i2c_imx->msg->len - 1) == i2c_imx->msg_buf_idx) { + if (i2c_imx->is_lastmsg) { + /* + * It must generate STOP before read I2DR to prevent + * controller from generating another clock cycle + */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; + temp &= ~(I2CR_MSTA | I2CR_MTX); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } else { + /* + * For i2c master receiver repeat restart operation like: + * read -> repeat MSTA -> read/write + * The controller must set MTX before read the last byte in + * the first read operation, otherwise the first 
read cost + * one extra clock cycle. + */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp |= I2CR_MTX; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + } else if (i2c_imx->msg_buf_idx == (i2c_imx->msg->len - 2)) { + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp |= I2CR_TXAK; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + + i2c_imx->msg->buf[i2c_imx->msg_buf_idx++] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); +} + +static inline void i2c_imx_isr_read_block_data_len(struct imx_i2c_struct *i2c_imx) +{ + u8 len = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + + if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { + i2c_imx->isr_result = -EPROTO; + i2c_imx->state = IMX_I2C_STATE_FAILED; + wake_up(&i2c_imx->queue); + } + i2c_imx->msg->len += len; +} + static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned int status) { - /* save status register */ - i2c_imx->i2csr = status; - wake_up(&i2c_imx->queue); + /* + * This state machine handles I2C reception and transmission in non-DMA + * mode. We must process all the data in the ISR to reduce the delay + * between two consecutive messages. If the data is not processed in + * the ISR, SMBus devices may timeout, leading to a bus error. + */ + switch (i2c_imx->state) { + case IMX_I2C_STATE_DMA: + i2c_imx->i2csr = status; + wake_up(&i2c_imx->queue); + break; + + case IMX_I2C_STATE_READ: + if (i2c_imx_isr_read(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; + break; + + case IMX_I2C_STATE_READ_CONTINUE: + i2c_imx_isr_read_continue(i2c_imx); + if (i2c_imx->msg_buf_idx == i2c_imx->msg->len) { + i2c_imx->state = IMX_I2C_STATE_DONE; + wake_up(&i2c_imx->queue); + } + break; + + case IMX_I2C_STATE_READ_BLOCK_DATA: + if (i2c_imx_isr_read(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA_LEN; + break; + + case IMX_I2C_STATE_READ_BLOCK_DATA_LEN: + i2c_imx_isr_read_block_data_len(i2c_imx); + i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE; + break; + + case IMX_I2C_STATE_WRITE: + if (i2c_imx_isr_write(i2c_imx)) + break; + i2c_imx->state = IMX_I2C_STATE_DONE; + wake_up(&i2c_imx->queue); + break; + + default: + i2c_imx->i2csr = status; + i2c_imx->state = IMX_I2C_STATE_FAILED; + i2c_imx->isr_result = -EINVAL; + wake_up(&i2c_imx->queue); + } return IRQ_HANDLED; } @@ -954,6 +1156,8 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; + i2c_imx->state = IMX_I2C_STATE_DMA; + dma->chan_using = dma->chan_tx; dma->dma_transfer_dir = DMA_MEM_TO_DEV; dma->dma_data_dir = DMA_TO_DEVICE; @@ -1006,6 +1210,42 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, return i2c_imx_acked(i2c_imx); } +static int i2c_imx_prepare_read(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs, bool use_dma) +{ + int result; + unsigned int temp = 0; + + /* write slave address */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + result = i2c_imx_trx_complete(i2c_imx, !use_dma); + if (result) + return result; + result = i2c_imx_acked(i2c_imx); + if (result) + return result; + + dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); + + /* setup bus to read data */ + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~I2CR_MTX; + + /* + * Reset the I2CR_TXAK flag initially for SMBus block read since the + * length is unknown + */ + if (msgs->len - 1) + temp &= ~I2CR_TXAK; + if (use_dma) + temp |= I2CR_DMAEN; + + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + 
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ + + return 0; +} + static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg) { @@ -1016,6 +1256,13 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, struct imx_i2c_dma *dma = i2c_imx->dma; struct device *dev = &i2c_imx->adapter.dev; + i2c_imx->state = IMX_I2C_STATE_DMA; + + result = i2c_imx_prepare_read(i2c_imx, msgs, true); + if (result) + return result; + + dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); dma->chan_using = dma->chan_rx; dma->dma_transfer_dir = DMA_DEV_TO_MEM; @@ -1092,8 +1339,8 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, return 0; } -static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, - bool atomic) +static int i2c_imx_atomic_write(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs) { int i, result; @@ -1102,7 +1349,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, /* write slave address */ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; result = i2c_imx_acked(i2c_imx); @@ -1116,7 +1363,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, "<%s> write byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; result = i2c_imx_acked(i2c_imx); @@ -1126,55 +1373,54 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, return 0; } -static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, - bool is_lastmsg, bool atomic) +static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) +{ + dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", + __func__, i2c_8bit_addr_from_msg(msgs)); + + i2c_imx->state = IMX_I2C_STATE_WRITE; + i2c_imx->msg = msgs; + i2c_imx->msg_buf_idx = 0; + + /* + * By writing the device address we start the state machine in the ISR. + * The ISR will report when it is done or when it fails. 
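 *
 * (For scale -- an illustrative reading of the arithmetic, not a figure
 *  from the reference manual: the wait below budgets
 *  (msgs->len + 1) * HZ / 10 jiffies, roughly 100 ms per data byte plus
 *  one for the address, so a 16-byte transfer gets about 1.7 s before it
 *  is failed with -ETIMEDOUT.)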
+ */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + wait_event_timeout(i2c_imx->queue, + i2c_imx->state == IMX_I2C_STATE_DONE || + i2c_imx->state == IMX_I2C_STATE_FAILED, + (msgs->len + 1) * HZ / 10); + if (i2c_imx->state == IMX_I2C_STATE_FAILED) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> write failed with %d\n", + __func__, i2c_imx->isr_result); + return i2c_imx->isr_result; + } + if (i2c_imx->state != IMX_I2C_STATE_DONE) { + dev_err(&i2c_imx->adapter.dev, "<%s> write timedout\n", __func__); + return -ETIMEDOUT; + } + return 0; +} + +static int i2c_imx_atomic_read(struct imx_i2c_struct *i2c_imx, + struct i2c_msg *msgs, bool is_lastmsg) { int i, result; unsigned int temp; int block_data = msgs->flags & I2C_M_RECV_LEN; - int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE && - msgs->len >= DMA_THRESHOLD && !block_data; - dev_dbg(&i2c_imx->adapter.dev, - "<%s> write slave address: addr=0x%x\n", - __func__, i2c_8bit_addr_from_msg(msgs)); - - /* write slave address */ - imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_prepare_read(i2c_imx, msgs, false); if (result) return result; - result = i2c_imx_acked(i2c_imx); - if (result) - return result; - - dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); - - /* setup bus to read data */ - temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - temp &= ~I2CR_MTX; - - /* - * Reset the I2CR_TXAK flag initially for SMBus block read since the - * length is unknown - */ - if ((msgs->len - 1) || block_data) - temp &= ~I2CR_TXAK; - if (use_dma) - temp |= I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); - if (use_dma) - return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); - /* read data */ for (i = 0; i < msgs->len; i++) { u8 len = 0; - result = i2c_imx_trx_complete(i2c_imx, atomic); + result = i2c_imx_trx_complete(i2c_imx, true); if (result) return result; /* @@ -1205,7 +1451,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); if (!i2c_imx->stopped) - i2c_imx_bus_busy(i2c_imx, 0, atomic); + i2c_imx_bus_busy(i2c_imx, 0, true); } else { /* * For i2c master receiver repeat restart operation like: @@ -1236,6 +1482,48 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, return 0; } +static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, + bool is_lastmsg) +{ + int block_data = msgs->flags & I2C_M_RECV_LEN; + + dev_dbg(&i2c_imx->adapter.dev, + "<%s> write slave address: addr=0x%x\n", + __func__, i2c_8bit_addr_from_msg(msgs)); + + i2c_imx->is_lastmsg = is_lastmsg; + + if (block_data) + i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA; + else + i2c_imx->state = IMX_I2C_STATE_READ; + i2c_imx->msg = msgs; + i2c_imx->msg_buf_idx = 0; + + /* + * By writing the device address we start the state machine in the ISR. + * The ISR will report when it is done or when it fails. 
+ */ + imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR); + wait_event_timeout(i2c_imx->queue, + i2c_imx->state == IMX_I2C_STATE_DONE || + i2c_imx->state == IMX_I2C_STATE_FAILED, + (msgs->len + 1) * HZ / 10); + if (i2c_imx->state == IMX_I2C_STATE_FAILED) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> read failed with %d\n", + __func__, i2c_imx->isr_result); + return i2c_imx->isr_result; + } + if (i2c_imx->state != IMX_I2C_STATE_DONE) { + dev_err(&i2c_imx->adapter.dev, "<%s> read timedout\n", __func__); + return -ETIMEDOUT; + } + if (!i2c_imx->stopped) + return i2c_imx_bus_busy(i2c_imx, 0, false); + + return 0; +} + static int i2c_imx_xfer_common(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, bool atomic) { @@ -1243,6 +1531,7 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, int result; bool is_lastmsg = false; struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); + int use_dma = 0; /* Start I2C transfer */ result = i2c_imx_start(i2c_imx, atomic); @@ -1295,15 +1584,25 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), (temp & I2SR_RXAK ? 1 : 0)); #endif + + use_dma = i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && + msgs[i].flags & I2C_M_DMA_SAFE; if (msgs[i].flags & I2C_M_RD) { - result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic); + int block_data = msgs->flags & I2C_M_RECV_LEN; + + if (atomic) + result = i2c_imx_atomic_read(i2c_imx, &msgs[i], is_lastmsg); + else if (use_dma && !block_data) + result = i2c_imx_dma_read(i2c_imx, &msgs[i], is_lastmsg); + else + result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); } else { - if (!atomic && - i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && - msgs[i].flags & I2C_M_DMA_SAFE) + if (atomic) + result = i2c_imx_atomic_write(i2c_imx, &msgs[i]); + else if (use_dma) result = i2c_imx_dma_write(i2c_imx, &msgs[i]); else - result = i2c_imx_write(i2c_imx, &msgs[i], atomic); + result = i2c_imx_write(i2c_imx, &msgs[i]); } if (result) goto fail0; @@ -1468,6 +1767,12 @@ static int i2c_imx_probe(struct platform_device *pdev) goto rpm_disable; } + /* + * We use the single-master property for backward compatibility. + * By default multi master mode is enabled. 
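 *
 * (With "single-master" set, i2c_imx_bus_busy() returns immediately and
 *  the arbitration-lost check in i2c_imx_trx_complete() is skipped,
 *  since no other master can ever hold the bus.)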
+ */ + i2c_imx->multi_master = !of_property_read_bool(pdev->dev.of_node, "single-master"); + /* Set up clock divider */ i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ; ret = of_property_read_u32(pdev->dev.of_node, @@ -1576,7 +1881,7 @@ static const struct dev_pm_ops i2c_imx_pm_ops = { static struct platform_driver i2c_imx_driver = { .probe = i2c_imx_probe, - .remove_new = i2c_imx_remove, + .remove = i2c_imx_remove, .driver = { .name = DRIVER_NAME, .pm = pm_ptr(&i2c_imx_pm_ops), diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 859c14e340e7..ce5ca5b90b39 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -524,7 +524,7 @@ MODULE_DEVICE_TABLE(of, i2c_iop3xx_match); static struct platform_driver iop3xx_i2c_driver = { .probe = iop3xx_i2c_probe, - .remove_new = iop3xx_i2c_remove, + .remove = iop3xx_i2c_remove, .driver = { .name = "IOP3xx-I2C", .of_match_table = i2c_iop3xx_match, diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index f59158489ad9..2b3b65ef2900 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -1,41 +1,39 @@ // SPDX-License-Identifier: GPL-2.0-only /* - i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus - - Based on i2c-piix4.c - Copyright (c) 1998 - 2002 Frodo Looijaard and - Philip Edelbrock - - Intel SCH support - Copyright (c) 2007 - 2008 Jacob Jun Pan + * Linux kernel driver for Intel SCH chipset SMBus + * - Based on i2c-piix4.c + * Copyright (c) 1998 - 2002 Frodo Looijaard and + * Philip Edelbrock + * - Intel SCH support + * Copyright (c) 2007 - 2008 Jacob Jun Pan + */ -*/ - -/* - Supports: - Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) - Note: we assume there can only be one device, with one SMBus interface. -*/ +/* Supports: Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) */ +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include #include -#include -#include -#include +#include +#include /* SCH SMBus address offsets */ -#define SMBHSTCNT (0 + sch_smba) -#define SMBHSTSTS (1 + sch_smba) -#define SMBHSTCLK (2 + sch_smba) -#define SMBHSTADD (4 + sch_smba) /* TSA */ -#define SMBHSTCMD (5 + sch_smba) -#define SMBHSTDAT0 (6 + sch_smba) -#define SMBHSTDAT1 (7 + sch_smba) -#define SMBBLKDAT (0x20 + sch_smba) - -/* Other settings */ -#define MAX_RETRIES 5000 +#define SMBHSTCNT 0x00 +#define SMBHSTSTS 0x01 +#define SMBHSTCLK 0x02 +#define SMBHSTADD 0x04 /* TSA */ +#define SMBHSTCMD 0x05 +#define SMBHSTDAT0 0x06 +#define SMBHSTDAT1 0x07 +#define SMBBLKDAT 0x20 /* I2C constants */ #define SCH_QUICK 0x00 @@ -44,109 +42,134 @@ #define SCH_WORD_DATA 0x03 #define SCH_BLOCK_DATA 0x05 -static unsigned short sch_smba; -static struct i2c_adapter sch_adapter; +struct sch_i2c { + struct i2c_adapter adapter; + void __iomem *smba; +}; + static int backbone_speed = 33000; /* backbone speed in kHz */ -module_param(backbone_speed, int, S_IRUSR | S_IWUSR); +module_param(backbone_speed, int, 0600); MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)"); -/* - * Start the i2c transaction -- the i2c_access will prepare the transaction - * and this function will execute it. - * return 0 for success and others for failure. 
- */ -static int sch_transaction(void) +static inline u8 sch_io_rd8(struct sch_i2c *priv, unsigned int offset) { - int temp; - int result = 0; - int retries = 0; + return ioread8(priv->smba + offset); +} - dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, " - "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), - inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), - inb(SMBHSTDAT1)); +static inline void sch_io_wr8(struct sch_i2c *priv, unsigned int offset, u8 value) +{ + iowrite8(value, priv->smba + offset); +} + +static inline u16 sch_io_rd16(struct sch_i2c *priv, unsigned int offset) +{ + return ioread16(priv->smba + offset); +} + +static inline void sch_io_wr16(struct sch_i2c *priv, unsigned int offset, u16 value) +{ + iowrite16(value, priv->smba + offset); +} + +/** + * sch_transaction - Start the i2c transaction + * @adap: the i2c adapter pointer + * + * The sch_access() will prepare the transaction and + * this function will execute it. + * + * Return: 0 for success and others for failure. + */ +static int sch_transaction(struct i2c_adapter *adap) +{ + struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter); + int temp; + int rc; + + dev_dbg(&adap->dev, + "Transaction (pre): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", + sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD), + sch_io_rd8(priv, SMBHSTADD), + sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ - temp = inb(SMBHSTSTS) & 0x0f; + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp) { /* Can not be busy since we checked it in sch_access */ - if (temp & 0x01) { - dev_dbg(&sch_adapter.dev, "Completion (%02x). " - "Clear...\n", temp); - } - if (temp & 0x06) { - dev_dbg(&sch_adapter.dev, "SMBus error (%02x). " - "Resetting...\n", temp); - } - outb(temp, SMBHSTSTS); - temp = inb(SMBHSTSTS) & 0x0f; + if (temp & 0x01) + dev_dbg(&adap->dev, "Completion (%02x). Clear...\n", temp); + if (temp & 0x06) + dev_dbg(&adap->dev, "SMBus error (%02x). Resetting...\n", temp); + sch_io_wr8(priv, SMBHSTSTS, temp); + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp) { - dev_err(&sch_adapter.dev, - "SMBus is not ready: (%02x)\n", temp); + dev_err(&adap->dev, "SMBus is not ready: (%02x)\n", temp); return -EAGAIN; } } - /* start the transaction by setting bit 4 */ - outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT); - - do { - usleep_range(100, 200); - temp = inb(SMBHSTSTS) & 0x0f; - } while ((temp & 0x08) && (retries++ < MAX_RETRIES)); + /* Start the transaction by setting bit 4 */ + temp = sch_io_rd8(priv, SMBHSTCNT); + temp |= 0x10; + sch_io_wr8(priv, SMBHSTCNT, temp); + rc = read_poll_timeout(sch_io_rd8, temp, !(temp & 0x08), 200, 500000, true, priv, SMBHSTSTS); /* If the SMBus is still busy, we give up */ - if (retries > MAX_RETRIES) { - dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); - result = -ETIMEDOUT; + if (rc) { + dev_err(&adap->dev, "SMBus Timeout!\n"); } else if (temp & 0x04) { - result = -EIO; - dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " - "locked until next hard reset. (sorry!)\n"); + rc = -EIO; + dev_dbg(&adap->dev, "Bus collision! SMBus may be locked until next hard reset. 
(sorry!)\n"); /* Clock stops and target is stuck in mid-transmission */ } else if (temp & 0x02) { - result = -EIO; - dev_err(&sch_adapter.dev, "Error: no response!\n"); + rc = -EIO; + dev_err(&adap->dev, "Error: no response!\n"); } else if (temp & 0x01) { - dev_dbg(&sch_adapter.dev, "Post complete!\n"); - outb(temp, SMBHSTSTS); - temp = inb(SMBHSTSTS) & 0x07; + dev_dbg(&adap->dev, "Post complete!\n"); + sch_io_wr8(priv, SMBHSTSTS, temp & 0x0f); + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x07; if (temp & 0x06) { /* Completion clear failed */ - dev_dbg(&sch_adapter.dev, "Failed reset at end of " - "transaction (%02x), Bus error!\n", temp); + dev_dbg(&adap->dev, + "Failed reset at end of transaction (%02x), Bus error!\n", temp); } } else { - result = -ENXIO; - dev_dbg(&sch_adapter.dev, "No such address.\n"); + rc = -ENXIO; + dev_dbg(&adap->dev, "No such address.\n"); } - dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, " - "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT), - inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0), - inb(SMBHSTDAT1)); - return result; + dev_dbg(&adap->dev, "Transaction (post): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n", + sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD), + sch_io_rd8(priv, SMBHSTADD), + sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1)); + return rc; } -/* - * This is the main access entry for i2c-sch access - * adap is i2c_adapter pointer, addr is the i2c device bus address, read_write - * (0 for read and 1 for write), size is i2c transaction type and data is the - * union of transaction for data to be transferred or data read from bus. - * return 0 for success and others for failure. +/** + * sch_access - the main access entry for i2c-sch access + * @adap: the i2c adapter pointer + * @addr: the i2c device bus address + * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC) + * @read_write: 0 for read and 1 for write + * @command: Byte interpreted by slave, for protocols which use such bytes + * @size: the i2c transaction type + * @data: the union of transaction for data to be transferred or data read from bus + * + * Return: 0 for success and others for failure. */ static s32 sch_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { + struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter); int i, len, temp, rc; /* Make sure the SMBus host is not busy */ - temp = inb(SMBHSTSTS) & 0x0f; + temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f; if (temp & 0x08) { - dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp); + dev_dbg(&adap->dev, "SMBus busy (%02x)\n", temp); return -EAGAIN; } - temp = inw(SMBHSTCLK); + temp = sch_io_rd16(priv, SMBHSTCLK); if (!temp) { /* * We can't determine if we have 33 or 25 MHz clock for @@ -154,50 +177,48 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, * 100 kHz. If we actually run at 25 MHz the bus will be * run ~75 kHz instead which should do no harm. */ - dev_notice(&sch_adapter.dev, - "Clock divider uninitialized. Setting defaults\n"); - outw(backbone_speed / (4 * 100), SMBHSTCLK); + dev_notice(&adap->dev, "Clock divider uninitialized. 
Setting defaults\n"); + sch_io_wr16(priv, SMBHSTCLK, backbone_speed / (4 * 100)); } - dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size, - (read_write)?"READ":"WRITE"); + dev_dbg(&adap->dev, "access size: %d %s\n", size, str_read_write(read_write)); switch (size) { case I2C_SMBUS_QUICK: - outb((addr << 1) | read_write, SMBHSTADD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); size = SCH_QUICK; break; case I2C_SMBUS_BYTE: - outb((addr << 1) | read_write, SMBHSTADD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); if (read_write == I2C_SMBUS_WRITE) - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTCMD, command); size = SCH_BYTE; break; case I2C_SMBUS_BYTE_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) - outb(data->byte, SMBHSTDAT0); + sch_io_wr8(priv, SMBHSTDAT0, data->byte); size = SCH_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) { - outb(data->word & 0xff, SMBHSTDAT0); - outb((data->word & 0xff00) >> 8, SMBHSTDAT1); + sch_io_wr8(priv, SMBHSTDAT0, data->word >> 0); + sch_io_wr8(priv, SMBHSTDAT1, data->word >> 8); } size = SCH_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: - outb((addr << 1) | read_write, SMBHSTADD); - outb(command, SMBHSTCMD); + sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write); + sch_io_wr8(priv, SMBHSTCMD, command); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; - outb(len, SMBHSTDAT0); + sch_io_wr8(priv, SMBHSTDAT0, len); for (i = 1; i <= len; i++) - outb(data->block[i], SMBBLKDAT+i-1); + sch_io_wr8(priv, SMBBLKDAT + i - 1, data->block[i]); } size = SCH_BLOCK_DATA; break; @@ -205,10 +226,13 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } - dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT); - outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT); + dev_dbg(&adap->dev, "write size %d to 0x%04x\n", size, SMBHSTCNT); - rc = sch_transaction(); + temp = sch_io_rd8(priv, SMBHSTCNT); + temp = (temp & 0xb0) | (size & 0x7); + sch_io_wr8(priv, SMBHSTCNT, temp); + + rc = sch_transaction(adap); if (rc) /* Error in transaction */ return rc; @@ -218,17 +242,18 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, switch (size) { case SCH_BYTE: case SCH_BYTE_DATA: - data->byte = inb(SMBHSTDAT0); + data->byte = sch_io_rd8(priv, SMBHSTDAT0); break; case SCH_WORD_DATA: - data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8); + data->word = (sch_io_rd8(priv, SMBHSTDAT0) << 0) + + (sch_io_rd8(priv, SMBHSTDAT1) << 8); break; case SCH_BLOCK_DATA: - data->block[0] = inb(SMBHSTDAT0); + data->block[0] = sch_io_rd8(priv, SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; for (i = 1; i <= data->block[0]; i++) - data->block[i] = inb(SMBBLKDAT+i-1); + data->block[i] = sch_io_rd8(priv, SMBBLKDAT + i - 1); break; } return 0; @@ -246,51 +271,34 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = sch_func, }; -static struct i2c_adapter sch_adapter = { - .owner = THIS_MODULE, - .class = I2C_CLASS_HWMON, - .algo = &smbus_algorithm, -}; - -static int 
smbus_sch_probe(struct platform_device *dev) +static int smbus_sch_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + struct sch_i2c *priv; struct resource *res; - int retval; - res = platform_get_resource(dev, IORESOURCE_IO, 0); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) return -EBUSY; - if (!devm_request_region(&dev->dev, res->start, resource_size(res), - dev->name)) { - dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", - sch_smba); - return -EBUSY; - } + priv->smba = devm_ioport_map(dev, res->start, resource_size(res)); + if (!priv->smba) + return dev_err_probe(dev, -EBUSY, "SMBus region %pR already in use!\n", res); - sch_smba = res->start; + /* Set up the sysfs linkage to our parent device */ + priv->adapter.dev.parent = dev; + priv->adapter.owner = THIS_MODULE, + priv->adapter.class = I2C_CLASS_HWMON, + priv->adapter.algo = &smbus_algorithm, - dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba); + snprintf(priv->adapter.name, sizeof(priv->adapter.name), + "SMBus SCH adapter at %04x", (unsigned short)res->start); - /* set up the sysfs linkage to our parent device */ - sch_adapter.dev.parent = &dev->dev; - - snprintf(sch_adapter.name, sizeof(sch_adapter.name), - "SMBus SCH adapter at %04x", sch_smba); - - retval = i2c_add_adapter(&sch_adapter); - if (retval) - sch_smba = 0; - - return retval; -} - -static void smbus_sch_remove(struct platform_device *pdev) -{ - if (sch_smba) { - i2c_del_adapter(&sch_adapter); - sch_smba = 0; - } + return devm_i2c_add_adapter(dev, &priv->adapter); } static struct platform_driver smbus_sch_driver = { @@ -298,7 +306,6 @@ static struct platform_driver smbus_sch_driver = { .name = "isch_smbus", }, .probe = smbus_sch_probe, - .remove_new = smbus_sch_remove, }; module_platform_driver(smbus_sch_driver); diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index 92cc5b091137..664a5471d933 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -847,7 +847,7 @@ static void jz4780_i2c_remove(struct platform_device *pdev) static struct platform_driver jz4780_i2c_driver = { .probe = jz4780_i2c_probe, - .remove_new = jz4780_i2c_remove, + .remove = jz4780_i2c_remove, .driver = { .name = "jz4780-i2c", .of_match_table = jz4780_i2c_of_matches, diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index eb66942e0b7d..212196af68ba 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c @@ -385,7 +385,7 @@ static struct platform_driver kempld_i2c_driver = { .pm = pm_sleep_ptr(&kempld_i2c_pm_ops), }, .probe = kempld_i2c_probe, - .remove_new = kempld_i2c_remove, + .remove = kempld_i2c_remove, }; module_platform_driver(kempld_i2c_driver); diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 9fb33cbf7419..6943a0de860a 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -462,7 +462,7 @@ MODULE_DEVICE_TABLE(of, lpc2k_i2c_match); static struct platform_driver i2c_lpc2k_driver = { .probe = i2c_lpc2k_probe, - .remove_new = i2c_lpc2k_remove, + .remove = i2c_lpc2k_remove, .driver = { .name = "lpc2k-i2c", .pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index c7b203cc4434..e1d69537353b 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -565,7 +565,7 @@ MODULE_DEVICE_TABLE(of, 
meson_i2c_match); static struct platform_driver meson_i2c_driver = { .probe = meson_i2c_probe, - .remove_new = meson_i2c_remove, + .remove = meson_i2c_remove, .driver = { .name = "meson-i2c", .of_match_table = meson_i2c_match, diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c index 0b0a1c4d17ca..d1543e7d8380 100644 --- a/drivers/i2c/busses/i2c-microchip-corei2c.c +++ b/drivers/i2c/busses/i2c-microchip-corei2c.c @@ -462,7 +462,7 @@ MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match); static struct platform_driver mchp_corei2c_driver = { .probe = mchp_corei2c_probe, - .remove_new = mchp_corei2c_remove, + .remove = mchp_corei2c_remove, .driver = { .name = "microchip-corei2c", .of_match_table = mchp_corei2c_of_match, diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index b3a73921ab69..21f67f3b65b6 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -2456,7 +2456,7 @@ static void mlxbf_i2c_remove(struct platform_device *pdev) static struct platform_driver mlxbf_i2c_driver = { .probe = mlxbf_i2c_probe, - .remove_new = mlxbf_i2c_remove, + .remove = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 8223f6d29eb3..07d3cadbf510 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -591,7 +591,7 @@ static void mlxcpld_i2c_remove(struct platform_device *pdev) static struct platform_driver mlxcpld_i2c_driver = { .probe = mlxcpld_i2c_probe, - .remove_new = mlxcpld_i2c_remove, + .remove = mlxcpld_i2c_remove, .driver = { .name = MLXCPLD_I2C_DEVICE_NAME, }, diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 236d6b8ba867..28c5c5c1fb7a 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -938,7 +938,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match); /* Structure for a device driver */ static struct platform_driver mpc_i2c_driver = { .probe = fsl_i2c_probe, - .remove_new = fsl_i2c_remove, + .remove = fsl_i2c_remove, .driver = { .name = "mpc-i2c", .of_match_table = mpc_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index e0ba653dec2d..5bd342047d59 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1550,7 +1550,7 @@ static const struct dev_pm_ops mtk_i2c_pm = { static struct platform_driver mtk_i2c_driver = { .probe = mtk_i2c_probe, - .remove_new = mtk_i2c_remove, + .remove = mtk_i2c_remove, .driver = { .name = I2C_DRV_NAME, .pm = pm_sleep_ptr(&mtk_i2c_pm), diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c index 23d417ff5e71..2103f21f9ddd 100644 --- a/drivers/i2c/busses/i2c-mt7621.c +++ b/drivers/i2c/busses/i2c-mt7621.c @@ -331,7 +331,7 @@ static void mtk_i2c_remove(struct platform_device *pdev) static struct platform_driver mtk_i2c_driver = { .probe = mtk_i2c_probe, - .remove_new = mtk_i2c_remove, + .remove = mtk_i2c_remove, .driver = { .name = "i2c-mt7621", .of_match_table = i2c_mtk_dt_ids, diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 29f94efedf60..874309580c33 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -1104,7 +1104,7 @@ static const struct dev_pm_ops mv64xxx_i2c_pm_ops = { static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, - .remove_new = mv64xxx_i2c_remove, + 
.remove = mv64xxx_i2c_remove, .driver = { .name = MV64XXX_I2C_CTLR_NAME, .pm = &mv64xxx_i2c_pm_ops, diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 36def0a9c95c..ad62d56b2186 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -881,7 +881,7 @@ static struct platform_driver mxs_i2c_driver = { .of_match_table = mxs_i2c_dt_ids, }, .probe = mxs_i2c_probe, - .remove_new = mxs_i2c_remove, + .remove = mxs_i2c_remove, }; static int __init mxs_i2c_init(void) diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c deleted file mode 100644 index 69a71bc9830d..000000000000 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c +++ /dev/null @@ -1,240 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard - * - * Copyright (C) 2008 Jean Delvare - */ - -/* - * We select the channels by sending commands to the Philips - * PCA9556 chip at I2C address 0x18. The main adapter is used for - * the non-multiplexed part of the bus, and 4 virtual adapters - * are defined for the multiplexed addresses: 0x50-0x53 (memory - * module EEPROM) located on channels 1-4. We define one virtual - * adapter per CPU, which corresponds to one multiplexed channel: - * CPU0: virtual adapter 1, channel 1 - * CPU1: virtual adapter 2, channel 2 - * CPU2: virtual adapter 3, channel 3 - * CPU3: virtual adapter 4, channel 4 - */ - -#include -#include -#include -#include -#include -#include - -extern struct i2c_adapter *nforce2_smbus; - -static struct i2c_adapter *s4985_adapter; -static struct i2c_algorithm *s4985_algo; - -/* Wrapper access functions for multiplexed SMBus */ -static DEFINE_MUTEX(nforce2_lock); - -static s32 nforce2_access_virt0(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - int error; - - /* We exclude the multiplexed addresses */ - if ((addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 - || addr == 0x18) - return -ENXIO; - - mutex_lock(&nforce2_lock); - error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - mutex_unlock(&nforce2_lock); - - return error; -} - -/* We remember the last used channels combination so as to only switch - channels when it is really needed. This greatly reduces the SMBus - overhead, but also assumes that nobody will be writing to the PCA9556 - in our back. 
*/ -static u8 last_channels; - -static inline s32 nforce2_access_channel(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data, - u8 channels) -{ - int error; - - /* We exclude the non-multiplexed addresses */ - if ((addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) - return -ENXIO; - - mutex_lock(&nforce2_lock); - if (last_channels != channels) { - union i2c_smbus_data mplxdata; - mplxdata.byte = channels; - - error = nforce2_smbus->algo->smbus_xfer(adap, 0x18, 0, - I2C_SMBUS_WRITE, 0x01, - I2C_SMBUS_BYTE_DATA, - &mplxdata); - if (error) - goto UNLOCK; - last_channels = channels; - } - error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write, - command, size, data); - -UNLOCK: - mutex_unlock(&nforce2_lock); - return error; -} - -static s32 nforce2_access_virt1(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU0: channel 1 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x02); -} - -static s32 nforce2_access_virt2(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU1: channel 2 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x04); -} - -static s32 nforce2_access_virt3(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU2: channel 3 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x08); -} - -static s32 nforce2_access_virt4(struct i2c_adapter *adap, u16 addr, - unsigned short flags, char read_write, - u8 command, int size, - union i2c_smbus_data *data) -{ - /* CPU3: channel 4 enabled */ - return nforce2_access_channel(adap, addr, flags, read_write, command, - size, data, 0x10); -} - -static int __init nforce2_s4985_init(void) -{ - int i, error; - union i2c_smbus_data ioconfig; - - if (!nforce2_smbus) - return -ENODEV; - - /* Configure the PCA9556 multiplexer */ - ioconfig.byte = 0x00; /* All I/O to output mode */ - error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR0; - } - - /* Unregister physical bus */ - i2c_del_adapter(nforce2_smbus); - - printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n"); - /* Define the 5 virtual adapters and algorithms structures */ - s4985_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL); - if (!s4985_adapter) { - error = -ENOMEM; - goto ERROR1; - } - s4985_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL); - if (!s4985_algo) { - error = -ENOMEM; - goto ERROR2; - } - - /* Fill in the new structures */ - s4985_algo[0] = *(nforce2_smbus->algo); - s4985_algo[0].smbus_xfer = nforce2_access_virt0; - s4985_adapter[0] = *nforce2_smbus; - s4985_adapter[0].algo = s4985_algo; - s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent; - for (i = 1; i < 5; i++) { - s4985_algo[i] = *(nforce2_smbus->algo); - s4985_adapter[i] = *nforce2_smbus; - snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name), - "SMBus nForce2 adapter (CPU%d)", i - 1); - s4985_adapter[i].algo = s4985_algo + i; - s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent; - } - s4985_algo[1].smbus_xfer = 
nforce2_access_virt1; - s4985_algo[2].smbus_xfer = nforce2_access_virt2; - s4985_algo[3].smbus_xfer = nforce2_access_virt3; - s4985_algo[4].smbus_xfer = nforce2_access_virt4; - - /* Register virtual adapters */ - for (i = 0; i < 5; i++) { - error = i2c_add_adapter(s4985_adapter + i); - if (error) { - printk(KERN_ERR "i2c-nforce2-s4985: " - "Virtual adapter %d registration " - "failed, module not inserted\n", i); - for (i--; i >= 0; i--) - i2c_del_adapter(s4985_adapter + i); - goto ERROR3; - } - } - - return 0; - -ERROR3: - kfree(s4985_algo); - s4985_algo = NULL; -ERROR2: - kfree(s4985_adapter); - s4985_adapter = NULL; -ERROR1: - /* Restore physical bus */ - i2c_add_adapter(nforce2_smbus); -ERROR0: - return error; -} - -static void __exit nforce2_s4985_exit(void) -{ - if (s4985_adapter) { - int i; - - for (i = 0; i < 5; i++) - i2c_del_adapter(s4985_adapter+i); - kfree(s4985_adapter); - s4985_adapter = NULL; - } - kfree(s4985_algo); - s4985_algo = NULL; - - /* Restore physical bus */ - if (i2c_add_adapter(nforce2_smbus)) - printk(KERN_ERR "i2c-nforce2-s4985: " - "Physical bus restoration failed\n"); -} - -MODULE_AUTHOR("Jean Delvare "); -MODULE_DESCRIPTION("S4985 SMBus multiplexing"); -MODULE_LICENSE("GPL"); - -module_init(nforce2_s4985_init); -module_exit(nforce2_s4985_exit); diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index fab662e6bc08..d58a308582e4 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -117,20 +117,6 @@ static const struct dmi_system_id nforce2_dmi_blacklist2[] = { static struct pci_driver nforce2_driver; -/* For multiplexing support, we need a global reference to the 1st - SMBus channel */ -#if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985) -struct i2c_adapter *nforce2_smbus; -EXPORT_SYMBOL_GPL(nforce2_smbus); - -static void nforce2_set_reference(struct i2c_adapter *adap) -{ - nforce2_smbus = adap; -} -#else -static inline void nforce2_set_reference(struct i2c_adapter *adap) { } -#endif - static void nforce2_abort(struct i2c_adapter *adap) { struct nforce2_smbus *smbus = adap->algo_data; @@ -411,7 +397,6 @@ static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } - nforce2_set_reference(&smbuses[0].adapter); return 0; } @@ -420,7 +405,6 @@ static void nforce2_remove(struct pci_dev *dev) { struct nforce2_smbus *smbuses = pci_get_drvdata(dev); - nforce2_set_reference(NULL); if (smbuses[0].base) { i2c_del_adapter(&smbuses[0].adapter); release_region(smbuses[0].base, smbuses[0].size); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index bbcb4d6668ce..482a0074d448 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -334,6 +334,7 @@ struct npcm_i2c { u64 nack_cnt; u64 timeout_cnt; u64 tx_complete_cnt; + bool ber_state; /* Indicate the bus error state */ }; static inline void npcm_i2c_select_bank(struct npcm_i2c *bus, @@ -1521,6 +1522,7 @@ static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus) if (npcm_i2c_is_master(bus)) { npcm_i2c_master_abort(bus); } else { + bus->ber_state = true; npcm_i2c_clear_master_status(bus); /* Clear BB (BUS BUSY) bit */ @@ -1628,13 +1630,10 @@ static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst) npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0)); /* SDA interrupt, after start\restart */ } else { - if (NPCM_I2CST_XMIT & i2cst) { - bus->operation = I2C_WRITE_OPER; + if (bus->operation == I2C_WRITE_OPER) npcm_i2c_irq_master_handler_write(bus); - } else { - bus->operation 
= I2C_READ_OPER; + else if (bus->operation == I2C_READ_OPER) npcm_i2c_irq_master_handler_read(bus); - } } } @@ -1702,6 +1701,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck", bus->num, bus->dest_addr); npcm_i2c_reset(bus); + bus->ber_state = false; return 0; } @@ -1766,6 +1766,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) if (bus->rec_succ_cnt < ULLONG_MAX) bus->rec_succ_cnt++; } + bus->ber_state = false; return status; } @@ -2161,7 +2162,16 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } while (time_is_after_jiffies(time_left) && bus_busy); - if (bus_busy) { + /* + * Check the BER (bus error) state, when ber_state is true, it means that the module + * detects the bus error which is caused by some factor like that the electricity + * noise occurs on the bus. Under this condition, the module is reset and the bus + * gets recovered. + * + * While ber_state is false, the module reset and bus recovery also get done as the + * bus is busy. + */ + if (bus_busy || bus->ber_state) { iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); npcm_i2c_reset(bus); i2c_recover_bus(adap); @@ -2363,7 +2373,7 @@ MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table); static struct platform_driver npcm_i2c_bus_driver = { .probe = npcm_i2c_probe_bus, - .remove_new = npcm_i2c_remove_bus, + .remove = npcm_i2c_remove_bus, .driver = { .name = "nuvoton-i2c", .of_match_table = npcm_i2c_bus_of_table, diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 482b37c8a129..0f67e57cdeff 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -769,7 +769,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm, static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, - .remove_new = ocores_i2c_remove, + .remove = ocores_i2c_remove, .driver = { .name = "ocores-i2c", .of_match_table = ocores_i2c_match, diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c index dc6dff95c68c..edfca7b20f29 100644 --- a/drivers/i2c/busses/i2c-octeon-platdrv.c +++ b/drivers/i2c/busses/i2c-octeon-platdrv.c @@ -269,7 +269,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match); static struct platform_driver octeon_i2c_driver = { .probe = octeon_i2c_probe, - .remove_new = octeon_i2c_remove, + .remove = octeon_i2c_remove, .driver = { .name = DRV_NAME, .of_match_table = octeon_i2c_match, diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1d9ad25c89ae..92faf03d64cf 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1605,7 +1605,7 @@ static const struct dev_pm_ops omap_i2c_pm_ops = { static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, - .remove_new = omap_i2c_remove, + .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", .pm = pm_ptr(&omap_i2c_pm_ops), diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index d9dd71cf37fd..c9b62892397a 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, i2c_opal_of_match); static struct platform_driver i2c_opal_driver = { .probe = i2c_opal_probe, - .remove_new = i2c_opal_remove, + .remove = i2c_opal_remove, .driver = { .name = "i2c-opal", .of_match_table = i2c_opal_of_match, diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c index 5fbfb9b41744..a486a37d3863 100644 
--- a/drivers/i2c/busses/i2c-pasemi-platform.c +++ b/drivers/i2c/busses/i2c-pasemi-platform.c @@ -104,7 +104,7 @@ static struct platform_driver pasemi_platform_i2c_driver = { .of_match_table = pasemi_platform_i2c_of_match, }, .probe = pasemi_platform_i2c_probe, - .remove_new = pasemi_platform_i2c_remove, + .remove = pasemi_platform_i2c_remove, }; module_platform_driver(pasemi_platform_i2c_driver); diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index b8d5480c54f6..87da8241b927 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table); static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, - .remove_new = i2c_pca_pf_remove, + .remove = i2c_pca_pf_remove, .driver = { .name = "i2c-pca-platform", .of_match_table = of_match_ptr(i2c_pca_of_match_table), diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index febbd9950d8f..9402fa3811c5 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -35,20 +35,7 @@ #include #include - -/* PIIX4 SMBus address offsets */ -#define SMBHSTSTS (0 + piix4_smba) -#define SMBHSLVSTS (1 + piix4_smba) -#define SMBHSTCNT (2 + piix4_smba) -#define SMBHSTCMD (3 + piix4_smba) -#define SMBHSTADD (4 + piix4_smba) -#define SMBHSTDAT0 (5 + piix4_smba) -#define SMBHSTDAT1 (6 + piix4_smba) -#define SMBBLKDAT (7 + piix4_smba) -#define SMBSLVCNT (8 + piix4_smba) -#define SMBSHDWCMD (9 + piix4_smba) -#define SMBSLVEVT (0xA + piix4_smba) -#define SMBSLVDAT (0xC + piix4_smba) +#include "i2c-piix4.h" /* count for request_region */ #define SMBIOSIZE 9 @@ -70,7 +57,6 @@ #define PIIX4_BYTE 0x04 #define PIIX4_BYTE_DATA 0x08 #define PIIX4_WORD_DATA 0x0C -#define PIIX4_BLOCK_DATA 0x14 /* Multi-port constants */ #define PIIX4_MAX_ADAPTERS 4 @@ -101,6 +87,7 @@ #define SB800_PIIX4_FCH_PM_ADDR 0xFED80300 #define SB800_PIIX4_FCH_PM_SIZE 8 +#define SB800_ASF_ACPI_PATH "\\_SB.ASFC" /* insmod parameters */ @@ -160,11 +147,6 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { }; static const char *piix4_aux_port_name_sb800 = " port 1"; -struct sb800_mmio_cfg { - void __iomem *addr; - bool use_mmio; -}; - struct i2c_piix4_adapdata { unsigned short smba; @@ -175,8 +157,7 @@ struct i2c_piix4_adapdata { struct sb800_mmio_cfg mmio_cfg; }; -static int piix4_sb800_region_request(struct device *dev, - struct sb800_mmio_cfg *mmio_cfg) +int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { void __iomem *addr; @@ -214,9 +195,9 @@ static int piix4_sb800_region_request(struct device *dev, return 0; } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_request, PIIX4_SMBUS); -static void piix4_sb800_region_release(struct device *dev, - struct sb800_mmio_cfg *mmio_cfg) +void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg) { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); @@ -227,6 +208,7 @@ static void piix4_sb800_region_release(struct device *dev, release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE); } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_release, PIIX4_SMBUS); static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev) { @@ -536,10 +518,8 @@ static int piix4_setup_aux(struct pci_dev *PIIX4_dev, return piix4_smba; } -static int piix4_transaction(struct i2c_adapter *piix4_adapter) +int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba) { - 
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter); - unsigned short piix4_smba = adapdata->smba; int temp; int result = 0; int timeout = 0; @@ -611,6 +591,7 @@ static int piix4_transaction(struct i2c_adapter *piix4_adapter) inb_p(SMBHSTDAT1)); return result; } +EXPORT_SYMBOL_NS_GPL(piix4_transaction, PIIX4_SMBUS); /* Return negative errno on error. */ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, @@ -675,7 +656,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT); - status = piix4_transaction(adap); + status = piix4_transaction(adap, piix4_smba); if (status) return status; @@ -764,7 +745,7 @@ static void piix4_imc_wakeup(void) release_region(KERNCZ_IMC_IDX, 2); } -static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) +int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) { u8 smba_en_lo, val; @@ -786,6 +767,7 @@ static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg) return (smba_en_lo & piix4_port_mask_sb800); } +EXPORT_SYMBOL_NS_GPL(piix4_sb800_port_sel, PIIX4_SMBUS); /* * Handles access to multiple SMBus ports on the SB800. @@ -1043,6 +1025,9 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; bool is_sb800 = false; + bool is_asf = false; + acpi_status status; + acpi_handle handle; if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && @@ -1105,10 +1090,16 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } } + status = acpi_get_handle(NULL, (acpi_string)SB800_ASF_ACPI_PATH, &handle); + if (ACPI_SUCCESS(status)) + is_asf = true; + if (dev->vendor == PCI_VENDOR_ID_AMD && (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { - retval = piix4_setup_sb800(dev, id, 1); + /* Do not setup AUX port if ASF is enabled */ + if (!is_asf) + retval = piix4_setup_sb800(dev, id, 1); } if (retval > 0) { diff --git a/drivers/i2c/busses/i2c-piix4.h b/drivers/i2c/busses/i2c-piix4.h new file mode 100644 index 000000000000..36bc6ce82a27 --- /dev/null +++ b/drivers/i2c/busses/i2c-piix4.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * PIIX4/SB800 SMBus Interfaces + * + * Copyright (c) 2024, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Authors: Shyam Sundar S K + * Sanket Goswami + */ + +#ifndef I2C_PIIX4_H +#define I2C_PIIX4_H + +#include + +/* PIIX4 SMBus address offsets */ +#define SMBHSTSTS (0x00 + piix4_smba) +#define SMBHSLVSTS (0x01 + piix4_smba) +#define SMBHSTCNT (0x02 + piix4_smba) +#define SMBHSTCMD (0x03 + piix4_smba) +#define SMBHSTADD (0x04 + piix4_smba) +#define SMBHSTDAT0 (0x05 + piix4_smba) +#define SMBHSTDAT1 (0x06 + piix4_smba) +#define SMBBLKDAT (0x07 + piix4_smba) +#define SMBSLVCNT (0x08 + piix4_smba) +#define SMBSHDWCMD (0x09 + piix4_smba) +#define SMBSLVEVT (0x0A + piix4_smba) +#define SMBSLVDAT (0x0C + piix4_smba) + +/* PIIX4 constants */ +#define PIIX4_BLOCK_DATA 0x14 + +struct sb800_mmio_cfg { + void __iomem *addr; + bool use_mmio; +}; + +int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg); +int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba); +int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg); +void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg); + +#endif /* I2C_PIIX4_H */ diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 1dafadda73af..d4d139b97513 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -733,7 +733,7 @@ static struct platform_driver i2c_pnx_driver = { .pm = pm_sleep_ptr(&i2c_pnx_pm), }, .probe = i2c_pnx_probe, - .remove_new = i2c_pnx_remove, + .remove = i2c_pnx_remove, }; static int __init i2c_adap_pnx_init(void) diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index b6b03539f626..9a867c817db0 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -437,7 +437,7 @@ static int i2c_powermac_probe(struct platform_device *dev) static struct platform_driver i2c_powermac_driver = { .probe = i2c_powermac_probe, - .remove_new = i2c_powermac_remove, + .remove = i2c_powermac_remove, .driver = { .name = "i2c-powermac", .bus = &platform_bus_type, diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 4d76e71cdd4b..cb6988482673 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1574,7 +1574,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = { static struct platform_driver i2c_pxa_driver = { .probe = i2c_pxa_probe, - .remove_new = i2c_pxa_remove, + .remove = i2c_pxa_remove, .driver = { .name = "pxa2xx-i2c", .pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops), diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 414882c57d7f..05b73326afd4 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -120,7 +120,6 @@ struct cci_data { unsigned int num_masters; struct i2c_adapter_quirks quirks; u16 queue_size[NUM_QUEUES]; - unsigned long cci_clk_rate; struct hw_params params[3]; }; @@ -523,7 +522,6 @@ static const struct dev_pm_ops qcom_cci_pm = { static int cci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - unsigned long cci_clk_rate = 0; struct device_node *child; struct resource *r; struct cci *cci; @@ -594,22 +592,6 @@ static int cci_probe(struct platform_device *pdev) return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n"); cci->nclocks = ret; - /* Retrieve CCI clock rate */ - for (i = 0; i < cci->nclocks; i++) { - if (!strcmp(cci->clocks[i].id, "cci")) { - cci_clk_rate = clk_get_rate(cci->clocks[i].clk); - break; - } - } - - if (cci_clk_rate != cci->data->cci_clk_rate) { - /* cci clock set by the 
bootloader or via assigned clock rate - * in DT. - */ - dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n", - cci_clk_rate, cci->data->cci_clk_rate); - } - ret = cci_enable_clocks(cci); if (ret < 0) return ret; @@ -699,7 +681,6 @@ static const struct cci_data cci_v1_data = { .max_write_len = 10, .max_read_len = 12, }, - .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, @@ -733,7 +714,6 @@ static const struct cci_data cci_v1_5_data = { .max_write_len = 10, .max_read_len = 12, }, - .cci_clk_rate = 19200000, .params[I2C_MODE_STANDARD] = { .thigh = 78, .tlow = 114, @@ -767,7 +747,6 @@ static const struct cci_data cci_v2_data = { .max_write_len = 11, .max_read_len = 12, }, - .cci_clk_rate = 37500000, .params[I2C_MODE_STANDARD] = { .thigh = 201, .tlow = 174, @@ -826,7 +805,7 @@ MODULE_DEVICE_TABLE(of, cci_dt_match); static struct platform_driver qcom_cci_driver = { .probe = cci_probe, - .remove_new = cci_remove, + .remove = cci_remove, .driver = { .name = "i2c-qcom-cci", .of_match_table = cci_dt_match, diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 212336f724a6..7a22e1f46e60 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -16,6 +16,7 @@ #include #include #include +#include #define SE_I2C_TX_TRANS_LEN 0x26c #define SE_I2C_RX_TRANS_LEN 0x270 @@ -146,22 +147,36 @@ struct geni_i2c_clk_fld { * clk_freq_out = t / t_cycle * source_clock = 19.2 MHz */ -static const struct geni_i2c_clk_fld geni_i2c_clk_map[] = { +static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = { {KHZ(100), 7, 10, 11, 26}, {KHZ(400), 2, 5, 12, 24}, {KHZ(1000), 1, 3, 9, 18}, + {}, +}; + +/* source_clock = 32 MHz */ +static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = { + {KHZ(100), 8, 14, 18, 40}, + {KHZ(400), 4, 3, 11, 20}, + {KHZ(1000), 2, 3, 6, 15}, + {}, }; static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c) { - int i; - const struct geni_i2c_clk_fld *itr = geni_i2c_clk_map; + const struct geni_i2c_clk_fld *itr; - for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) { + if (clk_get_rate(gi2c->se.clk) == 32 * HZ_PER_MHZ) + itr = geni_i2c_clk_map_32mhz; + else + itr = geni_i2c_clk_map_19p2mhz; + + while (itr->clk_freq_out != 0) { if (itr->clk_freq_out == gi2c->clk_freq_out) { gi2c->clk_fld = itr; return 0; } + itr++; } return -EINVAL; } @@ -818,6 +833,8 @@ static int geni_i2c_probe(struct platform_device *pdev) init_completion(&gi2c->done); spin_lock_init(&gi2c->lock); platform_set_drvdata(pdev, gi2c); + + /* Keep interrupts disabled initially to allow for low-power modes */ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN, dev_name(dev), gi2c); if (ret) { @@ -1049,7 +1066,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match); static struct platform_driver geni_i2c_driver = { .probe = geni_i2c_probe, - .remove_new = geni_i2c_remove, + .remove = geni_i2c_remove, .shutdown = geni_i2c_shutdown, .driver = { .name = "geni_i2c", diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index d480162a4d39..86ec391616b0 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -1974,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, qup_i2c_dt_match); static struct platform_driver qup_i2c_driver = { .probe = qup_i2c_probe, - .remove_new = qup_i2c_remove, + .remove = qup_i2c_remove, .driver = { .name = "i2c_qup", .pm = pm_ptr(&qup_i2c_qup_pm_ops), diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 
9267df38c2d0..a7b77d14ee86 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -1271,7 +1271,7 @@ static struct platform_driver rcar_i2c_driver = { .pm = pm_sleep_ptr(&rcar_i2c_pm_ops), }, .probe = rcar_i2c_probe, - .remove_new = rcar_i2c_remove, + .remove = rcar_i2c_remove, }; module_platform_driver(rcar_i2c_driver); diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index c7f3a4c02470..c218f73c3650 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -632,7 +632,7 @@ static const struct of_device_id riic_i2c_dt_ids[] = { static struct platform_driver riic_i2c_driver = { .probe = riic_i2c_probe, - .remove_new = riic_i2c_remove, + .remove = riic_i2c_remove, .driver = { .name = "i2c-riic", .of_match_table = riic_i2c_dt_ids, diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 4ef9bad77b85..d4e9196445c0 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -1398,7 +1398,7 @@ static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume); static struct platform_driver rk3x_i2c_driver = { .probe = rk3x_i2c_probe, - .remove_new = rk3x_i2c_remove, + .remove = rk3x_i2c_remove, .driver = { .name = "rk3x-i2c", .of_match_table = rk3x_i2c_match, diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c new file mode 100644 index 000000000000..e064e8a4a1f0 --- /dev/null +++ b/drivers/i2c/busses/i2c-rtl9300.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include + +enum rtl9300_bus_freq { + RTL9300_I2C_STD_FREQ, + RTL9300_I2C_FAST_FREQ, +}; + +struct rtl9300_i2c; + +struct rtl9300_i2c_chan { + struct i2c_adapter adap; + struct rtl9300_i2c *i2c; + enum rtl9300_bus_freq bus_freq; + u8 sda_pin; +}; + +#define RTL9300_I2C_MUX_NCHAN 8 + +struct rtl9300_i2c { + struct regmap *regmap; + struct device *dev; + struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN]; + u32 reg_base; + u8 sda_pin; + struct mutex lock; +}; + +#define RTL9300_I2C_MST_CTRL1 0x0 +#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8 +#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8) +#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4 +#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4) +#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3) +#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2) +#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1) +#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0) +#define RTL9300_I2C_MST_CTRL2 0x4 +#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15) +#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8 +#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8) +#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4 +#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4) +#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2 +#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2) +#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0 +#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0) +#define RTL9300_I2C_MST_DATA_WORD0 0x8 +#define RTL9300_I2C_MST_DATA_WORD1 0xc +#define RTL9300_I2C_MST_DATA_WORD2 0x10 +#define RTL9300_I2C_MST_DATA_WORD3 0x14 + +#define RTL9300_I2C_MST_GLB_CTRL 0x384 + +static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len) +{ + u32 val, mask; + int ret; + + val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS; + mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK; + + ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, 
val); + if (ret) + return ret; + + val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS; + mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); +} + +static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin) +{ + int ret; + u32 val, mask; + + ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin)); + if (ret) + return ret; + + val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) | + RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; + mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); +} + +static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan, + u16 addr, u16 len) +{ + u32 val, mask; + + val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS; + mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK; + + val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS; + mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK; + + val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS; + mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK; + + mask |= RTL9300_I2C_MST_CTRL2_RD_MODE; + + return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val); +} + +static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len) +{ + u32 vals[4] = {}; + int i, ret; + + if (len > 16) + return -EIO; + + ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, + vals, ARRAY_SIZE(vals)); + if (ret) + return ret; + + for (i = 0; i < len; i++) { + buf[i] = vals[i/4] & 0xff; + vals[i/4] >>= 8; + } + + return 0; +} + +static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len) +{ + u32 vals[4] = {}; + int i; + + if (len > 16) + return -EIO; + + for (i = 0; i < len; i++) { + if (i % 4 == 0) + vals[i/4] = 0; + vals[i/4] <<= 8; + vals[i/4] |= buf[i]; + } + + return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, + vals, ARRAY_SIZE(vals)); +} + +static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data) +{ + return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data); +} + +static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write, + int size, union i2c_smbus_data *data, int len) +{ + u32 val, mask; + int ret; + + val = read_write == I2C_SMBUS_WRITE ? 
RTL9300_I2C_MST_CTRL1_RWOP : 0; + mask = RTL9300_I2C_MST_CTRL1_RWOP; + + val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; + mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG; + + ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, + val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 2000); + if (ret) + return ret; + + if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL) + return -EIO; + + if (read_write == I2C_SMBUS_READ) { + if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) { + ret = regmap_read(i2c->regmap, + i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + if (ret) + return ret; + data->byte = val & 0xff; + } else if (size == I2C_SMBUS_WORD_DATA) { + ret = regmap_read(i2c->regmap, + i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val); + if (ret) + return ret; + data->word = val & 0xffff; + } else { + ret = rtl9300_i2c_read(i2c, &data->block[0], len); + if (ret) + return ret; + } + } + + return 0; +} + +static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, + char read_write, u8 command, int size, + union i2c_smbus_data *data) +{ + struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap); + struct rtl9300_i2c *i2c = chan->i2c; + int len = 0, ret; + + mutex_lock(&i2c->lock); + if (chan->sda_pin != i2c->sda_pin) { + ret = rtl9300_i2c_config_io(i2c, chan->sda_pin); + if (ret) + goto out_unlock; + i2c->sda_pin = chan->sda_pin; + } + + switch (size) { + case I2C_SMBUS_QUICK: + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0); + if (ret) + goto out_unlock; + break; + + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + } else { + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_BYTE_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_writel(i2c, data->byte); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_WORD_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_writel(i2c, data->word); + if (ret) + goto out_unlock; + } + break; + + case I2C_SMBUS_BLOCK_DATA: + ret = rtl9300_i2c_reg_addr_set(i2c, command, 1); + if (ret) + goto out_unlock; + ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0]); + if (ret) + goto out_unlock; + if (read_write == I2C_SMBUS_WRITE) { + ret = rtl9300_i2c_write(i2c, &data->block[1], data->block[0]); + if (ret) + goto out_unlock; + } + len = data->block[0]; + break; + + default: + dev_err(&adap->dev, "Unsupported transaction %d\n", size); + ret = -EOPNOTSUPP; + goto out_unlock; + } + + ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len); + +out_unlock: + mutex_unlock(&i2c->lock); + + return ret; +} + +static u32 rtl9300_i2c_func(struct i2c_adapter *a) +{ + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | 
I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA; +} + +static const struct i2c_algorithm rtl9300_i2c_algo = { + .smbus_xfer = rtl9300_i2c_smbus_xfer, + .functionality = rtl9300_i2c_func, +}; + +static struct i2c_adapter_quirks rtl9300_i2c_quirks = { + .flags = I2C_AQ_NO_CLK_STRETCH, + .max_read_len = 16, + .max_write_len = 16, +}; + +static int rtl9300_i2c_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rtl9300_i2c *i2c; + u32 clock_freq, sda_pin; + int ret, i = 0; + struct fwnode_handle *child; + + i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + i2c->regmap = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(i2c->regmap)) + return PTR_ERR(i2c->regmap); + i2c->dev = dev; + + mutex_init(&i2c->lock); + + ret = device_property_read_u32(dev, "reg", &i2c->reg_base); + if (ret) + return ret; + + platform_set_drvdata(pdev, i2c); + + if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN) + return dev_err_probe(dev, -EINVAL, "Too many channels\n"); + + device_for_each_child_node(dev, child) { + struct rtl9300_i2c_chan *chan = &i2c->chans[i]; + struct i2c_adapter *adap = &chan->adap; + + ret = fwnode_property_read_u32(child, "reg", &sda_pin); + if (ret) + return ret; + + ret = fwnode_property_read_u32(child, "clock-frequency", &clock_freq); + if (ret) + clock_freq = I2C_MAX_STANDARD_MODE_FREQ; + + switch (clock_freq) { + case I2C_MAX_STANDARD_MODE_FREQ: + chan->bus_freq = RTL9300_I2C_STD_FREQ; + break; + + case I2C_MAX_FAST_MODE_FREQ: + chan->bus_freq = RTL9300_I2C_FAST_FREQ; + break; + default: + dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n", + sda_pin, clock_freq); + break; + } + + chan->sda_pin = sda_pin; + chan->i2c = i2c; + adap = &i2c->chans[i].adap; + adap->owner = THIS_MODULE; + adap->algo = &rtl9300_i2c_algo; + adap->quirks = &rtl9300_i2c_quirks; + adap->retries = 3; + adap->dev.parent = dev; + i2c_set_adapdata(adap, chan); + adap->dev.of_node = to_of_node(child); + snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin); + i++; + + ret = devm_i2c_add_adapter(dev, adap); + if (ret) + return ret; + } + i2c->sda_pin = 0xff; + + return 0; +} + +static const struct of_device_id i2c_rtl9300_dt_ids[] = { + { .compatible = "realtek,rtl9301-i2c" }, + { .compatible = "realtek,rtl9302b-i2c" }, + { .compatible = "realtek,rtl9302c-i2c" }, + { .compatible = "realtek,rtl9303-i2c" }, + {} +}; +MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids); + +static struct platform_driver rtl9300_i2c_driver = { + .probe = rtl9300_i2c_probe, + .driver = { + .name = "i2c-rtl9300", + .of_match_table = i2c_rtl9300_dt_ids, + }, +}; + +module_platform_driver(rtl9300_i2c_driver); + +MODULE_DESCRIPTION("RTL9300 I2C controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c index 8380a68538ab..02b76e24a476 100644 --- a/drivers/i2c/busses/i2c-rzv2m.c +++ b/drivers/i2c/busses/i2c-rzv2m.c @@ -536,7 +536,7 @@ static struct platform_driver rzv2m_i2c_driver = { .pm = pm_sleep_ptr(&rzv2m_i2c_pm_ops), }, .probe = rzv2m_i2c_probe, - .remove_new = rzv2m_i2c_remove, + .remove = rzv2m_i2c_remove, }; module_platform_driver(rzv2m_i2c_driver); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 7698d9d59744..0f3cf500df68 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1176,7 +1176,7 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { static struct 
platform_driver s3c24xx_i2c_driver = { .probe = s3c24xx_i2c_probe, - .remove_new = s3c24xx_i2c_remove, + .remove = s3c24xx_i2c_remove, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c-i2c", diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index d7af8e0d7599..10a5146b3aa5 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -411,7 +411,7 @@ static void smbus_cmi_remove(struct platform_device *device) static struct platform_driver smbus_cmi_driver = { .probe = smbus_cmi_probe, - .remove_new = smbus_cmi_remove, + .remove = smbus_cmi_remove, .driver = { .name = "smbus_cmi", .acpi_match_table = acpi_smbus_cmi_ids, diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index 8a043f5fca1e..620f12596763 100644 --- a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -552,7 +552,7 @@ static struct platform_driver sh7760_i2c_drv = { .name = SH7760_I2C_DEVNAME, }, .probe = sh7760_i2c_probe, - .remove_new = sh7760_i2c_remove, + .remove = sh7760_i2c_remove, }; module_platform_driver(sh7760_i2c_drv); diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index f86c29737df1..efe29621b8d7 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -983,7 +983,7 @@ static struct platform_driver sh_mobile_i2c_driver = { .pm = pm_sleep_ptr(&sh_mobile_i2c_pm_ops), }, .probe = sh_mobile_i2c_probe, - .remove_new = sh_mobile_i2c_remove, + .remove = sh_mobile_i2c_remove, }; static int __init sh_mobile_i2c_adap_init(void) diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index 18516bc64e04..d90606048611 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -144,7 +144,7 @@ static struct platform_driver simtec_i2c_driver = { .name = "simtec-i2c", }, .probe = simtec_i2c_probe, - .remove_new = simtec_i2c_remove, + .remove = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 9c45e75b9187..56b2e5c5fb49 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -643,7 +643,7 @@ MODULE_DEVICE_TABLE(of, sprd_i2c_of_match); static struct platform_driver sprd_i2c_driver = { .probe = sprd_i2c_probe, - .remove_new = sprd_i2c_remove, + .remove = sprd_i2c_remove, .driver = { .name = "sprd-i2c", .of_match_table = sprd_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 05b19ede65a0..750fff3d2389 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -893,7 +893,7 @@ static struct platform_driver st_i2c_driver = { .pm = pm_sleep_ptr(&st_i2c_pm), }, .probe = st_i2c_probe, - .remove_new = st_i2c_remove, + .remove = st_i2c_remove, }; module_platform_driver(st_i2c_driver); diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 230fff0c0bf9..b3d56d0aa9d0 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -869,7 +869,7 @@ static struct platform_driver stm32f4_i2c_driver = { .of_match_table = stm32f4_i2c_match, }, .probe = stm32f4_i2c_probe, - .remove_new = stm32f4_i2c_remove, + .remove = stm32f4_i2c_remove, }; module_platform_driver(stm32f4_i2c_driver); diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 0174ead99de6..973a3a8c6d4a 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -2532,7 
+2532,7 @@ static struct platform_driver stm32f7_i2c_driver = { .pm = &stm32f7_i2c_pm_ops, }, .probe = stm32f7_i2c_probe, - .remove_new = stm32f7_i2c_remove, + .remove = stm32f7_i2c_remove, }; module_platform_driver(stm32f7_i2c_driver); diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c index 074eade6c4a4..fb5280b8cf7f 100644 --- a/drivers/i2c/busses/i2c-sun6i-p2wi.c +++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c @@ -319,7 +319,7 @@ static void p2wi_remove(struct platform_device *dev) static struct platform_driver p2wi_driver = { .probe = p2wi_probe, - .remove_new = p2wi_remove, + .remove = p2wi_remove, .driver = { .name = "i2c-sunxi-p2wi", .of_match_table = p2wi_of_match_table, diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index bbb9062669e4..31f8d08e32a4 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -629,7 +629,7 @@ MODULE_DEVICE_TABLE(acpi, synquacer_i2c_acpi_ids); static struct platform_driver synquacer_i2c_driver = { .probe = synquacer_i2c_probe, - .remove_new = synquacer_i2c_remove, + .remove = synquacer_i2c_remove, .driver = { .name = "synquacer_i2c", .of_match_table = of_match_ptr(synquacer_i2c_dt_ids), diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c index dabadbcc6d6a..bb0de6db6391 100644 --- a/drivers/i2c/busses/i2c-tegra-bpmp.c +++ b/drivers/i2c/busses/i2c-tegra-bpmp.c @@ -335,7 +335,7 @@ static struct platform_driver tegra_bpmp_i2c_driver = { .of_match_table = tegra_bpmp_i2c_of_match, }, .probe = tegra_bpmp_i2c_probe, - .remove_new = tegra_bpmp_i2c_remove, + .remove = tegra_bpmp_i2c_remove, }; module_platform_driver(tegra_bpmp_i2c_driver); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 1df5b4204142..87976e99e6d0 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1964,7 +1964,7 @@ MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match); static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, - .remove_new = tegra_i2c_remove, + .remove = tegra_i2c_remove, .driver = { .name = "tegra-i2c", .of_match_table = tegra_i2c_of_match, diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 10a99cd08972..d877f5a1f579 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -615,7 +615,7 @@ MODULE_DEVICE_TABLE(of, uniphier_fi2c_match); static struct platform_driver uniphier_fi2c_drv = { .probe = uniphier_fi2c_probe, - .remove_new = uniphier_fi2c_remove, + .remove = uniphier_fi2c_remove, .driver = { .name = "uniphier-fi2c", .of_match_table = uniphier_fi2c_match, diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index ef5753307469..b95d50d4d7db 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, uniphier_i2c_match); static struct platform_driver uniphier_i2c_drv = { .probe = uniphier_i2c_probe, - .remove_new = uniphier_i2c_remove, + .remove = uniphier_i2c_remove, .driver = { .name = "uniphier-i2c", .of_match_table = uniphier_i2c_match, diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c index 76abfa77e200..a1ab6ef6f071 100644 --- a/drivers/i2c/busses/i2c-versatile.c +++ b/drivers/i2c/busses/i2c-versatile.c @@ -109,7 +109,7 @@ MODULE_DEVICE_TABLE(of, i2c_versatile_match); static struct platform_driver i2c_versatile_driver = { .probe = 
i2c_versatile_probe, - .remove_new = i2c_versatile_remove, + .remove = i2c_versatile_remove, .driver = { .name = "versatile-i2c", .of_match_table = i2c_versatile_match, diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c index 3415683dab91..4eb740faf268 100644 --- a/drivers/i2c/busses/i2c-viai2c-wmt.c +++ b/drivers/i2c/busses/i2c-viai2c-wmt.c @@ -169,7 +169,7 @@ static const struct of_device_id wmt_i2c_dt_ids[] = { static struct platform_driver wmt_i2c_driver = { .probe = wmt_i2c_probe, - .remove_new = wmt_i2c_remove, + .remove = wmt_i2c_remove, .driver = { .name = "wmt-i2c", .of_match_table = wmt_i2c_dt_ids, diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index 2ed4130c0339..503e2f4d6f84 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c @@ -415,7 +415,7 @@ static void vprbrd_i2c_remove(struct platform_device *pdev) static struct platform_driver vprbrd_i2c_driver = { .driver.name = "viperboard-i2c", .probe = vprbrd_i2c_probe, - .remove_new = vprbrd_i2c_remove, + .remove = vprbrd_i2c_remove, }; static int __init vprbrd_i2c_init(void) diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 658396c9eeab..663fe5604dd6 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -581,7 +581,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids); static struct platform_driver xgene_slimpro_i2c_driver = { .probe = xgene_slimpro_i2c_probe, - .remove_new = xgene_slimpro_i2c_remove, + .remove = xgene_slimpro_i2c_remove, .driver = { .name = "xgene-slimpro-i2c", .of_match_table = of_match_ptr(xgene_slimpro_i2c_dt_ids), diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 1d68177241a6..c4d3eb02da09 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -1395,7 +1395,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = { static struct platform_driver xiic_i2c_driver = { .probe = xiic_i2c_probe, - .remove_new = xiic_i2c_remove, + .remove = xiic_i2c_remove, .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(xiic_of_match), diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 08a59a920929..4d5e49b6321b 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -579,7 +579,7 @@ MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids); static struct platform_driver xlp9xx_i2c_driver = { .probe = xlp9xx_i2c_probe, - .remove_new = xlp9xx_i2c_remove, + .remove = xlp9xx_i2c_remove, .driver = { .name = "xlp9xx-i2c", .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids), diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 3648382b885a..4d6abd7e92ce 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -536,7 +536,7 @@ static struct platform_driver scx200_pci_driver = { .name = "cs5535-smb", }, .probe = scx200_probe, - .remove_new = scx200_remove, + .remove = scx200_remove, }; static const struct pci_device_id scx200_isa[] = { diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 7aa6e795d833..d6ef91b888c6 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c @@ -190,7 +190,7 @@ MODULE_DEVICE_TABLE(of, i2c_arbitrator_of_match); static struct platform_driver i2c_arbitrator_driver = { .probe = i2c_arbitrator_probe, - .remove_new = 
i2c_arbitrator_remove, + .remove = i2c_arbitrator_remove, .driver = { .name = "i2c-arb-gpio-challenge", .of_match_table = i2c_arbitrator_of_match, diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 7e2686b606c0..dce18f763a09 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -314,7 +314,7 @@ static struct platform_driver i2c_demux_pinctrl_driver = { .of_match_table = i2c_demux_pinctrl_of_match, }, .probe = i2c_demux_pinctrl_probe, - .remove_new = i2c_demux_pinctrl_remove, + .remove = i2c_demux_pinctrl_remove, }; module_platform_driver(i2c_demux_pinctrl_driver); diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index 944577bb09c1..9b46b84e84fb 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -247,7 +247,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match); static struct platform_driver i2c_mux_gpio_driver = { .probe = i2c_mux_gpio_probe, - .remove_new = i2c_mux_gpio_remove, + .remove = i2c_mux_gpio_remove, .driver = { .name = "i2c-mux-gpio", .of_match_table = i2c_mux_gpio_of_match, diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c index 10d63307b14d..ab8e11661052 100644 --- a/drivers/i2c/muxes/i2c-mux-gpmux.c +++ b/drivers/i2c/muxes/i2c-mux-gpmux.c @@ -152,7 +152,7 @@ static void i2c_mux_remove(struct platform_device *pdev) static struct platform_driver i2c_mux_driver = { .probe = i2c_mux_probe, - .remove_new = i2c_mux_remove, + .remove = i2c_mux_remove, .driver = { .name = "i2c-mux-gpmux", .of_match_table = i2c_mux_of_match, diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c index 3f06aa3331a7..1c2debcf379c 100644 --- a/drivers/i2c/muxes/i2c-mux-mlxcpld.c +++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c @@ -182,7 +182,7 @@ static struct platform_driver mlxcpld_mux_driver = { .name = "i2c-mux-mlxcpld", }, .probe = mlxcpld_mux_probe, - .remove_new = mlxcpld_mux_remove, + .remove = mlxcpld_mux_remove, }; module_platform_driver(mlxcpld_mux_driver); diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c index 8e942470b35f..284ff4afeeac 100644 --- a/drivers/i2c/muxes/i2c-mux-mule.c +++ b/drivers/i2c/muxes/i2c-mux-mule.c @@ -66,8 +66,8 @@ static int mule_i2c_mux_probe(struct platform_device *pdev) priv = i2c_mux_priv(muxc); priv->regmap = dev_get_regmap(mux_dev->parent, NULL); - if (IS_ERR(priv->regmap)) - return dev_err_probe(mux_dev, PTR_ERR(priv->regmap), + if (!priv->regmap) + return dev_err_probe(mux_dev, -ENODEV, "No parent i2c register map\n"); platform_set_drvdata(pdev, muxc); diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 02aaf0781e9c..fc686a350ae8 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -186,7 +186,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = { .of_match_table = i2c_mux_pinctrl_of_match, }, .probe = i2c_mux_pinctrl_probe, - .remove_new = i2c_mux_pinctrl_remove, + .remove = i2c_mux_pinctrl_remove, }; module_platform_driver(i2c_mux_pinctrl_driver); diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index ef765fcd33f5..dfa472d514cc 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c @@ -247,7 +247,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match); static struct platform_driver i2c_mux_reg_driver = { .probe = i2c_mux_reg_probe, - .remove_new = i2c_mux_reg_remove, + .remove 
= i2c_mux_reg_remove, .driver = { .name = "i2c-mux-reg", .of_match_table = of_match_ptr(i2c_mux_reg_of_match), diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 516c1a8e4d56..8c3f7cf55d5f 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -447,6 +447,8 @@ config IIO_ST_ACCEL_SPI_3AXIS config IIO_KX022A tristate + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER config IIO_KX022A_SPI tristate "Kionix KX022A tri-axis digital accelerometer SPI interface" diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index f4e43c3bbf1a..e4fe36768216 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -1218,7 +1218,8 @@ static int bma400_activity_event_en(struct bma400_data *data, static int bma400_tap_event_en(struct bma400_data *data, enum iio_event_direction dir, int state) { - unsigned int mask, field_value; + unsigned int mask; + unsigned int field_value = 0; int ret; /* diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 97ece1a4b7e3..6c4e74420fd2 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -52,6 +52,8 @@ config AD4695 tristate "Analog Device AD4695 ADC Driver" depends on SPI select REGMAP_SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices AD4695 and similar analog to digital converters (ADC). @@ -328,6 +330,8 @@ config AD7923 config AD7944 tristate "Analog Devices AD7944 and similar ADCs driver" depends on SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices AD7944, AD7985, AD7986 ADCs. @@ -1481,6 +1485,8 @@ config TI_ADS8344 config TI_ADS8688 tristate "Texas Instruments ADS8688" depends on SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help If you say yes here you get support for Texas Instruments ADS8684 and and ADS8688 ADC chips @@ -1491,6 +1497,8 @@ config TI_ADS8688 config TI_ADS124S08 tristate "Texas Instruments ADS124S08" depends on SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help If you say yes here you get support for Texas Instruments ADS124S08 and ADS124S06 ADC chips @@ -1525,6 +1533,9 @@ config TI_AM335X_ADC config TI_LMP92064 tristate "Texas Instruments LMP92064 ADC driver" depends on SPI + select REGMAP_SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for the LMP92064 Precision Current and Voltage sensor. 
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index a5d91933f505..b79c48d46ccc 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -637,7 +637,7 @@ static int ad7124_write_raw(struct iio_dev *indio_dev, switch (info) { case IIO_CHAN_INFO_SAMP_FREQ: - if (val2 != 0) { + if (val2 != 0 || val == 0) { ret = -EINVAL; break; } diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index e8bddfb0d07d..fb728570debe 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -75,6 +75,7 @@ #define T_CONVERT_NS 190 /* conversion time */ #define T_CONVERT_0_NS 10 /* 1st conversion start time (oversampling) */ #define T_CONVERT_X_NS 500 /* xth conversion start time (oversampling) */ +#define T_POWERUP_US 5000 /* Power up */ struct ad7380_timing_specs { const unsigned int t_csh_ns; /* CS minimum high time */ @@ -86,6 +87,9 @@ struct ad7380_chip_info { unsigned int num_channels; unsigned int num_simult_channels; bool has_mux; + const char * const *supplies; + unsigned int num_supplies; + bool external_ref_only; const char * const *vcm_supplies; unsigned int num_vcm_supplies; const unsigned long *available_scan_masks; @@ -243,6 +247,10 @@ DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u); DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u); DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u); +static const char * const ad7380_supplies[] = { + "vcc", "vlogic", +}; + static const char * const ad7380_2_channel_vcm_supplies[] = { "aina", "ainb", }; @@ -338,6 +346,8 @@ static const struct ad7380_chip_info ad7380_chip_info = { .channels = ad7380_channels, .num_channels = ARRAY_SIZE(ad7380_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, .timing_specs = &ad7380_timing, }; @@ -347,6 +357,8 @@ static const struct ad7380_chip_info ad7381_chip_info = { .channels = ad7381_channels, .num_channels = ARRAY_SIZE(ad7381_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, .timing_specs = &ad7380_timing, }; @@ -356,6 +368,8 @@ static const struct ad7380_chip_info ad7383_chip_info = { .channels = ad7383_channels, .num_channels = ARRAY_SIZE(ad7383_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .vcm_supplies = ad7380_2_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, @@ -367,6 +381,8 @@ static const struct ad7380_chip_info ad7384_chip_info = { .channels = ad7384_channels, .num_channels = ARRAY_SIZE(ad7384_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .vcm_supplies = ad7380_2_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies), .available_scan_masks = ad7380_2_channel_scan_masks, @@ -378,6 +394,8 @@ static const struct ad7380_chip_info ad7386_chip_info = { .channels = ad7386_channels, .num_channels = ARRAY_SIZE(ad7386_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x2_channel_scan_masks, .timing_specs = &ad7380_timing, @@ -388,6 +406,8 @@ static const struct ad7380_chip_info ad7387_chip_info = { .channels = ad7387_channels, .num_channels = 
ARRAY_SIZE(ad7387_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x2_channel_scan_masks, .timing_specs = &ad7380_timing, @@ -398,6 +418,8 @@ static const struct ad7380_chip_info ad7388_chip_info = { .channels = ad7388_channels, .num_channels = ARRAY_SIZE(ad7388_channels), .num_simult_channels = 2, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x2_channel_scan_masks, .timing_specs = &ad7380_timing, @@ -408,6 +430,9 @@ static const struct ad7380_chip_info ad7380_4_chip_info = { .channels = ad7380_4_channels, .num_channels = ARRAY_SIZE(ad7380_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), + .external_ref_only = true, .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, }; @@ -417,6 +442,8 @@ static const struct ad7380_chip_info ad7381_4_chip_info = { .channels = ad7381_4_channels, .num_channels = ARRAY_SIZE(ad7381_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, }; @@ -426,6 +453,8 @@ static const struct ad7380_chip_info ad7383_4_chip_info = { .channels = ad7383_4_channels, .num_channels = ARRAY_SIZE(ad7383_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .vcm_supplies = ad7380_4_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies), .available_scan_masks = ad7380_4_channel_scan_masks, @@ -437,6 +466,8 @@ static const struct ad7380_chip_info ad7384_4_chip_info = { .channels = ad7384_4_channels, .num_channels = ARRAY_SIZE(ad7384_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .vcm_supplies = ad7380_4_channel_vcm_supplies, .num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies), .available_scan_masks = ad7380_4_channel_scan_masks, @@ -448,6 +479,8 @@ static const struct ad7380_chip_info ad7386_4_chip_info = { .channels = ad7386_4_channels, .num_channels = ARRAY_SIZE(ad7386_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x4_channel_scan_masks, .timing_specs = &ad7380_4_timing, @@ -458,6 +491,8 @@ static const struct ad7380_chip_info ad7387_4_chip_info = { .channels = ad7387_4_channels, .num_channels = ARRAY_SIZE(ad7387_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x4_channel_scan_masks, .timing_specs = &ad7380_4_timing, @@ -468,6 +503,8 @@ static const struct ad7380_chip_info ad7388_4_chip_info = { .channels = ad7388_4_channels, .num_channels = ARRAY_SIZE(ad7388_4_channels), .num_simult_channels = 4, + .supplies = ad7380_supplies, + .num_supplies = ARRAY_SIZE(ad7380_supplies), .has_mux = true, .available_scan_masks = ad7380_2x4_channel_scan_masks, .timing_specs = &ad7380_4_timing, @@ -956,7 +993,7 @@ static const struct iio_info ad7380_info = { .debugfs_reg_access = &ad7380_debugfs_reg_access, }; -static int ad7380_init(struct ad7380_state *st, struct regulator *vref) +static int ad7380_init(struct ad7380_state *st, bool external_ref_en) { int 
ret; @@ -968,13 +1005,13 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref) if (ret < 0) return ret; - /* select internal or external reference voltage */ - ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1, - AD7380_CONFIG1_REFSEL, - FIELD_PREP(AD7380_CONFIG1_REFSEL, - vref ? 1 : 0)); - if (ret < 0) - return ret; + if (external_ref_en) { + /* select external reference voltage */ + ret = regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1, + AD7380_CONFIG1_REFSEL); + if (ret < 0) + return ret; + } /* This is the default value after reset. */ st->oversampling_ratio = 1; @@ -987,16 +1024,11 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref) FIELD_PREP(AD7380_CONFIG2_SDO, 1)); } -static void ad7380_regulator_disable(void *p) -{ - regulator_disable(p); -} - static int ad7380_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct ad7380_state *st; - struct regulator *vref; + bool external_ref_en; int ret, i; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); @@ -1009,36 +1041,38 @@ static int ad7380_probe(struct spi_device *spi) if (!st->chip_info) return dev_err_probe(&spi->dev, -EINVAL, "missing match data\n"); - vref = devm_regulator_get_optional(&spi->dev, "refio"); - if (IS_ERR(vref)) { - if (PTR_ERR(vref) != -ENODEV) - return dev_err_probe(&spi->dev, PTR_ERR(vref), - "Failed to get refio regulator\n"); + ret = devm_regulator_bulk_get_enable(&spi->dev, st->chip_info->num_supplies, + st->chip_info->supplies); - vref = NULL; - } + if (ret) + return dev_err_probe(&spi->dev, ret, + "Failed to enable power supplies\n"); + fsleep(T_POWERUP_US); - /* - * If there is no REFIO supply, then it means that we are using - * the internal 2.5V reference, otherwise REFIO is reference voltage. - */ - if (vref) { - ret = regulator_enable(vref); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&spi->dev, - ad7380_regulator_disable, vref); - if (ret) - return ret; - - ret = regulator_get_voltage(vref); + if (st->chip_info->external_ref_only) { + ret = devm_regulator_get_enable_read_voltage(&spi->dev, + "refin"); if (ret < 0) - return ret; + return dev_err_probe(&spi->dev, ret, + "Failed to get refin regulator\n"); st->vref_mv = ret / 1000; + + /* these chips don't have a register bit for this */ + external_ref_en = false; } else { - st->vref_mv = AD7380_INTERNAL_REF_MV; + /* + * If there is no REFIO supply, then it means that we are using + * the internal reference, otherwise REFIO is reference voltage. + */ + ret = devm_regulator_get_enable_read_voltage(&spi->dev, + "refio"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(&spi->dev, ret, + "Failed to get refio regulator\n"); + + external_ref_en = ret != -ENODEV; + st->vref_mv = external_ref_en ? ret / 1000 : AD7380_INTERNAL_REF_MV; } if (st->chip_info->num_vcm_supplies > ARRAY_SIZE(st->vcm_mv)) @@ -1050,27 +1084,13 @@ static int ad7380_probe(struct spi_device *spi) * input pin. 
*/ for (i = 0; i < st->chip_info->num_vcm_supplies; i++) { - struct regulator *vcm; + const char *vcm = st->chip_info->vcm_supplies[i]; - vcm = devm_regulator_get(&spi->dev, - st->chip_info->vcm_supplies[i]); - if (IS_ERR(vcm)) - return dev_err_probe(&spi->dev, PTR_ERR(vcm), - "Failed to get %s regulator\n", - st->chip_info->vcm_supplies[i]); - - ret = regulator_enable(vcm); - if (ret) - return ret; - - ret = devm_add_action_or_reset(&spi->dev, - ad7380_regulator_disable, vcm); - if (ret) - return ret; - - ret = regulator_get_voltage(vcm); + ret = devm_regulator_get_enable_read_voltage(&spi->dev, vcm); if (ret < 0) - return ret; + return dev_err_probe(&spi->dev, ret, + "Failed to get %s regulator\n", + vcm); st->vcm_mv[i] = ret / 1000; } @@ -1135,7 +1155,7 @@ static int ad7380_probe(struct spi_device *spi) if (ret) return ret; - ret = ad7380_init(st, vref); + ret = ad7380_init(st, external_ref_en); if (ret) return ret; diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig index b54fe01734b0..55eb16b32f6c 100644 --- a/drivers/iio/amplifiers/Kconfig +++ b/drivers/iio/amplifiers/Kconfig @@ -27,6 +27,7 @@ config AD8366 config ADA4250 tristate "Analog Devices ADA4250 Instrumentation Amplifier" depends on SPI + select REGMAP_SPI help Say yes here to build support for Analog Devices ADA4250 SPI Amplifier's support. The driver provides direct access via diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index 678a6adb9a75..6c87223f58d9 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -80,6 +80,8 @@ config ENS160 tristate "ScioSense ENS160 sensor driver" depends on (I2C || SPI) select REGMAP + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER select ENS160_I2C if I2C select ENS160_SPI if SPI help diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index ad8910e6ad59..abb09fefc792 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -32,7 +32,7 @@ static ssize_t _hid_sensor_set_report_latency(struct device *dev, latency = integer * 1000 + fract / 1000; ret = hid_sensor_set_report_latency(attrb, latency); if (ret < 0) - return len; + return ret; attrb->latency_ms = hid_sensor_get_report_latency(attrb); diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 1cfd7e2a622f..9f5d5ebb8653 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -9,6 +9,8 @@ menu "Digital to analog converters" config AD3552R tristate "Analog Devices AD3552R DAC driver" depends on SPI_MASTER + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices AD3552R Digital to Analog Converter. @@ -252,6 +254,8 @@ config AD5764 config AD5766 tristate "Analog Devices AD5766/AD5767 DAC driver" depends on SPI_MASTER + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices AD5766, AD5767 Digital to Analog Converter. @@ -262,6 +266,7 @@ config AD5766 config AD5770R tristate "Analog Devices AD5770R IDAC driver" depends on SPI_MASTER + select REGMAP_SPI help Say yes here to build support for Analog Devices AD5770R Digital to Analog Converter. @@ -353,6 +358,7 @@ config LPC18XX_DAC config LTC1660 tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver" depends on SPI + select REGMAP_SPI help Say yes here to build support for Linear Technology LTC1660 and LTC1665 Digital to Analog Converters. 
@@ -374,7 +380,7 @@ config LTC2632 config LTC2664 tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver" depends on SPI - select REGMAP + select REGMAP_SPI help Say yes here to build support for Analog Devices LTC2664 and LTC2672 converters (DAC). @@ -483,6 +489,7 @@ config STM32_DAC config STM32_DAC_CORE tristate + select REGMAP_MMIO config TI_DAC082S085 tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver" diff --git a/drivers/iio/dac/ltc2664.c b/drivers/iio/dac/ltc2664.c index 5be5345ac5c8..67f14046cf77 100644 --- a/drivers/iio/dac/ltc2664.c +++ b/drivers/iio/dac/ltc2664.c @@ -516,7 +516,7 @@ static int ltc2664_channel_config(struct ltc2664_state *st) const struct ltc2664_chip_info *chip_info = st->chip_info; struct device *dev = &st->spi->dev; u32 reg, tmp[2], mspan; - int ret, span = 0; + int ret; mspan = LTC2664_MSPAN_SOFTSPAN; ret = device_property_read_u32(dev, "adi,manual-span-operation-config", @@ -579,20 +579,21 @@ static int ltc2664_channel_config(struct ltc2664_state *st) ret = fwnode_property_read_u32_array(child, "output-range-microvolt", tmp, ARRAY_SIZE(tmp)); if (!ret && mspan == LTC2664_MSPAN_SOFTSPAN) { - chan->span = ltc2664_set_span(st, tmp[0] / 1000, - tmp[1] / 1000, reg); - if (span < 0) - return dev_err_probe(dev, span, + ret = ltc2664_set_span(st, tmp[0] / 1000, tmp[1] / 1000, reg); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to set span\n"); + chan->span = ret; } ret = fwnode_property_read_u32_array(child, "output-range-microamp", tmp, ARRAY_SIZE(tmp)); if (!ret) { - chan->span = ltc2664_set_span(st, 0, tmp[1] / 1000, reg); - if (span < 0) - return dev_err_probe(dev, span, + ret = ltc2664_set_span(st, 0, tmp[1] / 1000, reg); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to set span\n"); + chan->span = ret; } } diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig index c455be7d4a1c..583cbdf4e8cd 100644 --- a/drivers/iio/frequency/Kconfig +++ b/drivers/iio/frequency/Kconfig @@ -53,6 +53,7 @@ config ADF4371 config ADF4377 tristate "Analog Devices ADF4377 Microwave Wideband Synthesizer" depends on SPI && COMMON_CLK + select REGMAP_SPI help Say yes here to build support for Analog Devices ADF4377 Microwave Wideband Synthesizer. @@ -91,25 +92,26 @@ config ADMV1014 module will be called admv1014. config ADMV4420 - tristate "Analog Devices ADMV4420 K Band Downconverter" - depends on SPI - help - Say yes here to build support for Analog Devices K Band - Downconverter with integrated Fractional-N PLL and VCO. + tristate "Analog Devices ADMV4420 K Band Downconverter" + depends on SPI + select REGMAP_SPI + help + Say yes here to build support for Analog Devices K Band + Downconverter with integrated Fractional-N PLL and VCO. - To compile this driver as a module, choose M here: the - module will be called admv4420. + To compile this driver as a module, choose M here: the + module will be called admv4420. config ADRF6780 - tristate "Analog Devices ADRF6780 Microwave Upconverter" - depends on SPI - depends on COMMON_CLK - help - Say yes here to build support for Analog Devices ADRF6780 - 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter. + tristate "Analog Devices ADRF6780 Microwave Upconverter" + depends on SPI + depends on COMMON_CLK + help + Say yes here to build support for Analog Devices ADRF6780 + 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter. - To compile this driver as a module, choose M here: the - module will be called adrf6780. 
+ To compile this driver as a module, choose M here: the + module will be called adrf6780. endmenu endmenu diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c index beda8d2de53f..e1f3b0d778be 100644 --- a/drivers/iio/imu/bmi323/bmi323_core.c +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -2172,7 +2172,6 @@ int bmi323_core_probe(struct device *dev) } EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323); -#if defined(CONFIG_PM) static int bmi323_core_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); @@ -2199,12 +2198,12 @@ static int bmi323_core_runtime_suspend(struct device *dev) } for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) { - ret = bmi323_read_ext_reg(data, bmi323_reg_savestate[i], - &savestate->reg_settings[i]); + ret = bmi323_read_ext_reg(data, bmi323_ext_reg_savestate[i], + &savestate->ext_reg_settings[i]); if (ret) { dev_err(data->dev, "Error reading bmi323 external reg 0x%x: %d\n", - bmi323_reg_savestate[i], ret); + bmi323_ext_reg_savestate[i], ret); return ret; } } @@ -2232,8 +2231,10 @@ static int bmi323_core_runtime_resume(struct device *dev) * after being reset in the lower power state by runtime-pm. */ ret = bmi323_init(data); - if (!ret) + if (ret) { + dev_err(data->dev, "Device power-on and init failed: %d", ret); return ret; + } /* Register must be cleared before changing an active config */ ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0); @@ -2243,12 +2244,12 @@ static int bmi323_core_runtime_resume(struct device *dev) } for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) { - ret = bmi323_write_ext_reg(data, bmi323_reg_savestate[i], - savestate->reg_settings[i]); + ret = bmi323_write_ext_reg(data, bmi323_ext_reg_savestate[i], + savestate->ext_reg_settings[i]); if (ret) { dev_err(data->dev, "Error writing bmi323 external reg 0x%x: %d\n", - bmi323_reg_savestate[i], ret); + bmi323_ext_reg_savestate[i], ret); return ret; } } @@ -2293,11 +2294,9 @@ static int bmi323_core_runtime_resume(struct device *dev) return iio_device_resume_triggering(indio_dev); } -#endif - const struct dev_pm_ops bmi323_core_pm_ops = { - SET_RUNTIME_PM_OPS(bmi323_core_runtime_suspend, - bmi323_core_runtime_resume, NULL) + RUNTIME_PM_OPS(bmi323_core_runtime_suspend, + bmi323_core_runtime_resume, NULL) }; EXPORT_SYMBOL_NS_GPL(bmi323_core_pm_ops, IIO_BMI323); diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c index 59d7615c0f56..5f131bc1a01e 100644 --- a/drivers/iio/industrialio-gts-helper.c +++ b/drivers/iio/industrialio-gts-helper.c @@ -307,13 +307,15 @@ static int iio_gts_build_avail_scale_table(struct iio_gts *gts) if (ret) goto err_free_out; + for (i = 0; i < gts->num_itime; i++) + kfree(per_time_gains[i]); kfree(per_time_gains); gts->per_time_avail_scale_tables = per_time_scales; return 0; err_free_out: - for (i--; i; i--) { + for (i--; i >= 0; i--) { kfree(per_time_scales[i]); kfree(per_time_gains[i]); } diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 515ff46b5b82..f2f3e414849a 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -335,6 +335,8 @@ config ROHM_BU27008 depends on I2C select REGMAP_I2C select IIO_GTS_HELPER + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Enable support for the ROHM BU27008 color sensor. 
The ROHM BU27008 is a sensor with 5 photodiodes (red, green, diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 887c4b776a86..176e54bb48c3 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -138,6 +138,10 @@ static const struct opt3001_scale opt3001_scales[] = { .val = 20966, .val2 = 400000, }, + { + .val = 41932, + .val2 = 800000, + }, { .val = 83865, .val2 = 600000, diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c index 2e86d310952e..621428885455 100644 --- a/drivers/iio/light/veml6030.c +++ b/drivers/iio/light/veml6030.c @@ -99,9 +99,8 @@ static const char * const period_values[] = { static ssize_t in_illuminance_period_available_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct veml6030_data *data = iio_priv(dev_to_iio_dev(dev)); int ret, reg, x; - struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); - struct veml6030_data *data = iio_priv(indio_dev); ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, ®); if (ret) { @@ -523,7 +522,7 @@ static int veml6030_read_raw(struct iio_dev *indio_dev, } if (mask == IIO_CHAN_INFO_PROCESSED) { *val = (reg * data->cur_resolution) / 10000; - *val2 = (reg * data->cur_resolution) % 10000; + *val2 = (reg * data->cur_resolution) % 10000 * 100; return IIO_VAL_INT_PLUS_MICRO; } *val = reg; @@ -780,7 +779,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev) /* Cache currently active measurement parameters */ data->cur_gain = 3; - data->cur_resolution = 4608; + data->cur_resolution = 5376; data->cur_integration_time = 3; return ret; diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 8eb718f5e50f..f69ac75500f9 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -11,6 +11,8 @@ config AF8133J depends on I2C depends on OF select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Voltafield AF8133J I2C-based 3-axis magnetometer chip. diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index ce369dbb17fc..d2cb8c871f6a 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -19,6 +19,9 @@ config ABP060MG config ROHM_BM1390 tristate "ROHM BM1390GLV-Z pressure sensor driver" depends on I2C + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Support for the ROHM BM1390 pressure sensor. The BM1390GLV-Z can measure pressures ranging from 300 hPa to 1300 hPa with @@ -253,6 +256,7 @@ config MS5637 config SDP500 tristate "Sensirion SDP500 differential pressure sensor I2C driver" depends on I2C + select CRC8 help Say Y here to build support for Sensirion SDP500 differential pressure sensor I2C driver. diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index 31c679074b25..a562a78b7d0d 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig @@ -86,6 +86,8 @@ config LIDAR_LITE_V2 config MB1232 tristate "MaxSonar I2CXL family ultrasonic sensors" depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y to build a driver for the ultrasonic sensors I2CXL of MaxBotix which have an i2c interface. 
It can be used to measure diff --git a/drivers/iio/resolver/Kconfig b/drivers/iio/resolver/Kconfig index 424529d36080..de2dee3832a1 100644 --- a/drivers/iio/resolver/Kconfig +++ b/drivers/iio/resolver/Kconfig @@ -31,6 +31,9 @@ config AD2S1210 depends on SPI depends on COMMON_CLK depends on GPIOLIB || COMPILE_TEST + select REGMAP + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for Analog Devices spi resolver to digital converters, ad2s1210, provides direct access via sysfs. diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index be0743dac3ff..c4cf26f1d149 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -269,6 +269,8 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in) break; #endif } + if (!ret && dev && is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); return ret ? ERR_PTR(ret) : dev; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 39f89a4b8649..7dc8e2ec62cc 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2816,6 +2816,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num, nlh = nlmsg_put(skb, 0, 0, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR), 0, 0); + if (!nlh) + goto err_free; switch (type) { case RDMA_REGISTER_EVENT: diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 128651c01595..1e63f8091748 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -366,7 +366,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, goto done; } } - if (rdev->pacing.dbr_pacing) + if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) bnxt_re_copy_db_pacing_stats(rdev, stats); } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 460f33914825..e66ae9f22c71 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1307,7 +1307,11 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, 0 : BNXT_QPLIB_RESERVED_QP_WRS; entries = bnxt_re_init_depth(entries + diff + 1, uctx); sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); - sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true); + if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) + sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true); + else + sq->max_sw_wqe = sq->max_wqe; + } sq->q_full_delta = diff + 1; /* diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 777068de4bbc..6715c96a3eee 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -188,8 +188,11 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) bnxt_re_set_db_offset(rdev); rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); - if (rc) + if (rc) { + kfree(rdev->chip_ctx); + rdev->chip_ctx = NULL; return rc; + } if (bnxt_qplib_determine_atomics(en_dev->pdev)) ibdev_info(&rdev->ibdev, @@ -531,6 +534,7 @@ static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev) static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev) { struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; + u32 retry_fifo_check = 1000; u32 fifo_occup; /* loop shouldn't run infintely as the occupancy usually goes @@ -544,6 +548,14 @@ static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev) if 
(fifo_occup < pacing_data->pacing_th) break; + if (!retry_fifo_check--) { + dev_info_once(rdev_to_dev(rdev), + "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n", + __func__, fifo_occup, pacing_data->fifo_max_depth, + pacing_data->pacing_th); + break; + } + } } @@ -957,7 +969,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev); } -static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv, +static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev, struct bnxt_en_dev *en_dev) { struct bnxt_re_dev *rdev; @@ -973,6 +985,7 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv, rdev->nb.notifier_call = NULL; rdev->netdev = en_dev->net; rdev->en_dev = en_dev; + rdev->adev = adev; rdev->id = rdev->en_dev->pdev->devfn; INIT_LIST_HEAD(&rdev->qp_list); mutex_init(&rdev->qp_lock); @@ -1025,12 +1038,15 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct bnxt_re_qp *qp) { - struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq, - qplib_srq); struct creq_qp_error_notification *err_event; + struct bnxt_re_srq *srq = NULL; struct ib_event event = {}; unsigned int flags; + if (qp->qplib_qp.srq) + srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq, + qplib_srq); + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); @@ -1258,15 +1274,9 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, { struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq, qplib_cq); - u32 *cq_ptr; - if (cq->ib_cq.comp_handler) { - if (cq->uctx_cq_page) { - cq_ptr = (u32 *)cq->uctx_cq_page; - *cq_ptr = cq->qplib_cq.toggle; - } + if (cq->ib_cq.comp_handler) (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context); - } return 0; } @@ -1823,7 +1833,6 @@ static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev, */ rtnl_lock(); en_info->rdev = rdev; - rdev->adev = adev; rtnl_unlock(); } @@ -1840,7 +1849,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type) en_dev = en_info->en_dev; - rdev = bnxt_re_dev_add(aux_priv, en_dev); + rdev = bnxt_re_dev_add(adev, en_dev); if (!rdev || !rdev_to_dev(rdev)) { rc = -ENOMEM; goto exit; @@ -1865,12 +1874,14 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type) rdev->nb.notifier_call = NULL; pr_err("%s: Cannot register to netdevice_notifier", ROCE_DRV_MODULE_NAME); - return rc; + goto re_dev_unreg; } bnxt_re_setup_cc(rdev, true); return 0; +re_dev_unreg: + ib_unregister_device(&rdev->ibdev); re_dev_uninit: bnxt_re_update_en_info_rdev(NULL, en_info, adev); bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); @@ -2014,15 +2025,7 @@ static int bnxt_re_probe(struct auxiliary_device *adev, auxiliary_set_drvdata(adev, en_info); rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT); - if (rc) - goto err; mutex_unlock(&bnxt_re_mutex); - return 0; - -err: - mutex_unlock(&bnxt_re_mutex); - bnxt_re_remove(adev); - return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 42e98e5f94cb..7ad83566ab0f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -327,6 +327,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t) case NQ_BASE_TYPE_CQ_NOTIFICATION: { struct nq_cn *nqcne = (struct nq_cn 
*)nqe; + struct bnxt_re_cq *cq_p; q_handle = le32_to_cpu(nqcne->cq_handle_low); q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) @@ -337,6 +338,10 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t) cq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT; cq->dbinfo.toggle = cq->toggle; + cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq); + if (cq_p->uctx_cq_page) + *((u32 *)cq_p->uctx_cq_page) = cq->toggle; + bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA); spin_lock_bh(&cq->compl_lock); @@ -1527,9 +1532,11 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, u32 tbl_indx; int rc; + spin_lock_bh(&rcfw->tbl_lock); tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID; rcfw->qp_tbl[tbl_indx].qp_handle = NULL; + spin_unlock_bh(&rcfw->tbl_lock); bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_DESTROY_QP, @@ -1540,8 +1547,10 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, sizeof(resp), 0); rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); if (rc) { + spin_lock_bh(&rcfw->tbl_lock); rcfw->qp_tbl[tbl_indx].qp_id = qp->id; rcfw->qp_tbl[tbl_indx].qp_handle = qp; + spin_unlock_bh(&rcfw->tbl_lock); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index b62df8701950..820611a23943 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -170,7 +170,7 @@ struct bnxt_qplib_swqe { }; u32 q_key; u32 dst_qp; - u16 avid; + u32 avid; } send; /* Send Raw Ethernet and QP1 */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 3ffaef0c2651..e82bd37158ad 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_hwq *hwq; u32 sw_prod, cmdq_prod; struct pci_dev *pdev; - unsigned long flags; u16 cookie; u8 *preq; @@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ - spin_lock_irqsave(&hwq->lock, flags); + spin_lock_bh(&hwq->lock); required_slots = bnxt_qplib_get_cmd_slots(msg->req); free_slots = HWQ_FREE_SLOTS(hwq); cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE; @@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, dev_info_ratelimited(&pdev->dev, "CMDQ is full req/free %d/%d!", required_slots, free_slots); - spin_unlock_irqrestore(&hwq->lock, flags); + spin_unlock_bh(&hwq->lock); return -EAGAIN; } if (msg->block) @@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, wmb(); writel(cmdq_prod, cmdq->cmdq_mbox.prod); writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db); - spin_unlock_irqrestore(&hwq->lock, flags); + spin_unlock_bh(&hwq->lock); /* Return the CREQ response pointer */ return 0; } @@ -486,7 +485,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, { struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp; struct bnxt_qplib_crsqe *crsqe; - unsigned long flags; u16 cookie; int rc; u8 opcode; @@ -512,12 +510,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, rc = __poll_for_resp(rcfw, cookie); if (rc) { - spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags); + spin_lock_bh(&rcfw->cmdq.hwq.lock); crsqe = &rcfw->crsqe_tbl[cookie]; crsqe->is_waiter_alive = false; if (rc == -ENODEV) 
set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags); - spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags); + spin_unlock_bh(&rcfw->cmdq.hwq.lock); return -ETIMEDOUT; } @@ -525,7 +523,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, /* failed with status */ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n", cookie, opcode, evnt->status); - rc = -EFAULT; + rc = -EIO; } return rc; @@ -628,7 +626,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, u16 cookie, blocked = 0; bool is_waiter_alive; struct pci_dev *pdev; - unsigned long flags; u32 wait_cmds = 0; int rc = 0; @@ -637,17 +634,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: err_event = (struct creq_qp_error_notification *)qp_event; qp_id = le32_to_cpu(err_event->xid); + spin_lock(&rcfw->tbl_lock); tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw); qp = rcfw->qp_tbl[tbl_indx].qp_handle; + if (!qp) { + spin_unlock(&rcfw->tbl_lock); + break; + } + bnxt_qplib_mark_qp_error(qp); + rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp); + spin_unlock(&rcfw->tbl_lock); dev_dbg(&pdev->dev, "Received QP error notification\n"); dev_dbg(&pdev->dev, "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", qp_id, err_event->req_err_state_reason, err_event->res_err_state_reason); - if (!qp) - break; - bnxt_qplib_mark_qp_error(qp); - rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp); break; default: /* @@ -659,8 +660,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, * */ - spin_lock_irqsave_nested(&hwq->lock, flags, - SINGLE_DEPTH_NESTING); + spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING); cookie = le16_to_cpu(qp_event->cookie); blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; @@ -672,7 +672,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, dev_info(&pdev->dev, "rcfw timedout: cookie = %#x, free_slots = %d", cookie, crsqe->free_slots); - spin_unlock_irqrestore(&hwq->lock, flags); + spin_unlock(&hwq->lock); return rc; } @@ -720,7 +720,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, __destroy_timedout_ah(rcfw, (struct creq_create_ah_resp *) qp_event); - spin_unlock_irqrestore(&hwq->lock, flags); + spin_unlock(&hwq->lock); } *num_wait += wait_cmds; return rc; @@ -734,12 +734,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t) u32 type, budget = CREQ_ENTRY_POLL_BUDGET; struct bnxt_qplib_hwq *hwq = &creq->hwq; struct creq_base *creqe; - unsigned long flags; u32 num_wakeup = 0; u32 hw_polled = 0; /* Service the CREQ until budget is over */ - spin_lock_irqsave(&hwq->lock, flags); + spin_lock_bh(&hwq->lock); while (budget > 0) { creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL); if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags)) @@ -782,7 +781,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t) if (hw_polled) bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true); - spin_unlock_irqrestore(&hwq->lock, flags); + spin_unlock_bh(&hwq->lock); if (num_wakeup) wake_up_nr(&rcfw->cmdq.waitq, num_wakeup); } @@ -978,6 +977,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, GFP_KERNEL); if (!rcfw->qp_tbl) goto fail; + spin_lock_init(&rcfw->tbl_lock); rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 45996e60a0d0..07779aeb7575 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ 
b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -224,6 +224,8 @@ struct bnxt_qplib_rcfw { struct bnxt_qplib_crsqe *crsqe_tbl; int qp_tbl_size; struct bnxt_qplib_qp_node *qp_tbl; + /* To synchronize the qp-handle hash table */ + spinlock_t tbl_lock; u64 oos_prev; u32 init_oos_stats; u32 cmdq_depth; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index dfc943fab87b..96ceec1e8199 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, sginfo.pgsize = npde * pg_size; sginfo.npages = 1; rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); + if (rc) + goto fail; /* Alloc PBL pages */ sginfo.npages = npbl; @@ -255,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, dst_virt_ptr = (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; - if (hwq_attr->type == HWQ_TYPE_MR) { - /* For MR it is expected that we supply only 1 contigous - * page i.e only 1 entry in the PDL that will contain - * all the PBLs for the user supplied memory region - */ - for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; - i++) - dst_virt_ptr[0][i] = src_phys_ptr[i] | - flag; - } else { - for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; - i++) - dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = - src_phys_ptr[i] | - PTU_PDE_VALID; - } + for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) + dst_virt_ptr[0][i] = src_phys_ptr[i] | flag; + /* Alloc or init PTEs */ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2], hwq_attr->sginfo); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 4f75e7e5bcf7..e29fbbdab9fd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -140,6 +140,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); + if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx)) + attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes); attr->max_cq_sges = attr->max_qp_sges; attr->max_mr = le32_to_cpu(sb->max_mr); attr->max_mw = le32_to_cpu(sb->max_mw); @@ -157,7 +159,14 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx)) attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; + /* + * Read the max gid supported by HW. + * For each entry in HW GID in HW table, we consume 2 + * GID entries in the kernel GID table. So max_gid reported + * to stack can be up to twice the value reported by the HW, up to 256 gids. 
+ */ + attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid); attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags); attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index acd9c14a31c4..ecf3f45fea74 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -56,6 +56,7 @@ struct bnxt_qplib_dev_attr { u32 max_qp_wqes; u32 max_qp_sges; u32 max_cq; +#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff u32 max_cq_wqes; u32 max_cq_sges; u32 max_mr; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b3757c6a0457..8d753e6e0c71 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, err = -ENOMEM; if (n->dev->flags & IFF_LOOPBACK) { if (iptype == 4) - pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); + pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false); else if (IS_ENABLED(CONFIG_IPV6)) for_each_netdev(&init_net, pdev) { if (ipv6_chk_addr(&init_net, @@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, err = -ENODEV; goto out; } + if (is_vlan_dev(pdev)) + pdev = vlan_dev_real_dev(pdev); ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, rt_tos2priority(tos)); - if (!ep->l2t) { - dev_put(pdev); + if (!ep->l2t) goto out; - } ep->mtu = pdev->mtu; ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; @@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, ep->rss_qid = cdev->rdev.lldi.rxq_ids[ cxgb4_port_idx(pdev) * step]; set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); - dev_put(pdev); } else { pdev = get_real_dev(n->dev); ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 10a4c738b59f..e059f92d90fd 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -473,6 +473,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .fill_res_cq_entry = c4iw_fill_res_cq_entry, .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, + .fill_res_qp_entry = c4iw_fill_res_qp_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, .get_hw_stats = c4iw_get_mib, diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c index 36bb7e5ce638..ce8d821bdad8 100644 --- a/drivers/infiniband/hw/irdma/cm.c +++ b/drivers/infiniband/hw/irdma/cm.c @@ -3631,7 +3631,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp) /** * irdma_accept - registered call for connection to be accepted * @cm_id: cm information for passive connection - * @conn_param: accpet parameters + * @conn_param: accept parameters */ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e39b1a101e97..10ce3b44f645 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4268,14 +4268,14 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic) - MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic)); + 
MLX5_SET(qpc, qpc, log_sra_max, fls(attr->max_rd_atomic - 1)); if (attr_mask & IB_QP_SQ_PSN) MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn); if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic) MLX5_SET(qpc, qpc, log_rra_max, - ilog2(attr->max_dest_rd_atomic)); + fls(attr->max_dest_rd_atomic - 1)); if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc); diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 64ad9e0895bd..a034264c5669 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -331,6 +331,8 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, msg.msg_flags &= ~MSG_MORE; tcp_rate_check_app_limited(sk); + if (!sendpage_ok(page[i])) + msg.msg_flags &= ~MSG_SPLICE_PAGES; bvec_set_page(&bvec, page[i], bytes, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 9632afbd727b..5dfb4644446b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL"); static u64 srpt_service_guid; static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */ static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */ +static DEFINE_MUTEX(srpt_mc_mutex); /* Protects srpt_memory_caches. */ +static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */ static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE; module_param(srp_max_req_size, int, 0444); @@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc); static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc); static void srpt_process_wait_list(struct srpt_rdma_ch *ch); +/* Type of the entries in srpt_memory_caches. */ +struct srpt_memory_cache_entry { + refcount_t ref; + struct kmem_cache *c; +}; + +static struct kmem_cache *srpt_cache_get(unsigned int object_size) +{ + struct srpt_memory_cache_entry *e; + char name[32]; + void *res; + + guard(mutex)(&srpt_mc_mutex); + e = xa_load(&srpt_memory_caches, object_size); + if (e) { + refcount_inc(&e->ref); + return e->c; + } + snprintf(name, sizeof(name), "srpt-%u", object_size); + e = kmalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return NULL; + refcount_set(&e->ref, 1); + e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL); + if (!e->c) + goto free_entry; + res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL); + if (xa_is_err(res)) + goto destroy_cache; + return e->c; + +destroy_cache: + kmem_cache_destroy(e->c); + +free_entry: + kfree(e); + return NULL; +} + +static void srpt_cache_put(struct kmem_cache *c) +{ + struct srpt_memory_cache_entry *e = NULL; + unsigned long object_size; + + guard(mutex)(&srpt_mc_mutex); + xa_for_each(&srpt_memory_caches, object_size, e) + if (e->c == c) + break; + if (WARN_ON_ONCE(!e)) + return; + if (!refcount_dec_and_test(&e->ref)) + return; + WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e); + kmem_cache_destroy(e->c); + kfree(e); +} + /* * The only allowed channel state changes are those that change the channel * state into a state with a higher numerical value. Hence the new > prev test. 
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w) ch->sport->sdev, ch->rq_size, ch->rsp_buf_cache, DMA_TO_DEVICE); - kmem_cache_destroy(ch->rsp_buf_cache); + srpt_cache_put(ch->rsp_buf_cache); srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring, sdev, ch->rq_size, ch->req_buf_cache, DMA_FROM_DEVICE); - kmem_cache_destroy(ch->req_buf_cache); + srpt_cache_put(ch->req_buf_cache); kref_put(&ch->kref, srpt_free_ch); } @@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, INIT_LIST_HEAD(&ch->cmd_wait_list); ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size; - ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size, - 512, 0, NULL); + ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size); if (!ch->rsp_buf_cache) goto free_ch; @@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, alignment_offset = round_up(imm_data_offset, 512) - imm_data_offset; req_sz = alignment_offset + imm_data_offset + srp_max_req_size; - ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz, - 512, 0, NULL); + ch->req_buf_cache = srpt_cache_get(req_sz); if (!ch->req_buf_cache) goto free_rsp_ring; @@ -2478,7 +2535,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, ch->req_buf_cache, DMA_FROM_DEVICE); free_recv_cache: - kmem_cache_destroy(ch->req_buf_cache); + srpt_cache_put(ch->req_buf_cache); free_rsp_ring: srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, @@ -2486,7 +2543,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, ch->rsp_buf_cache, DMA_TO_DEVICE); free_rsp_cache: - kmem_cache_destroy(ch->rsp_buf_cache); + srpt_cache_put(ch->rsp_buf_cache); free_ch: if (rdma_cm_id) @@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev) srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, sdev->srq_size, sdev->req_buf_cache, DMA_FROM_DEVICE); - kmem_cache_destroy(sdev->req_buf_cache); + srpt_cache_put(sdev->req_buf_cache); sdev->srq = NULL; } @@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev) pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size, sdev->device->attrs.max_srq_wr, dev_name(&device->dev)); - sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf", - srp_max_req_size, 0, 0, NULL); + sdev->req_buf_cache = srpt_cache_get(srp_max_req_size); if (!sdev->req_buf_cache) goto free_srq; @@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev) return 0; free_cache: - kmem_cache_destroy(sdev->req_buf_cache); + srpt_cache_put(sdev->req_buf_cache); free_srq: ib_destroy_srq(srq); diff --git a/drivers/input/input.c b/drivers/input/input.c index 47fac29cf7c3..c51858f1cdc5 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -119,12 +119,12 @@ static void input_pass_values(struct input_dev *dev, handle = rcu_dereference(dev->grab); if (handle) { - count = handle->handler->events(handle, vals, count); + count = handle->handle_events(handle, vals, count); } else { list_for_each_entry_rcu(handle, &dev->h_list, d_node) if (handle->open) { - count = handle->handler->events(handle, vals, - count); + count = handle->handle_events(handle, vals, + count); if (!count) break; } @@ -2534,57 +2534,6 @@ static int input_handler_check_methods(const struct input_handler *handler) return 0; } -/* - * An implementation of input_handler's events() method that simply - * invokes handler->event() method for each event one by one. 
- */ -static unsigned int input_handler_events_default(struct input_handle *handle, - struct input_value *vals, - unsigned int count) -{ - struct input_handler *handler = handle->handler; - struct input_value *v; - - for (v = vals; v != vals + count; v++) - handler->event(handle, v->type, v->code, v->value); - - return count; -} - -/* - * An implementation of input_handler's events() method that invokes - * handler->filter() method for each event one by one and removes events - * that were filtered out from the "vals" array. - */ -static unsigned int input_handler_events_filter(struct input_handle *handle, - struct input_value *vals, - unsigned int count) -{ - struct input_handler *handler = handle->handler; - struct input_value *end = vals; - struct input_value *v; - - for (v = vals; v != vals + count; v++) { - if (handler->filter(handle, v->type, v->code, v->value)) - continue; - if (end != v) - *end = *v; - end++; - } - - return end - vals; -} - -/* - * An implementation of input_handler's events() method that does nothing. - */ -static unsigned int input_handler_events_null(struct input_handle *handle, - struct input_value *vals, - unsigned int count) -{ - return count; -} - /** * input_register_handler - register a new input handler * @handler: handler to be registered @@ -2604,13 +2553,6 @@ int input_register_handler(struct input_handler *handler) INIT_LIST_HEAD(&handler->h_list); - if (handler->filter) - handler->events = input_handler_events_filter; - else if (handler->event) - handler->events = input_handler_events_default; - else if (!handler->events) - handler->events = input_handler_events_null; - error = mutex_lock_interruptible(&input_mutex); if (error) return error; @@ -2684,6 +2626,75 @@ int input_handler_for_each_handle(struct input_handler *handler, void *data, } EXPORT_SYMBOL(input_handler_for_each_handle); +/* + * An implementation of input_handle's handle_events() method that simply + * invokes handler->event() method for each event one by one. + */ +static unsigned int input_handle_events_default(struct input_handle *handle, + struct input_value *vals, + unsigned int count) +{ + struct input_handler *handler = handle->handler; + struct input_value *v; + + for (v = vals; v != vals + count; v++) + handler->event(handle, v->type, v->code, v->value); + + return count; +} + +/* + * An implementation of input_handle's handle_events() method that invokes + * handler->filter() method for each event one by one and removes events + * that were filtered out from the "vals" array. + */ +static unsigned int input_handle_events_filter(struct input_handle *handle, + struct input_value *vals, + unsigned int count) +{ + struct input_handler *handler = handle->handler; + struct input_value *end = vals; + struct input_value *v; + + for (v = vals; v != vals + count; v++) { + if (handler->filter(handle, v->type, v->code, v->value)) + continue; + if (end != v) + *end = *v; + end++; + } + + return end - vals; +} + +/* + * An implementation of input_handle's handle_events() method that does nothing. + */ +static unsigned int input_handle_events_null(struct input_handle *handle, + struct input_value *vals, + unsigned int count) +{ + return count; +} + +/* + * Sets up appropriate handle->event_handler based on the input_handler + * associated with the handle. 
+ */ +static void input_handle_setup_event_handler(struct input_handle *handle) +{ + struct input_handler *handler = handle->handler; + + if (handler->filter) + handle->handle_events = input_handle_events_filter; + else if (handler->event) + handle->handle_events = input_handle_events_default; + else if (handler->events) + handle->handle_events = handler->events; + else + handle->handle_events = input_handle_events_null; +} + /** * input_register_handle - register a new input handle * @handle: handle to register @@ -2701,6 +2712,7 @@ int input_register_handle(struct input_handle *handle) struct input_dev *dev = handle->dev; int error; + input_handle_setup_event_handler(handle); /* * We take dev->mutex here to prevent race with * input_release_device(). diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 4eda18f4f46e..22ea58bf76cb 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -218,6 +218,7 @@ static const struct xpad_device { { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX }, { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0db0, 0x1901, "Micro Star International Xbox360 Controller for Windows", 0, XTYPE_XBOX360 }, { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX }, { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX }, { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX }, @@ -373,6 +374,7 @@ static const struct xpad_device { { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, @@ -492,6 +494,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */ XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x0db0), /* Micro Star International X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori controllers */ diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index d25d63a807f2..dc734974ce06 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -822,7 +822,8 @@ static int adp5588_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - disable_irq(client->irq); + if (client->irq) + disable_irq(client->irq); return 0; } @@ -831,7 +832,8 @@ static int adp5588_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - enable_irq(client->irq); + if (client->irq) + enable_irq(client->irq); return 0; } diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index fda49b2fe088..85c6d8ce003f 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -1121,6 +1121,14 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata) } } +static void edt_ft5x06_exit_regmap(void *arg) 
+{ + struct edt_ft5x06_ts_data *data = arg; + + if (!IS_ERR_OR_NULL(data->regmap)) + regmap_exit(data->regmap); +} + static void edt_ft5x06_disable_regulators(void *arg) { struct edt_ft5x06_ts_data *data = arg; @@ -1154,6 +1162,16 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client) return PTR_ERR(tsdata->regmap); } + /* + * We are not using devm_regmap_init_i2c() and instead install a + * custom action because we may replace regmap with M06-specific one + * and we need to make sure that it will not be released too early. + */ + error = devm_add_action_or_reset(&client->dev, edt_ft5x06_exit_regmap, + tsdata); + if (error) + return error; + chip_data = device_get_match_data(&client->dev); if (!chip_data) chip_data = (const struct edt_i2c_chip_data *)id->driver_data; @@ -1347,7 +1365,6 @@ static void edt_ft5x06_ts_remove(struct i2c_client *client) struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); edt_ft5x06_ts_teardown_debugfs(tsdata); - regmap_exit(tsdata->regmap); } static int edt_ft5x06_ts_suspend(struct device *dev) diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 52b3950460e2..716d6fa60f86 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -645,19 +645,29 @@ static int zinitix_ts_probe(struct i2c_client *client) return error; } - bt541->num_keycodes = device_property_count_u32(&client->dev, "linux,keycodes"); - if (bt541->num_keycodes > ARRAY_SIZE(bt541->keycodes)) { - dev_err(&client->dev, "too many keys defined (%d)\n", bt541->num_keycodes); - return -EINVAL; - } + if (device_property_present(&client->dev, "linux,keycodes")) { + bt541->num_keycodes = device_property_count_u32(&client->dev, + "linux,keycodes"); + if (bt541->num_keycodes < 0) { + dev_err(&client->dev, "Failed to count keys (%d)\n", + bt541->num_keycodes); + return bt541->num_keycodes; + } else if (bt541->num_keycodes > ARRAY_SIZE(bt541->keycodes)) { + dev_err(&client->dev, "Too many keys defined (%d)\n", + bt541->num_keycodes); + return -EINVAL; + } - error = device_property_read_u32_array(&client->dev, "linux,keycodes", - bt541->keycodes, - bt541->num_keycodes); - if (error) { - dev_err(&client->dev, - "Unable to parse \"linux,keycodes\" property: %d\n", error); - return error; + error = device_property_read_u32_array(&client->dev, + "linux,keycodes", + bt541->keycodes, + bt541->num_keycodes); + if (error) { + dev_err(&client->dev, + "Unable to parse \"linux,keycodes\" property: %d\n", + error); + return error; + } } error = zinitix_init_input_dev(bt541); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 737c5b882355..353fea58cd31 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1420,7 +1420,7 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master) cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; cd_table->linear.num_ents = max_contexts; - l1size = max_contexts * sizeof(struct arm_smmu_cd), + l1size = max_contexts * sizeof(struct arm_smmu_cd); cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma, GFP_KERNEL); @@ -3625,7 +3625,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) u32 l1size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; unsigned int last_sid_idx = - arm_smmu_strtab_l1_idx((1 << smmu->sid_bits) - 1); + arm_smmu_strtab_l1_idx((1ULL << smmu->sid_bits) - 1); /* Calculate the L1 size, capped to the SIDSIZE. 
*/ cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 9dc772f2cbb2..99030e6b16e7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -130,7 +130,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu) /* * Disable MMU-500's not-particularly-beneficial next-page - * prefetcher for the sake of errata #841119 and #826419. + * prefetcher for the sake of at least 5 known errata. */ for (i = 0; i < smmu->num_context_banks; ++i) { reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); @@ -138,7 +138,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu) arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); if (reg & ARM_MMU500_ACTLR_CPRE) - dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n"); + dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n"); } return 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9f6b0780f2ef..e860bc9439a2 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3340,8 +3340,10 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op */ static void domain_context_clear(struct device_domain_info *info) { - if (!dev_is_pci(info->dev)) + if (!dev_is_pci(info->dev)) { domain_context_clear_one(info, info->bus, info->devfn); + return; + } pci_for_each_dma_alias(to_pci_dev(info->dev), &domain_context_clear_one_cb, info); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 341cd9ca5a05..d82bcab233a1 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -45,13 +45,6 @@ config ARM_GIC_V3_ITS select IRQ_MSI_LIB default ARM_GIC_V3 -config ARM_GIC_V3_ITS_PCI - bool - depends on ARM_GIC_V3_ITS - depends on PCI - depends on PCI_MSI - default ARM_GIC_V3_ITS - config ARM_GIC_V3_ITS_FSL_MC bool depends on ARM_GIC_V3_ITS diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index fdec478ba5e7..52f625e07658 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -797,8 +797,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, its_encode_valid(cmd, desc->its_vmapp_cmd.valid); if (!desc->its_vmapp_cmd.valid) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); if (is_v4_1(its)) { - alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); its_encode_alloc(cmd, alloc); /* * Unmapping a VPE is self-synchronizing on GICv4.1, @@ -817,13 +817,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + if (!is_v4_1(its)) goto out; vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); - alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); - its_encode_alloc(cmd, alloc); /* @@ -3806,6 +3806,23 @@ static int its_vpe_set_affinity(struct irq_data *d, struct cpumask *table_mask; unsigned long flags; + /* + * Check if we're racing against a VPE being destroyed, for + * which we don't want to allow a VMOVP. 
+ */ + if (!atomic_read(&vpe->vmapp_count)) { + if (gic_requires_eager_mapping()) + return -EINVAL; + + /* + * If we lazily map the VPEs, this isn't an error and + * we can exit cleanly. + */ + cpu = cpumask_first(mask_val); + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + return IRQ_SET_MASK_OK_DONE; + } + /* * Changing affinity is mega expensive, so let's be as lazy as * we can and only do it if we really have to. Also, if mapped @@ -4463,9 +4480,8 @@ static int its_vpe_init(struct its_vpe *vpe) raw_spin_lock_init(&vpe->vpe_lock); vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - if (gic_rdists->has_rvpeid) - atomic_set(&vpe->vmapp_count, 0); - else + atomic_set(&vpe->vmapp_count, 0); + if (!gic_rdists->has_rvpeid) vpe->vpe_proxy_event = -1; return 0; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index ce87205e3e82..8b6159f4cdaf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -524,6 +524,13 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, } gic_poke_irq(d, reg); + + /* + * Force read-back to guarantee that the active state has taken + * effect, and won't race with a guest-driven deactivation. + */ + if (reg == GICD_ISACTIVER) + gic_peek_irq(d, reg); return 0; } diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c index 4d0c3532dbe7..3dc745b14caf 100644 --- a/drivers/irqchip/irq-mscc-ocelot.c +++ b/drivers/irqchip/irq-mscc-ocelot.c @@ -37,7 +37,7 @@ static struct chip_props ocelot_props = { .reg_off_ena_clr = 0x1c, .reg_off_ena_set = 0x20, .reg_off_ident = 0x38, - .reg_off_trigger = 0x5c, + .reg_off_trigger = 0x4, .n_irq = 24, }; @@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = { .reg_off_ena_clr = 0x1c, .reg_off_ena_set = 0x20, .reg_off_ident = 0x38, - .reg_off_trigger = 0x5c, + .reg_off_trigger = 0x4, .n_irq = 29, }; @@ -84,6 +84,12 @@ static void ocelot_irq_unmask(struct irq_data *data) u32 val; irq_gc_lock(gc); + /* + * Clear sticky bits for edge mode interrupts. + * Serval has only one trigger register replication, but the adjacent + * register is always read as zero, so there's no need to handle this + * case separately. + */ val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 0)) | irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 1)); if (!(val & mask)) diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index 693ff285ca2c..99e27e01b0b1 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -530,12 +531,12 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv, static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent, const struct irq_chip *irq_chip) { + struct platform_device *pdev = of_find_device_by_node(node); + struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL; struct irq_domain *irq_domain, *parent_domain; - struct platform_device *pdev; struct reset_control *resetn; int ret; - pdev = of_find_device_by_node(node); if (!pdev) return -ENODEV; @@ -591,6 +592,17 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node * register_syscore_ops(&rzg2l_irqc_syscore_ops); + /* + * Prevent the cleanup function from invoking put_device by assigning + * NULL to dev. 
+ * + * make coccicheck will complain about missing put_device calls, but + * those are false positives, as dev will be automatically "put" via + * __free_put_device on the failing path. + * On the successful path we don't actually want to "put" dev. + */ + dev = NULL; + return 0; pm_put: diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index 64905e6f52d7..c708780e8760 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -341,7 +341,7 @@ int imsic_irqdomain_init(void) imsic->fwnode, global->hart_index_bits, global->guest_index_bits); pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n", imsic->fwnode, global->group_index_bits, global->group_index_shift); - pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n", + pr_info("%pfwP: per-CPU IDs %d at base address %pa\n", imsic->fwnode, global->nr_ids, &global->base_addr); pr_info("%pfwP: total %d interrupts available\n", imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1)); diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 8c5411386220..f653c13de62b 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -265,7 +265,7 @@ struct rintc_data { }; static u32 nr_rintc; -static struct rintc_data *rintc_acpi_data[NR_CPUS]; +static struct rintc_data **rintc_acpi_data; #define for_each_matching_plic(_plic_id) \ unsigned int _plic; \ @@ -329,13 +329,30 @@ int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) return 0; } +static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_rintc *rintc; struct fwnode_handle *fn; + int count; int rc; + if (!rintc_acpi_data) { + count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0); + if (count <= 0) + return -EINVAL; + + rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL); + if (!rintc_acpi_data) + return -ENOMEM; + } + rintc = (struct acpi_madt_rintc *)header; rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL); if (!rintc_acpi_data[nr_rintc]) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 2f6ef5c495bd..36dbcf2d728a 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -126,16 +126,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask, } } -static void plic_irq_enable(struct irq_data *d) -{ - plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); -} - -static void plic_irq_disable(struct irq_data *d) -{ - plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); -} - static void plic_irq_unmask(struct irq_data *d) { struct plic_priv *priv = irq_data_get_irq_chip_data(d); @@ -150,6 +140,17 @@ static void plic_irq_mask(struct irq_data *d) writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); } +static void plic_irq_enable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); + plic_irq_unmask(d); +} + +static void plic_irq_disable(struct irq_data *d) +{ + plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); +} + static void plic_irq_eoi(struct irq_data *d) { struct plic_handler *handler = this_cpu_ptr(&plic_handlers); @@ -626,8 +627,10 @@ static int plic_probe(struct fwnode_handle *fwnode) 
handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), sizeof(*handler->enable_save), GFP_KERNEL); - if (!handler->enable_save) + if (!handler->enable_save) { + error = -ENOMEM; goto fail_cleanup_contexts; + } done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { plic_toggle(handler, hwirq, 0); @@ -639,8 +642,10 @@ static int plic_probe(struct fwnode_handle *fwnode) priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, &plic_irqdomain_ops, priv); - if (WARN_ON(!priv->irqdomain)) + if (WARN_ON(!priv->irqdomain)) { + error = -ENOMEM; goto fail_cleanup_contexts; + } /* * We can have multiple PLIC instances so setup global state diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index aaeeabfab09b..40709310e327 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1905,16 +1905,13 @@ static void check_migrations(struct work_struct *ws) * This function gets called on the error paths of the constructor, so we * have to cope with a partially initialised struct. */ -static void destroy(struct cache *cache) +static void __destroy(struct cache *cache) { - unsigned int i; - mempool_exit(&cache->migration_pool); if (cache->prison) dm_bio_prison_destroy_v2(cache->prison); - cancel_delayed_work_sync(&cache->waker); if (cache->wq) destroy_workqueue(cache->wq); @@ -1942,13 +1939,22 @@ static void destroy(struct cache *cache) if (cache->policy) dm_cache_policy_destroy(cache->policy); + bioset_exit(&cache->bs); + + kfree(cache); +} + +static void destroy(struct cache *cache) +{ + unsigned int i; + + cancel_delayed_work_sync(&cache->waker); + for (i = 0; i < cache->nr_ctr_args ; i++) kfree(cache->ctr_args[i]); kfree(cache->ctr_args); - bioset_exit(&cache->bs); - - kfree(cache); + __destroy(cache); } static void cache_dtr(struct dm_target *ti) @@ -2003,7 +2009,6 @@ struct cache_args { sector_t cache_sectors; struct dm_dev *origin_dev; - sector_t origin_sectors; uint32_t block_size; @@ -2084,6 +2089,7 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, char **error) { + sector_t origin_sectors; int r; if (!at_least_one_arg(as, error)) @@ -2096,8 +2102,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, return r; } - ca->origin_sectors = get_dev_size(ca->origin_dev); - if (ca->ti->len > ca->origin_sectors) { + origin_sectors = get_dev_size(ca->origin_dev); + if (ca->ti->len > origin_sectors) { *error = "Device size larger than cached device"; return -EINVAL; } @@ -2407,7 +2413,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; - origin_blocks = cache->origin_sectors = ca->origin_sectors; + origin_blocks = cache->origin_sectors = ti->len; origin_blocks = block_div(origin_blocks, ca->block_size); cache->origin_blocks = to_oblock(origin_blocks); @@ -2561,7 +2567,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) *result = cache; return 0; bad: - destroy(cache); + __destroy(cache); return r; } @@ -2612,7 +2618,7 @@ static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); if (r) { - destroy(cache); + __destroy(cache); goto out; } @@ -2895,19 +2901,19 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache) static bool can_resize(struct cache *cache, dm_cblock_t new_size) { if (from_cblock(new_size) > from_cblock(cache->cache_size)) { - if 
(cache->sized) { - DMERR("%s: unable to extend cache due to missing cache table reload", - cache_device_name(cache)); - return false; - } + DMERR("%s: unable to extend cache due to missing cache table reload", + cache_device_name(cache)); + return false; } /* * We can't drop a dirty block when shrinking the cache. */ - while (from_cblock(new_size) < from_cblock(cache->cache_size)) { - new_size = to_cblock(from_cblock(new_size) + 1); - if (is_dirty(cache, new_size)) { + if (cache->loaded_mappings) { + new_size = to_cblock(find_next_bit(cache->dirty_bitset, + from_cblock(cache->cache_size), + from_cblock(new_size))); + if (new_size != cache->cache_size) { DMERR("%s: unable to shrink cache; cache block %llu is dirty", cache_device_name(cache), (unsigned long long) from_cblock(new_size)); @@ -2943,20 +2949,15 @@ static int cache_preresume(struct dm_target *ti) /* * Check to see if the cache has resized. */ - if (!cache->sized) { - r = resize_cache_dev(cache, csize); - if (r) - return r; - - cache->sized = true; - - } else if (csize != cache->cache_size) { + if (!cache->sized || csize != cache->cache_size) { if (!can_resize(cache, csize)) return -EINVAL; r = resize_cache_dev(cache, csize); if (r) return r; + + cache->sized = true; } if (!cache->loaded_mappings) { diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c index 48587c16c445..e8a9432057dc 100644 --- a/drivers/md/dm-unstripe.c +++ b/drivers/md/dm-unstripe.c @@ -85,8 +85,8 @@ static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } uc->physical_start = start; - uc->unstripe_offset = uc->unstripe * uc->chunk_size; - uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size; + uc->unstripe_offset = (sector_t)uc->unstripe * uc->chunk_size; + uc->unstripe_width = (sector_t)(uc->stripes - 1) * uc->chunk_size; uc->chunk_shift = is_power_of_2(uc->chunk_size) ? 
fls(uc->chunk_size) - 1 : 0; tmp_len = ti->len; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 7d4d90b4395a..c142ec5458b7 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -356,9 +356,9 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA, hash_block)) { - struct bio *bio = - dm_bio_from_per_bio_data(io, - v->ti->per_io_data_size); + struct bio *bio; + io->had_mismatch = true; + bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio, block, 0); r = -EIO; @@ -482,6 +482,7 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v, return -EIO; /* Error correction failed; Just return error */ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) { + io->had_mismatch = true; dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0); return -EIO; } @@ -606,6 +607,7 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status) if (unlikely(status != BLK_STS_OK) && unlikely(!(bio->bi_opf & REQ_RAHEAD)) && + !io->had_mismatch && !verity_is_system_shutting_down()) { if (v->error_mode == DM_VERITY_MODE_PANIC) { panic("dm-verity device has I/O error"); @@ -779,6 +781,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) io->orig_bi_end_io = bio->bi_end_io; io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; + io->had_mismatch = false; bio->bi_end_io = verity_end_io; bio->bi_private = io; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 6b75159bf835..c996140bda94 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -92,6 +92,7 @@ struct dm_verity_io { sector_t block; unsigned int n_blocks; bool in_bh; + bool had_mismatch; struct work_struct work; struct work_struct bh_work; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ff4a6b570b76..19230404d8c2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2290,8 +2290,10 @@ static struct mapped_device *alloc_dev(int minor) * override accordingly. */ md->disk = blk_alloc_disk(NULL, md->numa_node_id); - if (IS_ERR(md->disk)) + if (IS_ERR(md->disk)) { + md->disk = NULL; goto bad; + } md->queue = md->disk->queue; init_waitqueue_head(&md->wait); diff --git a/drivers/md/md.c b/drivers/md/md.c index 179ee4afe937..67108c397c5a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -546,6 +546,26 @@ static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_n return 0; } +/* + * The only difference from bio_chain_endio() is that the current + * bi_status of bio does not affect the bi_status of parent. + */ +static void md_end_flush(struct bio *bio) +{ + struct bio *parent = bio->bi_private; + + /* + * If any flush io error before the power failure, + * disk data may be lost. 
+ */ + if (bio->bi_status) + pr_err("md: %pg flush io error %d\n", bio->bi_bdev, + blk_status_to_errno(bio->bi_status)); + + bio_put(bio); + bio_endio(parent); +} + bool md_flush_request(struct mddev *mddev, struct bio *bio) { struct md_rdev *rdev; @@ -565,7 +585,9 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio) new = bio_alloc_bioset(rdev->bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO, &mddev->bio_set); - bio_chain(new, bio); + new->bi_private = bio; + new->bi_end_io = md_end_flush; + bio_inc_remaining(bio); submit_bio(new); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f3bf1116794a..862b1fb71d86 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4061,9 +4061,12 @@ static int raid10_run(struct mddev *mddev) } if (!mddev_is_dm(conf->mddev)) { - ret = raid10_set_queue_limits(mddev); - if (ret) + int err = raid10_set_queue_limits(mddev); + + if (err) { + ret = err; goto out_free_conf; + } } /* need to check that every block has at least one working mirror */ diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c index 8526f613a40e..cfbfc4c1b2e6 100644 --- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c @@ -348,12 +348,12 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size, /* Return if not a CTA-861 extension block */ if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03) - return -1; + return -ENOENT; /* search tag */ d = edid[0x02] & 0x7f; if (d <= 4) - return -1; + return -ENOENT; i = 0x04; end = 0x00 + d; @@ -371,7 +371,7 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size, return offset + i; i += len + 1; } while (i < end); - return -1; + return -ENOENT; } static void extron_edid_crc(u8 *edid) diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c index ba67587bd43e..171366fe3544 100644 --- a/drivers/media/cec/usb/pulse8/pulse8-cec.c +++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c @@ -685,7 +685,7 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4); if (err) return err; - date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; + date = ((unsigned)data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; dev_info(pulse8->dev, "Firmware build date %ptT\n", &date); dev_dbg(pulse8->dev, "Persistent config:\n"); diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 642c48e8c1f5..ded11cd8dbf7 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -1795,6 +1795,9 @@ static void tpg_precalculate_line(struct tpg_data *tpg) unsigned p; unsigned x; + if (WARN_ON_ONCE(!tpg->src_width || !tpg->scaled_width)) + return; + switch (tpg->pattern) { case TPG_PAT_GREEN: contrast = TPG_COLOR_100_RED; diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 29a8d876e6c2..b0523fc23506 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -1482,18 +1482,23 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) } vb->planes[plane].dbuf_mapped = 1; } + } else { + for (plane = 0; plane < vb->num_planes; ++plane) + dma_buf_put(planes[plane].dbuf); + } - /* - * Now that everything is in 
order, copy relevant information - * provided by userspace. - */ - for (plane = 0; plane < vb->num_planes; ++plane) { - vb->planes[plane].bytesused = planes[plane].bytesused; - vb->planes[plane].length = planes[plane].length; - vb->planes[plane].m.fd = planes[plane].m.fd; - vb->planes[plane].data_offset = planes[plane].data_offset; - } + /* + * Now that everything is in order, copy relevant information + * provided by userspace. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].bytesused = planes[plane].bytesused; + vb->planes[plane].length = planes[plane].length; + vb->planes[plane].m.fd = planes[plane].m.fd; + vb->planes[plane].data_offset = planes[plane].data_offset; + } + if (reacquired) { /* * Call driver-specific initialization on the newly acquired buffer, * if provided. @@ -1503,9 +1508,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) dprintk(q, 1, "buffer initialization failed\n"); goto err_put_vb2_buf; } - } else { - for (plane = 0; plane < vb->num_planes; ++plane) - dma_buf_put(planes[plane].dbuf); } ret = call_vb_qop(vb, buf_prepare, vb); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 4f78f30b3646..a05aa271a1ba 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -443,8 +443,8 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra default: fepriv->auto_step++; - fepriv->auto_sub_step = -1; /* it'll be incremented to 0 in a moment */ - break; + fepriv->auto_sub_step = 0; + continue; } if (!ready) fepriv->auto_sub_step++; diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 192a8230c4aa..29edaaff7a5c 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -366,9 +366,15 @@ int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp) { struct vb2_queue *q = &ctx->vb_q; + struct vb2_buffer *vb2 = vb2_get_buffer(q, exp->index); int ret; - ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, q->bufs[exp->index], + if (!vb2) { + dprintk(1, "[%s] invalid buffer index\n", ctx->name); + return -EINVAL; + } + + ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, vb2, 0, exp->flags); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index b43695bc51e7..9df7c213716a 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -86,10 +86,15 @@ static DECLARE_RWSEM(minor_rwsem); static int dvb_device_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev; + unsigned int minor = iminor(inode); + + if (minor >= MAX_DVB_MINORS) + return -ENODEV; mutex_lock(&dvbdev_mutex); down_read(&minor_rwsem); - dvbdev = dvb_minors[iminor(inode)]; + + dvbdev = dvb_minors[minor]; if (dvbdev && dvbdev->fops) { int err = 0; @@ -525,7 +530,10 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (!dvb_minors[minor]) break; - if (minor == MAX_DVB_MINORS) { +#else + minor = nums2minor(adap->num, type, id); +#endif + if (minor >= MAX_DVB_MINORS) { if (new_node) { list_del(&new_node->list_head); kfree(dvbdevfops); @@ -538,9 +546,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, mutex_unlock(&dvbdev_register_lock); return -EINVAL; } -#else - minor = nums2minor(adap->num, 
type, id); -#endif + dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c index 8b978a9f74a4..f5dd3a81725a 100644 --- a/drivers/media/dvb-frontends/cx24116.c +++ b/drivers/media/dvb-frontends/cx24116.c @@ -741,6 +741,7 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; u8 snr_reading; + int ret; static const u32 snr_tab[] = { /* 10 x Table (rounded up) */ 0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667, 0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667, @@ -749,7 +750,11 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) dprintk("%s()\n", __func__); - snr_reading = cx24116_readreg(state, CX24116_REG_QUALITY0); + ret = cx24116_readreg(state, CX24116_REG_QUALITY0); + if (ret < 0) + return ret; + + snr_reading = ret; if (snr_reading >= 0xa0 /* 100% */) *snr = 0xffff; diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c index df89c33dac23..40537c4ccb0d 100644 --- a/drivers/media/dvb-frontends/stb0899_algo.c +++ b/drivers/media/dvb-frontends/stb0899_algo.c @@ -269,7 +269,7 @@ static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state) short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3; int index = 0; - u8 cfr[2]; + u8 cfr[2] = {0}; u8 reg; internal->status = NOCARRIER; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 48230d5109f0..272945a878b3 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2519,10 +2519,10 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) const struct adv76xx_chip_info *info = state->info; struct v4l2_dv_timings timings; struct stdi_readback stdi; - u8 reg_io_0x02 = io_read(sd, 0x02); + int ret; + u8 reg_io_0x02; u8 edid_enabled; u8 cable_det; - static const char * const csc_coeff_sel_rb[16] = { "bypassed", "YPbPr601 -> RGB", "reserved", "YPbPr709 -> RGB", "reserved", "RGB -> YPbPr601", "reserved", "RGB -> YPbPr709", @@ -2621,13 +2621,21 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) v4l2_info(sd, "-----Color space-----\n"); v4l2_info(sd, "RGB quantization range ctrl: %s\n", rgb_quantization_range_txt[state->rgb_quantization_range]); - v4l2_info(sd, "Input color space: %s\n", - input_color_space_txt[reg_io_0x02 >> 4]); - v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n", - (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr", - (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ? - "(16-235)" : "(0-255)", - (reg_io_0x02 & 0x08) ? "enabled" : "disabled"); + + ret = io_read(sd, 0x02); + if (ret < 0) { + v4l2_info(sd, "Can't read Input/Output color space\n"); + } else { + reg_io_0x02 = ret; + + v4l2_info(sd, "Input color space: %s\n", + input_color_space_txt[reg_io_0x02 >> 4]); + v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n", + (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr", + (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ? + "(16-235)" : "(0-255)", + (reg_io_0x02 & 0x08) ? 
"enabled" : "disabled"); + } v4l2_info(sd, "Color space conversion: %s\n", csc_coeff_sel_rb[cp_read(sd, info->cp_csc) >> 4]); diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c index fc27238dd4d3..24873149096c 100644 --- a/drivers/media/i2c/ar0521.c +++ b/drivers/media/i2c/ar0521.c @@ -255,10 +255,10 @@ static u32 calc_pll(struct ar0521_dev *sensor, u32 freq, u16 *pre_ptr, u16 *mult continue; /* Minimum value */ if (new_mult > 254) break; /* Maximum, larger pre won't work either */ - if (sensor->extclk_freq * (u64)new_mult < AR0521_PLL_MIN * + if (sensor->extclk_freq * (u64)new_mult < (u64)AR0521_PLL_MIN * new_pre) continue; - if (sensor->extclk_freq * (u64)new_mult > AR0521_PLL_MAX * + if (sensor->extclk_freq * (u64)new_mult > (u64)AR0521_PLL_MAX * new_pre) break; /* Larger pre won't work either */ new_pll = div64_round_up(sensor->extclk_freq * (u64)new_mult, diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c index 70dc78ef193c..a25b68403bc6 100644 --- a/drivers/media/pci/mgb4/mgb4_cmt.c +++ b/drivers/media/pci/mgb4/mgb4_cmt.c @@ -227,6 +227,8 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev, u32 config; size_t i; + freq_range = array_index_nospec(freq_range, ARRAY_SIZE(cmt_vals_in)); + addr = cmt_addrs_in[vindev->config->id]; reg_set = cmt_vals_in[freq_range]; diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c index d2c4a0178b3c..1db4609b3557 100644 --- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c @@ -775,11 +775,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2; jpeg_buffer.curr = 0; - word = 0; - if (get_word_be(&jpeg_buffer, &word)) return; - jpeg_buffer.size = (long)word - 2; + + if (word < 2) + jpeg_buffer.size = 0; + else + jpeg_buffer.size = (long)word - 2; + jpeg_buffer.data += 2; jpeg_buffer.curr = 0; @@ -1058,6 +1061,7 @@ static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word) if (byte == -1) return -1; *word = (unsigned int)byte | temp; + return 0; } @@ -1145,7 +1149,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; sof = jpeg_buffer.curr; /* after 0xffc0 */ sof_len = length; @@ -1176,7 +1180,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dqt >= S5P_JPEG_MAX_MARKER) return false; @@ -1189,7 +1193,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dht >= S5P_JPEG_MAX_MARKER) return false; @@ -1214,6 +1218,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; + /* No need to check underflows as skip() does it */ skip(&jpeg_buffer, length); break; } diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c index 00e0d08af357..4f330f4fc6be 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.c +++ b/drivers/media/test-drivers/vivid/vivid-core.c @@ -910,7 +910,7 @@ static int vivid_create_queue(struct vivid_dev *dev, * 
videobuf2-core.c to MAX_BUFFER_INDEX. */ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - q->max_num_buffers = 64; + q->max_num_buffers = MAX_VID_CAP_BUFFERS; if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE) q->max_num_buffers = 1024; if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE) diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h index cc18a3bc6dc0..d2d52763b119 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.h +++ b/drivers/media/test-drivers/vivid/vivid-core.h @@ -26,6 +26,8 @@ #define MAX_INPUTS 16 /* The maximum number of outputs */ #define MAX_OUTPUTS 16 +/* The maximum number of video capture buffers */ +#define MAX_VID_CAP_BUFFERS 64 /* The maximum up or down scaling factor is 4 */ #define MAX_ZOOM 4 /* The maximum image width/height are set to 4K DMT */ @@ -481,7 +483,7 @@ struct vivid_dev { /* video capture */ struct tpg_data tpg; unsigned ms_vid_cap; - bool must_blank[VIDEO_MAX_FRAME]; + bool must_blank[MAX_VID_CAP_BUFFERS]; const struct vivid_fmt *fmt_cap; struct v4l2_fract timeperframe_vid_cap; diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c index 8bb38bc7b8cc..2b5c8fbcd0a2 100644 --- a/drivers/media/test-drivers/vivid/vivid-ctrls.c +++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c @@ -553,7 +553,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) break; case VIVID_CID_PERCENTAGE_FILL: tpg_s_perc_fill(&dev->tpg, ctrl->val); - for (i = 0; i < VIDEO_MAX_FRAME; i++) + for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = ctrl->val < 100; break; case VIVID_CID_INSERT_SAV: diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 69620e0a35a0..6a790ac8cbe6 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -213,7 +213,7 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); - for (i = 0; i < VIDEO_MAX_FRAME; i++) + for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; if (dev->start_streaming_error) { dev->start_streaming_error = false; diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c index e5a364efd5e6..95a2202879d8 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-api.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -753,9 +753,10 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) for (i = 0; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); ret = call_op(master, g_volatile_ctrl); - new_to_user(c, ctrl); + if (!ret) + ret = new_to_user(c, ctrl); } else { - cur_to_user(c, ctrl); + ret = cur_to_user(c, ctrl); } v4l2_ctrl_unlock(master); return ret; @@ -770,7 +771,10 @@ int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) if (!ctrl || !ctrl->is_int) return -EINVAL; ret = get_ctrl(ctrl, &c); - control->value = c.value; + + if (!ret) + control->value = c.value; + return ret; } EXPORT_SYMBOL(v4l2_g_ctrl); @@ -811,10 +815,11 @@ static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, int ret; v4l2_ctrl_lock(ctrl); - user_to_new(c, ctrl); - ret = set_ctrl(fh, ctrl, 0); + ret = user_to_new(c, ctrl); if (!ret) - cur_to_user(c, ctrl); + ret = set_ctrl(fh, ctrl, 0); + if (!ret) + ret = cur_to_user(c, ctrl); v4l2_ctrl_unlock(ctrl); return ret; } diff --git a/drivers/misc/cardreader/Kconfig 
b/drivers/misc/cardreader/Kconfig index 022322dfb36e..a70700f0e592 100644 --- a/drivers/misc/cardreader/Kconfig +++ b/drivers/misc/cardreader/Kconfig @@ -16,7 +16,8 @@ config MISC_RTSX_PCI select MFD_CORE help This supports for Realtek PCI-Express card reader including rts5209, - rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, rts5260. + rts5227, rts5228, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, + rts5260, rts5261, rts5264. Realtek card readers support access to many types of memory cards, such as Memory Stick, Memory Stick Pro, Secure Digital and MultiMediaCard. diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c index 7c3d8bedf90b..a2ed477e0370 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c @@ -364,6 +364,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev, if (is_eeprom_responsive(priv)) { priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM; priv->nvmem_config_eeprom.name = EEPROM_NAME; + priv->nvmem_config_eeprom.id = NVMEM_DEVID_AUTO; priv->nvmem_config_eeprom.dev = &aux_dev->dev; priv->nvmem_config_eeprom.owner = THIS_MODULE; priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read; @@ -383,6 +384,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev, priv->nvmem_config_otp.type = NVMEM_TYPE_OTP; priv->nvmem_config_otp.name = OTP_NAME; + priv->nvmem_config_otp.id = NVMEM_DEVID_AUTO; priv->nvmem_config_otp.dev = &aux_dev->dev; priv->nvmem_config_otp.owner = THIS_MODULE; priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read; diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 9d090fa07516..be011cef12e5 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -321,7 +321,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb) return; list_del(&cb->list); - kfree(cb->buf.data); + kvfree(cb->buf.data); kfree(cb->ext_hdr); kfree(cb); } @@ -497,7 +497,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); + cb->buf.data = kvmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; } diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 37e804bbb1f2..205945ce9e86 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -258,7 +258,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) int lcpu; BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); - preempt_disable(); bs = gru_lock_kernel_context(-1); lcpu = uv_blade_processor_id(); *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; @@ -272,7 +271,6 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) static void gru_free_cpu_resources(void *cb, void *dsr) { gru_unlock_kernel_context(uv_numa_blade_id()); - preempt_enable(); } /* diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 0f5b09e290c8..3036c15f3689 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -937,10 +937,8 @@ vm_fault_t gru_fault(struct vm_fault *vmf) again: mutex_lock(&gts->ts_ctxlock); - preempt_disable(); if (gru_check_context_placement(gts)) { - preempt_enable(); mutex_unlock(&gts->ts_ctxlock); gru_unload_context(gts, 1); return VM_FAULT_NOPAGE; @@ -949,7 +947,6 @@ vm_fault_t gru_fault(struct vm_fault *vmf) if 
(!gts->ts_gru) { STAT(load_user_context); if (!gru_assign_gru_context(gts)) { - preempt_enable(); mutex_unlock(&gts->ts_ctxlock); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ @@ -965,7 +962,6 @@ vm_fault_t gru_fault(struct vm_fault *vmf) vma->vm_page_prot); } - preempt_enable(); mutex_unlock(&gts->ts_ctxlock); return VM_FAULT_NOPAGE; diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index 10921cd2608d..1107dd3e2e9f 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -65,7 +65,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state struct gru_tlb_global_handle *tgh; int n; - preempt_disable(); if (uv_numa_blade_id() == gru->gs_blade_id) n = get_on_blade_tgh(gru); else @@ -79,7 +78,6 @@ static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh) { unlock_tgh_handle(tgh); - preempt_enable(); } /* diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index d0b3ca8a11f0..4d6844261912 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -388,7 +388,8 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq, blk_queue_rq_timeout(mq->queue, 60 * HZ); - dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); + if (mmc_dev(host)->dma_parms) + dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index d859b1a3ab71..12df4ff9eeee 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -38,9 +38,8 @@ struct mvsd_host { unsigned int xfer_mode; unsigned int intr_en; unsigned int ctrl; - bool use_pio; - struct sg_mapping_iter sg_miter; unsigned int pio_size; + void *pio_ptr; unsigned int sg_frags; unsigned int ns_per_clk; unsigned int clock; @@ -115,18 +114,11 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) * data when the buffer is not aligned on a 64 byte * boundary. 
*/ - unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */ - - if (data->flags & MMC_DATA_READ) - miter_flags |= SG_MITER_TO_SG; - else - miter_flags |= SG_MITER_FROM_SG; - host->pio_size = data->blocks * data->blksz; - sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags); + host->pio_ptr = sg_virt(data->sg); if (!nodma) - dev_dbg(host->dev, "fallback to PIO for data\n"); - host->use_pio = true; + dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", + host->pio_ptr, host->pio_size); return 1; } else { dma_addr_t phys_addr; @@ -137,7 +129,6 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) phys_addr = sg_dma_address(data->sg); mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff); mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16); - host->use_pio = false; return 0; } } @@ -297,8 +288,8 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, { void __iomem *iobase = host->base; - if (host->use_pio) { - sg_miter_stop(&host->sg_miter); + if (host->pio_ptr) { + host->pio_ptr = NULL; host->pio_size = 0; } else { dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, @@ -353,12 +344,9 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, static irqreturn_t mvsd_irq(int irq, void *dev) { struct mvsd_host *host = dev; - struct sg_mapping_iter *sgm = &host->sg_miter; void __iomem *iobase = host->base; u32 intr_status, intr_done_mask; int irq_handled = 0; - u16 *p; - int s; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n", @@ -382,36 +370,15 @@ static irqreturn_t mvsd_irq(int irq, void *dev) spin_lock(&host->lock); /* PIO handling, if needed. Messy business... */ - if (host->use_pio) { - /* - * As we set sgm->consumed this always gives a valid buffer - * position. 
- */ - if (!sg_miter_next(sgm)) { - /* This should not happen */ - dev_err(host->dev, "ran out of scatter segments\n"); - spin_unlock(&host->lock); - host->intr_en &= - ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W | - MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); - mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); - return IRQ_HANDLED; - } - p = sgm->addr; - s = sgm->length; - if (s > host->pio_size) - s = host->pio_size; - } - - if (host->use_pio && + if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) { - + u16 *p = host->pio_ptr; + int s = host->pio_size; while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) { readsw(iobase + MVSD_FIFO, p, 16); p += 16; s -= 32; - sgm->consumed += 32; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } /* @@ -424,7 +391,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev) put_unaligned(mvsd_read(MVSD_FIFO), p++); put_unaligned(mvsd_read(MVSD_FIFO), p++); s -= 4; - sgm->consumed += 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) { @@ -432,13 +398,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev) val[0] = mvsd_read(MVSD_FIFO); val[1] = mvsd_read(MVSD_FIFO); memcpy(p, ((void *)&val) + 4 - s, s); - sgm->consumed += s; s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } - /* PIO transfer done */ - host->pio_size -= sgm->consumed; - if (host->pio_size == 0) { + if (s == 0) { host->intr_en &= ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); @@ -450,10 +413,14 @@ static irqreturn_t mvsd_irq(int irq, void *dev) } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); + host->pio_ptr = p; + host->pio_size = s; irq_handled = 1; - } else if (host->use_pio && + } else if (host->pio_size && (intr_status & host->intr_en & (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) { + u16 *p = host->pio_ptr; + int s = host->pio_size; /* * The TX_FIFO_8W bit is unreliable. When set, bursting * 16 halfwords all at once in the FIFO drops data. Actually @@ -464,7 +431,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev) mvsd_write(MVSD_FIFO, get_unaligned(p++)); mvsd_write(MVSD_FIFO, get_unaligned(p++)); s -= 4; - sgm->consumed += 4; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } if (s < 4) { @@ -473,13 +439,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev) memcpy(((void *)&val) + 4 - s, p, s); mvsd_write(MVSD_FIFO, val[0]); mvsd_write(MVSD_FIFO, val[1]); - sgm->consumed += s; s = 0; intr_status = mvsd_read(MVSD_NOR_INTR_STATUS); } - /* PIO transfer done */ - host->pio_size -= sgm->consumed; - if (host->pio_size == 0) { + if (s == 0) { host->intr_en &= ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W); mvsd_write(MVSD_NOR_INTR_EN, host->intr_en); @@ -487,6 +450,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev) } dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n", s, intr_status, mvsd_read(MVSD_HW_STATE)); + host->pio_ptr = p; + host->pio_size = s; irq_handled = 1; } diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 8999b97263af..8fd80dac11bf 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -852,6 +852,14 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); + /* The T-Head 1520 SoC does not comply with the SDHCI specification + * regarding the "Software Reset for CMD line should clear 'Command + * Complete' in the Normal Interrupt Status Register." 
Clear the bit + * here to compensate for this quirk. + */ + if (mask & SDHCI_RESET_CMD) + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + if (priv->flags & FLAG_IO_FIXED_1V8) { ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) { diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 0f81586a19df..68ce4920e01e 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -892,28 +892,40 @@ static void gl9767_disable_ssc_pll(struct pci_dev *pdev) gl9767_vhs_read(pdev); } +static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable) +{ + u32 value; + + gl9767_vhs_write(pdev); + + pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value); + if (enable) + value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF; + else + value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF; + pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value); + + gl9767_vhs_read(pdev); +} + static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct mmc_ios *ios = &host->mmc->ios; struct pci_dev *pdev; - u32 value; u16 clk; pdev = slot->chip->pdev; host->mmc->actual_clock = 0; - gl9767_vhs_write(pdev); - - pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value); - value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF; - pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value); - + gl9767_set_low_power_negotiation(pdev, false); gl9767_disable_ssc_pll(pdev); sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - if (clock == 0) + if (clock == 0) { + gl9767_set_low_power_negotiation(pdev, true); return; + } clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) { @@ -922,12 +934,7 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock) } sdhci_enable_clk(host, clk); - - pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value); - value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF; - pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value); - - gl9767_vhs_read(pdev); + gl9767_set_low_power_negotiation(pdev, true); } static void gli_set_9767(struct sdhci_host *host) @@ -1061,6 +1068,9 @@ static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios) sdhci_writew(host, value, SDHCI_CLOCK_CONTROL); } + pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value); + value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF; + pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value); gl9767_vhs_read(pdev); return 0; diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index c63f7fc1e691..511615dc3341 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -1011,7 +1011,6 @@ static int c_can_handle_bus_err(struct net_device *dev, /* common for all type of bus errors */ priv->can.can_stats.bus_error++; - stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); @@ -1027,26 +1026,32 @@ static int c_can_handle_bus_err(struct net_device *dev, case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; + stats->rx_errors++; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); cf->data[2] |= CAN_ERR_PROT_FORM; + stats->rx_errors++; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); cf->data[3] = CAN_ERR_PROT_LOC_ACK; + stats->tx_errors++; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; + stats->tx_errors++; break; case 
LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; + stats->tx_errors++; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + stats->rx_errors++; break; default: break; diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig index 467ef19de1c1..aae25c2f849e 100644 --- a/drivers/net/can/cc770/Kconfig +++ b/drivers/net/can/cc770/Kconfig @@ -7,7 +7,7 @@ if CAN_CC770 config CAN_CC770_ISA tristate "ISA Bus based legacy CC770 driver" - depends on ISA + depends on HAS_IOPORT help This driver adds legacy support for CC770 and AN82527 chips connected to the ISA bus using I/O port, memory mapped or diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index a978b960f1f1..16e9e7d7527d 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1765,7 +1765,8 @@ static int m_can_close(struct net_device *dev) netif_stop_queue(dev); m_can_stop(dev); - free_irq(dev->irq, dev); + if (dev->irq) + free_irq(dev->irq, dev); m_can_clean(dev); diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig index e029e2a3ca4b..d203c530551f 100644 --- a/drivers/net/can/rockchip/Kconfig +++ b/drivers/net/can/rockchip/Kconfig @@ -2,7 +2,8 @@ config CAN_ROCKCHIP_CANFD tristate "Rockchip CAN-FD controller" - depends on OF || COMPILE_TEST + depends on OF + depends on ARCH_ROCKCHIP || COMPILE_TEST select CAN_RX_OFFLOAD help Say Y here if you want to use CAN-FD controller found on diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 01168db4c106..2f516cc6d22c 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -87,7 +87,7 @@ config CAN_PLX_PCI config CAN_SJA1000_ISA tristate "ISA Bus based legacy SJA1000 driver" - depends on ISA + depends on HAS_IOPORT help This driver adds legacy support for SJA1000 chips connected to the ISA bus using I/O port, memory mapped or indirect access. diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index e684991fa391..7209a831f0f2 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -483,9 +483,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) }; const struct ethtool_coalesce ec = { .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq, - .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq, + .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ? + 1 : priv->rx_obj_num_coalesce_irq, .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq, - .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq, + .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ? 
+ 1 : priv->tx_obj_num_coalesce_irq, }; struct can_ram_layout layout; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index f732556d233a..d3ac865933fd 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -16,9 +16,9 @@ #include "mcp251xfd.h" -static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta) +static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta) { - return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; } static inline int @@ -122,7 +122,11 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) if (err) return err; - if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) { + /* If the chip says the TX-FIFO is empty, but there are no TX + * buffers free in the ring, we assume all have been sent. + */ + if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) && + mcp251xfd_get_tx_free(tx_ring) == 0) { *len_p = tx_ring->obj_num; return 0; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0783fc121bbb..c39cb119e760 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "b53_regs.h" @@ -224,6 +225,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = { #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) +#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) + static int b53_do_vlan_op(struct b53_device *dev, u8 op) { unsigned int i; @@ -2254,20 +2258,25 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) bool allow_10_100; if (is5325(dev) || is5365(dev)) - return -EOPNOTSUPP; + return 0; if (!dsa_is_cpu_port(ds, port)) return 0; - enable_jumbo = (mtu >= JMS_MIN_SIZE); - allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID); + enable_jumbo = (mtu > ETH_DATA_LEN); + allow_10_100 = !is63xx(dev); return b53_set_jumbo(dev, enable_jumbo, allow_10_100); } static int b53_get_max_mtu(struct dsa_switch *ds, int port) { - return JMS_MAX_SIZE; + struct b53_device *dev = ds->priv; + + if (is5325(dev) || is5365(dev)) + return B53_MAX_MTU_25; + + return B53_MAX_MTU; } static const struct phylink_mac_ops b53_phylink_mac_ops = { diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 268949939636..d246f95d57ec 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip) if (!chip->reset_gpio) return; + gpiod_set_value_cansleep(chip->reset_gpio, 1); + if (chip->reset_duration != 0) msleep(chip->reset_duration); @@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip) static int lan9303_check_device(struct lan9303 *chip) { int ret; + int err; u32 reg; + /* In I2C-managed configurations this polling loop will clash with + * switch's reading of EEPROM right after reset and this behaviour is + * not configurable. While lan9303_read() already has quite long retry + * timeout, seems not all cases are being detected as arbitration error. + * + * According to datasheet, EEPROM loader has 30ms timeout (in case of + * missing EEPROM). + * + * Loading of the largest supported EEPROM is expected to take at least + * 5.9s. 
+ */ + err = read_poll_timeout(lan9303_read, ret, + !ret && reg & LAN9303_HW_CFG_READY, + 20000, 6000000, false, + chip->regmap, LAN9303_HW_CFG, ®); + if (ret) { + dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n", + ERR_PTR(ret)); + return ret; + } + if (err) { + dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg); + return err; + } + ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, ®); if (ret) { dev_err(chip->dev, "failed to read chip revision register: %d\n", diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 4e8710c7cb7b..5290f5ad98f3 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2733,26 +2733,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) return MICREL_KSZ8_P1_ERRATA; break; case KSZ8567_CHIP_ID: + /* KSZ8567R Errata DS80000752C Module 4 */ + case KSZ8765_CHIP_ID: + case KSZ8794_CHIP_ID: + case KSZ8795_CHIP_ID: + /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */ case KSZ9477_CHIP_ID: + /* KSZ9477S Errata DS80000754A Module 4 */ case KSZ9567_CHIP_ID: + /* KSZ9567S Errata DS80000756A Module 4 */ case KSZ9896_CHIP_ID: + /* KSZ9896C Errata DS80000757A Module 3 */ case KSZ9897_CHIP_ID: - /* KSZ9477 Errata DS80000754C - * - * Module 4: Energy Efficient Ethernet (EEE) feature select must - * be manually disabled + /* KSZ9897R Errata DS80000758C Module 4 */ + /* Energy Efficient Ethernet (EEE) feature select must be manually disabled * The EEE feature is enabled by default, but it is not fully * operational. It must be manually disabled through register * controls. If not disabled, the PHY ports can auto-negotiate * to enable EEE, and this feature can cause link drops when * linked to another device supporting EEE. * - * The same item appears in the errata for the KSZ9567, KSZ9896, - * and KSZ9897. - * - * A similar item appears in the errata for the KSZ8567, but - * provides an alternative workaround. For now, use the simple - * workaround of disabling the EEE feature for this device too. + * The same item appears in the errata for all switches above. 
*/ return MICREL_NO_EEE; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5b4e2ce5470d..284270a4ade1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -6347,7 +6347,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .invalid_port_mask = BIT(1) | BIT(2) | BIT(8), .num_internal_phys = 5, .internal_phys_offset = 3, - .max_vid = 4095, + .max_vid = 8191, .max_sid = 63, .port_base_addr = 0x0, .phy_base_addr = 0x0, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index c34caf9815c5..a54682240839 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -206,6 +206,7 @@ struct mv88e6xxx_gpio_ops; struct mv88e6xxx_avb_ops; struct mv88e6xxx_ptp_ops; struct mv88e6xxx_pcs_ops; +struct mv88e6xxx_cc_coeffs; struct mv88e6xxx_irq { u16 masked; @@ -408,6 +409,7 @@ struct mv88e6xxx_chip { struct cyclecounter tstamp_cc; struct timecounter tstamp_tc; struct delayed_work overflow_work; + const struct mv88e6xxx_cc_coeffs *cc_coeffs; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; @@ -731,10 +733,6 @@ struct mv88e6xxx_ptp_ops { int arr1_sts_reg; int dep_sts_reg; u32 rx_filters; - u32 cc_shift; - u32 cc_mult; - u32 cc_mult_num; - u32 cc_mult_dem; }; struct mv88e6xxx_pcs_ops { diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 5394a8cf7bf1..04053fdc6489 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -1713,6 +1713,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, ptr = shift / 8; shift %= 8; mask >>= ptr * 8; + ptr <<= 8; err = mv88e6393x_port_policy_read(chip, port, ptr, ®); if (err) diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 56391e09b325..aed4a4b07f34 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -18,6 +18,13 @@ #define MV88E6XXX_MAX_ADJ_PPB 1000000 +struct mv88e6xxx_cc_coeffs { + u32 cc_shift; + u32 cc_mult; + u32 cc_mult_num; + u32 cc_mult_dem; +}; + /* Family MV88E6250: * Raw timestamps are in units of 10-ns clock periods. * @@ -25,22 +32,43 @@ * simplifies to * clkadj = scaled_ppm * 2^7 / 5^5 */ -#define MV88E6250_CC_SHIFT 28 -#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT) -#define MV88E6250_CC_MULT_NUM (1 << 7) -#define MV88E6250_CC_MULT_DEM 3125ULL +#define MV88E6XXX_CC_10NS_SHIFT 28 +static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = { + .cc_shift = MV88E6XXX_CC_10NS_SHIFT, + .cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT, + .cc_mult_num = 1 << 7, + .cc_mult_dem = 3125ULL, +}; -/* Other families: +/* Other families except MV88E6393X in internal clock mode: * Raw timestamps are in units of 8-ns clock periods. * * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16) * simplifies to * clkadj = scaled_ppm * 2^9 / 5^6 */ -#define MV88E6XXX_CC_SHIFT 28 -#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT) -#define MV88E6XXX_CC_MULT_NUM (1 << 9) -#define MV88E6XXX_CC_MULT_DEM 15625ULL +#define MV88E6XXX_CC_8NS_SHIFT 28 +static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = { + .cc_shift = MV88E6XXX_CC_8NS_SHIFT, + .cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT, + .cc_mult_num = 1 << 9, + .cc_mult_dem = 15625ULL +}; + +/* Family MV88E6393X using internal clock: + * Raw timestamps are in units of 4-ns clock periods. 
+ * + * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16) + * simplifies to + * clkadj = scaled_ppm * 2^8 / 5^6 + */ +#define MV88E6XXX_CC_4NS_SHIFT 28 +static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = { + .cc_shift = MV88E6XXX_CC_4NS_SHIFT, + .cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT, + .cc_mult_num = 1 << 8, + .cc_mult_dem = 15625ULL +}; #define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100) @@ -83,6 +111,33 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); } +static const struct mv88e6xxx_cc_coeffs * +mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip) +{ + u16 period_ps; + int err; + + err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1); + if (err) { + dev_err(chip->dev, "failed to read cycle counter period: %d\n", + err); + return ERR_PTR(err); + } + + switch (period_ps) { + case 4000: + return &mv88e6xxx_cc_4ns_coeffs; + case 8000: + return &mv88e6xxx_cc_8ns_coeffs; + case 10000: + return &mv88e6xxx_cc_10ns_coeffs; + default: + dev_err(chip->dev, "unexpected cycle counter period of %u ps\n", + period_ps); + return ERR_PTR(-ENODEV); + } +} + static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); @@ -204,7 +259,6 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly) static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); - const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int neg_adj = 0; u32 diff, mult; u64 adj; @@ -214,10 +268,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) scaled_ppm = -scaled_ppm; } - mult = ptp_ops->cc_mult; - adj = ptp_ops->cc_mult_num; + mult = chip->cc_coeffs->cc_mult; + adj = chip->cc_coeffs->cc_mult_num; adj *= scaled_ppm; - diff = div_u64(adj, ptp_ops->cc_mult_dem); + diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem); mv88e6xxx_reg_lock(chip); @@ -364,10 +418,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), - .cc_shift = MV88E6XXX_CC_SHIFT, - .cc_mult = MV88E6XXX_CC_MULT, - .cc_mult_num = MV88E6XXX_CC_MULT_NUM, - .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = { @@ -391,10 +441,6 @@ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), - .cc_shift = MV88E6250_CC_SHIFT, - .cc_mult = MV88E6250_CC_MULT, - .cc_mult_num = MV88E6250_CC_MULT_NUM, - .cc_mult_dem = MV88E6250_CC_MULT_DEM, }; const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { @@ -418,10 +464,6 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), - .cc_shift = MV88E6XXX_CC_SHIFT, - .cc_mult = MV88E6XXX_CC_MULT, - .cc_mult_num = MV88E6XXX_CC_MULT_NUM, - .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = { @@ -446,10 +488,6 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = { (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), - .cc_shift = MV88E6XXX_CC_SHIFT, - .cc_mult = MV88E6XXX_CC_MULT, - .cc_mult_num = MV88E6XXX_CC_MULT_NUM, - .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; static u64 
mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) @@ -462,10 +500,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) return 0; } -/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 +/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2 * seconds; this task forces periodic reads so that we don't miss any. */ -#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16) +#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 8) static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) { struct delayed_work *dw = to_delayed_work(work); @@ -484,11 +522,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) int i; /* Set up the cycle counter */ + chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip); + if (IS_ERR(chip->cc_coeffs)) + return PTR_ERR(chip->cc_coeffs); + memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc)); chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read; chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32); - chip->tstamp_cc.mult = ptp_ops->cc_mult; - chip->tstamp_cc.shift = ptp_ops->cc_shift; + chip->tstamp_cc.mult = chip->cc_coeffs->cc_mult; + chip->tstamp_cc.shift = chip->cc_coeffs->cc_shift; timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ktime_to_ns(ktime_get_real())); diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index bc7e50dcb57c..d0563ef59acf 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3158,7 +3158,6 @@ static int sja1105_setup(struct dsa_switch *ds) * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. */ ds->vlan_filtering_is_global = true; - ds->untag_bridge_pvid = true; ds->fdb_isolation = true; ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index e4b98fd51643..f18aa321053d 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -851,7 +851,6 @@ static int vsc73xx_setup(struct dsa_switch *ds) dev_info(vsc->dev, "set up the switch\n"); - ds->untag_bridge_pvid = true; ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; ds->fdb_isolation = true; diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index a98b3139606a..68fad5575fd4 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) * from the ADIN1110 frame header. */ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN) - return ret; + return -EINVAL; round_len = adin1110_round_len(frame_size); if (round_len < 0) - return ret; + return -EINVAL; frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN; memset(priv->data, 0, ADIN1110_RD_HEADER_LEN); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 27af7746d645..adf6f67c5fcb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->len > MAX_FRAME_SIZE)) { dev->stats.tx_errors++; - goto out; + goto len_error; } /* Save skb pointer. 
*/ @@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) map_error: if (net_ratelimit()) dev_warn(greth->dev, "Could not create TX DMA mapping\n"); +len_error: dev_kfree_skb(skb); out: return err; diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index c156566c0906..f19b04b92fa9 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -105,10 +105,6 @@ static struct net_device * __init mvme147lance_probe(void) macaddr[3] = address&0xff; eth_hw_addr_set(dev, macaddr); - printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n", - dev->name, dev->base_addr, MVME147_LANCE_IRQ, - dev->dev_addr); - lp = netdev_priv(dev); lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */ if (!lp->ram) { @@ -138,6 +134,9 @@ static struct net_device * __init mvme147lance_probe(void) return ERR_PTR(err); } + netdev_info(dev, "MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n", + dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr); + return dev; } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 31ee477dd131..8283aeee35fb 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; + struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < TX_BD_NUM; i++) { @@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) stats->tx_bytes += skb->len; } - dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr), dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); /* return the sk_buff to system */ @@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) static int arc_emac_rx(struct net_device *ndev, int budget) { struct arc_emac_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; unsigned int work_done; for (work_done = 0; work_done < budget; work_done++) { @@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget) continue; } - addr = dma_map_single(&ndev->dev, (void *)skb->data, + addr = dma_map_single(dev, (void *)skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, addr)) { + if (dma_mapping_error(dev, addr)) { if (net_ratelimit()) netdev_err(ndev, "cannot map dma buffer\n"); dev_kfree_skb(skb); @@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) } /* unmap previosly mapped skb */ - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr), dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); pktlen = info & LEN_MASK; @@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct phy_device *phy_dev = ndev->phydev; + struct device *dev = ndev->dev.parent; int i; phy_dev->autoneg = AUTONEG_ENABLE; @@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev) if (unlikely(!rx_buff->skb)) return -ENOMEM; - addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + addr = dma_map_single(dev, (void *)rx_buff->skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, addr)) { + if (dma_mapping_error(dev, addr)) { netdev_err(ndev, "cannot dma map\n"); dev_kfree_skb(rx_buff->skb); return -ENOMEM; @@ -548,6 +551,7 @@ static void 
arc_emac_set_rx_mode(struct net_device *ndev) static void arc_free_tx_queue(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < TX_BD_NUM; i++) { @@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev) struct buffer_state *tx_buff = &priv->tx_buff[i]; if (tx_buff->skb) { - dma_unmap_single(&ndev->dev, + dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr), dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); @@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev) static void arc_free_rx_queue(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; unsigned int i; for (i = 0; i < RX_BD_NUM; i++) { @@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev) struct buffer_state *rx_buff = &priv->rx_buff[i]; if (rx_buff->skb) { - dma_unmap_single(&ndev->dev, + dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr), dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); @@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) unsigned int len, *txbd_curr = &priv->txbd_curr; struct net_device_stats *stats = &ndev->stats; __le32 *info = &priv->txbd[*txbd_curr].info; + struct device *dev = ndev->dev.parent; dma_addr_t addr; if (skb_padto(skb, ETH_ZLEN)) @@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_BUSY; } - addr = dma_map_single(&ndev->dev, (void *)skb->data, len, - DMA_TO_DEVICE); + addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&ndev->dev, addr))) { + if (unlikely(dma_mapping_error(dev, addr))) { stats->tx_dropped++; stats->tx_errors++; dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 87f40c2ba904..078b1a72c161 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -133,6 +133,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) struct arc_emac_mdio_bus_data *data = &priv->bus_data; struct device_node *np = priv->dev->of_node; const char *name = "Synopsys MII Bus"; + struct device_node *mdio_node; struct mii_bus *bus; int error; @@ -164,7 +165,13 @@ int arc_mdio_probe(struct arc_emac_priv *priv) snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); - error = of_mdiobus_register(bus, priv->dev->of_node); + /* Backwards compatibility for EMAC nodes without MDIO subnode. 
*/ + mdio_node = of_get_child_by_name(np, "mdio"); + if (!mdio_node) + mdio_node = of_node_get(np); + + error = of_mdiobus_register(bus, mdio_node); + of_node_put(mdio_node); if (error) { mdiobus_free(bus); return dev_err_probe(priv->dev, error, diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 82768b0e9026..9ea16ef4139d 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev) } /* Rewind so we do not have a hole */ spb_index = intf->tx_spb_index; + dev_kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c9faa8540859..0a68b526e4a8 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", skb->data, skb_len); ret = NETDEV_TX_OK; + dev_kfree_skb_any(skb); goto out; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6e422e24750a..99d025b69079 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2254,10 +2254,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + unsigned long flags; - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb))); skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); @@ -2757,17 +2758,18 @@ static int bnxt_async_event_process(struct bnxt *bp, case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: if (BNXT_PTP_USE_RTC(bp)) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + unsigned long flags; u64 ns; if (!ptp) goto async_event_process_exit; - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_ptp_update_current_time(bp); ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << BNXT_PHC_BITS) | ptp->current_time); bnxt_ptp_rtc_timecounter_init(ptp, ns); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); } break; } @@ -13494,9 +13496,11 @@ static void bnxt_force_fw_reset(struct bnxt *bp) return; if (ptp) { - spin_lock_bh(&ptp->ptp_lock); + unsigned long flags; + + spin_lock_irqsave(&ptp->ptp_lock, flags); set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); } else { set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); } @@ -13561,9 +13565,11 @@ void bnxt_fw_reset(struct bnxt *bp) int n = 0, tmo; if (ptp) { - spin_lock_bh(&ptp->ptp_lock); + unsigned long flags; + + spin_lock_irqsave(&ptp->ptp_lock, flags); set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); } else { set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 37d42423459c..fa514be87650 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -62,13 
+62,14 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); u64 ns = timespec64_to_ns(ts); + unsigned long flags; if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_cfg_settime(ptp->bp, ns); - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); timecounter_init(&ptp->tc, &ptp->cc, ns); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); return 0; } @@ -100,13 +101,14 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, static void bnxt_ptp_get_current_time(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + unsigned long flags; if (!ptp) return; - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); WRITE_ONCE(ptp->old_time, ptp->current_time); bnxt_refclk_read(bp, NULL, &ptp->current_time); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, @@ -149,17 +151,18 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); + unsigned long flags; u64 ns, cycles; int rc; - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); rc = bnxt_refclk_read(ptp->bp, sts, &cycles); if (rc) { - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); return rc; } ns = timecounter_cyc2time(&ptp->tc, cycles); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); *ts = ns_to_timespec64(ns); return 0; @@ -177,6 +180,7 @@ void bnxt_ptp_update_current_time(struct bnxt *bp) static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta) { struct hwrm_port_mac_cfg_input *req; + unsigned long flags; int rc; rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG); @@ -190,9 +194,9 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta) if (rc) { netdev_err(ptp->bp->dev, "ptp adjphc failed. 
rc = %x\n", rc); } else { - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_ptp_update_current_time(ptp->bp); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); } return rc; @@ -202,13 +206,14 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); + unsigned long flags; if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_adjphc(ptp, delta); - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); timecounter_adjtime(&ptp->tc, delta); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); return 0; } @@ -236,14 +241,15 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); struct bnxt *bp = ptp->bp; + unsigned long flags; if (!BNXT_MH(bp)) return bnxt_ptp_adjfine_rtc(bp, scaled_ppm); - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); timecounter_read(&ptp->tc); ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); return 0; } @@ -251,12 +257,13 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct ptp_clock_event event; + unsigned long flags; u64 ns, pps_ts; pps_ts = EVENT_PPS_TS(data2, data1); - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); ns = timecounter_cyc2time(&ptp->tc, pps_ts); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) { case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL: @@ -393,16 +400,17 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, { u64 cycles_now; u64 nsec_now, nsec_delta; + unsigned long flags; int rc; - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now); if (rc) { - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); return rc; } nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); nsec_delta = target_ns - nsec_now; *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult); @@ -689,6 +697,7 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) struct skb_shared_hwtstamps timestamp; struct bnxt_ptp_tx_req *txts_req; unsigned long now = jiffies; + unsigned long flags; u64 ts = 0, ns = 0; u32 tmo = 0; int rc; @@ -702,9 +711,9 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) tmo, slot); if (!rc) { memset(×tamp, 0, sizeof(timestamp)); - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(txts_req->tx_skb, ×tamp); ptp->stats.ts_pkts++; @@ -730,6 +739,7 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) unsigned long now = jiffies; struct bnxt *bp = ptp->bp; u16 cons = ptp->txts_cons; + unsigned long flags; u32 num_requests; int rc = 0; @@ -757,9 +767,9 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) bnxt_ptp_get_current_time(bp); ptp->next_period = now + HZ; if (time_after_eq(now, 
ptp->next_overflow_check)) { - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); timecounter_read(&ptp->tc); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; } if (rc == -EAGAIN) @@ -819,6 +829,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, u32 opaque = tscmp->tx_ts_cmp_opaque; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; + unsigned long flags; u64 ts, ns; u16 cons; @@ -833,9 +844,9 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, le32_to_cpu(tscmp->tx_ts_cmp_flags_type), le32_to_cpu(tscmp->tx_ts_cmp_errors_v)); } else { - spin_lock_bh(&ptp->ptp_lock); + spin_lock_irqsave(&ptp->ptp_lock, flags); ns = timecounter_cyc2time(&ptp->tc, ts); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(tx_buf->skb, ×tamp); } @@ -975,6 +986,7 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns) int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) { struct timespec64 tsp; + unsigned long flags; u64 ns; int rc; @@ -993,9 +1005,9 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) if (rc) return rc; } - spin_lock_bh(&bp->ptp_cfg->ptp_lock); + spin_lock_irqsave(&bp->ptp_cfg->ptp_lock, flags); bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns); - spin_unlock_bh(&bp->ptp_cfg->ptp_lock); + spin_unlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags); return 0; } @@ -1063,10 +1075,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) atomic64_set(&ptp->stats.ts_err, 0); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - spin_lock_bh(&ptp->ptp_lock); + unsigned long flags; + + spin_lock_irqsave(&ptp->ptp_lock, flags); bnxt_refclk_read(bp, NULL, &ptp->current_time); WRITE_ONCE(ptp->old_time, ptp->current_time); - spin_unlock_bh(&ptp->ptp_lock); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); ptp_schedule_worker(ptp->ptp_clock, 0); } ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index a9a2f9a18c9c..f322466ecad3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -146,11 +146,13 @@ struct bnxt_ptp_cfg { }; #if BITS_PER_LONG == 32 -#define BNXT_READ_TIME64(ptp, dst, src) \ -do { \ - spin_lock_bh(&(ptp)->ptp_lock); \ - (dst) = (src); \ - spin_unlock_bh(&(ptp)->ptp_lock); \ +#define BNXT_READ_TIME64(ptp, dst, src) \ +do { \ + unsigned long flags; \ + \ + spin_lock_irqsave(&(ptp)->ptp_lock, flags); \ + (dst) = (src); \ + spin_unlock_irqrestore(&(ptp)->ptp_lock, flags); \ } while (0) #else #define BNXT_READ_TIME64(ptp, dst, src) \ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f06babec04a0..56901280ba04 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp) return ret; } - if (of_phy_is_fixed_link(np)) - return mdiobus_register(bp->mii_bus); - /* Only create the PHY from the device tree if at least one PHY is * described. Otherwise scan the entire MDIO bus. 
We do this to support * old device tree that did not follow the best practices and did not @@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp) static int macb_mii_init(struct macb *bp) { + struct device_node *child, *np = bp->pdev->dev.of_node; int err = -ENXIO; + /* With fixed-link, we don't need to register the MDIO bus, + * except if we have a child named "mdio" in the device tree. + * In that case, some devices may be attached to the MACB's MDIO bus. + */ + child = of_get_child_by_name(np, "mdio"); + if (child) + of_node_put(child); + else if (of_phy_is_fixed_link(np)) + return macb_mii_probe(bp->dev); + /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a8596ebcdfd6..875fe379eea2 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1381,10 +1381,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) be_get_wrb_params_from_skb(adapter, skb, &wrb_params); wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); - if (unlikely(!wrb_cnt)) { - dev_kfree_skb_any(skb); - goto drop; - } + if (unlikely(!wrb_cnt)) + goto drop_skb; /* if os2bmc is enabled and if the pkt is destined to bmc, * enqueue the pkt a 2nd time with mgmt bit set. @@ -1393,7 +1391,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) BE_WRB_F_SET(wrb_params.features, OS2BMC, 1); wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) - goto drop; + goto drop_skb; else skb_get(skb); } @@ -1407,6 +1405,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) be_xmit_flush(adapter, txo); return NETDEV_TX_OK; +drop_skb: + dev_kfree_skb_any(skb); drop: tx_stats(txo)->tx_drv_drops++; /* Flush the already enqueued tx requests */ diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index f3cc14cc757d..0b61f548fd18 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1906,7 +1906,12 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_phy_connect; } - phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL); + phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np); + if (IS_ERR(phydev)) { + dev_err(&pdev->dev, "failed to register fixed PHY device\n"); + err = PTR_ERR(phydev); + goto err_phy_connect; + } err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link, PHY_INTERFACE_MODE_MII); if (err) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h index 6f0e58a2a58a..9e1d44ae92cc 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h @@ -56,7 +56,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd, __entry->fd_format = qm_fd_get_format(fd); __entry->fd_offset = qm_fd_get_offset(fd); __entry->fd_length = qm_fd_get_length(fd); - __entry->fd_status = fd->status; + __entry->fd_status = __be32_to_cpu(fd->status); __assign_str(name); ), diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 032d8eadd003..c09370eab319 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) if 
(unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && __netif_subqueue_stopped(ndev, tx_ring->index) && + !test_bit(ENETC_TX_DOWN, &priv->flags) && (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { netif_wake_subqueue(ndev, tx_ring->index); } @@ -1377,6 +1378,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames, int xdp_tx_bd_cnt, i, k; int xdp_tx_frm_cnt = 0; + if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) + return -ENETDOWN; + enetc_lock_mdio(); tx_ring = priv->xdp_tx_ring[smp_processor_id()]; @@ -1521,7 +1525,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, &rx_ring->rx_swbd[rx_ring_first]); enetc_bdr_idx_inc(rx_ring, &rx_ring_first); } - rx_ring->stats.xdp_drops++; } static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, @@ -1586,6 +1589,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, fallthrough; case XDP_DROP: enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_drops++; break; case XDP_PASS: rxbd = orig_rxbd; @@ -1602,6 +1606,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, break; case XDP_TX: tx_ring = priv->xdp_tx_ring[rx_ring->index]; + if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) { + enetc_xdp_drop(rx_ring, orig_i, i); + tx_ring->stats.xdp_tx_drops++; + break; + } + xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, rx_ring, orig_i, i); @@ -2223,16 +2233,22 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); } -static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) +static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_enable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv) { struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) enetc_enable_txbdr(hw, priv->tx_ring[i]); - - for (i = 0; i < priv->num_rx_rings; i++) - enetc_enable_rxbdr(hw, priv->rx_ring[i]); } static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) @@ -2251,16 +2267,22 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); } -static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) +static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_disable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv) { struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) enetc_disable_txbdr(hw, priv->tx_ring[i]); - - for (i = 0; i < priv->num_rx_rings; i++) - enetc_disable_rxbdr(hw, priv->rx_ring[i]); } static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) @@ -2460,9 +2482,13 @@ void enetc_start(struct net_device *ndev) enable_irq(irq); } - enetc_enable_bdrs(priv); + enetc_enable_tx_bdrs(priv); + + enetc_enable_rx_bdrs(priv); netif_tx_start_all_queues(ndev); + + clear_bit(ENETC_TX_DOWN, &priv->flags); } EXPORT_SYMBOL_GPL(enetc_start); @@ -2520,9 +2546,15 @@ void enetc_stop(struct net_device *ndev) struct enetc_ndev_priv *priv = netdev_priv(ndev); int i; + set_bit(ENETC_TX_DOWN, &priv->flags); + netif_tx_stop_all_queues(ndev); - enetc_disable_bdrs(priv); + enetc_disable_rx_bdrs(priv); + + enetc_wait_bdrs(priv); + + enetc_disable_tx_bdrs(priv); for (i = 
0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(priv->si->pdev, @@ -2533,8 +2565,6 @@ void enetc_stop(struct net_device *ndev) napi_disable(&priv->int_vector[i]->napi); } - enetc_wait_bdrs(priv); - enetc_clear_interrupts(priv); } EXPORT_SYMBOL_GPL(enetc_stop); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 97524dfa234c..fb7d98d57783 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -325,6 +325,7 @@ enum enetc_active_offloads { enum enetc_flags_bit { ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0, + ENETC_TX_DOWN, }; /* interrupt coalescing modes */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 8f6b0bf48139..c95a7c083b0f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -665,19 +665,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!num_vfs) { enetc_msg_psi_free(pf); - kfree(pf->vf_state); pf->num_vfs = 0; pci_disable_sriov(pdev); } else { pf->num_vfs = num_vfs; - pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), - GFP_KERNEL); - if (!pf->vf_state) { - pf->num_vfs = 0; - return -ENOMEM; - } - err = enetc_msg_psi_init(pf); if (err) { dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); @@ -696,7 +688,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) err_en_sriov: enetc_msg_psi_free(pf); err_msg_psi: - kfree(pf->vf_state); pf->num_vfs = 0; return err; @@ -1286,6 +1277,12 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf = enetc_si_priv(si); pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); + if (pf->total_vfs) { + pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) + goto err_alloc_vf_state; + } err = enetc_setup_mac_addresses(node, pf); if (err) @@ -1363,6 +1360,8 @@ static int enetc_pf_probe(struct pci_dev *pdev, free_netdev(ndev); err_alloc_netdev: err_setup_mac_addresses: + kfree(pf->vf_state); +err_alloc_vf_state: enetc_psi_destroy(pdev); err_psi_create: return err; @@ -1389,6 +1388,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_free_si_resources(priv); free_netdev(si->ndev); + kfree(pf->vf_state); enetc_psi_destroy(pdev); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index dfcaac302e24..b15db70769e5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -78,11 +78,18 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct sockaddr *saddr = addr; + int err; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - return enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + if (err) + return err; + + eth_hw_addr_set(ndev, saddr->sa_data); + + return 0; } static int enetc_vf_set_features(struct net_device *ndev, diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 31ebf6a4f973..9d9fcec41488 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1077,7 +1077,8 @@ fec_restart(struct net_device *ndev) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = FEC_ECR_ETHEREN; - fec_ptp_save_state(fep); + if (fep->bufdesc_ex) + 
fec_ptp_save_state(fep); /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC @@ -1340,7 +1341,8 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - fec_ptp_save_state(fep); + if (fep->bufdesc_ex) + fec_ptp_save_state(fep); /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 9767586b4eb3..11da139082e1 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -197,55 +197,67 @@ static int mac_probe(struct platform_device *_of_dev) err = -EINVAL; goto _return_of_node_put; } + mac_dev->fman_dev = &of_dev->dev; /* Get the FMan cell-index */ err = of_property_read_u32(dev_node, "cell-index", &val); if (err) { dev_err(dev, "failed to read cell-index for %pOF\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_put; } /* cell-index 0 => FMan id 1 */ fman_id = (u8)(val + 1); - priv->fman = fman_bind(&of_dev->dev); + priv->fman = fman_bind(mac_dev->fman_dev); if (!priv->fman) { dev_err(dev, "fman_bind(%pOF) failed\n", dev_node); err = -ENODEV; - goto _return_of_node_put; + goto _return_dev_put; } + /* Two references have been taken in of_find_device_by_node() + * and fman_bind(). Release one of them here. The second one + * will be released in mac_remove(). + */ + put_device(mac_dev->fman_dev); of_node_put(dev_node); + dev_node = NULL; /* Get the address of the memory mapped registers */ mac_dev->res = platform_get_mem_or_io(_of_dev, 0); if (!mac_dev->res) { dev_err(dev, "could not get registers\n"); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; } err = devm_request_resource(dev, fman_get_mem_region(priv->fman), mac_dev->res); if (err) { dev_err_probe(dev, err, "could not request resource\n"); - return err; + goto _return_dev_put; } mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, resource_size(mac_dev->res)); if (!mac_dev->vaddr) { dev_err(dev, "devm_ioremap() failed\n"); - return -EIO; + err = -EIO; + goto _return_dev_put; } - if (!of_device_is_available(mac_node)) - return -ENODEV; + if (!of_device_is_available(mac_node)) { + err = -ENODEV; + goto _return_dev_put; + } /* Get the cell-index */ err = of_property_read_u32(mac_node, "cell-index", &val); if (err) { dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; } priv->cell_index = (u8)val; @@ -259,22 +271,26 @@ static int mac_probe(struct platform_device *_of_dev) if (unlikely(nph < 0)) { dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", mac_node); - return nph; + err = nph; + goto _return_dev_put; } if (nph != ARRAY_SIZE(mac_dev->port)) { dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_put; } - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { + /* PORT_NUM determines the size of the port array */ + for (i = 0; i < PORT_NUM; i++) { /* Find the port node */ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); if (!dev_node) { dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n", mac_node); - return -EINVAL; + err = -EINVAL; + goto _return_dev_arr_put; } of_dev = of_find_device_by_node(dev_node); @@ -282,17 +298,24 @@ static int mac_probe(struct platform_device *_of_dev) dev_err(dev, 
"of_find_device_by_node(%pOF) failed\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_arr_put; } + mac_dev->fman_port_devs[i] = &of_dev->dev; - mac_dev->port[i] = fman_port_bind(&of_dev->dev); + mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]); if (!mac_dev->port[i]) { dev_err(dev, "dev_get_drvdata(%pOF) failed\n", dev_node); err = -EINVAL; - goto _return_of_node_put; + goto _return_dev_arr_put; } + /* Two references have been taken in of_find_device_by_node() + * and fman_port_bind(). Release one of them here. The second + * one will be released in mac_remove(). + */ + put_device(mac_dev->fman_port_devs[i]); of_node_put(dev_node); + dev_node = NULL; } /* Get the PHY connection type */ @@ -312,7 +335,7 @@ static int mac_probe(struct platform_device *_of_dev) err = init(mac_dev, mac_node, ¶ms); if (err < 0) - return err; + goto _return_dev_arr_put; if (!is_zero_ether_addr(mac_dev->addr)) dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); @@ -327,6 +350,12 @@ static int mac_probe(struct platform_device *_of_dev) return err; +_return_dev_arr_put: + /* mac_dev is kzalloc'ed */ + for (i = 0; i < PORT_NUM; i++) + put_device(mac_dev->fman_port_devs[i]); +_return_dev_put: + put_device(mac_dev->fman_dev); _return_of_node_put: of_node_put(dev_node); return err; @@ -335,6 +364,11 @@ static int mac_probe(struct platform_device *_of_dev) static void mac_remove(struct platform_device *pdev) { struct mac_device *mac_dev = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < PORT_NUM; i++) + put_device(mac_dev->fman_port_devs[i]); + put_device(mac_dev->fman_dev); platform_device_unregister(mac_dev->priv->eth_dev); } diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index fe747915cc73..8b5b43d50f8e 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -19,12 +19,13 @@ struct fman_mac; struct mac_priv_s; +#define PORT_NUM 2 struct mac_device { void __iomem *vaddr; struct device *dev; struct resource *res; u8 addr[ETH_ALEN]; - struct fman_port *port[2]; + struct fman_port *port[PORT_NUM]; struct phylink *phylink; struct phylink_config phylink_config; phy_interface_t phy_if; @@ -52,6 +53,9 @@ struct mac_device { struct fman_mac *fman_mac; struct mac_priv_s *priv; + + struct device *fman_dev; + struct device *fman_port_devs[PORT_NUM]; }; static inline struct mac_device diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 67b0bf310daa..9a63fbc69408 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -25,8 +25,11 @@ void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo) pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!pci_id) continue; - if (IS_ENABLED(CONFIG_PCI_IOV)) + if (IS_ENABLED(CONFIG_PCI_IOV)) { + device_lock(&ae_dev->pdev->dev); pci_disable_sriov(ae_dev->pdev); + device_unlock(&ae_dev->pdev->dev); + } } } EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index f2d4669c81cf..58a3d28d938c 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -1012,6 +1012,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) if(skb->len > XMIT_BUFF_SIZE) { printk("%s: Sorry, max. framelength is %d bytes. 
The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); + dev_kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index d92dd9c83031..99d5f83f7c60 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -578,7 +578,7 @@ static int mal_probe(struct platform_device *ofdev) printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n", ofdev->dev.of_node); err = -ENODEV; - goto fail; + goto fail_unmap; #endif } @@ -742,6 +742,8 @@ static void mal_remove(struct platform_device *ofdev) free_netdev(mal->dummy_dev); + dcr_unmap(mal->dcr_host, 0x100); + dma_free_coherent(&ofdev->dev, sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 87e693a81433..97425c06e1ed 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2472,9 +2472,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) /* if we are going to send_subcrq_direct this then we need to * update the checksum before copying the data into ltb. Essentially * these packets force disable CSO so that we can guarantee that - * FW does not need header info and we can send direct. + * FW does not need header info and we can send direct. Also, vnic + * server must be able to xmit standard packets without header data */ - if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) { + if (*hdrs == 0 && !skb_is_gso(skb) && + !ind_bufp->index && !netdev_xmit_more()) { use_scrq_send_direct = true; if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 4b6e7536170a..fc8ed38aa095 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -108,8 +108,8 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8 #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B -#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C -#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D +#define E1000_DEV_ID_PCH_ADP_I219_LM19 0x550C +#define E1000_DEV_ID_PCH_ADP_I219_V19 0x550D #define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E #define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F #define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ce227b56cf72..2f9655cf5dd9 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1205,12 +1205,10 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; - if (hw->mac.type != e1000_pch_mtp) { - ret_val = e1000e_force_smbus(hw); - if (ret_val) { - e_dbg("Failed to force SMBUS: %d\n", ret_val); - goto release; - } + ret_val = e1000e_force_smbus(hw); + if (ret_val) { + e_dbg("Failed to force SMBUS: %d\n", ret_val); + goto release; } /* Si workaround for ULP entry flow on i127/rev6 h/w. 
Enable @@ -1273,13 +1271,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) } release: - if (hw->mac.type == e1000_pch_mtp) { - ret_val = e1000e_force_smbus(hw); - if (ret_val) - e_dbg("Failed to force SMBUS over MTL system: %d\n", - ret_val); - } - hw->phy.ops.release(hw); out: if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index f103249b12fa..07e903346358 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7899,10 +7899,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp }, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2089a0e172bf..d4255c2706fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -755,6 +755,7 @@ enum i40e_filter_state { I40E_FILTER_ACTIVE, /* Added to switch by FW */ I40E_FILTER_FAILED, /* Rejected by FW */ I40E_FILTER_REMOVE, /* To be removed */ + I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() */ /* There is no 'removed' state; the filter struct is freed */ }; struct i40e_mac_filter { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index abf624d770e6..208c2f0857b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -89,6 +89,7 @@ static char *i40e_filter_state_string[] = { "ACTIVE", "FAILED", "REMOVE", + "NEW_SYNC", }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 03205eb9f925..55fb362eb508 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1255,6 +1255,7 @@ int i40e_count_filters(struct i40e_vsi *vsi) hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC || f->state == I40E_FILTER_ACTIVE) ++cnt; } @@ -1441,6 +1442,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, new->f = add_head; new->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new->hlist, tmp_add_list); @@ -1550,6 +1553,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, return -ENOMEM; new_mac->f = add_head; new_mac->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new_mac->hlist, tmp_add_list); @@ -1734,6 +1739,7 @@ 
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, struct hlist_node *h; int bkt; + lockdep_assert_held(&vsi->mac_filter_hash_lock); if (vsi->info.pvid) return i40e_add_filter(vsi, macaddr, le16_to_cpu(vsi->info.pvid)); @@ -2436,7 +2442,8 @@ static int i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { - bool enable = f->state == I40E_FILTER_NEW; + bool enable = f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC; struct i40e_hw *hw = &vsi->back->hw; int aq_ret; @@ -2610,6 +2617,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* Add it to the hash list */ hlist_add_head(&new->hlist, &tmp_add_list); + f->state = I40E_FILTER_NEW_SYNC; } /* Count the number of active (current and new) VLAN @@ -2761,7 +2769,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) spin_lock_bh(&vsi->mac_filter_hash_lock); hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* Only update the state if we're still NEW */ - if (new->f->state == I40E_FILTER_NEW) + if (new->f->state == I40E_FILTER_NEW || + new->f->state == I40E_FILTER_NEW_SYNC) new->f->state = new->state; hlist_del(&new->hlist); netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 662622f01e31..dfa785e39458 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2213,8 +2213,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vsi_res[0].qset_handle = le16_to_cpu(vsi->info.qs_handle[0]); if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, vf->default_lan_addr.addr); eth_zero_addr(vf->default_lan_addr.addr); + spin_unlock_bh(&vsi->mac_filter_hash_lock); } ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c index 928c8bdb6649..c6779d9dffff 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -989,5 +989,11 @@ ice_devlink_port_new(struct devlink *devlink, if (err) return err; + if (!ice_is_eswitch_mode_switchdev(pf)) { + NL_SET_ERR_MSG_MOD(extack, + "SF ports are only supported in eswitch switchdev mode"); + return -EOPNOTSUPP; + } + return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port); } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 953262b88a58..272fd823a825 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -31,7 +31,7 @@ static const struct ice_tunnel_type_scan tnls[] = { * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. 
*/ -static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; @@ -57,13 +57,13 @@ static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { u32 off = le32_to_cpu(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; + const struct ice_generic_seg_hdr *seg; /* segment header must fit */ if (len < off + sizeof(*seg)) return ICE_DDP_PKG_INVALID_FILE; - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + seg = (void *)pkg + off; /* segment body must fit */ if (len < off + le32_to_cpu(seg->seg_size)) @@ -119,13 +119,13 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This helper function validates a buffer's header. */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf) { - struct ice_buf_hdr *hdr; + const struct ice_buf_hdr *hdr; u16 section_count; u16 data_end; - hdr = (struct ice_buf_hdr *)buf->buf; + hdr = (const struct ice_buf_hdr *)buf->buf; /* verify data */ section_count = le16_to_cpu(hdr->section_count); if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) @@ -165,8 +165,8 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) * unexpected value has been detected (for example an invalid section count or * an invalid buffer end value). */ -static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, - struct ice_pkg_enum *state) +static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) { if (ice_seg) { state->buf_table = ice_find_buf_table(ice_seg); @@ -1800,9 +1800,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * success it returns a pointer to the segment header, otherwise it will * return NULL. */ -static struct ice_generic_seg_hdr * +static const struct ice_generic_seg_hdr * ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) + const struct ice_pkg_hdr *pkg_hdr) { u32 i; @@ -1813,11 +1813,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, /* Search all package segments for the requested segment type */ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; + const struct ice_generic_seg_hdr *seg; - seg = (struct ice_generic_seg_hdr - *)((u8 *)pkg_hdr + - le32_to_cpu(pkg_hdr->seg_offset[i])); + seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]); if (le32_to_cpu(seg->seg_type) == seg_type) return seg; @@ -2354,12 +2352,12 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, * * Return: zero when update was successful, negative values otherwise. 
*/ -int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) +int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len) { - u8 *current_topo, *new_topo = NULL; - struct ice_run_time_cfg_seg *seg; - struct ice_buf_hdr *section; - struct ice_pkg_hdr *pkg_hdr; + u8 *new_topo = NULL, *topo __free(kfree) = NULL; + const struct ice_run_time_cfg_seg *seg; + const struct ice_buf_hdr *section; + const struct ice_pkg_hdr *pkg_hdr; enum ice_ddp_state state; u16 offset, size = 0; u32 reg = 0; @@ -2375,15 +2373,13 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) return -EOPNOTSUPP; } - current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); - if (!current_topo) + topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!topo) return -ENOMEM; - /* Get the current Tx topology */ - status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, - &flags, false); - - kfree(current_topo); + /* Get the current Tx topology flags */ + status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags, + false); if (status) { ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); @@ -2419,7 +2415,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) goto update_topo; } - pkg_hdr = (struct ice_pkg_hdr *)buf; + pkg_hdr = (const struct ice_pkg_hdr *)buf; state = ice_verify_pkg(pkg_hdr, len); if (state) { ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n", @@ -2428,7 +2424,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) } /* Find runtime configuration segment */ - seg = (struct ice_run_time_cfg_seg *) + seg = (const struct ice_run_time_cfg_seg *) ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); if (!seg) { ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); @@ -2461,8 +2457,10 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) return -EIO; } - /* Get the new topology buffer */ - new_topo = ((u8 *)section) + offset; + /* Get the new topology buffer, reuse current topo copy mem */ + static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN); + new_topo = topo; + memcpy(new_topo, (u8 *)section + offset, size); update_topo: /* Acquire global lock to make sure that set topology issued diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index 97f272317475..79551da2a4b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -438,7 +438,7 @@ struct ice_pkg_enum { u32 buf_idx; u32 type; - struct ice_buf_hdr *buf; + const struct ice_buf_hdr *buf; u32 sect_idx; void *sect; u32 sect_type; @@ -467,6 +467,6 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); -int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); +int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index cd95705d1e7f..d5ad6d84007c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -10,6 +10,7 @@ #define ICE_DPLL_PIN_IDX_INVALID 0xff #define ICE_DPLL_RCLK_NUM_PER_PF 1 #define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25 +#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125 /** * enum ice_dpll_pin_type - enumerate ice pin types: @@ -656,6 +657,8 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, struct ice_dpll_pin *p = pin_priv; struct ice_dpll *d = dpll_priv; + 
if (state == DPLL_PIN_STATE_SELECTABLE) + return -EINVAL; if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED) return 0; @@ -1843,6 +1846,8 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, struct dpll_pin *parent; int ret, i; + if (WARN_ON((!vsi || !vsi->netdev))) + return -EINVAL; ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF, pf->dplls.clock_id); if (ret) @@ -1858,8 +1863,6 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, if (ret) goto unregister_pins; } - if (WARN_ON((!vsi || !vsi->netdev))) - return -EINVAL; dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; @@ -2061,6 +2064,73 @@ static int ice_dpll_init_worker(struct ice_pf *pf) return 0; } +/** + * ice_dpll_init_info_pins_generic - initializes generic pins info + * @pf: board private structure + * @input: if input pins initialized + * + * Init information for generic pins, cache them in PF's pins structures. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input) +{ + struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps; + static const char labels[][sizeof("99")] = { + "0", "1", "2", "3", "4", "5", "6", "7", "8", + "9", "10", "11", "12", "13", "14", "15" }; + u32 cap = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + enum ice_dpll_pin_type pin_type; + int i, pin_num, ret = -EINVAL; + struct ice_dpll_pin *pins; + u32 phase_adj_max; + + if (input) { + pin_num = pf->dplls.num_inputs; + pins = pf->dplls.inputs; + phase_adj_max = pf->dplls.input_phase_adj_max; + pin_type = ICE_DPLL_PIN_TYPE_INPUT; + cap |= DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE; + } else { + pin_num = pf->dplls.num_outputs; + pins = pf->dplls.outputs; + phase_adj_max = pf->dplls.output_phase_adj_max; + pin_type = ICE_DPLL_PIN_TYPE_OUTPUT; + } + if (pin_num > ARRAY_SIZE(labels)) + return ret; + + for (i = 0; i < pin_num; i++) { + pins[i].idx = i; + pins[i].prop.board_label = labels[i]; + pins[i].prop.phase_range.min = phase_adj_max; + pins[i].prop.phase_range.max = -phase_adj_max; + pins[i].prop.capabilities = cap; + pins[i].pf = pf; + ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); + if (ret) + break; + if (input && pins[i].freq == ICE_DPLL_PIN_GEN_RCLK_FREQ) + pins[i].prop.type = DPLL_PIN_TYPE_MUX; + else + pins[i].prop.type = DPLL_PIN_TYPE_EXT; + if (!input) + continue; + ret = ice_aq_get_cgu_ref_prio(&pf->hw, de->dpll_idx, i, + &de->input_prio[i]); + if (ret) + break; + ret = ice_aq_get_cgu_ref_prio(&pf->hw, dp->dpll_idx, i, + &dp->input_prio[i]); + if (ret) + break; + } + + return ret; +} + /** * ice_dpll_init_info_direct_pins - initializes direct pins info * @pf: board private structure @@ -2099,6 +2169,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, default: return -EINVAL; } + if (num_pins != ice_cgu_get_num_pins(hw, input)) + return ice_dpll_init_info_pins_generic(pf, input); for (i = 0; i < num_pins; i++) { caps = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index c0b3e70a7ea3..fb527434b58b 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -552,13 +552,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr) { ice_eswitch_stop_reprs(pf); + repr->ops.rem(repr); + xa_erase(&pf->eswitch.reprs, repr->id); if 
(xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); ice_eswitch_release_repr(pf, repr); - repr->ops.rem(repr); ice_repr_destroy(repr); if (xa_empty(&pf->eswitch.reprs)) { diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index f5aceb32bf4d..cccb7ddf61c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb, return NOTIFY_DONE; } -static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) +void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) { struct ice_esw_br_fdb_entry *entry, *tmp; + if (!bridge) + return; + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h index c15c7344d7f8..66a2c804338f 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h @@ -117,5 +117,6 @@ void ice_eswitch_br_offloads_deinit(struct ice_pf *pf); int ice_eswitch_br_offloads_init(struct ice_pf *pf); +void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge); #endif /* _ICE_ESWITCH_BR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 5412eff8ef23..ee9862ddfe15 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1830,11 +1830,12 @@ static int ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, struct ice_fdir_fltr *input) { - u16 dest_vsi, q_index = 0; + s16 q_index = ICE_FDIR_NO_QUEUE_IDX; u16 orig_q_index = 0; struct ice_pf *pf; struct ice_hw *hw; int flow_type; + u16 dest_vsi; u8 dest_ctl; if (!vsi || !fsp || !input) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index ab5b118daa2d..820023c0271f 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -53,6 +53,8 @@ */ #define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 +#define ICE_FDIR_NO_QUEUE_IDX -1 + enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, @@ -186,7 +188,7 @@ struct ice_fdir_fltr { u16 flex_fltr; /* filter control */ - u16 q_index; + s16 q_index; u16 orig_q_index; u16 dest_vsi; u8 dest_ctl; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index eeb48cc48e08..b1e7727b8677 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -87,7 +87,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, bool netif_is_ice(const struct net_device *dev) { - return dev && (dev->netdev_ops == &ice_netdev_ops); + return dev && (dev->netdev_ops == &ice_netdev_ops || + dev->netdev_ops == &ice_netdev_safe_mode_ops); } /** @@ -520,25 +521,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) pf->vf_agg_node[node].num_vsis = 0; } -/** - * ice_clear_sw_switch_recipes - clear switch recipes - * @pf: board private structure - * - * Mark switch recipes as not created in sw structures. There are cases where - * rules (especially advanced rules) need to be restored, either re-read from - * hardware or added again. For example after the reset. 
'recp_created' flag - * prevents from doing that and need to be cleared upfront. - */ -static void ice_clear_sw_switch_recipes(struct ice_pf *pf) -{ - struct ice_sw_recipe *recp; - u8 i; - - recp = pf->hw.switch_info->recp_list; - for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) - recp[i].recp_created = false; -} - /** * ice_prepare_for_reset - prep for reset * @pf: board private structure @@ -575,8 +557,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) mutex_unlock(&pf->vfs.table_lock); if (ice_is_eswitch_mode_switchdev(pf)) { - if (reset_type != ICE_RESET_PFR) - ice_clear_sw_switch_recipes(pf); + rtnl_lock(); + ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); + rtnl_unlock(); } /* release ADQ specific HW and SW resources */ @@ -4536,16 +4519,10 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) u8 num_tx_sched_layers = hw->num_tx_sched_layers; struct ice_pf *pf = hw->back; struct device *dev; - u8 *buf_copy; int err; dev = ice_pf_to_dev(pf); - /* ice_cfg_tx_topo buf argument is not a constant, - * so we have to make a copy - */ - buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL); - - err = ice_cfg_tx_topo(hw, buf_copy, firmware->size); + err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); if (!err) { if (hw->num_tx_sched_layers > num_tx_sched_layers) dev_info(dev, "Tx scheduling layers switching feature disabled\n"); @@ -4773,14 +4750,12 @@ int ice_init_dev(struct ice_pf *pf) ice_init_feature_support(pf); err = ice_init_ddp_config(hw, pf); - if (err) - return err; /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be * set in pf->state, which will cause ice_is_safe_mode to return * true */ - if (ice_is_safe_mode(pf)) { + if (err || ice_is_safe_mode(pf)) { /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. Instead of * adding conditional logic everywhere to ignore these diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 3a33e6b9b313..ec8db830ac73 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -34,7 +34,6 @@ static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = { ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, - { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, }, }; static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { @@ -52,7 +51,6 @@ static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, - { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, }, }; static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = { @@ -5964,6 +5962,25 @@ ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size) return t; } +/** + * ice_cgu_get_num_pins - get pin description array size + * @hw: pointer to the hw struct + * @input: if request is done against input or output pins + * + * Return: size of pin description array for given hw. 
+ */ +int ice_cgu_get_num_pins(struct ice_hw *hw, bool input) +{ + const struct ice_cgu_pin_desc *t; + int size; + + t = ice_cgu_get_pin_desc(hw, input, &size); + if (t) + return size; + + return 0; +} + /** * ice_cgu_get_pin_type - get pin's type * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 0852a34ade91..6cedc1a906af 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -404,6 +404,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); bool ice_is_pca9575_present(struct ice_hw *hw); +int ice_cgu_get_num_pins(struct ice_hw *hw, bool input); enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); struct dpll_pin_frequency * ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index e34fe2516ccc..91cb393f616f 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1096,8 +1096,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) return -ENOENT; vsi = ice_get_vf_vsi(vf); - if (!vsi) + if (!vsi) { + ice_put_vf(vf); return -ENOENT; + } prev_msix = vf->num_msix; prev_queues = vf->num_vf_qs; @@ -1119,7 +1121,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) if (vf->first_vector_idx < 0) goto unroll; - if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) { + vsi->req_txq = queues; + vsi->req_rxq = queues; + + if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { /* Try to rebuild with previous values */ needs_rebuild = true; goto unroll; @@ -1142,12 +1147,16 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) vf->num_msix = prev_msix; vf->num_vf_qs = prev_queues; vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); - if (vf->first_vector_idx < 0) + if (vf->first_vector_idx < 0) { + ice_put_vf(vf); return -EINVAL; + } if (needs_rebuild) { - ice_vf_reconfig_vsi(vf); - ice_vf_init_host_cfg(vf, vsi); + vsi->req_txq = prev_queues; + vsi->req_rxq = prev_queues; + + ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); } ice_ena_vf_mappings(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 79d91e95358c..0e740342e294 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -6322,8 +6322,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, if (!itr->vsi_list_info || !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) continue; - /* Clearing it so that the logic can add it back */ - clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); f_entry.fltr_info.vsi_handle = vsi_handle; f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; /* update the src in case it is VSI num */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e6923f8121a9..ea39b999a0d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -819,6 +819,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; rule_info.flags_info.act = 
ICE_SINGLE_ACT_LAN_ENABLE; + /* This is a specific case. The destination VSI index is + * overwritten by the source VSI index. This type of filter + * should allow the packet to go to the LAN, not to the + * VSI passed here. It should set LAN_EN bit only. However, + * the VSI must be a valid one. Setting source VSI index + * here is safe. Even if the result from switch is set LAN_EN + * and LB_EN (which normally will pass the packet to this VSI) + * packet won't be seen on the VSI, because local loopback is + * turned off. + */ + rule_info.sw_act.vsi_handle = vsi->idx; } else { /* VF to VF */ rule_info.sw_act.flag |= ICE_FLTR_TX; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index a69e91f88d81..8c434689e3f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -256,7 +256,7 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) * * It brings the VSI down and then reconfigures it with the hardware. */ -int ice_vf_reconfig_vsi(struct ice_vf *vf) +static int ice_vf_reconfig_vsi(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); struct ice_pf *pf = vf->pf; @@ -335,6 +335,13 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); } else { + /* clear possible previous port vlan config */ + err = ice_vsi_clear_port_vlan(vsi); + if (err) { + dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n", + vf->vf_id, err); + return err; + } err = ice_vsi_add_vlan_zero(vsi); } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 91ba7fe0eaee..0c7e77c0a09f 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -23,7 +23,6 @@ #warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files" #endif -int ice_vf_reconfig_vsi(struct ice_vf *vf); void ice_initialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 6e8f2aab6080..5291f2888ef8 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -787,3 +787,60 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi) kfree(ctxt); return err; } + +int ice_vsi_clear_port_vlan(struct ice_vsi *vsi) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctxt; + int err; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + ctxt->info.port_based_outer_vlan = 0; + ctxt->info.port_based_inner_vlan = 0; + + ctxt->info.inner_vlan_flags = + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, + ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL); + if (ice_is_dvm_ena(hw)) { + ctxt->info.inner_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); + ctxt->info.outer_vlan_flags = + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); + ctxt->info.outer_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, + ICE_AQ_VSI_OUTER_TAG_VLAN_8100); + ctxt->info.outer_vlan_flags |= + ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << + ICE_AQ_VSI_OUTER_VLAN_EMODE_S; + } + + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + 
ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID | + ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.port_based_outer_vlan = + ctxt->info.port_based_outer_vlan; + vsi->info.port_based_inner_vlan = + ctxt->info.port_based_inner_vlan; + vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; + vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + } + + kfree(ctxt); + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h index f0d84d11bd5b..12b227621a7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h @@ -36,5 +36,6 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid); int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi); int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi); +int ice_vsi_clear_port_vlan(struct ice_vsi *vsi); #endif /* _ICE_VSI_VLAN_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 2c31ad87587a..66544faab710 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -141,6 +141,7 @@ enum idpf_vport_state { * @adapter: Adapter back pointer * @vport: Vport back pointer * @vport_id: Vport identifier + * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index * @state: See enum idpf_vport_state * @netstats: Packet and byte stats @@ -150,6 +151,7 @@ struct idpf_netdev_priv { struct idpf_adapter *adapter; struct idpf_vport *vport; u32 vport_id; + u32 link_speed_mbps; u16 vport_idx; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; @@ -287,7 +289,6 @@ struct idpf_port_stats { * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation * @port_stats: per port csum, header split, and other offload stats * @link_up: True if link is up - * @link_speed_mbps: Link speed in mbps * @sw_marker_wq: workqueue for marker packets */ struct idpf_vport { @@ -331,7 +332,6 @@ struct idpf_vport { struct idpf_port_stats port_stats; bool link_up; - u32 link_speed_mbps; wait_queue_head_t sw_marker_wq; }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 3806ddd3ce4a..59b1a1a09996 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -1296,24 +1296,19 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data) static int idpf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - struct idpf_vport *vport; - - idpf_vport_ctrl_lock(netdev); - vport = idpf_netdev_to_vport(netdev); + struct idpf_netdev_priv *np = netdev_priv(netdev); ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; - if (vport->link_up) { + if (netif_carrier_ok(netdev)) { cmd->base.duplex = DUPLEX_FULL; - cmd->base.speed = vport->link_speed_mbps; + cmd->base.speed = np->link_speed_mbps; } else { cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.speed = SPEED_UNKNOWN; } - idpf_vport_ctrl_unlock(netdev); - return 0; } diff 
--git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 4f20343e49a9..b4fbb99bfad2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1786,6 +1786,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) */ err = idpf_vc_core_init(adapter); if (err) { + cancel_delayed_work_sync(&adapter->mbx_task); idpf_deinit_dflt_mbx(adapter); goto unlock_mutex; } @@ -1860,7 +1861,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, * mess with. Nothing below should use those variables from new_vport * and should instead always refer to them in vport if they need to. */ - memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps)); + memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up)); /* Adjust resource parameters prior to reallocating resources */ switch (reset_cause) { @@ -1906,7 +1907,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, /* Same comment as above regarding avoiding copying the wait_queues and * mutexes applies here. We do not want to mess with those if possible. */ - memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); + memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up)); if (reset_cause == IDPF_SR_Q_CHANGE) idpf_vport_alloc_vec_indexes(vport); diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index 99b8dbaf4225..aad62e270ae4 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -99,6 +99,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M; intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M; intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; + intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S; intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M; spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 70986e12da28..d46c95f91b0d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -141,7 +141,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter, } np = netdev_priv(vport->netdev); - vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); + np->link_speed_mbps = le32_to_cpu(v2e->link_speed); if (vport->link_up == v2e->link_status) return; @@ -666,7 +666,7 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, if (ctlq_msg->data_len) { payload = ctlq_msg->ctx.indirect.payload->va; - payload_size = ctlq_msg->ctx.indirect.payload->size; + payload_size = ctlq_msg->data_len; } xn->reply_sz = payload_size; @@ -1295,10 +1295,6 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, err = reply_sz; goto free_vport_params; } - if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) { - err = -EIO; - goto free_vport_params; - } return 0; @@ -2602,9 +2598,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) if (reply_sz < 0) return reply_sz; - if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) - return -EIO; - ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); if (ptypes_recvd > max_ptype) return -EINVAL; @@ -3070,7 +3063,6 @@ int idpf_vc_core_init(struct idpf_adapter *adapter) adapter->state = __IDPF_VER_CHECK; if (adapter->vcxn_mngr) idpf_vc_xn_shutdown(adapter->vcxn_mngr); - idpf_deinit_dflt_mbx(adapter); set_bit(IDPF_HR_DRV_LOAD, 
adapter->flags); queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, msecs_to_jiffies(task_delay)); @@ -3088,9 +3080,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) return; - idpf_vc_xn_shutdown(adapter->vcxn_mngr); idpf_deinit_task(adapter); idpf_intr_rel(adapter); + idpf_vc_xn_shutdown(adapter->vcxn_mngr); cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->mbx_task); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1ef4cb871452..b83df5f94b1f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter) int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_other, 0, netdev->name, adapter); + igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter); if (err) goto err_out; @@ -9651,6 +9651,10 @@ static void igb_io_resume(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { + if (!test_bit(__IGB_DOWN, &adapter->state)) { + dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n"); + return; + } if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); return; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 4746a6b258f0..8af75cb37c3e 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -336,6 +336,51 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct, return new_pkts; } +/** + * octep_oq_next_pkt() - Move to the next packet in Rx queue. + * + * @oq: Octeon Rx queue data structure. + * @buff_info: Current packet buffer info. + * @read_idx: Current packet index in the ring. + * @desc_used: Current packet descriptor number. + * + * Free the resources associated with a packet. + * Increment packet index in the ring and packet descriptor number. + */ +static void octep_oq_next_pkt(struct octep_oq *oq, + struct octep_rx_buffer *buff_info, + u32 *read_idx, u32 *desc_used) +{ + dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + buff_info->page = NULL; + (*read_idx)++; + (*desc_used)++; + if (*read_idx == oq->max_count) + *read_idx = 0; +} + +/** + * octep_oq_drop_rx() - Free the resources associated with a packet. + * + * @oq: Octeon Rx queue data structure. + * @buff_info: Current packet buffer info. + * @read_idx: Current packet index in the ring. + * @desc_used: Current packet descriptor number. + * + */ +static void octep_oq_drop_rx(struct octep_oq *oq, + struct octep_rx_buffer *buff_info, + u32 *read_idx, u32 *desc_used) +{ + int data_len = buff_info->len - oq->max_single_buffer_size; + + while (data_len > 0) { + octep_oq_next_pkt(oq, buff_info, read_idx, desc_used); + data_len -= oq->buffer_size; + }; +} + /** * __octep_oq_process_rx() - Process hardware Rx queue and push to stack. 
* @@ -367,10 +412,7 @@ static int __octep_oq_process_rx(struct octep_device *oct, desc_used = 0; for (pkt = 0; pkt < pkts_to_process; pkt++) { buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; - dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, - PAGE_SIZE, DMA_FROM_DEVICE); resp_hw = page_address(buff_info->page); - buff_info->page = NULL; /* Swap the length field that is in Big-Endian to CPU */ buff_info->len = be64_to_cpu(resp_hw->length); @@ -394,36 +436,33 @@ static int __octep_oq_process_rx(struct octep_device *oct, data_offset = OCTEP_OQ_RESP_HW_SIZE; rx_ol_flags = 0; } + + octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used); + + skb = build_skb((void *)resp_hw, PAGE_SIZE); + if (!skb) { + octep_oq_drop_rx(oq, buff_info, + &read_idx, &desc_used); + oq->stats.alloc_failures++; + continue; + } + skb_reserve(skb, data_offset); + rx_bytes += buff_info->len; if (buff_info->len <= oq->max_single_buffer_size) { - skb = build_skb((void *)resp_hw, PAGE_SIZE); - skb_reserve(skb, data_offset); skb_put(skb, buff_info->len); - read_idx++; - desc_used++; - if (read_idx == oq->max_count) - read_idx = 0; } else { struct skb_shared_info *shinfo; u16 data_len; - skb = build_skb((void *)resp_hw, PAGE_SIZE); - skb_reserve(skb, data_offset); /* Head fragment includes response header(s); * subsequent fragments contains only data. */ skb_put(skb, oq->max_single_buffer_size); - read_idx++; - desc_used++; - if (read_idx == oq->max_count) - read_idx = 0; - shinfo = skb_shinfo(skb); data_len = buff_info->len - oq->max_single_buffer_size; while (data_len) { - dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, - PAGE_SIZE, DMA_FROM_DEVICE); buff_info = (struct octep_rx_buffer *) &oq->buff_info[read_idx]; if (data_len < oq->buffer_size) { @@ -438,11 +477,8 @@ static int __octep_oq_process_rx(struct octep_device *oct, buff_info->page, 0, buff_info->len, buff_info->len); - buff_info->page = NULL; - read_idx++; - desc_used++; - if (read_idx == oq->max_count) - read_idx = 0; + + octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 82832a24fbd8..da69350c6f76 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2411,7 +2411,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); if (!(cfg & BIT_ULL(12))) continue; - bmap |= (1 << i); + bmap |= BIT_ULL(i); cfg &= ~BIT_ULL(12); rvu_write64(rvu, blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); @@ -2432,7 +2432,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { - if (!(bmap & (1 << i))) + if (!(bmap & BIT_ULL(i))) continue; cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c index 930f180688e5..2c26eb185283 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -2471,10 +2471,6 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, e->dma_addr = addr; e->dma_len = len; - airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), - TX_RING_CPU_IDX_MASK, - FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); - data = skb_frag_address(frag); len = skb_frag_size(frag); 
} @@ -2483,6 +2479,11 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, q->queued += i; skb_tx_timestamp(skb); + if (!netdev_xmit_more()) + airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), + TX_RING_CPU_IDX_MASK, + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + if (q->ndesc - q->queued < q->free_thr) netif_tx_stop_queue(txq); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 16ca427cf4c3..ed7313c10a05 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1171,7 +1171,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; - for (i = 0; i < cnt; i++) { + for (i = 0; i < len; i++) { struct mtk_tx_dma_v2 *txd; txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h index 87a67fa3868d..c01b1e8428f6 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h @@ -91,8 +91,8 @@ enum mtk_wed_dummy_cr_idx { #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin" #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin" #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin" -#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin" -#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin" +#define MT7988_FIRMWARE_WO0 "mediatek/mt7988/mt7988_wo_0.bin" +#define MT7988_FIRMWARE_WO1 "mediatek/mt7988/mt7988_wo_1.bin" #define MTK_WO_MCU_CFG_LS_BASE 0 #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a64d96effb9e..6bd8a18e3af3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1765,6 +1765,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force } } +#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1 +#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \ + MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1) + static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1776,7 +1780,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) /* wait for pending handlers to complete */ mlx5_eq_synchronize_cmd_irq(dev); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); - vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1); + vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK; if (!vector) goto no_trig; @@ -2361,7 +2365,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev) cmd->state = MLX5_CMDIF_STATE_DOWN; cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1; - cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1; + cmd->vars.bitmask = MLX5_CMD_MASK; sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds); sema_init(&cmd->vars.pages_sem, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a5659c0c4236..e601324a690a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -6509,7 +6509,9 @@ static void _mlx5e_remove(struct auxiliary_device *adev) mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); _mlx5e_suspend(adev, false); - priv->profile->cleanup(priv); + /* Avoid cleanup if profile rollback failed. 
*/ + if (priv->profile) + priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); mlx5e_destroy_devlink(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 2505f90c0b39..68cb86b37e56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1061,6 +1061,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn) struct mlx5_eq_comp *eq; int ret = 0; + if (vecidx >= table->max_comp_eqs) { + mlx5_core_dbg(dev, "Requested vector index %u should be less than %u", + vecidx, table->max_comp_eqs); + return -EINVAL; + } + mutex_lock(&table->comp_lock); eq = xa_load(&table->comp_eqs, vecidx); if (eq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 17f78091ad30..7aef30dbd82d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) } if (err) - goto abort; + goto err_esw_enable; esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; @@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) return 0; -abort: +err_esw_enable: + mlx5_eq_notifier_unregister(esw->dev, &esw->nb); mlx5_esw_acls_ns_cleanup(esw); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c index bd52b05db367..8f3a6f9d703d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c @@ -691,7 +691,6 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) static int hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) { - u32 num_of_rules; int ret; /* If the current matcher size is already at its max size, we can't @@ -705,8 +704,7 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) * Need to check again if we really need rehash. * If the reason for rehash was size, but not any more - skip rehash. 
*/ - num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED); - if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules)) + if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules)) return 0; /* Now we're done all the checking - do the rehash: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h index e5a7ce604334..8ab548aa402b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h @@ -46,6 +46,7 @@ struct mlx5hws_context { struct mlx5hws_send_engine *send_queue; size_t queues; struct mutex *bwc_send_queue_locks; /* protect BWC queues */ + struct lock_class_key *bwc_lock_class_keys; struct list_head tbl_list; struct mlx5hws_context_debug_info debug_info; struct xarray peer_ctx_xa; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c index d566d2ddf424..3f4c58bada37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c @@ -1925,7 +1925,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl); if (ret) { mlx5hws_err(ctx, "Failed to convert items to header layout\n"); - goto free_fc; + goto free_match_hl; } /* Find the match definer layout for header layout match union */ @@ -1946,7 +1946,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx, free_fc: kfree(mt->fc); - +free_match_hl: kfree(match_hl); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c index 0c7989184c30..6d443e6ee8d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c @@ -941,14 +941,18 @@ static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues) static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx) { - int bwc_queues = ctx->queues - 1; + int bwc_queues = mlx5hws_bwc_queues(ctx); int i; if (!mlx5hws_context_bwc_supported(ctx)) return; - for (i = 0; i < bwc_queues; i++) + for (i = 0; i < bwc_queues; i++) { mutex_destroy(&ctx->bwc_send_queue_locks[i]); + lockdep_unregister_key(ctx->bwc_lock_class_keys + i); + } + + kfree(ctx->bwc_lock_class_keys); kfree(ctx->bwc_send_queue_locks); } @@ -977,10 +981,22 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx) if (!ctx->bwc_send_queue_locks) return -ENOMEM; - for (i = 0; i < bwc_queues; i++) + ctx->bwc_lock_class_keys = kcalloc(bwc_queues, + sizeof(*ctx->bwc_lock_class_keys), + GFP_KERNEL); + if (!ctx->bwc_lock_class_keys) + goto err_lock_class_keys; + + for (i = 0; i < bwc_queues; i++) { mutex_init(&ctx->bwc_send_queue_locks[i]); + lockdep_register_key(ctx->bwc_lock_class_keys + i); + } return 0; + +err_lock_class_keys: + kfree(ctx->bwc_send_queue_locks); + return -ENOMEM; } int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 060e5b939211..d6f37456fb31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -389,15 +389,27 
@@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction); } -static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[], +static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q, + struct page *pages[], u16 byte_count) { + struct mlxsw_pci_queue *cq = q->u.rdq.cq; unsigned int linear_data_size; + struct page_pool *page_pool; struct sk_buff *skb; int page_index = 0; bool linear_only; void *data; + linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE; + linear_data_size = linear_only ? byte_count : + PAGE_SIZE - + MLXSW_PCI_RX_BUF_SW_OVERHEAD; + + page_pool = cq->u.cq.page_pool; + page_pool_dma_sync_for_cpu(page_pool, pages[page_index], + MLXSW_PCI_SKB_HEADROOM, linear_data_size); + data = page_address(pages[page_index]); net_prefetch(data); @@ -405,11 +417,6 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[], if (unlikely(!skb)) return ERR_PTR(-ENOMEM); - linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE; - linear_data_size = linear_only ? byte_count : - PAGE_SIZE - - MLXSW_PCI_RX_BUF_SW_OVERHEAD; - skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM); skb_put(skb, linear_data_size); @@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[], page = pages[page_index]; frag_size = min(byte_count, PAGE_SIZE); + page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, frag_size, PAGE_SIZE); byte_count -= frag_size; @@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (err) goto out; - skb = mlxsw_pci_rdq_build_skb(pages, byte_count); + skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count); if (IS_ERR(skb)) { dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n"); mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries); @@ -988,12 +996,13 @@ static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q, if (cq_type != MLXSW_PCI_CQ_RDQ) return 0; - pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries; pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev); pp_params.dev = &mlxsw_pci->pdev->dev; pp_params.napi = &q->u.cq.napi; pp_params.dma_dir = DMA_FROM_DEVICE; + pp_params.max_len = PAGE_SIZE; page_pool = page_pool_create(&pp_params); if (IS_ERR(page_pool)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index d761a1235994..7ea798a4949e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -481,11 +481,33 @@ mlxsw_sp_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, struct netlink_ext_ack *extack) { + u32 new_kvdl_index, old_kvdl_index = ipip_entry->dip_kvdl_index; + struct in6_addr old_addr6 = ipip_entry->parms.daddr.addr6; struct mlxsw_sp_ipip_parms new_parms; + int err; new_parms = mlxsw_sp_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev); - return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, - &new_parms, extack); + + err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, + &new_parms.daddr.addr6, + &new_kvdl_index); + if (err) + return err; + ipip_entry->dip_kvdl_index = new_kvdl_index; + + err = mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); + if (err) + goto err_change_gre; + + 
mlxsw_sp_ipv6_addr_put(mlxsw_sp, &old_addr6); + + return 0; + +err_change_gre: + ipip_entry->dip_kvdl_index = old_kvdl_index; + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &new_parms.daddr.addr6); + return err; } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 5b174cb95eb8..d94081c7658e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -16,6 +16,7 @@ #include "spectrum.h" #include "spectrum_ptp.h" #include "core.h" +#include "txheader.h" #define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29 #define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */ @@ -1684,6 +1685,12 @@ int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + mlxsw_sp_txhdr_construct(skb, tx_info); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 800dfb64ec83..7d6d859cef3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3197,7 +3197,6 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp; struct mlxsw_sp_nexthop_counter *nhct; - void *ptr; int err; nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id); @@ -3210,12 +3209,10 @@ mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(nhct)) return nhct; - ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct, - GFP_KERNEL); - if (IS_ERR(ptr)) { - err = PTR_ERR(ptr); + err = xa_err(xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct, + GFP_KERNEL)); + if (err) goto err_store; - } return nhct; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index dcea6652d56d..4a777b449ecd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -401,28 +401,21 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci, u32 nano_seconds = 0; u32 seconds = 0; - if (ts) { - if (ts->tv_sec > 0xFFFFFFFFLL || - ts->tv_sec < 0) { - netif_warn(adapter, drv, adapter->netdev, - "ts->tv_sec out of range, %lld\n", - ts->tv_sec); - return -ERANGE; - } - if (ts->tv_nsec >= 1000000000L || - ts->tv_nsec < 0) { - netif_warn(adapter, drv, adapter->netdev, - "ts->tv_nsec out of range, %ld\n", - ts->tv_nsec); - return -ERANGE; - } - seconds = ts->tv_sec; - nano_seconds = ts->tv_nsec; - lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0); - } else { - netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n"); - return -EINVAL; + if (ts->tv_sec > 0xFFFFFFFFLL) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_sec out of range, %lld\n", + ts->tv_sec); + return -ERANGE; } + if (ts->tv_nsec < 0) { + netif_warn(adapter, drv, adapter->netdev, + "ts->tv_nsec out of range, %ld\n", + ts->tv_nsec); + return -ERANGE; + } + seconds = ts->tv_sec; + nano_seconds = ts->tv_nsec; + lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0); return 0; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c index 15db423be4aa..459a53676ae9 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c +++ 
b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c @@ -31,10 +31,10 @@ static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx) /* Add port to mirror (only front ports) */ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno) { - u32 val, reg = portno; + u64 reg = portno; + u32 val; - reg = portno / BITS_PER_BYTE; - val = BIT(portno % BITS_PER_BYTE); + val = BIT(do_div(reg, 32)); if (reg == 0) return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx)); @@ -45,10 +45,10 @@ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno) /* Delete port from mirror (only front ports) */ static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno) { - u32 val, reg = portno; + u64 reg = portno; + u32 val; - reg = portno / BITS_PER_BYTE; - val = BIT(portno % BITS_PER_BYTE); + val = BIT(do_div(reg, 32)); if (reg == 0) return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx)); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index f2a5a36fdacd..7251121ab196 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -1444,6 +1444,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) ret = vcap_del_rule(&test_vctrl, &test_netdev, id); KUNIT_EXPECT_EQ(test, 0, ret); + + vcap_free_rule(rule); } static void vcap_api_set_rule_counter_test(struct kunit *test) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index b93791d6b593..f5dc876eb500 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -394,6 +394,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_pci: ionic_dev_teardown(ionic); ionic_clear_pci(ionic); + ionic_debugfs_del_dev(ionic); err_out: mutex_destroy(&ionic->dev_cmd_lock); ionic_devlink_free(ionic); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0cc9baaecb1b..713a89bb21e9 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4682,7 +4682,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) return IRQ_NONE; - if (unlikely(status & SYSErr)) { + /* At least RTL8168fp may unexpectedly set the SYSErr bit */ + if (unlikely(status & SYSErr && + tp->mac_version <= RTL_GIGA_MAC_VER_06)) { rtl8169_pcierr_interrupt(tp->dev); goto out; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index d2a6518532f3..907af4651c55 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1750,20 +1750,19 @@ static int ravb_get_ts_info(struct net_device *ndev, struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *hw_info = priv->info; - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_ALL); - if (hw_info->gptp || hw_info->ccc_gac) + if (hw_info->gptp || hw_info->ccc_gac) { + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE 
| + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_ALL); info->phc_index = ptp_clock_index(priv->ptp.clock); - else - info->phc_index = 0; + } return 0; } diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index f9f63c61d792..6b3f7fca8d15 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -1057,6 +1057,7 @@ static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb->len >= TX_DS) { priv->stats.tx_dropped++; priv->stats.tx_errors++; + dev_kfree_skb_any(skb); goto out; } diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index c9e17a8208a9..f1723a6fb082 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -1260,7 +1260,8 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush(); + if (budget) + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index a7346e965bfe..d120b3c83ac0 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -1285,7 +1285,8 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush(); + if (budget) + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index 362f85136c3e..6fdd94c8919e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + usleep_range(10, 20); /* 50ns min delay needed as per HW design */ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + usleep_range(10, 20); /* 500ns min delay needed as per HW design */ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); @@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_ return err; } + usleep_range(10, 20); /* 50ns min delay needed as per HW design */ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY; + writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + + usleep_range(10, 20); /* 50ns min delay needed as per HW design */ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); - value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); - value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET; - 
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); - + usleep_range(10, 20); /* 50ns min delay needed as per HW design */ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY; writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + msleep(30); /* 30ms delay needed as per HW design */ + value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET; + writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL); + err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value, value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS, 500, 500 * 2000); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index e0165358c4ac..77b35abc6f6f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -203,8 +203,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv, readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel)); reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel)); + reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] = + readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel)); reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel)); + reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] = + readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel)); reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel)); reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] = @@ -225,8 +229,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv, readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel)); reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel)); + reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel)); reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel)); + reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] = + readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel)); reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] = readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel)); reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] = diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 17d9120db5fe..4f980dcd3958 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -127,7 +127,9 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs, #define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c) #define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44) #define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c) +#define DMA_CHAN_CUR_TX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x50) #define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54) +#define DMA_CHAN_CUR_RX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x58) #define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c) #define 
DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e2140482270a..7bf275f127c9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, rx_q->queue_index = queue; rx_q->priv_data = priv; - pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0); + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = dma_conf->dma_rx_size; num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); @@ -3780,6 +3780,7 @@ static int stmmac_request_irq_single(struct net_device *dev) /* Request the Wake IRQ in case of another line * is used for WoL */ + priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); @@ -4304,11 +4305,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; - tx_q->tx_skbuff_dma[first_entry].buf = des; - tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - tx_q->tx_skbuff_dma[first_entry].map_as_page = false; - tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; - if (priv->dma_cap.addr64 <= 32) { first->des0 = cpu_to_le32(des); @@ -4327,6 +4323,23 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + /* In case two or more DMA transmit descriptors are allocated for this + * non-paged SKB data, the DMA buffer address should be saved to + * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, + * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee + * that stmmac_tx_clean() does not unmap the entire DMA buffer too early + * since the tail areas of the DMA buffer can be accessed by DMA engine + * sooner or later. + * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf + * corresponding to the last descriptor, stmmac_tx_clean() will unmap + * this DMA buffer right after the DMA engine completely finishes the + * full buffer transmission. 
+ */ + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; + /* Prepare fragments */ for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index d253727b160f..ba6db61dd227 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; + struct am65_cpsw_swdata *swdata; dma_addr_t desc_dma; dma_addr_t buf_dma; - void *swdata; desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); if (!desc_rx) { @@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE, buf_dma, AM65_CPSW_MAX_PACKET_SIZE); swdata = cppi5_hdesc_get_swdata(desc_rx); - *((void **)swdata) = page_address(page); + swdata->page = page; + swdata->flow_id = flow_idx; return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx, desc_rx, desc_dma); @@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow, struct page *page, - bool allow_direct, - int desc_idx) + bool allow_direct) { page_pool_put_full_page(flow->page_pool, page, allow_direct); - flow->pages[desc_idx] = NULL; } static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) { - struct am65_cpsw_rx_flow *flow = data; + struct am65_cpsw_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; - struct am65_cpsw_rx_chn *rx_chn; + struct am65_cpsw_swdata *swdata; dma_addr_t buf_dma; + struct page *page; u32 buf_dma_len; - void *page_addr; - void **swdata; - int desc_idx; + u32 flow_id; - rx_chn = &flow->common->rx_chns; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - page_addr = *swdata; + page = swdata->page; + flow_id = swdata->flow_id; cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, - rx_chn->dsize_log2); - am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx); + am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false); } static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, @@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) ret = -ENOMEM; goto fail_rx; } - flow->pages[i] = page; ret = am65_cpsw_nuss_rx_push(common, page, flow_idx); if (ret < 0) { dev_err(common->dev, "cannot submit page to rx channel flow %d, error %d\n", flow_idx, ret); - am65_cpsw_put_page(flow, page, false, i); + am65_cpsw_put_page(flow, page, false); goto fail_rx; } } @@ -764,8 +759,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) fail_rx: for (i = 0; i < common->rx_ch_num_flows; i++) - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, 
+ am65_cpsw_nuss_rx_cleanup, !!i); am65_cpsw_destroy_xdp_rxqs(common); @@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) dev_err(common->dev, "rx teardown timeout\n"); } - for (i = 0; i < common->rx_ch_num_flows; i++) { + for (i = common->rx_ch_num_flows - 1; i >= 0; i--) { napi_disable(&rx_chn->flows[i].napi_rx); hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer); - k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, + am65_cpsw_nuss_rx_cleanup, !!i); } k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); @@ -1028,7 +1023,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_port *port, struct xdp_buff *xdp, - int desc_idx, int cpu, int *len) + int cpu, int *len) { struct am65_cpsw_common *common = flow->common; struct am65_cpsw_ndev_priv *ndev_priv; @@ -1101,7 +1096,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, } page = virt_to_head_page(xdp->data); - am65_cpsw_put_page(flow, page, true, desc_idx); + am65_cpsw_put_page(flow, page, true); out: return ret; @@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; + struct am65_cpsw_swdata *swdata; struct page *page, *new_page; dma_addr_t desc_dma, buf_dma; struct am65_cpsw_port *port; - int headroom, desc_idx, ret; struct net_device *ndev; u32 flow_idx = flow->id; struct sk_buff *skb; struct xdp_buff xdp; + int headroom, ret; void *page_addr; - void **swdata; u32 *psdata; *xdp_state = AM65_CPSW_XDP_PASS; @@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, __func__, flow_idx, &desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - page_addr = *swdata; - page = virt_to_page(page_addr); + page = swdata->page; + page_addr = page_address(page); cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); pkt_len = cppi5_hdesc_get_pktlen(desc_rx); @@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, - rx_chn->dsize_log2); - skb = am65_cpsw_build_skb(page_addr, ndev, AM65_CPSW_MAX_PACKET_SIZE); if (unlikely(!skb)) { @@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, pkt_len, false); - *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx, + *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, cpu, &pkt_len); if (*xdp_state != AM65_CPSW_XDP_PASS) goto allocate; @@ -1247,10 +1239,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, return -ENOMEM; } - flow->pages[desc_idx] = new_page; - if (netif_dormant(ndev)) { - am65_cpsw_put_page(flow, new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true); ndev->stats.rx_dropped++; return 0; } @@ -1258,7 +1248,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, requeue: ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx); if (WARN_ON(ret < 0)) { - am65_cpsw_put_page(flow, new_page, true, desc_idx); + am65_cpsw_put_page(flow, new_page, true); ndev->stats.rx_errors++; 
ndev->stats.rx_dropped++; } @@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) for (i = 0; i < common->rx_ch_num_flows; i++) { flow = &rx_chn->flows[i]; flow->page_pool = NULL; - flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC, - sizeof(*flow->pages), GFP_KERNEL); - if (!flow->pages) - return -ENOMEM; } rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); @@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) flow = &rx_chn->flows[i]; flow->id = i; flow->common = common; + flow->irq = -EINVAL; rx_flow_cfg.ring_rxfdq0_id = fdqring_id; rx_flow_cfg.rx_cfg.size = max_desc_num; - rx_flow_cfg.rxfdq_cfg.size = max_desc_num; + /* share same FDQ for all flows */ + rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num; rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode; ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, @@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) if (ret) { dev_err(dev, "failure requesting rx %d irq %u, %d\n", i, flow->irq, ret); + flow->irq = -EINVAL; goto err; } } @@ -2744,10 +2733,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) return 0; /* alloc netdev */ - port->ndev = devm_alloc_etherdev_mqs(common->dev, - sizeof(struct am65_cpsw_ndev_priv), - AM65_CPSW_MAX_QUEUES, - AM65_CPSW_MAX_QUEUES); + port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv), + AM65_CPSW_MAX_QUEUES, + AM65_CPSW_MAX_QUEUES); if (!port->ndev) { dev_err(dev, "error allocating slave net_device %u\n", port->port_id); @@ -2868,8 +2856,12 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) for (i = 0; i < common->port_num; i++) { port = &common->ports[i]; - if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED) + if (!port->ndev) + continue; + if (port->ndev->reg_state == NETREG_REGISTERED) unregister_netdev(port->ndev); + free_netdev(port->ndev); + port->ndev = NULL; } } @@ -3346,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) for (i = 0; i < common->rx_ch_num_flows; i++) k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, - &rx_chan->flows[i], - am65_cpsw_nuss_rx_cleanup, 0); + rx_chan, + am65_cpsw_nuss_rx_cleanup, !!i); k3_udma_glue_disable_rx_chn(rx_chan->rx_chn); @@ -3613,16 +3605,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) ret = am65_cpsw_nuss_init_ndevs(common); if (ret) - goto err_free_phylink; + goto err_ndevs_clear; ret = am65_cpsw_nuss_register_ndevs(common); if (ret) - goto err_free_phylink; + goto err_ndevs_clear; pm_runtime_put(dev); return 0; -err_free_phylink: +err_ndevs_clear: + am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); err_of_clear: @@ -3652,13 +3645,13 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev) return; } - am65_cpsw_unregister_devlink(common); am65_cpsw_unregister_notifiers(common); /* must unregister ndevs here because DD release_driver routine calls * dma_deconfigure(dev) before devres_release_all(dev) */ am65_cpsw_nuss_cleanup_ndev(common); + am65_cpsw_unregister_devlink(common); am65_cpsw_nuss_phylink_cleanup(common); am65_cpts_release(common->cpts); am65_cpsw_disable_serdes_phy(common); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index dc8d544230dc..92a27ba4c601 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ 
b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -101,10 +101,14 @@ struct am65_cpsw_rx_flow { struct hrtimer rx_hrtimer; unsigned long rx_pace_timeout; struct page_pool *page_pool; - struct page **pages; char name[32]; }; +struct am65_cpsw_swdata { + u32 flow_id; + struct page *page; +}; + struct am65_cpsw_rx_chn { struct device *dev; struct device *dma_dev; diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index 72ace151d8e9..5d2491c2943a 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -735,6 +735,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, u8 fid_c1; tbl = prueth->vlan_tbl; + spin_lock(&prueth->vtbl_lock); fid_c1 = tbl[vid].fid_c1; /* FID_C1: bit0..2 port membership mask, @@ -750,6 +751,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, } tbl[vid].fid_c1 = fid_c1; + spin_unlock(&prueth->vtbl_lock); } EXPORT_SYMBOL_GPL(icssg_vtbl_modify); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 5fd9902ab181..5c20ceb164df 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1442,6 +1442,7 @@ static int prueth_probe(struct platform_device *pdev) icss_iep_init_fw(prueth->iep1); } + spin_lock_init(&prueth->vtbl_lock); /* setup netdev interfaces */ if (eth0_node) { ret = prueth_netdev_init(prueth, eth0_node); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index bba6da2e6bd8..8722bb4a268a 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -296,6 +296,8 @@ struct prueth { bool is_switchmode_supported; unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN]; int default_vlan; + /** @vtbl_lock: Lock for vtbl in shared memory */ + spinlock_t vtbl_lock; }; struct emac_tx_ts_response { diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index a04d4073def9..2c37957478fb 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -222,7 +222,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, struct mse102x_net_spi *mses = to_mse102x_spi(mse); struct spi_transfer *xfer = &mses->spi_xfer; struct spi_message *msg = &mses->spi_msg; - struct sk_buff *tskb; + struct sk_buff *tskb = NULL; int ret; netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n", @@ -235,7 +235,6 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, if (!tskb) return -ENOMEM; - dev_kfree_skb(txp); txp = tskb; } @@ -257,6 +256,8 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, mse->stats.xfer_err++; } + dev_kfree_skb(tskb); + return ret; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index fc35fcb22d94..1fcbcaa85ebd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -924,13 +924,13 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) skbuf_dma->sg_len = sg_len; dma_tx_desc->callback_param = lp; dma_tx_desc->callback_result = axienet_dma_tx_cb; - dmaengine_submit(dma_tx_desc); - dma_async_issue_pending(lp->tx_chan); txq = skb_get_tx_queue(lp->ndev, skb); netdev_tx_sent_queue(txq, skb->len); 
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); + dmaengine_submit(dma_tx_desc); + dma_async_issue_pending(lp->tx_chan); return NETDEV_TX_OK; xmit_error_unmap_sg: @@ -1051,6 +1051,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); @@ -1071,6 +1072,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) ndev->stats.tx_dropped++; axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, true, NULL, 0); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index a60bfb1abb7f..70f981887518 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1702,20 +1702,24 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) return -EINVAL; if (data[IFLA_GTP_FD0]) { - u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); + int fd0 = nla_get_u32(data[IFLA_GTP_FD0]); - sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); - if (IS_ERR(sk0)) - return PTR_ERR(sk0); + if (fd0 >= 0) { + sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); + if (IS_ERR(sk0)) + return PTR_ERR(sk0); + } } if (data[IFLA_GTP_FD1]) { - u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); + int fd1 = nla_get_u32(data[IFLA_GTP_FD1]); - sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); - if (IS_ERR(sk1u)) { - gtp_encap_disable_sock(sk0); - return PTR_ERR(sk1u); + if (fd1 >= 0) { + sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); + if (IS_ERR(sk1u)) { + gtp_encap_disable_sock(sk0); + return PTR_ERR(sk1u); + } } } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 153b97f8ec0d..23180f7b67b6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2798,6 +2798,31 @@ static struct hv_driver netvsc_drv = { }, }; +/* Set VF's namespace same as the synthetic NIC */ +static void netvsc_event_set_vf_ns(struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct net_device *vf_netdev; + int ret; + + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (!vf_netdev) + return; + + if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) { + ret = dev_change_net_namespace(vf_netdev, dev_net(ndev), + "eth%d"); + if (ret) + netdev_err(vf_netdev, + "Cannot move to same namespace as %s: %d\n", + ndev->name, ret); + else + netdev_info(vf_netdev, + "Moved VF to namespace with: %s\n", + ndev->name); + } +} + /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. 
The synthetic interface is presented first @@ -2810,6 +2835,11 @@ static int netvsc_netdev_event(struct notifier_block *this, struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { + netvsc_event_set_vf_ns(event_dev); + return NOTIFY_DONE; + } + ret = check_dev_is_matching_vf(event_dev); if (ret != 0) return NOTIFY_DONE; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 12d1b205f6d1..ee2159282573 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -154,19 +154,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) return sa; } -static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) -{ - struct macsec_rx_sa *sa = NULL; - int an; - - for (an = 0; an < MACSEC_NUM_AN; an++) { - sa = macsec_rxsa_get(rx_sc->sa[an]); - if (sa) - break; - } - return sa; -} - static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); @@ -1208,15 +1195,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ - struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); DEV_STATS_INC(secy->netdev, rx_errors); - if (active_rx_sa) - this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } @@ -1226,8 +1210,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); - if (active_rx_sa) - this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } @@ -3816,8 +3798,7 @@ static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); - if (macsec->secy.tx_sc.md_dst) - metadata_dst_free(macsec->secy.tx_sc.md_dst); + dst_release(&macsec->secy.tx_sc.md_dst->dst); free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index 4dc057c121f5..e70fb6687994 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -588,6 +588,9 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, if (len > MCTP_I2C_MAXMTU) return -EMSGSIZE; + if (!daddr || !saddr) + return -EINVAL; + lldst = *((u8 *)daddr); llsrc = *((u8 *)saddr); diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index f40eb50bb978..b7bc70586ee0 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -337,6 +337,7 @@ static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,asp-v2.2-mdio", }, { .compatible = "brcm,asp-v2.1-mdio", }, { .compatible = "brcm,asp-v2.0-mdio", }, + { .compatible = "brcm,bcm6846-mdio", }, { .compatible = "brcm,genet-mdio-v5", }, { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 01cf33fa7503..de20928f7402 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -1161,8 +1161,14 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, this_chunk = min(userdata_len 
- sent_userdata, MAX_PRINT_CHUNK - preceding_bytes); - if (WARN_ON_ONCE(this_chunk <= 0)) + if (WARN_ON_ONCE(this_chunk < 0)) + /* this_chunk could be zero if all the previous + * message used all the buffer. This is not a + * problem, userdata will be sent in the next + * iteration + */ return; + memcpy(buf + this_header + this_offset, userdata + sent_userdata, this_chunk); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 92a7a36b93ac..3e0b61202f0c 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work) nsim_dev = nsim_trap_data->nsim_dev; if (!devl_trylock(priv_to_devlink(nsim_dev))) { - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1); + queue_delayed_work(system_unbound_wq, + &nsim_dev->trap_data->trap_report_dw, 1); return; } @@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work) continue; nsim_dev_trap_report(nsim_dev_port); + cond_resched(); } devl_unlock(priv_to_devlink(nsim_dev)); - - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, - msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); + queue_delayed_work(system_unbound_wq, + &nsim_dev->trap_data->trap_report_dw, + msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); } static int nsim_dev_traps_init(struct devlink *devlink) @@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink) INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw, nsim_dev_trap_report_work); - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, - msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); + queue_delayed_work(system_unbound_wq, + &nsim_dev->trap_data->trap_report_dw, + msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS)); return 0; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 41e80f78b316..16c382c42227 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -1377,10 +1377,12 @@ static ssize_t nsim_nexthop_bucket_activity_write(struct file *file, if (pos != 0) return -EINVAL; - if (size > sizeof(buf)) + if (size > sizeof(buf) - 1) return -EINVAL; if (copy_from_user(buf, user_buf, size)) return -EFAULT; + buf[size] = 0; + if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2) return -EINVAL; diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index 4d156d406bab..c33a5ef34ba0 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -537,12 +537,6 @@ static int aqcs109_config_init(struct phy_device *phydev) if (!ret) aqr107_chip_info(phydev); - /* AQCS109 belongs to a chip family partially supporting 10G and 5G. - * PMA speed ability bits are the same for all members of the family, - * AQCS109 however supports speeds up to 2.5G only. - */ - phy_set_max_speed(phydev, SPEED_2500); - return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); } @@ -731,6 +725,31 @@ static int aqr113c_fill_interface_modes(struct phy_device *phydev) return aqr107_fill_interface_modes(phydev); } +static int aqr115c_get_features(struct phy_device *phydev) +{ + unsigned long *supported = phydev->supported; + + /* PHY supports speeds up to 2.5G with autoneg. PMA capabilities + * are not useful. 
+ */ + linkmode_or(supported, supported, phy_gbit_features); + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported); + + return 0; +} + +static int aqr111_get_features(struct phy_device *phydev) +{ + /* PHY supports speeds up to 5G with autoneg. PMA capabilities + * are not useful. + */ + aqr115c_get_features(phydev); + linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->supported); + + return 0; +} + static int aqr113c_config_init(struct phy_device *phydev) { int ret; @@ -767,15 +786,6 @@ static int aqr107_probe(struct phy_device *phydev) return aqr_hwmon_probe(phydev); } -static int aqr111_config_init(struct phy_device *phydev) -{ - /* AQR111 reports supporting speed up to 10G, - * however only speeds up to 5G are supported. - */ - phy_set_max_speed(phydev, SPEED_5000); - - return aqr107_config_init(phydev); -} static struct phy_driver aqr_driver[] = { { @@ -853,6 +863,7 @@ static struct phy_driver aqr_driver[] = { .get_sset_count = aqr107_get_sset_count, .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, + .get_features = aqr115c_get_features, .link_change_notify = aqr107_link_change_notify, .led_brightness_set = aqr_phy_led_brightness_set, .led_hw_is_supported = aqr_phy_led_hw_is_supported, @@ -865,7 +876,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR111", .probe = aqr107_probe, .get_rate_matching = aqr107_get_rate_matching, - .config_init = aqr111_config_init, + .config_init = aqr107_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .handle_interrupt = aqr_handle_interrupt, @@ -877,6 +888,7 @@ static struct phy_driver aqr_driver[] = { .get_sset_count = aqr107_get_sset_count, .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, + .get_features = aqr111_get_features, .link_change_notify = aqr107_link_change_notify, .led_brightness_set = aqr_phy_led_brightness_set, .led_hw_is_supported = aqr_phy_led_hw_is_supported, @@ -889,7 +901,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR111B0", .probe = aqr107_probe, .get_rate_matching = aqr107_get_rate_matching, - .config_init = aqr111_config_init, + .config_init = aqr107_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .handle_interrupt = aqr_handle_interrupt, @@ -901,6 +913,7 @@ static struct phy_driver aqr_driver[] = { .get_sset_count = aqr107_get_sset_count, .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, + .get_features = aqr111_get_features, .link_change_notify = aqr107_link_change_notify, .led_brightness_set = aqr_phy_led_brightness_set, .led_hw_is_supported = aqr_phy_led_hw_is_supported, @@ -1010,7 +1023,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR114C", .probe = aqr107_probe, .get_rate_matching = aqr107_get_rate_matching, - .config_init = aqr111_config_init, + .config_init = aqr107_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .handle_interrupt = aqr_handle_interrupt, @@ -1022,6 +1035,7 @@ static struct phy_driver aqr_driver[] = { .get_sset_count = aqr107_get_sset_count, .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, + .get_features = aqr111_get_features, .link_change_notify = aqr107_link_change_notify, .led_brightness_set = aqr_phy_led_brightness_set, .led_hw_is_supported = aqr_phy_led_hw_is_supported, @@ -1046,6 +1060,7 @@ static struct phy_driver aqr_driver[] = { .get_sset_count = aqr107_get_sset_count, .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, + .get_features = 
aqr115c_get_features, .link_change_notify = aqr107_link_change_notify, .led_brightness_set = aqr_phy_led_brightness_set, .led_hw_is_supported = aqr_phy_led_hw_is_supported, diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c index f1d47c264058..97da3aee4942 100644 --- a/drivers/net/phy/bcm84881.c +++ b/drivers/net/phy/bcm84881.c @@ -132,7 +132,7 @@ static int bcm84881_aneg_done(struct phy_device *phydev) bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR); if (bmsr < 0) - return val; + return bmsr; return !!(val & MDIO_AN_STAT1_COMPLETE) && !!(bmsr & BMSR_ANEGCOMPLETE); @@ -158,7 +158,7 @@ static int bcm84881_read_status(struct phy_device *phydev) bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR); if (bmsr < 0) - return val; + return bmsr; phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) && !!(bmsr & BMSR_ANEGCOMPLETE); diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index fc247f479257..3ab64e04a01c 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -45,8 +45,8 @@ /* Control Register 2 bits */ #define DP83822_FX_ENABLE BIT(14) -#define DP83822_HW_RESET BIT(15) -#define DP83822_SW_RESET BIT(14) +#define DP83822_SW_RESET BIT(15) +#define DP83822_DIG_RESTART BIT(14) /* PHY STS bits */ #define DP83822_PHYSTS_DUPLEX BIT(2) diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 937061acfc61..351411f0aa6f 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -147,6 +147,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); /* IRQ related */ \ .config_intr = dp83848_config_intr, \ .handle_interrupt = dp83848_handle_interrupt, \ + \ + .flags = PHY_RST_AFTER_CLK_EN, \ } static struct phy_driver dp83848_driver[] = { diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index d7aaefb5226b..5f056d7db83e 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -645,7 +645,6 @@ static int dp83869_configure_fiber(struct phy_device *phydev, phydev->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); - linkmode_set_bit(ADVERTISED_FIBRE, phydev->advertising); if (dp83869->mode == DP83869_RGMII_1000_BASE) { linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 560e338b307a..499797646580 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3326,10 +3326,11 @@ static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev, static void phy_leds_unregister(struct phy_device *phydev) { - struct phy_led *phyled; + struct phy_led *phyled, *tmp; - list_for_each_entry(phyled, &phydev->leds, list) { + list_for_each_entry_safe(phyled, tmp, &phydev->leds, list) { led_classdev_unregister(&phyled->led_cdev); + list_del(&phyled->list); } } diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c15d2f66ef0d..166f6a728373 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -1081,6 +1081,16 @@ static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev) return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); } +static int rtl8251b_c22_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8251B, false); +} + +static int rtl8251b_c45_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8251B, true); +} + static int rtlgen_resume(struct phy_device *phydev) { int ret = genphy_resume(phydev); @@ -1418,7 
+1428,7 @@ static struct phy_driver realtek_drvs[] = { .suspend = genphy_c45_pma_suspend, .resume = rtlgen_c45_resume, }, { - PHY_ID_MATCH_EXACT(0x001cc862), + .match_phy_device = rtl8251b_c45_match_phy_device, .name = "RTL8251B 5Gbps PHY", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, @@ -1427,6 +1437,18 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + .match_phy_device = rtl8251b_c22_match_phy_device, + .name = "RTL8126A-internal 5Gbps PHY", + .get_features = rtl822x_get_features, + .config_aneg = rtl822x_config_aneg, + .read_status = rtl822x_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + .read_mmd = rtl822x_read_mmd, + .write_mmd = rtl822x_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001ccad0), .name = "RTL8224 2.5Gbps PHY", diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index e39bfaefe8c5..d81163bc910a 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -815,7 +815,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, return HS_TIMEOUT; } } - break; + fallthrough; case PLIP_PK_LENGTH_LSB: if (plip_send(nibble_timeout, dev, diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index a940b9a67107..c97406c6004d 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -542,7 +542,7 @@ ppp_async_encode(struct asyncppp *ap) * and 7 (code-reject) must be sent as though no options * had been negotiated. */ - islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7; + islcp = proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7; if (i == 0) { if (islcp) diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 4f032b16a8a0..2906ce173f66 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -113,7 +113,7 @@ static void pse_release_pis(struct pse_controller_dev *pcdev) { int i; - for (i = 0; i <= pcdev->nr_lines; i++) { + for (i = 0; i < pcdev->nr_lines; i++) { of_node_put(pcdev->pi[i].pairset[0].np); of_node_put(pcdev->pi[i].pairset[1].np); of_node_put(pcdev->pi[i].np); @@ -647,7 +647,7 @@ static int of_pse_match_pi(struct pse_controller_dev *pcdev, { int i; - for (i = 0; i <= pcdev->nr_lines; i++) { + for (i = 0; i < pcdev->nr_lines; i++) { if (pcdev->pi[i].np == np) return i; } @@ -785,6 +785,17 @@ static int pse_ethtool_c33_set_config(struct pse_control *psec, */ switch (config->c33_admin_control) { case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: + /* We could have a mismatch between admin_state_enabled and + * state reported by regulator_is_enabled. This can occur when + * the PI is forcibly turned off by the controller. Call + * regulator_disable in that case to fix the counters state.
+ */ + if (psec->pcdev->pi[psec->id].admin_state_enabled && + !regulator_is_enabled(psec->ps)) { + err = regulator_disable(psec->ps); + if (err) + break; + } if (!psec->pcdev->pi[psec->id].admin_state_enabled) err = regulator_enable(psec->ps); break; diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 252cd757d3a2..ee9fd3a94b96 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -643,46 +643,57 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) { - struct cstate *cs; - unsigned ihl; - + const struct tcphdr *th; unsigned char index; + struct iphdr *iph; + struct cstate *cs; + unsigned int ihl; - if(isize < 20) { - /* The packet is shorter than a legal IP header */ + /* The packet is shorter than a legal IP header. + * Also make sure isize is positive. + */ + if (isize < (int)sizeof(struct iphdr)) { +runt: comp->sls_i_runt++; - return slhc_toss( comp ); + return slhc_toss(comp); } + iph = (struct iphdr *)icp; /* Peek at the IP header's IHL field to find its length */ - ihl = icp[0] & 0xf; - if(ihl < 20 / 4){ - /* The IP header length field is too small */ - comp->sls_i_runt++; - return slhc_toss( comp ); - } - index = icp[9]; - icp[9] = IPPROTO_TCP; + ihl = iph->ihl; + /* The IP header length field is too small, + * or packet is shorter than the IP header followed + * by minimal tcp header. + */ + if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr)) + goto runt; + + index = iph->protocol; + iph->protocol = IPPROTO_TCP; if (ip_fast_csum(icp, ihl)) { /* Bad IP header checksum; discard */ comp->sls_i_badcheck++; - return slhc_toss( comp ); + return slhc_toss(comp); } - if(index > comp->rslot_limit) { + if (index > comp->rslot_limit) { comp->sls_i_error++; return slhc_toss(comp); } - + th = (struct tcphdr *)(icp + ihl * 4); + if (th->doff < sizeof(struct tcphdr) / 4) + goto runt; + if (isize < ihl * 4 + th->doff * 4) + goto runt; /* Update local state */ cs = &comp->rstate[comp->recv_current = index]; comp->flags &=~ SLF_TOSS; - memcpy(&cs->cs_ip,icp,20); - memcpy(&cs->cs_tcp,icp + ihl*4,20); + memcpy(&cs->cs_ip, iph, sizeof(*iph)); + memcpy(&cs->cs_tcp, th, sizeof(*th)); if (ihl > 5) - memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4); - if (cs->cs_tcp.doff > 5) - memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); - cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; + memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4); + if (th->doff > 5) + memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4); + cs->cs_hsize = ihl*2 + th->doff*2; cs->initialized = true; /* Put headers back on packet * Neither header checksum is recalculated diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4823dbdf5465..0c011d8f5d4d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1076,6 +1076,7 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0122)}, /* Quectel RG650V */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ @@ -1426,6 +1427,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel 
EM05GV2 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index a5612c799f5e..468c73974046 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -10069,6 +10069,7 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 2506aa8c603e..44179f4e807f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1767,7 +1767,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) // can rename the link if it knows better. if ((dev->driver_info->flags & FLAG_ETHER) != 0 && ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || - (net->dev_addr [0] & 0x02) == 0)) + /* somebody touched it*/ + !is_zero_ether_addr(net->dev_addr))) strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0) @@ -1870,6 +1871,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) * may trigger an error resubmitting itself and, worse, * schedule a timer. So we kill it all just in case. */ + usbnet_mark_going_away(dev); cancel_work_sync(&dev->kevent); del_timer_sync(&dev->delay); free_netdev(net); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f8131f92a392..53a038fcbe99 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -368,15 +368,16 @@ struct receive_queue { * because table sizes may be differ according to the device configuration. 
*/ #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40 -#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128 struct virtio_net_ctrl_rss { u32 hash_types; u16 indirection_table_mask; u16 unclassified_queue; - u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN]; + u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */ u16 max_tx_vq; u8 hash_key_length; u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE]; + + u16 *indirection_table; }; /* Control VQ buffers: protected by the rtnl lock */ @@ -512,6 +513,25 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb, struct page *page, void *buf, int len, int truesize); +static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size) +{ + if (!indir_table_size) { + rss->indirection_table = NULL; + return 0; + } + + rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL); + if (!rss->indirection_table) + return -ENOMEM; + + return 0; +} + +static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss) +{ + kfree(rss->indirection_table); +} + static bool is_xdp_frame(void *ptr) { return (unsigned long)ptr & VIRTIO_XDP_FLAG; @@ -3374,15 +3394,59 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); } +static bool virtnet_commit_rss_command(struct virtnet_info *vi); + +static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs) +{ + u32 indir_val = 0; + int i = 0; + + for (; i < vi->rss_indir_table_size; ++i) { + indir_val = ethtool_rxfh_indir_default(i, queue_pairs); + vi->rss.indirection_table[i] = indir_val; + } + vi->rss.max_tx_vq = queue_pairs; +} + static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct virtio_net_ctrl_mq *mq __free(kfree) = NULL; - struct scatterlist sg; + struct virtio_net_ctrl_rss old_rss; struct net_device *dev = vi->dev; + struct scatterlist sg; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; + /* Firstly check if we need to update rss. Do the update only if both (1) rss is enabled and + * (2) there is no user configuration. + * + * During rss command processing, device updates queue_pairs using rss.max_tx_vq. That is, + * the device updates queue_pairs together with rss, so we can skip the separate queue_pairs + * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly. + */ + if (vi->has_rss && !netif_is_rxfh_configured(dev)) { + memcpy(&old_rss, &vi->rss, sizeof(old_rss)); + if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) { + vi->rss.indirection_table = old_rss.indirection_table; + return -ENOMEM; + } + + virtnet_rss_update_by_qpairs(vi, queue_pairs); + + if (!virtnet_commit_rss_command(vi)) { + /* restore ctrl_rss if commit_rss_command failed */ + rss_indirection_table_free(&vi->rss); + memcpy(&vi->rss, &old_rss, sizeof(old_rss)); + + dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", + queue_pairs); + return -EINVAL; + } + rss_indirection_table_free(&old_rss); + goto succ; + } + mq = kzalloc(sizeof(*mq), GFP_KERNEL); if (!mq) return -ENOMEM; @@ -3395,12 +3459,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; - } else { - vi->curr_queue_pairs = queue_pairs; - /* virtnet_open() will refill when device is going to up.
*/ - if (dev->flags & IFF_UP) - schedule_delayed_work(&vi->refill, 0); } +succ: + vi->curr_queue_pairs = queue_pairs; + /* virtnet_open() will refill when device is going to up. */ + if (dev->flags & IFF_UP) + schedule_delayed_work(&vi->refill, 0); return 0; } @@ -3828,11 +3892,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi) /* prepare sgs */ sg_init_table(sgs, 4); - sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); + sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved); sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); - sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1); - sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); + if (vi->has_rss) { + sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size; + sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); + } else { + sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t)); + } sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); @@ -3856,21 +3924,14 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi) static void virtnet_init_default_rss(struct virtnet_info *vi) { - u32 indir_val = 0; - int i = 0; - vi->rss.hash_types = vi->rss_hash_types_supported; vi->rss_hash_types_saved = vi->rss_hash_types_supported; vi->rss.indirection_table_mask = vi->rss_indir_table_size ? vi->rss_indir_table_size - 1 : 0; vi->rss.unclassified_queue = 0; - for (; i < vi->rss_indir_table_size; ++i) { - indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); - vi->rss.indirection_table[i] = indir_val; - } + virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); - vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; vi->rss.hash_key_length = vi->rss_key_size; netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); @@ -4155,7 +4216,7 @@ struct virtnet_stats_ctx { u32 desc_num[3]; /* The actual supported stat types. */ - u32 bitmap[3]; + u64 bitmap[3]; /* Used to calculate the reply buffer size. */ u32 size[3]; @@ -6420,10 +6481,19 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_cread16(vdev, offsetof(struct virtio_net_config, rss_max_indirection_table_length)); } + err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size); + if (err) + goto free; if (vi->has_rss || vi->has_rss_hash_report) { vi->rss_key_size = virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size)); + if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { + dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", + vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); + err = -EINVAL; + goto free; + } vi->rss_hash_types_supported = virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types)); @@ -6551,6 +6621,15 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + if (vi->has_rss || vi->has_rss_hash_report) { + if (!virtnet_commit_rss_command(vi)) { + dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); + dev->hw_features &= ~NETIF_F_RXHASH; + vi->has_rss_hash_report = false; + vi->has_rss = false; + } + } + virtnet_set_queues(vi, vi->curr_queue_pairs); /* a random MAC address has been assigned, notify the device. 
@@ -6674,6 +6753,8 @@ static void virtnet_remove(struct virtio_device *vdev) remove_vq_common(vi); + rss_indirection_table_free(&vi->rss); + free_netdev(vi->dev); } diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c index a6c787454a1a..1341374a4588 100644 --- a/drivers/net/vmxnet3/vmxnet3_xdp.c +++ b/drivers/net/vmxnet3/vmxnet3_xdp.c @@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter, } else { /* XDP buffer from page pool */ page = virt_to_page(xdpf->data); tbi->dma_addr = page_pool_get_dma_addr(page) + - VMXNET3_XDP_HEADROOM; + (xdpf->data - (void *)xdpf); dma_sync_single_for_device(&adapter->pdev->dev, tbi->dma_addr, buf_size, DMA_TO_DEVICE); diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 53dcb9fffc04..6e9a3795846a 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -4913,9 +4913,13 @@ static int __init vxlan_init_module(void) if (rc) goto out4; - vxlan_vnifilter_init(); + rc = vxlan_vnifilter_init(); + if (rc) + goto out5; return 0; +out5: + rtnl_link_unregister(&vxlan_link_ops); out4: unregister_switchdev_notifier(&vxlan_switchdev_notifier_block); out3: diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h index b35d96b78843..76a351a997d5 100644 --- a/drivers/net/vxlan/vxlan_private.h +++ b/drivers/net/vxlan/vxlan_private.h @@ -202,7 +202,7 @@ int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, int vxlan_vnigroup_init(struct vxlan_dev *vxlan); void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan); -void vxlan_vnifilter_init(void); +int vxlan_vnifilter_init(void); void vxlan_vnifilter_uninit(void); void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni, struct vxlan_vni_node *vninode, diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 9c59d0bf8c3d..d2023e7131bd 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -992,19 +992,18 @@ static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } -void vxlan_vnifilter_init(void) +static const struct rtnl_msg_handler vxlan_vnifilter_rtnl_msg_handlers[] = { + {THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, vxlan_vnifilter_dump, 0}, + {THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, vxlan_vnifilter_process, NULL, 0}, + {THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, vxlan_vnifilter_process, NULL, 0}, +}; + +int vxlan_vnifilter_init(void) { - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, - vxlan_vnifilter_dump, 0); - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, - vxlan_vnifilter_process, NULL, 0); - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, - vxlan_vnifilter_process, NULL, 0); + return rtnl_register_many(vxlan_vnifilter_rtnl_msg_handlers); } void vxlan_vnifilter_uninit(void) { - rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL); - rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL); - rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL); + rtnl_unregister_many(vxlan_vnifilter_rtnl_msg_handlers); } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index dbaf26d6a7a6..16d07d619b4d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3043,9 +3043,14 @@ ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) { struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_mgmt_tx_pkt_addr *pkt_addr; struct ath10k_wmi *wmi = &ar->wmi; - 
idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + spin_lock_bh(&ar->data_lock); + pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + spin_unlock_bh(&ar->data_lock); + + kfree(pkt_addr); return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 4861179b2217..5e061f7525a6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2441,6 +2441,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param) dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); + kfree(pkt_addr); if (param->status) { info->flags &= ~IEEE80211_TX_STAT_ACK; @@ -9612,6 +9613,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr, dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, msdu); + kfree(pkt_addr); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index c087d8a0f5b2..40088e62572e 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -5291,8 +5291,11 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, hal_status == HAL_TLV_STATUS_PPDU_DONE) { rx_mon_stats->status_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; - ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi); - pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + if (!ab->hw_params.full_monitor_mode) { + ath11k_dp_rx_mon_dest_process(ar, mac_id, + budget, napi); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + } } if (ppdu_info->peer_id == HAL_INVALID_PEERID || diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index f29ac6de7139..19702b6f09c3 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -306,7 +306,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, struct sk_buff *skb) { struct wil6210_rtap { - struct ieee80211_radiotap_header rthdr; + struct ieee80211_radiotap_header_fixed rthdr; /* fields should be in the order of bits in rthdr.it_present */ /* flags */ u8 flags; diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig index 3a1a35b5672f..19d0c003f626 100644 --- a/drivers/net/wireless/broadcom/brcm80211/Kconfig +++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig @@ -27,6 +27,7 @@ source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig" config BRCM_TRACING bool "Broadcom device tracing" depends on BRCMSMAC || BRCMFMAC + depends on TRACING help If you say Y here, the Broadcom wireless drivers will register with ftrace to dump event information into the trace ringbuffer. diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index b6636002c7d2..fe75941c584d 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -2518,7 +2518,7 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i, * to build this manually element by element, we can write it much * more efficiently than we can parse it. 
ORDER MATTERS HERE */ struct ipw_rt_hdr { - struct ieee80211_radiotap_header rt_hdr; + struct ieee80211_radiotap_header_fixed rt_hdr; s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ } *ipw_rt; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index 8ebf09121e17..226286cb7eb8 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -1143,7 +1143,7 @@ struct ipw_prom_priv { * structure is provided regardless of any bits unset. */ struct ipw_rt_hdr { - struct ieee80211_radiotap_header rt_hdr; + struct ieee80211_radiotap_header_fixed rt_hdr; u64 rt_tsf; /* TSF */ /* XXX */ u8 rt_flags; /* radiotap packet flags */ u8 rt_rate; /* rate in 500kb/s */ diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 9d33a66a49b5..958dd4f9bc69 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -3122,6 +3122,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) struct il_cmd_meta *out_meta; dma_addr_t phys_addr; unsigned long flags; + u8 *out_payload; u32 idx; u16 fix_size; @@ -3157,6 +3158,16 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) out_cmd = txq->cmd[idx]; out_meta = &txq->meta[idx]; + /* The payload is in the same place in regular and huge + * command buffers, but we need to let the compiler know when + * we're using a larger payload buffer to avoid "field- + * spanning write" warnings at run-time for huge commands. + */ + if (cmd->flags & CMD_SIZE_HUGE) + out_payload = ((struct il_device_cmd_huge *)out_cmd)->cmd.payload; + else + out_payload = out_cmd->cmd.payload; + if (WARN_ON(out_meta->flags & CMD_MAPPED)) { spin_unlock_irqrestore(&il->hcmd_lock, flags); return -ENOSPC; @@ -3170,7 +3181,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) out_meta->callback = cmd->callback; out_cmd->hdr.cmd = cmd->id; - memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + memcpy(out_payload, cmd->data, cmd->len); /* At this point, the out_cmd now has all of the incoming cmd * information */ @@ -4962,6 +4973,8 @@ il_pci_resume(struct device *device) */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + _il_wr(il, CSR_INT, 0xffffffff); + _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff); il_enable_interrupts(il); if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index 2147781b5fff..725c2a88ddb7 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -560,6 +560,18 @@ struct il_device_cmd { #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd)) +/** + * struct il_device_cmd_huge + * + * For use when sending huge commands. 
+ */ +struct il_device_cmd_huge { + struct il_cmd_header hdr; /* uCode API */ + union { + u8 payload[IL_MAX_CMD_SIZE - sizeof(struct il_cmd_header)]; + } __packed cmd; +} __packed; + struct il_host_cmd { const void *data; unsigned long reply_page; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index a7cea0a55b35..0bc32291815e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -429,38 +429,28 @@ int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) return ret; } -static int iwl_acpi_sar_set_profile(union acpi_object *table, - struct iwl_sar_profile *profile, - bool enabled, u8 num_chains, - u8 num_sub_bands) +static int +iwl_acpi_parse_chains_table(union acpi_object *table, + struct iwl_sar_profile_chain *chains, + u8 num_chains, u8 num_sub_bands) { - int i, j, idx = 0; - - /* - * The table from ACPI is flat, but we store it in a - * structured array. - */ - for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) { - for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) { + for (u8 chain = 0; chain < num_chains; chain++) { + for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM; + subband++) { /* if we don't have the values, use the default */ - if (i >= num_chains || j >= num_sub_bands) { - profile->chains[i].subbands[j] = 0; + if (subband >= num_sub_bands) { + chains[chain].subbands[subband] = 0; + } else if (table->type != ACPI_TYPE_INTEGER || + table->integer.value > U8_MAX) { + return -EINVAL; } else { - if (table[idx].type != ACPI_TYPE_INTEGER || - table[idx].integer.value > U8_MAX) - return -EINVAL; - - profile->chains[i].subbands[j] = - table[idx].integer.value; - - idx++; + chains[chain].subbands[subband] = + table->integer.value; + table++; } } } - /* Only if all values were valid can the profile be enabled */ - profile->enabled = enabled; - return 0; } @@ -543,9 +533,11 @@ int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt) /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). 
*/ - ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0], - flags & IWL_SAR_ENABLE_MSK, - num_chains, num_sub_bands); + ret = iwl_acpi_parse_chains_table(table, fwrt->sar_profiles[0].chains, + num_chains, num_sub_bands); + if (!ret && flags & IWL_SAR_ENABLE_MSK) + fwrt->sar_profiles[0].enabled = true; + out_free: kfree(data); return ret; @@ -557,7 +549,7 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; - u8 num_chains, num_sub_bands; + u8 num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) @@ -573,7 +565,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; @@ -589,7 +580,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; @@ -605,7 +595,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; @@ -637,23 +626,54 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) /* the tables start at element 3 */ pos = 3; + BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV0 != ACPI_SAR_NUM_CHAINS_REV1); + BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV2 != 2 * ACPI_SAR_NUM_CHAINS_REV0); + + /* parse non-cdb chains for all profiles */ for (i = 0; i < n_profiles; i++) { union acpi_object *table = &wifi_pkg->package.elements[pos]; + /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. */ - ret = iwl_acpi_sar_set_profile(table, - &fwrt->sar_profiles[i + 1], - enabled, num_chains, - num_sub_bands); + ret = iwl_acpi_parse_chains_table(table, + fwrt->sar_profiles[i + 1].chains, + ACPI_SAR_NUM_CHAINS_REV0, + num_sub_bands); if (ret < 0) - break; + goto out_free; /* go to the next table */ - pos += num_chains * num_sub_bands; + pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands; } + /* non-cdb table revisions */ + if (tbl_rev < 2) + goto set_enabled; + + /* parse cdb chains for all profiles */ + for (i = 0; i < n_profiles; i++) { + struct iwl_sar_profile_chain *chains; + union acpi_object *table; + + table = &wifi_pkg->package.elements[pos]; + chains = &fwrt->sar_profiles[i + 1].chains[ACPI_SAR_NUM_CHAINS_REV0]; + ret = iwl_acpi_parse_chains_table(table, + chains, + ACPI_SAR_NUM_CHAINS_REV0, + num_sub_bands); + if (ret < 0) + goto out_free; + + /* go to the next table */ + pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands; + } + +set_enabled: + for (i = 0; i < n_profiles; i++) + fwrt->sar_profiles[i + 1].enabled = enabled; + out_free: kfree(data); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index d8b083be5b6b..de87e0e3e072 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -39,10 +39,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); +/* Assumes the appropriate lock is held by the caller */ void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) { iwl_fw_suspend_timestamp(fwrt); - iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL); + iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, + NULL); 
} IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 2abfc986701f..c620911a1193 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1413,26 +1413,36 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; /* also protects start/stop from racing against each other */ lockdep_assert_held(&iwlwifi_opmode_table_mtx); -#ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - dbgfs_dir = drv->dbgfs_op_mode; -#endif - - op_mode = ops->start(drv->trans, drv->trans->cfg, - &drv->fw, dbgfs_dir); - if (op_mode) - return op_mode; + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS - debugfs_remove_recursive(drv->dbgfs_op_mode); - drv->dbgfs_op_mode = NULL; + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + dbgfs_dir = drv->dbgfs_op_mode; #endif + op_mode = ops->start(drv->trans, drv->trans->cfg, + &drv->fw, dbgfs_dir); + + if (op_mode) + return op_mode; + + if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status)) + break; + + IWL_ERR(drv, "retry init count %d\n", retry); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(drv->dbgfs_op_mode); + drv->dbgfs_op_mode = NULL; +#endif + } + return NULL; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 1549ff429549..6a1d31892417 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -98,6 +98,9 @@ void iwl_drv_stop(struct iwl_drv *drv); #define VISIBLE_IF_IWLWIFI_KUNIT static #endif +/* max retry for init flow */ +#define IWL_MAX_INIT_RETRY 2 + #define FW_NAME_PRE_BUFSIZE 64 struct iwl_trans; const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 49a6aff42376..244ca8cab9d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1398,7 +1398,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) iwl_mvm_pause_tcm(mvm, true); + mutex_lock(&mvm->mutex); iwl_fw_runtime_suspend(&mvm->fwrt); + mutex_unlock(&mvm->mutex); return __iwl_mvm_suspend(hw, wowlan, false); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 08546e673cf5..f30b0fc8eca9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1307,8 +1307,8 @@ static void iwl_mvm_disconnect_iterator(void *data, u8 *mac, void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) { u32 error_log_size = mvm->fw->ucode_capa.error_log_size; + u32 status = 0; int ret; - u32 resp; struct iwl_fw_error_recovery_cmd recovery_cmd = { .flags = cpu_to_le32(flags), @@ -1316,7 +1316,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) }; struct iwl_host_cmd host_cmd = { .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), - .flags = CMD_WANT_SKB, .data = {&recovery_cmd, }, .len = {sizeof(recovery_cmd), }, }; @@ -1336,7 +1335,7 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) 
recovery_cmd.buf_size = cpu_to_le32(error_log_size); } - ret = iwl_mvm_send_cmd(mvm, &host_cmd); + ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status); kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; @@ -1347,11 +1346,10 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ if (flags & ERROR_RECOVERY_UPDATE_DB) { - resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); - if (resp) { + if (status) { IWL_ERR(mvm, "Failed to send recovery cmd blob was invalid %d\n", - resp); + status); ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_disconnect_iterator, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a327893c6dce..80b9a115245f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1293,12 +1293,14 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + int retry, max_retry = 0; mutex_lock(&mvm->mutex); /* we are starting the mac not in error flow, and restart is enabled */ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; /* * This will prevent mac80211 recovery flows to trigger during * init failures @@ -1306,7 +1308,13 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw) set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); } - ret = __iwl_mvm_mac_start(mvm); + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); mutex_unlock(&mvm->mutex); @@ -1970,7 +1978,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = NULL; } - iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); @@ -1979,6 +1986,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->monitor_on = false; out: + iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf); if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index f2378e0fb2fb..e252f0dcea20 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -41,8 +41,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* reset deflink MLO parameters */ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; mvmvif->deflink.active = 0; - /* the first link always points to the default one */ - mvmvif->link[0] = &mvmvif->deflink; ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif); if (ret) @@ -60,9 +58,19 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_free_bf; + /* We want link[0] to point to the default link, unless we have MLO, + * in which case it will be modified later by .change_vif_links(). + * If we are in the restart flow with an MLD connection, we will wait + * for .change_vif_links() to set up the links.
+ */ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + !ieee80211_vif_is_mld(vif)) { + mvmvif->link[0] = &mvmvif->deflink; + + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; + } /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped @@ -350,11 +358,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, rcu_read_unlock(); } - if (vif->type == NL80211_IFTYPE_STATION) - iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, - link_conf, - false); - /* then activate */ ret = iwl_mvm_link_changed(mvm, vif, link_conf, LINK_CONTEXT_MODIFY_ACTIVE | @@ -363,6 +366,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, if (ret) goto out; + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); + /* * Power state must be updated before quotas, * otherwise fw will complain. @@ -1194,7 +1202,11 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (old_links == 0) { + /* If we're in RESTART flow, the default link wasn't added in + * drv_add_interface(), and link[0] doesn't point to it. + */ + if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) { err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); if (err) goto out_err; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 3ce9150213a7..ddcbd80a49fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1774,7 +1774,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, &cp->channel_config[ch_cnt]; u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; - u8 j, k, n_s_ssids = 0, n_bssids = 0; + u8 k, n_s_ssids = 0, n_bssids = 0; u8 max_s_ssids, max_bssids; bool force_passive = false, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; @@ -1799,7 +1799,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, cfg->v5.iter_count = 1; cfg->v5.iter_interval = 0; - for (j = 0; j < params->n_6ghz_params; j++) { + for (u32 j = 0; j < params->n_6ghz_params; j++) { s8 tmp_psd_20; if (!(scan_6ghz_params[j].channel_idx == i)) @@ -1873,7 +1873,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, * SSID. 
* TODO: improve this logic */ - for (j = 0; j < params->n_6ghz_params; j++) { + for (u32 j = 0; j < params->n_6ghz_params; j++) { if (!(scan_6ghz_params[j].channel_idx == i)) continue; diff --git a/drivers/net/wireless/marvell/libertas/radiotap.h b/drivers/net/wireless/marvell/libertas/radiotap.h index 1ed5608d353f..d543bfe739dc 100644 --- a/drivers/net/wireless/marvell/libertas/radiotap.h +++ b/drivers/net/wireless/marvell/libertas/radiotap.h @@ -2,7 +2,7 @@ #include struct tx_radiotap_hdr { - struct ieee80211_radiotap_header hdr; + struct ieee80211_radiotap_header_fixed hdr; u8 rate; u8 txpower; u8 rts_retries; @@ -31,7 +31,7 @@ struct tx_radiotap_hdr { #define IEEE80211_FC_DSTODS 0x0300 struct rx_radiotap_hdr { - struct ieee80211_radiotap_header hdr; + struct ieee80211_radiotap_header_fixed hdr; u8 flags; u8 rate; u8 antsignal; diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index 98da82b74094..3353012e8542 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -84,13 +84,16 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, mutex_lock(&dev->mcu.mutex); if (dev->mcu_ops->mcu_skb_prepare_msg) { + orig_skb = skb; ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq); if (ret < 0) goto out; } retry: - orig_skb = skb_get(skb); + /* orig skb might be needed for retry, mcu_skb_send_msg consumes it */ + if (orig_skb) + skb_get(orig_skb); ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq); if (ret < 0) goto out; @@ -105,7 +108,7 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, do { skb = mt76_mcu_get_response(dev, expires); if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) && - retry++ < dev->mcu_ops->max_retry) { + orig_skb && retry++ < dev->mcu_ops->max_retry) { dev_err(dev->dev, "Retry message %08x (seq %d)\n", cmd, seq); skb = orig_skb; diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c index 03b7229a0ff5..c3d27aaec297 100644 --- a/drivers/net/wireless/microchip/wilc1000/mon.c +++ b/drivers/net/wireless/microchip/wilc1000/mon.c @@ -7,12 +7,12 @@ #include "cfg80211.h" struct wilc_wfi_radiotap_hdr { - struct ieee80211_radiotap_header hdr; + struct ieee80211_radiotap_header_fixed hdr; u8 rate; } __packed; struct wilc_wfi_radiotap_cb_hdr { - struct ieee80211_radiotap_header hdr; + struct ieee80211_radiotap_header_fixed hdr; u8 rate; u8 dump; u16 tx_flags; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c index d069a81ac617..cc699efa9c79 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c @@ -352,7 +352,6 @@ static const struct usb_device_id rtl8192d_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)}, {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)}, {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)}, - {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)}, {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)}, {RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)}, {RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)}, diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index e83ab6fb83f5..b17a429bcd29 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -771,7 
+771,6 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable) u8 size, timeout; u16 val16; - rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC); rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN); rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7)); diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index df51b29142aa..8d27374db83c 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -6445,6 +6445,8 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid) /* todo DBCC related event */ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now); + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g); if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) { wl_rinfo->dbcc_chg = 1; diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 02afeb3acce4..5aef7fa37878 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -3026,23 +3026,53 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, pci_disable_device(pdev); } -static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) +static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) { - struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; const struct rtw89_chip_info *chip = rtwdev->chip; - if (!rtwpci->enable_dac) - return; - switch (chip->chip_id) { case RTL8852A: case RTL8852B: case RTL8851B: case RTL8852BT: - break; + return true; default: - return; + return false; } +} + +static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); + + if (!rtw89_pci_chip_is_manual_dac(rtwdev)) + return true; + + if (!bridge) + return false; + + switch (bridge->vendor) { + case PCI_VENDOR_ID_INTEL: + return true; + case PCI_VENDOR_ID_ASMEDIA: + if (bridge->device == 0x2806) + return true; + break; + } + + return false; +} + +static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + if (!rtwpci->enable_dac) + return; + + if (!rtw89_pci_chip_is_manual_dac(rtwdev)) + return; rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); } @@ -3061,6 +3091,9 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, goto err; } + if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) + goto no_dac; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); if (!ret) { rtwpci->enable_dac = true; @@ -3073,6 +3106,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, goto err_release_regions; } } +no_dac: resource_len = pci_resource_len(pdev, bar_id); rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index f0e528abb1b4..3f424f14de4e 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -763,7 +763,7 @@ static const struct rhashtable_params hwsim_rht_params = { }; struct hwsim_radiotap_hdr { - struct ieee80211_radiotap_header hdr; + struct ieee80211_radiotap_header_fixed hdr; __le64 rt_tsft; u8 rt_flags; u8 rt_rate; @@ -772,7 +772,7 @@ struct hwsim_radiotap_hdr { } __packed; struct hwsim_radiotap_ack_hdr { - struct ieee80211_radiotap_header hdr; 
+ struct ieee80211_radiotap_header_fixed hdr; u8 rt_flags; u8 pad; __le16 rt_channel; diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index 210d84c67ef9..7a9c09cd4fdc 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -226,7 +226,7 @@ int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, return 0; err_unmap_skbs: - while (--i > 0) + while (i--) t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); return ret; diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 17431f1b1a0c..65a7ed4d6766 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -1038,7 +1038,7 @@ static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = { static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = { .kind = "wwan", - .maxtype = __IFLA_WWAN_MAX, + .maxtype = IFLA_WWAN_MAX, .alloc = wwan_rtnl_alloc, .validate = wwan_rtnl_validate, .newlink = wwan_rtnl_newlink, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 43d73d31c66f..855b42c92284 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -91,6 +91,17 @@ module_param(apst_secondary_latency_tol_us, ulong, 0644); MODULE_PARM_DESC(apst_secondary_latency_tol_us, "secondary APST latency tolerance in us"); +/* + * Older kernels didn't enable protection information if it was at an offset. + * Newer kernels do, so it breaks reads on the upgrade if such formats were + * used in prior kernels since the metadata written did not contain a valid + * checksum. + */ +static bool disable_pi_offsets = false; +module_param(disable_pi_offsets, bool, 0444); +MODULE_PARM_DESC(disable_pi_offsets, + "disable protection information if it has an offset"); + /* * nvme_wq - hosts nvme related works that are not reset or delete * nvme_reset_wq - hosts nvme reset works @@ -1292,14 +1303,12 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); } -static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, - blk_status_t status) +static void nvme_keep_alive_finish(struct request *rq, + blk_status_t status, struct nvme_ctrl *ctrl) { - struct nvme_ctrl *ctrl = rq->end_io_data; - unsigned long flags; - bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); unsigned long delay = nvme_keep_alive_work_period(ctrl); + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); /* * Subtract off the keepalive RTT so nvme_keep_alive_work runs @@ -1313,25 +1322,17 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, delay = 0; } - blk_mq_free_request(rq); - if (status) { dev_err(ctrl->device, "failed nvme_keep_alive_end_io error=%d\n", status); - return RQ_END_IO_NONE; + return; } ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->state == NVME_CTRL_LIVE || - ctrl->state == NVME_CTRL_CONNECTING) - startka = true; - spin_unlock_irqrestore(&ctrl->lock, flags); - if (startka) + if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); - return RQ_END_IO_NONE; } static void nvme_keep_alive_work(struct work_struct *work) @@ -1340,6 +1341,7 @@ static void nvme_keep_alive_work(struct work_struct *work) struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; struct request *rq; + blk_status_t status; ctrl->ka_last_check_time = jiffies; @@ -1362,9 +1364,9 
@@ static void nvme_keep_alive_work(struct work_struct *work) nvme_init_request(rq, &ctrl->ka_cmd); rq->timeout = ctrl->kato * HZ; - rq->end_io = nvme_keep_alive_end_io; - rq->end_io_data = ctrl; - blk_execute_rq_nowait(rq, false); + status = blk_execute_rq(rq, false); + nvme_keep_alive_finish(rq, status, ctrl); + blk_mq_free_request(rq); } static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) @@ -1399,17 +1401,30 @@ static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, nvme_start_keep_alive(ctrl); } -/* - * In NVMe 1.0 the CNS field was just a binary controller or namespace - * flag, thus sending any new CNS opcodes has a big chance of not working. - * Qemu unfortunately had that bug after reporting a 1.1 version compliance - * (but not for any later version). - */ -static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns) { - if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) - return ctrl->vs < NVME_VS(1, 2, 0); - return ctrl->vs < NVME_VS(1, 1, 0); + /* + * The CNS field occupies a full byte starting with NVMe 1.2 + */ + if (ctrl->vs >= NVME_VS(1, 2, 0)) + return true; + + /* + * NVMe 1.1 expanded the CNS value to two bits, which means values + * larger than that could get truncated and treated as an incorrect + * value. + * + * Qemu implemented 1.0 behavior for controllers claiming 1.1 + * compliance, so they need to be quirked here. + */ + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) + return cns <= 3; + + /* + * NVMe 1.0 used a single bit for the CNS value. + */ + return cns <= 1; } static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) @@ -1922,8 +1937,12 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl, if (head->pi_size && head->ms >= head->pi_size) head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; - if (!(id->dps & NVME_NS_DPS_PI_FIRST)) - info->pi_offset = head->ms - head->pi_size; + if (!(id->dps & NVME_NS_DPS_PI_FIRST)) { + if (disable_pi_offsets) + head->pi_type = 0; + else + info->pi_offset = head->ms - head->pi_size; + } if (ctrl->ops->flags & NVME_F_FABRICS) { /* @@ -2458,8 +2477,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) - ctrl->ctrl_config |= NVME_CC_CRIME; + /* + * Setting CRIME results in CSTS.RDY before the media is ready. This + * makes it possible for media related commands to return the error + * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is + * restructured to handle retries, disable CC.CRIME. + */ + ctrl->ctrl_config &= ~NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2489,10 +2513,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) * devices are known to get this wrong. Use the larger of the * two values. 
*/ - if (ctrl->ctrl_config & NVME_CC_CRIME) - ready_timeout = NVME_CRTO_CRIMT(crto); - else - ready_timeout = NVME_CRTO_CRWMT(crto); + ready_timeout = NVME_CRTO_CRWMT(crto); if (ready_timeout < timeout) dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", @@ -3111,7 +3132,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) ctrl->max_zeroes_sectors = 0; if (ctrl->subsys->subtype != NVME_NQN_NVME || - nvme_ctrl_limited_cns(ctrl) || + !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) || test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) return 0; @@ -3774,7 +3795,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; @@ -4207,7 +4229,7 @@ static void nvme_scan_work(struct work_struct *work) } mutex_lock(&ctrl->scan_lock); - if (nvme_ctrl_limited_cns(ctrl)) { + if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) { nvme_scan_ns_sequential(ctrl); } else { /* @@ -4858,7 +4880,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mark_disk_dead(ns->disk); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4870,7 +4893,8 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_unfreeze_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); @@ -4883,7 +4907,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; @@ -4899,7 +4924,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_freeze_queue_wait(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4912,7 +4938,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl) set_bit(NVME_CTRL_FROZEN, &ctrl->flags); srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_freeze_queue_start(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4960,7 +4987,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_sync_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index b9b79ccfabf8..a96976b22fa7 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -421,10 +421,13 @@ static enum 
rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, struct io_uring_cmd *ioucmd = req->end_io_data; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { pdu->status = -EINTR; - else + } else { pdu->status = nvme_req(req)->status; + if (!pdu->status) + pdu->status = blk_status_to_errno(err); + } pdu->result = le64_to_cpu(nvme_req(req)->result.u64); /* diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 48e7a8906d01..6a15873055b9 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -431,7 +431,6 @@ static bool nvme_available_path(struct nvme_ns_head *head) case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: - /* fallthru */ return true; default: break; @@ -580,6 +579,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) return ret; } +static void nvme_partition_scan_work(struct work_struct *work) +{ + struct nvme_ns_head *head = + container_of(work, struct nvme_ns_head, partition_scan_work); + + if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN, + &head->disk->state))) + return; + + mutex_lock(&head->disk->open_mutex); + bdev_disk_changed(head->disk, false); + mutex_unlock(&head->disk->open_mutex); +} + static void nvme_requeue_work(struct work_struct *work) { struct nvme_ns_head *head = @@ -606,6 +619,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) bio_list_init(&head->requeue_list); spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); + INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); /* * Add a multipath node if the subsystems supports multiple controllers. @@ -629,6 +643,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) return PTR_ERR(head->disk); head->disk->fops = &nvme_ns_head_ops; head->disk->private_data = head; + + /* + * We need to suppress the partition scan from occurring within the + * controller's scan_work context. If a path error occurs here, the IO + * will wait until a path becomes available or all paths are torn down, + * but that action also occurs within scan_work, so it would deadlock. + * Defer the partition scan to a different context that does not block + * scan_work. + */ + set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); return 0; @@ -655,6 +679,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) return; } nvme_add_ns_head_cdev(head); + kblockd_schedule_work(&head->partition_scan_work); } mutex_lock(&head->lock); @@ -974,14 +999,14 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) return; if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { nvme_cdev_del(&head->cdev, &head->cdev_device); + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. + */ + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); del_gendisk(head->disk); } - /* - * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared - * to allow multipath to fail all I/O. 
- */ - synchronize_srcu(&head->srcu); - kblockd_schedule_work(&head->requeue_work); } void nvme_mpath_remove_disk(struct nvme_ns_head *head) @@ -991,6 +1016,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); + flush_work(&head->partition_scan_work); put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 313a4f978a2c..093cb423f536 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -494,6 +494,7 @@ struct nvme_ns_head { struct bio_list requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; + struct work_struct partition_scan_work; struct mutex lock; unsigned long flags; #define NVME_NSHEAD_DISK_LIVE 0 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 7990c3f22ecf..4b9fda0b1d9a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2506,17 +2506,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) return 1; } -static void nvme_pci_update_nr_queues(struct nvme_dev *dev) +static bool nvme_pci_update_nr_queues(struct nvme_dev *dev) { if (!dev->ctrl.tagset) { nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); - return; + return true; + } + + /* Give up if we are racing with nvme_dev_disable() */ + if (!mutex_trylock(&dev->shutdown_lock)) + return false; + + /* Check if nvme_dev_disable() has been executed already */ + if (!dev->online_queues) { + mutex_unlock(&dev->shutdown_lock); + return false; } blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); /* free previously allocated queues that are no longer usable */ nvme_free_queues(dev, dev->online_queues); + mutex_unlock(&dev->shutdown_lock); + return true; } static int nvme_pci_enable(struct nvme_dev *dev) @@ -2797,7 +2809,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_dbbuf_set(dev); nvme_unquiesce_io_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); - nvme_pci_update_nr_queues(dev); + if (!nvme_pci_update_nr_queues(dev)) + goto out; nvme_unfreeze(&dev->ctrl); } else { dev_warn(dev->ctrl.device, "IO queues lost\n"); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 89c44413c593..3e416af2659f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2644,10 +2644,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len = nvmf_get_address(ctrl, buf, size); + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return len; + mutex_lock(&queue->queue_lock); - if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) - goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) @@ -2655,7 +2656,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ? 
"," : "", &src_addr); } -done: + mutex_unlock(&queue->queue_lock); return len; diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 29f8639cfe7f..b47d675232d2 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -115,6 +115,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) pr_debug("%s: ctrl %d failed to generate private key, err %d\n", __func__, ctrl->cntlid, ret); kfree_sensitive(ctrl->dh_key); + ctrl->dh_key = NULL; return ret; } ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index e32790d8fc26..a9d112d34d4f 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; + /* + * It's possible that some requests might have been added + * after admin queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. + */ + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } ctrl->ctrl.queue_count = 1; + /* + * It's possible that some requests might have been added + * after io queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. + */ + nvme_unquiesce_io_queues(&ctrl->ctrl); } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 24d0e2418d2e..0f9b280c438d 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -535,10 +535,6 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) break; case nvme_admin_identify: switch (req->cmd->identify.cns) { - case NVME_ID_CNS_CTRL: - req->execute = nvmet_passthru_execute_cmd; - req->p.use_workqueue = true; - return NVME_SC_SUCCESS; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_ZNS: @@ -547,7 +543,9 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) return NVME_SC_SUCCESS; } return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + case NVME_ID_CNS_CTRL: case NVME_ID_CNS_NS: + case NVME_ID_CNS_NS_DESC_LIST: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index ade285308450..1afd93026f9b 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -39,6 +39,8 @@ #define NVMET_RDMA_BACKLOG 128 +#define NVMET_RDMA_DISCRETE_RSP_TAG -1 + struct nvmet_rdma_srq; struct nvmet_rdma_cmd { @@ -75,7 +77,7 @@ struct nvmet_rdma_rsp { u32 invalidate_rkey; struct list_head wait_list; - struct list_head free_list; + int tag; }; enum nvmet_rdma_queue_state { @@ -98,8 +100,7 @@ struct nvmet_rdma_queue { struct nvmet_sq nvme_sq; struct nvmet_rdma_rsp *rsps; - struct list_head free_rsps; - spinlock_t rsps_lock; + struct sbitmap rsp_tags; struct nvmet_rdma_cmd *cmds; struct work_struct release_work; @@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r); + struct nvmet_rdma_rsp 
*r, + int tag); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -210,15 +212,12 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) static inline struct nvmet_rdma_rsp * nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) { - struct nvmet_rdma_rsp *rsp; - unsigned long flags; + struct nvmet_rdma_rsp *rsp = NULL; + int tag; - spin_lock_irqsave(&queue->rsps_lock, flags); - rsp = list_first_entry_or_null(&queue->free_rsps, - struct nvmet_rdma_rsp, free_list); - if (likely(rsp)) - list_del(&rsp->free_list); - spin_unlock_irqrestore(&queue->rsps_lock, flags); + tag = sbitmap_get(&queue->rsp_tags); + if (tag >= 0) + rsp = &queue->rsps[tag]; if (unlikely(!rsp)) { int ret; @@ -226,13 +225,12 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; - ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, + NVMET_RDMA_DISCRETE_RSP_TAG); if (unlikely(ret)) { kfree(rsp); return NULL; } - - rsp->allocated = true; } return rsp; @@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) static inline void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { - unsigned long flags; - - if (unlikely(rsp->allocated)) { + if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) { nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } - spin_lock_irqsave(&rsp->queue->rsps_lock, flags); - list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); - spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); + sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); } static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, @@ -404,7 +398,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, } static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r) + struct nvmet_rdma_rsp *r, int tag) { /* NVMe CQE / RDMA SEND */ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); @@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, r->read_cqe.done = nvmet_rdma_read_data_done; /* Data Out / RDMA WRITE */ r->write_cqe.done = nvmet_rdma_write_data_done; + r->tag = tag; return 0; @@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int nr_rsps = queue->recv_queue_size * 2; - int ret = -EINVAL, i; + int ret = -ENOMEM, i; + + if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, + NUMA_NO_NODE, false, true)) + goto out; queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), GFP_KERNEL); if (!queue->rsps) - goto out; + goto out_free_sbitmap; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; - ret = nvmet_rdma_alloc_rsp(ndev, rsp); + ret = nvmet_rdma_alloc_rsp(ndev, rsp, i); if (ret) goto out_free; - - list_add_tail(&rsp->free_list, &queue->free_rsps); } return 0; @@ -477,6 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) while (--i >= 0) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); +out_free_sbitmap: + sbitmap_free(&queue->rsp_tags); out: return ret; } @@ -489,6 +488,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) for (i = 0; i < nr_rsps; i++) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); + sbitmap_free(&queue->rsp_tags); } static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, @@ -1447,8 +1447,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, INIT_LIST_HEAD(&queue->rsp_wait_list); 
INIT_LIST_HEAD(&queue->rsp_wr_wait_list); spin_lock_init(&queue->rsp_wr_wait_lock); - INIT_LIST_HEAD(&queue->free_rsps); - spin_lock_init(&queue->rsps_lock); INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); diff --git a/drivers/of/of_kunit_helpers.c b/drivers/of/of_kunit_helpers.c index 287d6c91bb37..7b3ed5a382aa 100644 --- a/drivers/of/of_kunit_helpers.c +++ b/drivers/of/of_kunit_helpers.c @@ -10,6 +10,19 @@ #include #include +#include "of_private.h" + +/** + * of_root_kunit_skip() - Skip test if the root node isn't populated + * @test: test to skip if the root node isn't populated + */ +void of_root_kunit_skip(struct kunit *test) +{ + if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ACPI) && !of_root) + kunit_skip(test, "arm64+acpi doesn't populate a root node"); +} +EXPORT_SYMBOL_GPL(of_root_kunit_skip); + #if defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE) static void of_overlay_fdt_apply_kunit_exit(void *ovcs_id) @@ -36,6 +49,8 @@ int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt, int ret; int *copy_id; + of_root_kunit_skip(test); + copy_id = kunit_kmalloc(test, sizeof(*copy_id), GFP_KERNEL); if (!copy_id) return -ENOMEM; diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 04aa2a91f851..c235d6c909a1 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -42,6 +42,9 @@ extern raw_spinlock_t devtree_lock; extern struct list_head aliases_lookup; extern struct kset *of_kset; +struct kunit; +extern void of_root_kunit_skip(struct kunit *test); + #if defined(CONFIG_OF_DYNAMIC) extern int of_property_notify(int action, struct device_node *np, struct property *prop, struct property *old_prop); diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c index c85a258bc6ae..b0557ded838f 100644 --- a/drivers/of/of_test.c +++ b/drivers/of/of_test.c @@ -7,6 +7,8 @@ #include +#include "of_private.h" + /* * Test that the root node "/" can be found by path. 
*/ @@ -36,6 +38,7 @@ static struct kunit_case of_dtb_test_cases[] = { static int of_dtb_test_init(struct kunit *test) { + of_root_kunit_skip(test); if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE)) kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE"); diff --git a/drivers/of/overlay_test.c b/drivers/of/overlay_test.c index 19a292cdeee3..1f76d50fb16a 100644 --- a/drivers/of/overlay_test.c +++ b/drivers/of/overlay_test.c @@ -11,6 +11,8 @@ #include #include +#include "of_private.h" + static const char * const kunit_node_name = "kunit-test"; static const char * const kunit_compatible = "test,empty"; @@ -62,6 +64,7 @@ static void of_overlay_apply_kunit_cleanup(struct kunit *test) struct device *dev; struct device_node *np; + of_root_kunit_skip(test); if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE)) kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE for root node"); @@ -73,7 +76,7 @@ static void of_overlay_apply_kunit_cleanup(struct kunit *test) np = of_find_node_by_name(NULL, kunit_node_name); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np); - of_node_put_kunit(test, np); + of_node_put_kunit(&fake, np); pdev = of_find_device_by_node(np); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 494f8860220d..3aa18737470f 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2630,8 +2630,10 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) /* Attach genpds */ if (config->genpd_names) { - if (config->required_devs) + if (config->required_devs) { + ret = -EINVAL; goto err; + } ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, config->virt_devs); diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 3ef486cd3d6d..3880460e67f2 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -51,12 +51,12 @@ static int do_active_device(const struct ctl_table *table, int write, for (dev = port->devices; dev ; dev = dev->next) { if(dev == port->cad) { - len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name); + len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name); } } if(!len) { - len += snprintf(buffer, sizeof(buffer), "%s\n", "none"); + len += scnprintf(buffer, sizeof(buffer), "%s\n", "none"); } if (len > *lenp) @@ -87,19 +87,19 @@ static int do_autoprobe(const struct ctl_table *table, int write, } if ((str = info->class_name) != NULL) - len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str); + len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str); if ((str = info->model) != NULL) - len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str); + len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str); if ((str = info->mfr) != NULL) - len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str); + len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str); if ((str = info->description) != NULL) - len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str); + len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str); if ((str = info->cmdset) != NULL) - len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str); + len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str); if (len > *lenp) len = *lenp; @@ -128,7 +128,7 @@ static int do_hardware_base_addr(const struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += snprintf (buffer, sizeof(buffer), 
"%lu\t%lu\n", port->base, port->base_hi); + len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi); if (len > *lenp) len = *lenp; @@ -155,7 +155,7 @@ static int do_hardware_irq(const struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq); + len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq); if (len > *lenp) len = *lenp; @@ -182,7 +182,7 @@ static int do_hardware_dma(const struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma); + len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma); if (len > *lenp) len = *lenp; @@ -213,7 +213,7 @@ static int do_hardware_modes(const struct ctl_table *table, int write, #define printmode(x) \ do { \ if (port->modes & PARPORT_MODE_##x) \ - len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \ + len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \ } while (0) int f = 0; printmode(PCSPP); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7d85c04fbba2..225a6cd2e9ca 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1067,8 +1067,15 @@ static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps) static void pci_enable_acs(struct pci_dev *dev) { struct pci_acs caps; + bool enable_acs = false; int pos; + /* If an iommu is present we start with kernel default caps */ + if (pci_acs_enable) { + if (pci_dev_specific_enable_acs(dev)) + enable_acs = true; + } + pos = dev->acs_cap; if (!pos) return; @@ -1077,11 +1084,8 @@ static void pci_enable_acs(struct pci_dev *dev) pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl); caps.fw_ctrl = caps.ctrl; - /* If an iommu is present we start with kernel default caps */ - if (pci_acs_enable) { - if (pci_dev_specific_enable_acs(dev)) - pci_std_enable_acs(dev, &caps); - } + if (enable_acs) + pci_std_enable_acs(dev, &caps); /* * Always apply caps from the command line, even if there is no iommu. diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4f68414c3086..f1615805f5b0 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -3105,7 +3105,9 @@ int pci_host_probe(struct pci_host_bridge *bridge) list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); + pci_lock_rescan_remove(); pci_bus_add_devices(bus); + pci_unlock_rescan_remove(); return 0; } EXPORT_SYMBOL_GPL(pci_host_probe); diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c index a23a4312574b..0e6bd47671c2 100644 --- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c +++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c @@ -6,9 +6,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -18,6 +18,40 @@ struct pci_pwrctl_pwrseq_data { struct pwrseq_desc *pwrseq; }; +struct pci_pwrctl_pwrseq_pdata { + const char *target; + /* + * Called before doing anything else to perform device-specific + * verification between requesting the power sequencing handle. + */ + int (*validate_device)(struct device *dev); +}; + +static int pci_pwrctl_pwrseq_qcm_wcn_validate_device(struct device *dev) +{ + /* + * Old device trees for some platforms already define wifi nodes for + * the WCN family of chips since before power sequencing was added + * upstream. 
+ * + * These nodes don't consume the regulator outputs from the PMU, and + * if we allow this driver to bind to one of such "incomplete" nodes, + * we'll see a kernel log error about the indefinite probe deferral. + * + * Check the existence of the regulator supply that exists on all + * WCN models before moving forward. + */ + if (!device_property_present(dev, "vddaon-supply")) + return -ENODEV; + + return 0; +} + +static const struct pci_pwrctl_pwrseq_pdata pci_pwrctl_pwrseq_qcom_wcn_pdata = { + .target = "wlan", + .validate_device = pci_pwrctl_pwrseq_qcm_wcn_validate_device, +}; + static void devm_pci_pwrctl_pwrseq_power_off(void *data) { struct pwrseq_desc *pwrseq = data; @@ -27,15 +61,26 @@ static void devm_pci_pwrctl_pwrseq_power_off(void *data) static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev) { + const struct pci_pwrctl_pwrseq_pdata *pdata; struct pci_pwrctl_pwrseq_data *data; struct device *dev = &pdev->dev; int ret; + pdata = device_get_match_data(dev); + if (!pdata || !pdata->target) + return -EINVAL; + + if (pdata->validate_device) { + ret = pdata->validate_device(dev); + if (ret) + return ret; + } + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->pwrseq = devm_pwrseq_get(dev, of_device_get_match_data(dev)); + data->pwrseq = devm_pwrseq_get(dev, pdata->target); if (IS_ERR(data->pwrseq)) return dev_err_probe(dev, PTR_ERR(data->pwrseq), "Failed to get the power sequencer\n"); @@ -64,17 +109,17 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = { { /* ATH11K in QCA6390 package. */ .compatible = "pci17cb,1101", - .data = "wlan", + .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, }, { /* ATH11K in WCN6855 package. */ .compatible = "pci17cb,1103", - .data = "wlan", + .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, }, { /* ATH12K in WCN7850 package. 
*/ .compatible = "pci17cb,1107", - .data = "wlan", + .data = &pci_pwrctl_pwrseq_qcom_wcn_pdata, }, { } }; diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c index 4c10cafded4e..950b7ae1d1a8 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c @@ -153,7 +153,9 @@ static void xhci_soft_reset(struct brcm_usb_init_params *params, } else { USB_CTRL_SET(ctrl, USB_PM, XHC_SOFT_RESETB); /* Required for COMMONONN to be set */ - USB_XHCI_GBL_UNSET(xhci_gbl, GUSB2PHYCFG, U2_FREECLK_EXISTS); + if (params->supported_port_modes != USB_CTLR_MODE_DRD) + USB_XHCI_GBL_UNSET(xhci_gbl, GUSB2PHYCFG, + U2_FREECLK_EXISTS); } } @@ -328,8 +330,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params) /* 1 millisecond - for USB clocks to settle down */ usleep_range(1000, 2000); - /* Disable PHY when port is suspended */ - USB_CTRL_SET(ctrl, P0_U2PHY_CFG1, COMMONONN); + /* + * Disable PHY when port is suspended + * Does not work in DRD mode + */ + if (params->supported_port_modes != USB_CTLR_MODE_DRD) + USB_CTRL_SET(ctrl, P0_U2PHY_CFG1, COMMONONN); usb_wake_enable_7216(params, false); usb_init_common(params); diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c index 39536b6d96a9..5ebb3a616115 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c @@ -220,6 +220,8 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */ 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ + 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT0_MASK */ + 0, /* USB_CTRL_SETUP_OC3_DISABLE_PORT1_MASK */ 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index aeec6eb6be23..dfc4f55d112e 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -174,8 +174,9 @@ #define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150 #define SIERRA_DEQ_TAU_CTRL2_PREG 0x151 #define SIERRA_DEQ_TAU_CTRL3_PREG 0x152 -#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158 +#define SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG 0x158 #define SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG 0x159 +#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x15C #define SIERRA_DEQ_PICTRL_PREG 0x161 #define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170 #define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171 @@ -1733,7 +1734,7 @@ static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -1797,7 +1798,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -1874,7 +1875,7 @@ static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = { {0x3C0F, 
SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -1941,7 +1942,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -2012,7 +2013,7 @@ static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -2079,7 +2080,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -2140,7 +2141,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -2215,7 +2216,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, @@ -2284,7 +2285,7 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG}, {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG}, - {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG}, + {0x5E82, SIERRA_DEQ_TAU_EPIOFFSET_MODE_PREG}, {0x002B, SIERRA_CPI_TRIM_PREG}, {0x0003, SIERRA_EPI_CTRL_PREG}, {0x803F, SIERRA_SDFILT_H2L_A_PREG}, diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c index 11fcb1867118..e98361dcdead 100644 --- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c +++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c @@ -141,11 +141,6 @@ static int imx8_pcie_phy_power_on(struct phy *phy) IMX8MM_GPR_PCIE_REF_CLK_PLL); usleep_range(100, 200); - /* Do the PHY common block reset */ - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_CMN_RST, - IMX8MM_GPR_PCIE_CMN_RST); - switch (imx8_phy->drvdata->variant) { case IMX8MP: reset_control_deassert(imx8_phy->perst); @@ -156,6 +151,11 @@ static int imx8_pcie_phy_power_on(struct phy *phy) break; } + /* Do the PHY common block reset */ + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_CMN_RST, + 
IMX8MM_GPR_PCIE_CMN_RST); + /* Polling to check the phy is ready or not. */ ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG075, val, val == ANA_PLL_DONE, 10, 20000); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index a8adc3214bfe..643045c9024e 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -3673,6 +3673,7 @@ static int qmp_combo_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; + dev_set_drvdata(dev, qmp); qmp->orientation = TYPEC_ORIENTATION_NORMAL; @@ -3749,8 +3750,6 @@ static int qmp_combo_probe(struct platform_device *pdev) phy_set_drvdata(qmp->dp_phy, qmp); - dev_set_drvdata(dev, qmp); - if (usb_np == dev->of_node) phy_provider = devm_of_phy_provider_register(dev, qmp_combo_phy_xlate); else diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index f71787fb4d7e..36aaac34e6c6 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -3661,8 +3661,8 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = { .reset_list = sdm845_pciephy_reset_l, .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), - .vreg_list = sm8550_qmp_phy_vreg_l, - .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = pciephy_v6_regs_layout, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, @@ -3695,8 +3695,8 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = { .reset_list = sdm845_pciephy_reset_l, .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), - .vreg_list = sm8550_qmp_phy_vreg_l, - .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), .regs = pciephy_v6_regs_layout, .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c index 6d0ba39c1943..8bf951b0490c 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c @@ -1248,6 +1248,7 @@ static int qmp_usb_legacy_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; + dev_set_drvdata(dev, qmp); qmp->cfg = of_device_get_match_data(dev); if (!qmp->cfg) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index 2fd49355aa37..1246d3bc8b92 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -2179,6 +2179,7 @@ static int qmp_usb_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; + dev_set_drvdata(dev, qmp); qmp->cfg = of_device_get_match_data(dev); if (!qmp->cfg) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c index d4fa1063ea61..cf12a6f12134 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c @@ -1050,6 +1050,7 @@ static int qmp_usbc_probe(struct platform_device *pdev) return -ENOMEM; qmp->dev = dev; + dev_set_drvdata(dev, qmp); qmp->orientation = TYPEC_ORIENTATION_NORMAL; diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 490263375057..2f7a05f21dc5 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -86,6 +86,7 @@ config PHY_ROCKCHIP_PCIE config PHY_ROCKCHIP_SAMSUNG_HDPTX tristate "Rockchip Samsung HDMI/eDP Combo PHY driver" depends on (ARCH_ROCKCHIP || 
COMPILE_TEST) && OF + depends on COMMON_CLK depends on HAS_IOMEM select GENERIC_PHY select MFD_SYSCON diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c index 633912f8a05d..cb5454fbe2c8 100644 --- a/drivers/phy/starfive/phy-jh7110-usb.c +++ b/drivers/phy/starfive/phy-jh7110-usb.c @@ -10,18 +10,24 @@ #include #include #include +#include #include #include #include +#include #include #define USB_125M_CLK_RATE 125000000 #define USB_LS_KEEPALIVE_OFF 0x4 #define USB_LS_KEEPALIVE_ENABLE BIT(4) +#define USB_PDRSTN_SPLIT BIT(17) +#define SYSCON_USB_SPLIT_OFFSET 0x18 + struct jh7110_usb2_phy { struct phy *phy; void __iomem *regs; + struct regmap *sys_syscon; struct clk *usb_125m_clk; struct clk *app_125m; enum phy_mode mode; @@ -61,6 +67,10 @@ static int usb2_phy_set_mode(struct phy *_phy, usb2_set_ls_keepalive(phy, (mode != PHY_MODE_USB_DEVICE)); } + /* Connect usb 2.0 phy mode */ + regmap_update_bits(phy->sys_syscon, SYSCON_USB_SPLIT_OFFSET, + USB_PDRSTN_SPLIT, USB_PDRSTN_SPLIT); + return 0; } @@ -129,6 +139,12 @@ static int jh7110_usb_phy_probe(struct platform_device *pdev) phy_set_drvdata(phy->phy, phy); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + phy->sys_syscon = + syscon_regmap_lookup_by_compatible("starfive,jh7110-sys-syscon"); + if (IS_ERR(phy->sys_syscon)) + return dev_err_probe(dev, PTR_ERR(phy->sys_syscon), + "Failed to get sys-syscon\n"); + return PTR_ERR_OR_ZERO(phy_provider); } diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index cfdb54b6070a..342f5ccf611d 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -699,6 +699,8 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port) return -ENOMEM; lane = tegra_xusb_find_lane(port->padctl, "usb2", port->index); + if (IS_ERR(lane)) + return PTR_ERR(lane); /* * Assign phy dev to usb-phy dev. Host/device drivers can use phy diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index a6c0c5607ffd..c6e846d385d2 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -450,8 +450,8 @@ static int wiz_mode_select(struct wiz *wiz) } else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) { ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3); ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3); - ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3); - mode = LANE_MODE_GEN1; + ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x2); + mode = LANE_MODE_GEN2; } else { continue; } diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig index 2101d30bd66c..14c26c023590 100644 --- a/drivers/pinctrl/intel/Kconfig +++ b/drivers/pinctrl/intel/Kconfig @@ -46,6 +46,7 @@ config PINCTRL_INTEL_PLATFORM of Intel PCH pins and using them as GPIOs. 
Currently the following Intel SoCs / platforms require this to be functional: - Lunar Lake + - Panther Lake config PINCTRL_ALDERLAKE tristate "Intel Alder Lake pinctrl and GPIO driver" diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c index 4a19ab3b4ba7..016a9f62eecc 100644 --- a/drivers/pinctrl/intel/pinctrl-intel-platform.c +++ b/drivers/pinctrl/intel/pinctrl-intel-platform.c @@ -90,7 +90,6 @@ static int intel_platform_pinctrl_prepare_community(struct device *dev, struct intel_community *community, struct intel_platform_pins *pins) { - struct fwnode_handle *child; struct intel_padgroup *gpps; unsigned int group; size_t ngpps; @@ -131,7 +130,7 @@ static int intel_platform_pinctrl_prepare_community(struct device *dev, return -ENOMEM; group = 0; - device_for_each_child_node(dev, child) { + device_for_each_child_node_scoped(dev, child) { struct intel_padgroup *gpp = &gpps[group]; gpp->reg_num = group; @@ -159,7 +158,7 @@ static int intel_platform_pinctrl_prepare_soc_data(struct device *dev, int ret; /* Version 1.0 of the specification assumes only a single community per device node */ - ncommunities = 1, + ncommunities = 1; communities = devm_kcalloc(dev, ncommunities, sizeof(*communities), GFP_KERNEL); if (!communities) return -ENOMEM; diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c index 1fa00a23534a..59c4e7c6cdde 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c +++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c @@ -218,7 +218,7 @@ static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev, } map_num += grp->npins; - new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), GFP_KERNEL); + new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL); if (!new_map) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c index 3751c7de37aa..f861e63f4115 100644 --- a/drivers/pinctrl/pinctrl-apple-gpio.c +++ b/drivers/pinctrl/pinctrl-apple-gpio.c @@ -474,6 +474,9 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev) for (i = 0; i < npins; i++) { pins[i].number = i; pins[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PIN%u", i); + if (!pins[i].name) + return -ENOMEM; + pins[i].drv_data = pctl; pin_names[i] = pins[i].name; pin_nums[i] = i; diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c index b5e1c467625b..1374f30166bc 100644 --- a/drivers/pinctrl/pinctrl-aw9523.c +++ b/drivers/pinctrl/pinctrl-aw9523.c @@ -987,8 +987,10 @@ static int aw9523_probe(struct i2c_client *client) lockdep_set_subclass(&awi->i2c_lock, i2c_adapter_depth(client->adapter)); pdesc = devm_kzalloc(dev, sizeof(*pdesc), GFP_KERNEL); - if (!pdesc) - return -ENOMEM; + if (!pdesc) { + ret = -ENOMEM; + goto err_disable_vregs; + } ret = aw9523_hw_init(awi); if (ret) diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index be9b8c010167..d1ab8450ea93 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -1955,21 +1955,21 @@ static void ocelot_irq_handler(struct irq_desc *desc) unsigned int reg = 0, irq, i; unsigned long irqs; + chained_irq_enter(parent_chip, desc); + for (i = 0; i < info->stride; i++) { regmap_read(info->map, id_reg + 4 * i, &reg); if (!reg) continue; - chained_irq_enter(parent_chip, desc); - irqs = reg; for_each_set_bit(irq, &irqs, min(32U, info->desc->npins - 32 * i)) generic_handle_domain_irq(chip->irq.domain, irq + 32 * i); - -
chained_irq_exit(parent_chip, desc); } + + chained_irq_exit(parent_chip, desc); } static int ocelot_gpiochip_register(struct platform_device *pdev, diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c index d18fc5aa84f7..57f2674e75d6 100644 --- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c +++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c @@ -221,7 +221,7 @@ static int cv1800_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, if (!grpnames) return -ENOMEM; - map = devm_kcalloc(dev, ngroups * 2, sizeof(*map), GFP_KERNEL); + map = kcalloc(ngroups * 2, sizeof(*map), GFP_KERNEL); if (!map) return -ENOMEM; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index a8673739871d..5b7fa77c1184 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1374,10 +1374,15 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode for (i = 0; i < npins; i++) { stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i); - if (stm32_pin && stm32_pin->pin.name) + if (stm32_pin && stm32_pin->pin.name) { names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name); - else + if (!names[i]) { + err = -ENOMEM; + goto err_clk; + } + } else { names[i] = NULL; + } } bank->gpio_chip.names = (const char * const *)names; diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index bbb8edb62e00..5669f94c3d06 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -998,6 +998,11 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true); amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true); + if (!phys_addr_hi && !phys_addr_low) { + dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n"); + return -EINVAL; + } + stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); /* Clear msg_port for other SMU operation */ diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index d6af0ca036f1..347bb43a5f2b 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -261,6 +261,7 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer) dev->mtable_size = sizeof(dev->m_table); break; case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: dev->mtable_size = sizeof(dev->m_table_v2); break; default: diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c index b5183969f9bf..06226eb0eab3 100644 --- a/drivers/platform/x86/amd/pmf/spc.c +++ b/drivers/platform/x86/amd/pmf/spc.c @@ -86,6 +86,7 @@ static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ta ARRAY_SIZE(dev->m_table.avg_core_c0residency), in); break; case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: + case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size); in->ev_info.socket_power = dev->m_table_v2.apu_power + dev->m_table_v2.dgpu_power; in->ev_info.skin_temperature = dev->m_table_v2.skin_temp; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 7a48220b4f5a..abdca3f05c5c 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -3908,6 +3908,16 @@ static int platform_profile_setup(struct asus_wmi *asus) if (!asus->throttle_thermal_policy_dev) return 0; + /* + * We need to set 
the default thermal profile during probe or otherwise + * the system will often remain in silent mode, causing low performance. + */ + err = throttle_thermal_policy_set_default(asus); + if (err < 0) { + pr_warn("Failed to set default thermal profile\n"); + return err; + } + dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n"); asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get; diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index 73e41eb69cb5..01c72b91a50d 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -576,6 +576,7 @@ static int __init dell_smbios_init(void) int ret, wmi, smm; if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) && !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) { pr_err("Unable to run on non-Dell system\n"); return -ENODEV; diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c index 502783a7adb1..841a5414d28a 100644 --- a/drivers/platform/x86/dell/dell-wmi-base.c +++ b/drivers/platform/x86/dell/dell-wmi-base.c @@ -80,6 +80,12 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = { static const struct key_entry dell_wmi_keymap_type_0000[] = { { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } }, + /* Meta key lock */ + { KE_IGNORE, 0xe000, { KEY_RIGHTMETA } }, + + /* Meta key unlock */ + { KE_IGNORE, 0xe001, { KEY_RIGHTMETA } }, + /* Key code is followed by brightness level */ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, @@ -264,6 +270,15 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = { /*Speaker Mute*/ { KE_KEY, 0x109, { KEY_MUTE} }, + /* S2Idle screen off */ + { KE_IGNORE, 0x120, { KEY_RESERVED }}, + + /* Leaving S4 or S2Idle suspend */ + { KE_IGNORE, 0x130, { KEY_RESERVED }}, + + /* Entering S2Idle suspend */ + { KE_IGNORE, 0x140, { KEY_RESERVED }}, + /* Mic mute */ { KE_KEY, 0x150, { KEY_MICMUTE } }, diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index c64dfc56651d..c908f52ed717 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1294,6 +1294,9 @@ static const struct key_entry ideapad_keymap[] = { { KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } }, /* Refresh Rate Toggle */ { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_REFRESH_RATE_TOGGLE } }, + /* Specific to some newer models */ + { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } }, + { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } }, { KE_END }, }; diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c index 9d9c07f44ff6..e7878558fd90 100644 --- a/drivers/platform/x86/intel/pmc/adl.c +++ b/drivers/platform/x86/intel/pmc/adl.c @@ -295,8 +295,6 @@ const struct pmc_reg_map adl_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, - .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, .lpm_num_modes = ADL_LPM_NUM_MODES, .lpm_num_maps = ADL_LPM_NUM_MAPS, diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index 513c02670c5a..dd72974bf71e 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ 
b/drivers/platform/x86/intel/pmc/cnp.c @@ -200,8 +200,6 @@ const struct pmc_reg_map cnp_reg_map = { .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, - .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index ecb47f8b4f83..4e9c8c96c8cc 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -11,7 +11,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include @@ -1258,39 +1257,6 @@ static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev) return val == 1; } -/* - * Enable or disable ACPI PM Timer - * - * This function is intended to be a callback for ACPI PM suspend/resume event. - * The ACPI PM Timer is enabled on resume only if it was enabled during suspend. - */ -static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend) -{ - struct pmc_dev *pmcdev = data; - struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; - const struct pmc_reg_map *map = pmc->map; - bool enabled; - u32 reg; - - if (!map->acpi_pm_tmr_ctl_offset) - return; - - guard(mutex)(&pmcdev->lock); - - if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume) - return; - - reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset); - enabled = !(reg & map->acpi_pm_tmr_disable_bit); - if (suspend) - reg |= map->acpi_pm_tmr_disable_bit; - else - reg &= ~map->acpi_pm_tmr_disable_bit; - pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg); - - pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled; -} - static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { debugfs_remove_recursive(pmcdev->dbgfs_dir); @@ -1486,7 +1452,6 @@ static int pmc_core_probe(struct platform_device *pdev) struct pmc_dev *pmcdev; const struct x86_cpu_id *cpu_id; int (*core_init)(struct pmc_dev *pmcdev); - const struct pmc_reg_map *map; struct pmc *primary_pmc; int ret; @@ -1545,11 +1510,6 @@ static int pmc_core_probe(struct platform_device *pdev) pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) * pmc_core_adjust_slp_s0_step(primary_pmc, 1)); - map = primary_pmc->map; - if (map->acpi_pm_tmr_ctl_offset) - acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume, - pmcdev); - device_initialized = true; dev_info(&pdev->dev, " initialized\n"); @@ -1559,12 +1519,6 @@ static int pmc_core_probe(struct platform_device *pdev) static void pmc_core_remove(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); - const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; - const struct pmc_reg_map *map = pmc->map; - - if (map->acpi_pm_tmr_ctl_offset) - acpi_pmtmr_unregister_suspend_resume_callback(); - pmc_core_dbgfs_unregister(pmcdev); pmc_core_clean_structure(pdev); } diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index 75fd593a7b0f..b9d3291d0bf2 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -68,8 +68,6 @@ struct telem_endpoint; #define SPT_PMC_LTR_SCC 0x3A0 #define SPT_PMC_LTR_ISH 0x3A4 -#define SPT_PMC_ACPI_PM_TMR_CTL_OFFSET 0x18FC - /* Sunrise Point: PGD PFET Enable Ack Status Registers */ enum ppfear_regs { SPT_PMC_XRAM_PPFEAR0A = 0x590, @@ -150,8 +148,6 @@ enum ppfear_regs { #define 
SPT_PMC_VRIC1_SLPS0LVEN BIT(13) #define SPT_PMC_VRIC1_XTALSDQDIS BIT(22) -#define SPT_PMC_BIT_ACPI_PM_TMR_DISABLE BIT(1) - /* Cannonlake Power Management Controller register offsets */ #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4 #define CNP_PMC_PM_CFG_OFFSET 0x1818 @@ -355,8 +351,6 @@ struct pmc_reg_map { const u8 *lpm_reg_index; const u32 pson_residency_offset; const u32 pson_residency_counter_step; - const u32 acpi_pm_tmr_ctl_offset; - const u32 acpi_pm_tmr_disable_bit; }; /** @@ -432,8 +426,6 @@ struct pmc_dev { u32 die_c6_offset; struct telem_endpoint *punit_ep; struct pmc_info *regmap_list; - - bool enable_acpi_pm_timer_on_resume; }; enum pmc_index { diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c index c259c96b7dfd..8504154b649f 100644 --- a/drivers/platform/x86/intel/pmc/core_ssram.c +++ b/drivers/platform/x86/intel/pmc/core_ssram.c @@ -29,7 +29,7 @@ #define LPM_REG_COUNT 28 #define LPM_MODE_OFFSET 1 -DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T)); +DEFINE_FREE(pmc_core_iounmap, void __iomem *, if (_T) iounmap(_T)) static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map) { @@ -262,6 +262,8 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset) ssram_base = ssram_pcidev->resource[0].start; tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); + if (!tmp_ssram) + return -ENOMEM; if (pmc_idx != PMC_IDX_MAIN) { /* diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c index cbbd44054468..71b0fd6cb7d8 100644 --- a/drivers/platform/x86/intel/pmc/icl.c +++ b/drivers/platform/x86/intel/pmc/icl.c @@ -46,8 +46,6 @@ const struct pmc_reg_map icl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, - .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, .etr3_offset = ETR3_OFFSET, }; diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index 91f2fa728f5c..c7d15d864039 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -462,8 +462,6 @@ const struct pmc_reg_map mtl_socm_reg_map = { .ppfear_buckets = MTL_SOCM_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, - .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .lpm_num_maps = ADL_LPM_NUM_MAPS, .ltr_ignore_max = MTL_SOCM_NUM_IP_IGN_ALLOWED, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c index 371b4e30f142..e0580de18077 100644 --- a/drivers/platform/x86/intel/pmc/tgl.c +++ b/drivers/platform/x86/intel/pmc/tgl.c @@ -197,8 +197,6 @@ const struct pmc_reg_map tgl_reg_map = { .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, - .acpi_pm_tmr_ctl_offset = SPT_PMC_ACPI_PM_TMR_CTL_OFFSET, - .acpi_pm_tmr_disable_bit = SPT_PMC_BIT_ACPI_PM_TMR_DISABLE, .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, .lpm_num_maps = TGL_LPM_NUM_MAPS, .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 4c1b0553f872..6371a9f765c1 100644 --- 
a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -7936,6 +7936,7 @@ static u8 fan_control_resume_level; static int fan_watchdog_maxinterval; static bool fan_with_ns_addr; +static bool ecfw_with_fan_dec_rpm; static struct mutex fan_mutex; @@ -8682,7 +8683,11 @@ static ssize_t fan_fan1_input_show(struct device *dev, if (res < 0) return res; - return sysfs_emit(buf, "%u\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + return sysfs_emit(buf, "%u\n", speed); + else + return sysfs_emit(buf, "%x\n", speed); } static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); @@ -8699,7 +8704,11 @@ static ssize_t fan_fan2_input_show(struct device *dev, if (res < 0) return res; - return sysfs_emit(buf, "%u\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + return sysfs_emit(buf, "%u\n", speed); + else + return sysfs_emit(buf, "%x\n", speed); } static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); @@ -8775,6 +8784,7 @@ static const struct attribute_group fan_driver_attr_group = { #define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ #define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ +#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8803,6 +8813,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('R', '1', 'D', TPACPI_FAN_NS), /* 11e Gen5 GL-R */ TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ + TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */ }; static int __init fan_init(struct ibm_init_struct *iibm) @@ -8847,6 +8858,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) tp_features.fan_ctrl_status_undef = 1; } + /* Check for the EC/BIOS with RPM reported in decimal*/ + if (quirks & TPACPI_FAN_DECRPM) { + pr_info("ECFW with fan RPM as decimal in EC register\n"); + ecfw_with_fan_dec_rpm = 1; + tp_features.fan_ctrl_status_undef = 1; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; @@ -9067,7 +9085,11 @@ static int fan_read(struct seq_file *m) if (rc < 0) return rc; - seq_printf(m, "speed:\t\t%d\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + seq_printf(m, "speed:\t\t%d\n", speed); + else + seq_printf(m, "speed:\t\t%x\n", speed); if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) { /* diff --git a/drivers/pmdomain/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c index e1fca65b80be..26a60a101e42 100644 --- a/drivers/pmdomain/qcom/cpr.c +++ b/drivers/pmdomain/qcom/cpr.c @@ -1052,7 +1052,7 @@ static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, of_parse_phandle(child_np, "required-opps", 0); if (child_req_np == ref_np) { - u64 rate; + u64 rate = 0; of_property_read_u64(child_np, "opp-hz", &rate); return (unsigned long) rate; diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c index f40bce8176df..d1dff6ccab12 100644 --- a/drivers/powercap/dtpm_devfreq.c +++ b/drivers/powercap/dtpm_devfreq.c @@ -178,7 +178,7 @@ static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent) ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req, 
DEV_PM_QOS_MAX_FREQUENCY, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); - if (ret) { + if (ret < 0) { pr_err("Failed to add QoS request: %d\n", ret); goto out_dtpm_unregister; } diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 733a36f67fbc..cbe07450de93 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -147,6 +147,8 @@ static const struct x86_cpu_id pl4_support_ids[] = { X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL), X86_MATCH_VFM(INTEL_METEORLAKE, NULL), X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, NULL), + X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL), {} }; diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 947544e4d229..645fd1dc51a9 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -15,7 +15,8 @@ #include #include -#define TPMI_RAPL_VERSION 1 +#define TPMI_RAPL_MAJOR_VERSION 0 +#define TPMI_RAPL_MINOR_VERSION 1 /* 1 header + 10 registers + 5 reserved. 8 bytes for each. */ #define TPMI_RAPL_DOMAIN_SIZE 128 @@ -154,11 +155,21 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) tpmi_domain_size = tpmi_domain_header >> 16 & 0xff; tpmi_domain_flags = tpmi_domain_header >> 32 & 0xffff; - if (tpmi_domain_version != TPMI_RAPL_VERSION) { - pr_warn(FW_BUG "Unsupported version:%d\n", tpmi_domain_version); + if (tpmi_domain_version == TPMI_VERSION_INVALID) { + pr_warn(FW_BUG "Invalid version\n"); return -ENODEV; } + if (TPMI_MAJOR_VERSION(tpmi_domain_version) != TPMI_RAPL_MAJOR_VERSION) { + pr_warn(FW_BUG "Unsupported major version:%ld\n", + TPMI_MAJOR_VERSION(tpmi_domain_version)); + return -ENODEV; + } + + if (TPMI_MINOR_VERSION(tpmi_domain_version) > TPMI_RAPL_MINOR_VERSION) + pr_info("Ignore: Unsupported minor version:%ld\n", + TPMI_MINOR_VERSION(tpmi_domain_version)); + /* Domain size: in unit of 128 Bytes */ if (tpmi_domain_size != 1) { pr_warn(FW_BUG "Invalid Domain size %d\n", tpmi_domain_size); @@ -181,7 +192,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) pr_warn(FW_BUG "System domain must support Domain Info register\n"); return -ENODEV; } - tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO); + tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO * 8); if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT)) return 0; domain_type = RAPL_DOMAIN_PLATFORM; diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c index 96ea343856f0..7ee7b65b9b90 100644 --- a/drivers/pwm/pwm-imx-tpm.c +++ b/drivers/pwm/pwm-imx-tpm.c @@ -106,7 +106,9 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip, p->prescale = prescale; period_count = (clock_unit + ((1 << prescale) >> 1)) >> prescale; - p->mod = period_count; + if (period_count == 0) + return -EINVAL; + p->mod = period_count - 1; /* calculate real period HW can support */ tmp = (u64)period_count << prescale; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 14b60abd6afc..01a8d0487918 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1379,6 +1379,8 @@ static const struct regulator_desc rk809_reg[] = { .n_linear_ranges = ARRAY_SIZE(rk817_buck1_voltage_ranges), .vsel_reg = RK817_BUCK3_ON_VSEL_REG, .vsel_mask = RK817_BUCK_VSEL_MASK, + .apply_reg = RK817_POWER_CONFIG, + .apply_bit = RK817_BUCK3_FB_RES_INTER, .enable_reg = RK817_POWER_EN_REG(0), .enable_mask = ENABLE_MASK(RK817_ID_DCDC3), .enable_val = 
ENABLE_MASK(RK817_ID_DCDC3), diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c index a5c126afc648..5925fa7a9a06 100644 --- a/drivers/regulator/rtq2208-regulator.c +++ b/drivers/regulator/rtq2208-regulator.c @@ -568,7 +568,7 @@ static int rtq2208_probe(struct i2c_client *i2c) struct regmap *regmap; struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX]; struct regulator_dev *rdev; - struct regulator_config cfg; + struct regulator_config cfg = {}; struct rtq2208_rdev_map *rdev_map; int i, ret = 0, idx, n_regulator = 0; unsigned int regulator_idx_table[RTQ2208_LDO_MAX], diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c index 8935ef95a2d1..a200cc8c7955 100644 --- a/drivers/reset/reset-npcm.c +++ b/drivers/reset/reset-npcm.c @@ -405,8 +405,8 @@ static int npcm_rc_probe(struct platform_device *pdev) if (!of_property_read_u32(pdev->dev.of_node, "nuvoton,sw-reset-number", &rc->sw_reset_number)) { if (rc->sw_reset_number && rc->sw_reset_number < 5) { - rc->restart_nb.priority = 192, - rc->restart_nb.notifier_call = npcm_rc_restart, + rc->restart_nb.priority = 192; + rc->restart_nb.notifier_call = npcm_rc_restart; ret = register_restart_handler(&rc->restart_nb); if (ret) dev_warn(&pdev->dev, "failed to register restart handler\n"); diff --git a/drivers/reset/starfive/reset-starfive-jh71x0.c b/drivers/reset/starfive/reset-starfive-jh71x0.c index 55bbbd2de52c..29ce3486752f 100644 --- a/drivers/reset/starfive/reset-starfive-jh71x0.c +++ b/drivers/reset/starfive/reset-starfive-jh71x0.c @@ -94,6 +94,9 @@ static int jh71x0_reset_status(struct reset_controller_dev *rcdev, void __iomem *reg_status = data->status + offset * sizeof(u32); u32 value = readl(reg_status); + if (!data->asserted) + return !(value & mask); + return !((value ^ data->asserted[offset]) & mask); } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 0b2f29006908..d3af1dfa3c7d 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1440,14 +1440,18 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, goto unlock; ret = wait_event_timeout(channel->intent_req_wq, - READ_ONCE(channel->intent_req_result) >= 0 && - READ_ONCE(channel->intent_received), + READ_ONCE(channel->intent_req_result) == 0 || + (READ_ONCE(channel->intent_req_result) > 0 && + READ_ONCE(channel->intent_received)) || + glink->abort_tx, 10 * HZ); if (!ret) { dev_err(glink->dev, "intent request timed out\n"); ret = -ETIMEDOUT; + } else if (glink->abort_tx) { + ret = -ECANCELED; } else { - ret = READ_ONCE(channel->intent_req_result) ? 0 : -ECANCELED; + ret = READ_ONCE(channel->intent_req_result) ? 
0 : -EAGAIN; } unlock: diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f3621adbd5de..fbffd451031f 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -1195,7 +1195,8 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) } static struct notifier_block sclp_reboot_notifier = { - .notifier_call = sclp_reboot_event + .notifier_call = sclp_reboot_event, + .priority = INT_MIN, }; static ssize_t con_pages_show(struct device_driver *dev, char *buf) diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 218ae604f737..33b9c968dbcb 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request, buffer = (void *) ((addr_t) sccb + sccb->header.length); if (convertlf) { - /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/ + /* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/ for (from=0, to=0; (from < count) && (to < sclp_vt220_space_left(request)); from++) { @@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request, /* Perform conversion */ if (c == 0x0a) { if (to + 1 < sclp_vt220_space_left(request)) { - ((unsigned char *) buffer)[to++] = c; ((unsigned char *) buffer)[to++] = 0x0d; + ((unsigned char *) buffer)[to++] = c; } else break; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 60cea6c24349..e14638936de6 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1864,13 +1864,12 @@ static inline void ap_scan_domains(struct ap_card *ac) } /* if no queue device exists, create a new one */ if (!aq) { - aq = ap_queue_create(qid, ac->ap_dev.device_type); + aq = ap_queue_create(qid, ac); if (!aq) { AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n", __func__, ac->id, dom); continue; } - aq->card = ac; aq->config = !decfg; aq->chkstop = chkstop; aq->se_bstate = hwinfo.bs; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 0b275c719319..f4622ee4d894 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -272,7 +272,7 @@ int ap_test_config_usage_domain(unsigned int domain); int ap_test_config_ctrl_domain(unsigned int domain); void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); -struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); +struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac); void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_init_state(struct ap_queue *aq); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 8c878c5aa31f..9a0e6e4d8a5e 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -22,6 +22,11 @@ static void __ap_flush_queue(struct ap_queue *aq); * some AP queue helper functions */ +static inline bool ap_q_supported_in_se(struct ap_queue *aq) +{ + return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel; +} + static inline bool ap_q_supports_bind(struct ap_queue *aq) { return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel; @@ -1104,18 +1109,19 @@ static void ap_queue_device_release(struct device *dev) kfree(aq); } -struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) +struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac) { struct ap_queue *aq; aq = kzalloc(sizeof(*aq), GFP_KERNEL); if (!aq) return NULL; + aq->card = ac; aq->ap_dev.device.release = ap_queue_device_release; aq->ap_dev.device.type 
= &ap_queue_type; - aq->ap_dev.device_type = device_type; - // add optional SE secure binding attributes group - if (ap_sb_available() && is_prot_virt_guest()) + aq->ap_dev.device_type = ac->ap_dev.device_type; + /* in SE environment add bind/associate attributes group */ + if (ap_is_se_guest() && ap_q_supported_in_se(aq)) aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups; aq->qid = qid; spin_lock_init(&aq->lock); @@ -1196,10 +1202,16 @@ bool ap_queue_usable(struct ap_queue *aq) } /* SE guest's queues additionally need to be bound */ - if (ap_q_needs_bind(aq) && - !(aq->se_bstate == AP_BS_Q_USABLE || - aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY)) - rc = false; + if (ap_is_se_guest()) { + if (!ap_q_supported_in_se(aq)) { + rc = false; + goto unlock_and_out; + } + if (ap_q_needs_bind(aq) && + !(aq->se_bstate == AP_BS_Q_USABLE || + aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY)) + rc = false; + } unlock_and_out: spin_unlock_bh(&aq->lock); diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c index 98079b1ed6db..beeca8827c46 100644 --- a/drivers/s390/crypto/pkey_pckmo.c +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -324,6 +324,7 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen, memcpy(protkey, t->protkey, t->len); *protkeylen = t->len; *protkeytype = t->keytype; + rc = 0; break; } case TOKVER_CLEAR_KEY: { diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 0044717d4486..adec0df24bc4 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -830,7 +830,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); - INIT_WORK(&fnic->flush_work, fnic_flush_tx); skb_queue_head_init(&fnic->fip_frame_queue); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); @@ -948,6 +947,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&fnic->link_work, fnic_handle_link); INIT_WORK(&fnic->frame_work, fnic_handle_frame); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); skb_queue_head_init(&fnic->frame_queue); skb_queue_head_init(&fnic->tx_queue); diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 14eeac08660f..81bb408ce56d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -542,8 +542,8 @@ struct mpi3mr_hba_port { * @port_list: List of ports belonging to a SAS node * @num_phys: Number of phys associated with port * @marked_responding: used while refresing the sas ports - * @lowest_phy: lowest phy ID of current sas port - * @phy_mask: phy_mask of current sas port + * @lowest_phy: lowest phy ID of current sas port, valid for controller port + * @phy_mask: phy_mask of current sas port, valid for controller port * @hba_port: HBA port entry * @remote_identify: Attached device identification * @rphy: SAS transport layer rphy object diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index ccd23def2e0c..0ba9e6a6a13c 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -590,12 +590,13 @@ static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate) * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. 
*/ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -605,9 +606,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, list_del(&mr_sas_phy->port_siblings); mr_sas_port->num_phys--; - mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); - if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + + if (host_node) { + mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id); + + if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 0; } @@ -617,12 +622,13 @@ static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc, * @mrioc: Adapter instance reference * @mr_sas_port: Internal Port object * @mr_sas_phy: Internal Phy object + * @host_node: Flag to indicate this is a host_node * * Return: None. */ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_port *mr_sas_port, - struct mpi3mr_sas_phy *mr_sas_phy) + struct mpi3mr_sas_phy *mr_sas_phy, u8 host_node) { u64 sas_address = mr_sas_port->remote_identify.sas_address; @@ -632,9 +638,12 @@ static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc, list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); - if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (host_node) { + mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id); + + if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + } sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy); mr_sas_phy->phy_belongs_to_port = 1; } @@ -675,7 +684,7 @@ static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc, if (srch_phy == mr_sas_phy) return; } - mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy); + mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy, mr_sas_node->host_node); return; } } @@ -736,7 +745,7 @@ static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc, mpi3mr_delete_sas_port(mrioc, mr_sas_port); else mpi3mr_delete_sas_phy(mrioc, mr_sas_port, - mr_sas_phy); + mr_sas_phy, mr_sas_node->host_node); return; } } @@ -1028,7 +1037,7 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id) /** * mpi3mr_get_hba_port_by_id - find hba port by id * @mrioc: Adapter instance reference - * @port_id - Port ID to search + * @port_id: Port ID to search * * Return: mpi3mr_hba_port reference for the matched port */ @@ -1367,7 +1376,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node, mr_sas_port->remote_identify.sas_address, hba_port); - if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8) + if (mr_sas_node->host_node && mr_sas_node->num_phys >= + sizeof(mr_sas_port->phy_mask) * 8) ioc_info(mrioc, "max port count %u could be too high\n", mr_sas_node->num_phys); @@ -1377,7 +1387,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, (mr_sas_node->phy[i].hba_port != hba_port)) continue; - if (i >= sizeof(mr_sas_port->phy_mask) * 8) { + if (mr_sas_node->host_node && (i >= sizeof(mr_sas_port->phy_mask) * 8)) { ioc_warn(mrioc, "skipping 
port %u, max allowed value is %zu\n", i, sizeof(mr_sas_port->phy_mask) * 8); goto out_fail; @@ -1385,7 +1395,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, list_add_tail(&mr_sas_node->phy[i].port_siblings, &mr_sas_port->phy_list); mr_sas_port->num_phys++; - mr_sas_port->phy_mask |= (1 << i); + if (mr_sas_node->host_node) + mr_sas_port->phy_mask |= (1 << i); } if (!mr_sas_port->num_phys) { @@ -1394,7 +1405,8 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, goto out_fail; } - mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; + if (mr_sas_node->host_node) + mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1; if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) { tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc, diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index de15fc0df104..b52513eeeafa 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3651,7 +3651,7 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, enum dma_data_direction dir; struct scsi_data_buffer *sdb = &scp->sdb; u8 *fsp; - int i; + int i, total = 0; /* * Even though reads are inherently atomic (in this driver), we expect @@ -3688,18 +3688,16 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, fsp + (block * sdebug_sector_size), sdebug_sector_size, sg_skip, do_write); sdeb_data_sector_unlock(sip, do_write); - if (ret != sdebug_sector_size) { - ret += (i * sdebug_sector_size); + total += ret; + if (ret != sdebug_sector_size) break; - } sg_skip += sdebug_sector_size; if (++block >= sdebug_store_sectors) block = 0; } - ret = num * sdebug_sector_size; sdeb_data_unlock(sip, atomic); - return ret; + return total; } /* Returns number of bytes copied or -1 if error. 
*/ diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 62ea7e44460e..082f76e76721 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -1250,7 +1250,7 @@ static ssize_t fc_rport_set_marginal_state(struct device *dev, */ if (rport->port_state == FC_PORTSTATE_ONLINE) rport->port_state = port_state; - else + else if (port_state != rport->port_state) return -EINVAL; } else if (port_state == FC_PORTSTATE_ONLINE) { /* @@ -1260,7 +1260,7 @@ static ssize_t fc_rport_set_marginal_state(struct device *dev, */ if (rport->port_state == FC_PORTSTATE_MARGINAL) rport->port_state = port_state; - else + else if (port_state != rport->port_state) return -EINVAL; } else return -EINVAL; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ee2b74238758..6ab27f4f4878 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -188,8 +188,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { - buf = __vmalloc(bufsize, - GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); + buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index a44b60c9004a..dd1fef9226f2 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -831,7 +831,7 @@ wd33c93_intr(struct Scsi_Host *instance) /* construct an IDENTIFY message with correct disconnect bit */ hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun); - if (scsi_pointer->phase) + if (WD33C93_scsi_pointer(cmd)->phase) hostdata->outgoing_msg[0] |= 0x40; if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) { diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c index 3dffebb48b0d..19cc581b06d0 100644 --- a/drivers/soc/fsl/qe/qmc.c +++ b/drivers/soc/fsl/qe/qmc.c @@ -1761,10 +1761,9 @@ static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev) */ info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64, ALIGNMENT_OF_UCC_SLOW_PRAM); - if (IS_ERR_VALUE(info)) { - dev_err(qmc->dev, "cannot allocate MURAM for PRAM"); - return -ENOMEM; - } + if (info < 0) + return info; + if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock, QE_CR_PROTOCOL_UNSPECIFIED, info)) { dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed"); @@ -2056,7 +2055,7 @@ static void qmc_remove(struct platform_device *pdev) qmc_exit_xcc(qmc); } -static const struct qmc_data qmc_data_cpm1 = { +static const struct qmc_data qmc_data_cpm1 __maybe_unused = { .version = QMC_CPM1, .tstate = 0x30000000, .rstate = 0x31000000, @@ -2066,7 +2065,7 @@ static const struct qmc_data qmc_data_cpm1 = { .rpack = 0x00000000, }; -static const struct qmc_data qmc_data_qe = { +static const struct qmc_data qmc_data_qe __maybe_unused = { .version = QMC_QE, .tstate = 0x30000000, .rstate = 0x30000000, diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 8fa4ffd3a9b5..28bcc65e91be 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -139,6 +139,7 @@ struct qcom_llcc_config { int size; bool need_llcc_cfg; bool no_edac; + bool irq_configured; }; struct qcom_sct_config { @@ -718,6 +719,7 @@ static const struct qcom_llcc_config x1e80100_cfg[] = { .need_llcc_cfg = true, .reg_offset = llcc_v2_1_reg_offset, .edac_reg_offset = &llcc_v2_1_edac_reg_offset, + .irq_configured = true, }, }; @@ -1345,6 +1347,7 @@ static int qcom_llcc_probe(struct 
platform_device *pdev) drv_data->cfg = llcc_cfg; drv_data->cfg_size = sz; drv_data->edac_reg_offset = cfg->edac_reg_offset; + drv_data->ecc_irq_configured = cfg->irq_configured; mutex_init(&drv_data->lock); platform_set_drvdata(pdev, drv_data); diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 9606222993fd..baa4ac6704a9 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -4,6 +4,7 @@ * Copyright (c) 2022, Linaro Ltd */ #include +#include #include #include #include @@ -13,6 +14,8 @@ #include #include +#define PMIC_GLINK_SEND_TIMEOUT (5 * HZ) + enum { PMIC_GLINK_CLIENT_BATT = 0, PMIC_GLINK_CLIENT_ALTMODE, @@ -112,13 +115,29 @@ EXPORT_SYMBOL_GPL(pmic_glink_client_register); int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len) { struct pmic_glink *pg = client->pg; + bool timeout_reached = false; + unsigned long start; int ret; mutex_lock(&pg->state_lock); - if (!pg->ept) + if (!pg->ept) { ret = -ECONNRESET; - else - ret = rpmsg_send(pg->ept, data, len); + } else { + start = jiffies; + for (;;) { + ret = rpmsg_send(pg->ept, data, len); + if (ret != -EAGAIN) + break; + + if (timeout_reached) { + ret = -ETIMEDOUT; + break; + } + + usleep_range(1000, 5000); + timeout_reached = time_after(jiffies, start + PMIC_GLINK_SEND_TIMEOUT); + } + } mutex_unlock(&pg->state_lock); return ret; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 64fc4f41da77..ecfd3da9d5e8 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -786,10 +786,16 @@ static int qcom_socinfo_probe(struct platform_device *pdev) qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", SOCINFO_MAJOR(le32_to_cpu(info->ver)), SOCINFO_MINOR(le32_to_cpu(info->ver))); - if (offsetof(struct socinfo, serial_num) <= item_size) + if (!qs->attr.soc_id || !qs->attr.revision) + return -ENOMEM; + + if (offsetof(struct socinfo, serial_num) <= item_size) { qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", le32_to_cpu(info->serial_num)); + if (!qs->attr.serial_number) + return -ENOMEM; + } qs->soc_dev = soc_device_register(&qs->attr); if (IS_ERR(qs->soc_dev)) diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c index fff312c6968d..4f3dd70d6a1a 100644 --- a/drivers/soundwire/intel_ace2x.c +++ b/drivers/soundwire/intel_ace2x.c @@ -376,11 +376,12 @@ static int intel_hw_params(struct snd_pcm_substream *substream, static int intel_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { + struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); struct sdw_intel *sdw = cdns_to_intel(cdns); struct sdw_cdns_dai_runtime *dai_runtime; + struct snd_pcm_hw_params *hw_params; int ch, dir; - int ret = 0; dai_runtime = cdns->dai_runtime_array[dai->id]; if (!dai_runtime) { @@ -389,12 +390,8 @@ static int intel_prepare(struct snd_pcm_substream *substream, return -EIO; } + hw_params = &rtd->dpcm[substream->stream].hw_params; if (dai_runtime->suspended) { - struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); - struct snd_pcm_hw_params *hw_params; - - hw_params = &rtd->dpcm[substream->stream].hw_params; - dai_runtime->suspended = false; /* @@ -415,15 +412,11 @@ static int intel_prepare(struct snd_pcm_substream *substream, /* the SHIM will be configured in the callback functions */ sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi); - - /* Inform DSP about PDI stream number */ - ret = 
intel_params_stream(sdw, substream, dai, - hw_params, - sdw->instance, - dai_runtime->pdi->intel_alh_id); } - return ret; + /* Inform DSP about PDI stream number */ + return intel_params_stream(sdw, substream, dai, hw_params, sdw->instance, + dai_runtime->pdi->intel_alh_id); } static int diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 191de1917f83..3fa990fb59c7 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1003,6 +1003,7 @@ static int dspi_setup(struct spi_device *spi) u32 cs_sck_delay = 0, sck_cs_delay = 0; struct fsl_dspi_platform_data *pdata; unsigned char pasc = 0, asc = 0; + struct gpio_desc *gpio_cs; struct chip_data *chip; unsigned long clkrate; bool cs = true; @@ -1077,7 +1078,10 @@ static int dspi_setup(struct spi_device *spi) chip->ctar_val |= SPI_CTAR_LSBFE; } - gpiod_direction_output(spi_get_csgpiod(spi, 0), false); + gpio_cs = spi_get_csgpiod(spi, 0); + if (gpio_cs) + gpiod_direction_output(gpio_cs, false); + dspi_deassert_cs(spi, &cs); spi_set_ctldata(spi, chip); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index f6e40f90418f..768d7482102a 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -1116,6 +1116,11 @@ static int spi_geni_probe(struct platform_device *pdev) init_completion(&mas->tx_reset_done); init_completion(&mas->rx_reset_done); spin_lock_init(&mas->lock); + + ret = geni_icc_get(&mas->se, NULL); + if (ret) + return ret; + pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 250); ret = devm_pm_runtime_enable(dev); @@ -1125,9 +1130,6 @@ static int spi_geni_probe(struct platform_device *pdev) if (device_property_read_bool(&pdev->dev, "spi-slave")) spi->target = true; - ret = geni_icc_get(&mas->se, NULL); - if (ret) - return ret; /* Set the bus quota to a reasonable value for register access */ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ); mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c index ddd98ddb7913..c5677fd94e5e 100644 --- a/drivers/spi/spi-mtk-snfi.c +++ b/drivers/spi/spi-mtk-snfi.c @@ -1187,7 +1187,7 @@ static int mtk_snand_write_page_cache(struct mtk_snand *snf, /** * mtk_snand_is_page_ops() - check if the op is a controller supported page op. - * @op spi-mem op to check + * @op: spi-mem op to check * * Check whether op can be executed with read_from_cache or program_load * mode in the controller. 
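Several hunks in this stretch (mpi3mr_get_hba_port_by_id, mtk_snand_is_page_ops) only correct kernel-doc parameter lines, which scripts/kernel-doc can parse only in the "@name: description" form, not "@name description" or "@name - description". As a reading aid, here is a minimal sketch of the expected layout; the function, struct names and the trivial body are made up for illustration and are not part of the patch:

    #include <linux/types.h>

    struct example_ctrl;
    struct example_port;

    /**
     * example_find_port() - look up a port by its identifier
     * @ctrl: controller instance to search
     * @port_id: port identifier to match
     *
     * Return: pointer to the matching port, or NULL if no port matches.
     */
    static struct example_port *example_find_port(struct example_ctrl *ctrl,
                                                  u16 port_id)
    {
            return NULL;    /* body omitted; only the comment layout matters here */
    }

The colon after each @parameter is what the fixes above add; without it, kernel-doc emits a "missing description" warning for that argument.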
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 4c4ff074e3f6..fc72a89fb3a7 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -2044,6 +2044,7 @@ static const struct stm32_spi_cfg stm32mp25_spi_cfg = { .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, .has_fifo = true, .prevent_dma_burst = true, + .has_device_mode = true, }; static const struct of_device_id stm32_spi_of_match[] = { diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c index 6c390c4eb26d..492612e8f8ba 100644 --- a/drivers/staging/iio/frequency/ad9832.c +++ b/drivers/staging/iio/frequency/ad9832.c @@ -129,12 +129,15 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout) static int ad9832_write_frequency(struct ad9832_state *st, unsigned int addr, unsigned long fout) { + unsigned long clk_freq; unsigned long regval; - if (fout > (clk_get_rate(st->mclk) / 2)) + clk_freq = clk_get_rate(st->mclk); + + if (!clk_freq || fout > (clk_freq / 2)) return -EINVAL; - regval = ad9832_calc_freqreg(clk_get_rate(st->mclk), fout); + regval = ad9832_calc_freqreg(clk_freq, fout); st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) | (addr << ADD_SHIFT) | diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h index ec461fd187af..b584754f4be0 100644 --- a/drivers/staging/media/av7110/av7110.h +++ b/drivers/staging/media/av7110/av7110.h @@ -88,6 +88,8 @@ struct infrared { u32 ir_config; }; +#define MAX_CI_SLOTS 2 + /* place to store all the necessary device information */ struct av7110 { /* devices */ @@ -163,7 +165,7 @@ struct av7110 { /* CA */ - struct ca_slot_info ci_slot[2]; + struct ca_slot_info ci_slot[MAX_CI_SLOTS]; enum av7110_video_mode vidmode; struct dmxdev dmxdev; diff --git a/drivers/staging/media/av7110/av7110_ca.c b/drivers/staging/media/av7110/av7110_ca.c index 6ce212c64e5d..fce4023c9dea 100644 --- a/drivers/staging/media/av7110/av7110_ca.c +++ b/drivers/staging/media/av7110/av7110_ca.c @@ -26,23 +26,28 @@ void CI_handle(struct av7110 *av7110, u8 *data, u16 len) { + unsigned slot_num; + dprintk(8, "av7110:%p\n", av7110); if (len < 3) return; switch (data[0]) { case CI_MSG_CI_INFO: - if (data[2] != 1 && data[2] != 2) + if (data[2] != 1 && data[2] != MAX_CI_SLOTS) break; + + slot_num = array_index_nospec(data[2] - 1, MAX_CI_SLOTS); + switch (data[1]) { case 0: - av7110->ci_slot[data[2] - 1].flags = 0; + av7110->ci_slot[slot_num].flags = 0; break; case 1: - av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_PRESENT; + av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_PRESENT; break; case 2: - av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_READY; + av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_READY; break; } break; @@ -262,15 +267,19 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) case CA_GET_SLOT_INFO: { struct ca_slot_info *info = (struct ca_slot_info *)parg; + unsigned int slot_num; if (info->num < 0 || info->num > 1) { mutex_unlock(&av7110->ioctl_mutex); return -EINVAL; } - av7110->ci_slot[info->num].num = info->num; - av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? - CA_CI_LINK : CA_CI; - memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info)); + slot_num = array_index_nospec(info->num, MAX_CI_SLOTS); + + av7110->ci_slot[slot_num].num = info->num; + av7110->ci_slot[slot_num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? 
+ CA_CI_LINK : CA_CI; + memcpy(info, &av7110->ci_slot[slot_num], + sizeof(struct ca_slot_info)); break; } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 3dbeffc650d3..6c488b1e2624 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -593,7 +593,7 @@ vchiq_platform_init_state(struct vchiq_state *state) { struct vchiq_arm_state *platform_state; - platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL); + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); if (!platform_state) return -ENOMEM; @@ -1731,7 +1731,7 @@ static int vchiq_probe(struct platform_device *pdev) return -ENOENT; } - mgmt = kzalloc(sizeof(*mgmt), GFP_KERNEL); + mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); if (!mgmt) return -ENOMEM; @@ -1789,8 +1789,6 @@ static void vchiq_remove(struct platform_device *pdev) arm_state = vchiq_platform_get_arm_state(&mgmt->state); kthread_stop(arm_state->ka_thread); - - kfree(mgmt); } static struct platform_driver vchiq_driver = { diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7d43d92c44d4..d1ae3df069a4 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -691,7 +691,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); if (!dev->queues) { - dev->transport->free_device(dev); + hba->backend->ops->free_device(dev); return NULL; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 7eb94894bd68..717931267bda 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -2130,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev, } ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, - TCMU_MCGRP_CONFIG, GFP_KERNEL); + TCMU_MCGRP_CONFIG); /* Wait during an add as the listener may not be up yet */ if (ret == 0 || diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index 006614921870..ba5d36d36fc4 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -416,7 +416,6 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_ if (!pci_info->no_legacy) proc_thermal_remove(proc_priv); proc_thermal_mmio_remove(pdev, proc_priv); - pci_disable_device(pdev); return ret; } @@ -438,7 +437,6 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev) proc_thermal_mmio_remove(pdev, pci_info->proc_priv); if (!pci_info->no_legacy) proc_thermal_remove(proc_priv); - pci_disable_device(pdev); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c index e9aa9e23aab9..bde2cc386afd 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c @@ -13,48 +13,12 @@ static struct rapl_if_priv rapl_mmio_priv; static const struct rapl_mmio_regs rapl_mmio_default = { .reg_unit = 0x5938, - .regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930}, + .regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 
0, 0x5930, 0x59b0}, .regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0}, - .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2), + .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2) | BIT(POWER_LIMIT4), .limits[RAPL_DOMAIN_DRAM] = BIT(POWER_LIMIT2), }; -static int rapl_mmio_cpu_online(unsigned int cpu) -{ - struct rapl_package *rp; - - /* mmio rapl supports package 0 only for now */ - if (topology_physical_package_id(cpu)) - return 0; - - rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); - if (!rp) { - rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true); - if (IS_ERR(rp)) - return PTR_ERR(rp); - } - cpumask_set_cpu(cpu, &rp->cpumask); - return 0; -} - -static int rapl_mmio_cpu_down_prep(unsigned int cpu) -{ - struct rapl_package *rp; - int lead_cpu; - - rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); - if (!rp) - return 0; - - cpumask_clear_cpu(cpu, &rp->cpumask); - lead_cpu = cpumask_first(&rp->cpumask); - if (lead_cpu >= nr_cpu_ids) - rapl_remove_package_cpuslocked(rp); - else if (rp->lead_cpu == cpu) - rp->lead_cpu = lead_cpu; - return 0; -} - static int rapl_mmio_read_raw(int cpu, struct reg_action *ra) { if (!ra->reg.mmio) @@ -82,6 +46,7 @@ static int rapl_mmio_write_raw(int cpu, struct reg_action *ra) int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv) { const struct rapl_mmio_regs *rapl_regs = &rapl_mmio_default; + struct rapl_package *rp; enum rapl_domain_reg_id reg; enum rapl_domain_type domain; int ret; @@ -109,25 +74,38 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc return PTR_ERR(rapl_mmio_priv.control_type); } - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online", - rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); - if (ret < 0) { - powercap_unregister_control_type(rapl_mmio_priv.control_type); - rapl_mmio_priv.control_type = NULL; - return ret; + /* Register a RAPL package device for package 0 which is always online */ + rp = rapl_find_package_domain(0, &rapl_mmio_priv, false); + if (rp) { + ret = -EEXIST; + goto err; + } + + rp = rapl_add_package(0, &rapl_mmio_priv, false); + if (IS_ERR(rp)) { + ret = PTR_ERR(rp); + goto err; } - rapl_mmio_priv.pcap_rapl_online = ret; return 0; + +err: + powercap_unregister_control_type(rapl_mmio_priv.control_type); + rapl_mmio_priv.control_type = NULL; + return ret; } EXPORT_SYMBOL_GPL(proc_thermal_rapl_add); void proc_thermal_rapl_remove(void) { + struct rapl_package *rp; + if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type)) return; - cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); + rp = rapl_find_package_domain(0, &rapl_mmio_priv, false); + if (rp) + rapl_remove_package(rp); powercap_unregister_control_type(rapl_mmio_priv.control_type); } EXPORT_SYMBOL_GPL(proc_thermal_rapl_remove); diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index 5225b3621a56..d2d49264cf83 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -73,7 +73,14 @@ static struct irq_chip lmh_irq_chip = { static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct lmh_hw_data *lmh_data = d->host_data; + static struct lock_class_key lmh_lock_key; + static struct lock_class_key lmh_request_key; + /* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. 
+ */ + irq_set_lockdep_class(irq, &lmh_lock_key, &lmh_request_key); irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq); irq_set_chip_data(irq, lmh_data); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 073d02e21352..8f03985f971c 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -728,6 +728,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id) mutex_lock(&thermal_list_lock); list_for_each_entry(tz, &thermal_tz_list, node) { if (tz->id == id) { + get_device(&tz->device); match = tz; break; } @@ -1605,14 +1606,12 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) ida_destroy(&tz->ida); device_del(&tz->device); - - kfree(tz->tzp); - put_device(&tz->device); thermal_notify_tz_delete(tz); wait_for_completion(&tz->removal); + kfree(tz->tzp); kfree(tz); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 50b858aa173a..a64d39b1c86b 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -194,6 +194,9 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), struct thermal_zone_device *thermal_zone_get_by_id(int id); +DEFINE_CLASS(thermal_zone_get_by_id, struct thermal_zone_device *, + if (_T) put_device(&_T->device), thermal_zone_get_by_id(id), int id) + static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) { return cdev->ops->get_requested_power && cdev->ops->state2power && diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 97157c453630..f3c58c708969 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -443,7 +443,6 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) { struct sk_buff *msg = p->msg; const struct thermal_trip_desc *td; - struct thermal_zone_device *tz; struct nlattr *start_trip; int id; @@ -452,7 +451,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); - tz = thermal_zone_get_by_id(id); + CLASS(thermal_zone_get_by_id, tz)(id); if (!tz) return -EINVAL; @@ -488,7 +487,6 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) static int thermal_genl_cmd_tz_get_temp(struct param *p) { struct sk_buff *msg = p->msg; - struct thermal_zone_device *tz; int temp, ret, id; if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) @@ -496,7 +494,7 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p) id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); - tz = thermal_zone_get_by_id(id); + CLASS(thermal_zone_get_by_id, tz)(id); if (!tz) return -EINVAL; @@ -514,7 +512,6 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p) static int thermal_genl_cmd_tz_get_gov(struct param *p) { struct sk_buff *msg = p->msg; - struct thermal_zone_device *tz; int id, ret = 0; if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) @@ -522,7 +519,7 @@ static int thermal_genl_cmd_tz_get_gov(struct param *p) id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); - tz = thermal_zone_get_by_id(id); + CLASS(thermal_zone_get_by_id, tz)(id); if (!tz) return -EINVAL; diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index a4caf7899f8e..07e09897165f 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -99,18 +99,15 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n struct device_node *trips; int ret, count; + *ntrips = 0; + trips = of_get_child_by_name(np, 
"trips"); - if (!trips) { - pr_err("Failed to find 'trips' node\n"); - return ERR_PTR(-EINVAL); - } + if (!trips) + return NULL; count = of_get_child_count(trips); - if (!count) { - pr_err("No trip point defined\n"); - ret = -EINVAL; - goto out_of_node_put; - } + if (!count) + return NULL; tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); if (!tt) { @@ -133,7 +130,6 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n out_kfree: kfree(tt); - *ntrips = 0; out_of_node_put: of_node_put(trips); @@ -401,11 +397,14 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node * trips = thermal_of_trips_init(np, &ntrips); if (IS_ERR(trips)) { - pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); + pr_err("Failed to parse trip points for %pOFn id=%d\n", sensor, id); ret = PTR_ERR(trips); goto out_of_node_put; } + if (!trips) + pr_info("No trip points found for %pOFn id=%d\n", sensor, id); + ret = thermal_of_monitor_init(np, &delay, &pdelay); if (ret) { pr_err("Failed to initialize monitoring delays from %pOFn\n", np); diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 721319329afa..89d2919d0193 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -516,7 +516,7 @@ int tb_retimer_scan(struct tb_port *port, bool add) */ tb_retimer_set_inbound_sbtx(port); - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { + for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { /* * Last retimer is true only for the last on-board * retimer (the one connected directly to the Type-C @@ -527,10 +527,13 @@ int tb_retimer_scan(struct tb_port *port, bool add) last_idx = i; else if (ret < 0) break; + + max = i; } - max = i; ret = 0; + if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)) + max = min(last_idx, max); /* Add retimers if they do not exist already */ for (i = 1; i <= max; i++) { diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 10e719dd837c..4f777788e917 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -288,6 +288,24 @@ static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); } +static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used) +{ + struct tb_switch *sw = tb_to_switch(dev); + + if (sw && tb_switch_tmu_is_enabled(sw) && + tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) + return 1; + + return device_for_each_child(dev, NULL, + tb_switch_tmu_hifi_uni_required); +} + +static bool tb_tmu_hifi_uni_required(struct tb *tb) +{ + return device_for_each_child(&tb->dev, NULL, + tb_switch_tmu_hifi_uni_required) == 1; +} + static int tb_enable_tmu(struct tb_switch *sw) { int ret; @@ -302,12 +320,30 @@ static int tb_enable_tmu(struct tb_switch *sw) ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI); if (ret == -EOPNOTSUPP) { - if (tb_switch_clx_is_enabled(sw, TB_CL1)) - ret = tb_switch_tmu_configure(sw, - TB_SWITCH_TMU_MODE_LOWRES); - else - ret = tb_switch_tmu_configure(sw, - TB_SWITCH_TMU_MODE_HIFI_BI); + if (tb_switch_clx_is_enabled(sw, TB_CL1)) { + /* + * Figure out uni-directional HiFi TMU requirements + * currently in the domain. If there are no + * uni-directional HiFi requirements we can put the TMU + * into LowRes mode. + * + * Deliberately skip bi-directional HiFi links + * as these work independently of other links + * (and they do not allow any CL states anyway). 
+ */ + if (tb_tmu_hifi_uni_required(sw->tb)) + ret = tb_switch_tmu_configure(sw, + TB_SWITCH_TMU_MODE_HIFI_UNI); + else + ret = tb_switch_tmu_configure(sw, + TB_SWITCH_TMU_MODE_LOWRES); + } else { + ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); + } + + /* If not supported, fallback to bi-directional HiFi */ + if (ret == -EOPNOTSUPP) + ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); } if (ret) return ret; diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 0a9b4aeb3fa1..402fdf8b1cde 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -48,7 +48,7 @@ enum usb4_ba_index { /* Delays in us used with usb4_port_wait_for_bit() */ #define USB4_PORT_DELAY 50 -#define USB4_PORT_SB_DELAY 5000 +#define USB4_PORT_SB_DELAY 1000 static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, u8 *status, diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 5d37a0984916..252849910588 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -3157,6 +3157,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); + + guard(spinlock_irqsave)(&gsm->tx_lock); list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_ctrl_list); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 67d4a72eda77..90974d338f3c 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -762,6 +762,21 @@ static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id) imx_uart_writel(sport, USR1_RTSD, USR1); usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS; + /* + * Update sport->old_status here, so any follow-up calls to + * imx_uart_mctrl_check() will be able to recognize that RTS + * state changed since last imx_uart_mctrl_check() call. + * + * In case RTS has been detected as asserted here and later on + * deasserted by the time imx_uart_mctrl_check() was called, + * imx_uart_mctrl_check() can detect the RTS state change and + * trigger uart_handle_cts_change() to unblock the port for + * further TX transfers. 
+ */ + if (usr1 & USR1_RTSS) + sport->old_status |= TIOCM_CTS; + else + sport->old_status &= ~TIOCM_CTS; uart_handle_cts_change(&sport->port, usr1); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 6f0db310cf69..5dfe4e599ad6 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -147,6 +147,7 @@ static struct uart_driver qcom_geni_uart_driver; static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport); static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport); +static int qcom_geni_serial_port_setup(struct uart_port *uport); static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport) { @@ -395,6 +396,23 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport, writel(c, uport->membase + SE_GENI_TX_FIFOn); qcom_geni_serial_poll_tx_done(uport); } + +static int qcom_geni_serial_poll_init(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + int ret; + + if (!port->setup) { + ret = qcom_geni_serial_port_setup(uport); + if (ret) + return ret; + } + + if (!qcom_geni_serial_secondary_active(uport)) + geni_se_setup_s_cmd(&port->se, UART_START_READ, 0); + + return 0; +} #endif #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE @@ -562,7 +580,7 @@ static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop) } #endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */ -static void handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop) +static void handle_rx_uart(struct uart_port *uport, u32 bytes) { struct qcom_geni_serial_port *port = to_dev_port(uport); struct tty_port *tport = &uport->state->port; @@ -570,9 +588,8 @@ static void handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop) ret = tty_insert_flip_string(tport, port->rx_buf, bytes); if (ret != bytes) { - dev_err(uport->dev, "%s:Unable to push data ret %d_bytes %d\n", - __func__, ret, bytes); - WARN_ON_ONCE(1); + dev_err_ratelimited(uport->dev, "failed to push data (%d < %u)\n", + ret, bytes); } uport->icount.rx += ret; tty_flip_buffer_push(tport); @@ -787,17 +804,27 @@ static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport) static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport) { struct qcom_geni_serial_port *port = to_dev_port(uport); + bool done; if (!qcom_geni_serial_secondary_active(uport)) return; geni_se_cancel_s_cmd(&port->se); - qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS, - S_CMD_CANCEL_EN, true); - - if (qcom_geni_serial_secondary_active(uport)) + done = qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, + RX_EOT, true); + if (done) { + writel(RX_EOT | RX_DMA_DONE, + uport->membase + SE_DMA_RX_IRQ_CLR); + } else { qcom_geni_serial_abort_rx(uport); + writel(1, uport->membase + SE_DMA_RX_FSM_RST); + qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, + RX_RESET_DONE, true); + writel(RX_RESET_DONE | RX_DMA_DONE, + uport->membase + SE_DMA_RX_IRQ_CLR); + } + if (port->rx_dma_addr) { geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr, DMA_RX_BUF_SIZE); @@ -846,7 +873,7 @@ static void qcom_geni_serial_handle_rx_dma(struct uart_port *uport, bool drop) } if (!drop) - handle_rx_uart(uport, rx_in, drop); + handle_rx_uart(uport, rx_in); ret = geni_se_rx_dma_prep(&port->se, port->rx_buf, DMA_RX_BUF_SIZE, @@ -1096,10 +1123,12 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport) { disable_irq(uport->irq); + uart_port_lock_irq(uport); 
qcom_geni_serial_stop_tx(uport); qcom_geni_serial_stop_rx(uport); qcom_geni_serial_cancel_tx_cmd(uport); + uart_port_unlock_irq(uport); } static void qcom_geni_serial_flush_buffer(struct uart_port *uport) @@ -1152,7 +1181,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) false, true, true); geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2); geni_se_select_mode(&port->se, port->dev_data->mode); - qcom_geni_serial_start_rx(uport); port->setup = true; return 0; @@ -1168,6 +1196,11 @@ static int qcom_geni_serial_startup(struct uart_port *uport) if (ret) return ret; } + + uart_port_lock_irq(uport); + qcom_geni_serial_start_rx(uport); + uart_port_unlock_irq(uport); + enable_irq(uport->irq); return 0; @@ -1253,7 +1286,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, unsigned int avg_bw_core; unsigned long timeout; - qcom_geni_serial_stop_rx(uport); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); @@ -1269,7 +1301,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, dev_err(port->se.dev, "Couldn't find suitable clock rate for %u\n", baud * sampling_rate); - goto out_restart_rx; + return; } dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n", @@ -1360,8 +1392,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG); writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG); -out_restart_rx: - qcom_geni_serial_start_rx(uport); } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE @@ -1582,7 +1612,7 @@ static const struct uart_ops qcom_geni_console_pops = { #ifdef CONFIG_CONSOLE_POLL .poll_get_char = qcom_geni_serial_get_char, .poll_put_char = qcom_geni_serial_poll_put_char, - .poll_init = qcom_geni_serial_port_setup, + .poll_init = qcom_geni_serial_poll_init, #endif .pm = qcom_geni_serial_pm, }; @@ -1749,7 +1779,7 @@ static void qcom_geni_serial_remove(struct platform_device *pdev) uart_remove_one_port(drv, &port->uport); } -static int qcom_geni_serial_sys_suspend(struct device *dev) +static int qcom_geni_serial_suspend(struct device *dev) { struct qcom_geni_serial_port *port = dev_get_drvdata(dev); struct uart_port *uport = &port->uport; @@ -1766,7 +1796,7 @@ static int qcom_geni_serial_sys_suspend(struct device *dev) return uart_suspend_port(private_data->drv, uport); } -static int qcom_geni_serial_sys_resume(struct device *dev) +static int qcom_geni_serial_resume(struct device *dev) { int ret; struct qcom_geni_serial_port *port = dev_get_drvdata(dev); @@ -1781,38 +1811,6 @@ static int qcom_geni_serial_sys_resume(struct device *dev) return ret; } -static int qcom_geni_serial_sys_hib_resume(struct device *dev) -{ - int ret = 0; - struct uart_port *uport; - struct qcom_geni_private_data *private_data; - struct qcom_geni_serial_port *port = dev_get_drvdata(dev); - - uport = &port->uport; - private_data = uport->private_data; - - if (uart_console(uport)) { - geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS); - geni_icc_set_bw(&port->se); - ret = uart_resume_port(private_data->drv, uport); - /* - * For hibernation usecase clients for - * console UART won't call port setup during restore, - * hence call port setup for console uart. - */ - qcom_geni_serial_port_setup(uport); - } else { - /* - * Peripheral register settings are lost during hibernation. - * Update setup flag such that port setup happens again - * during next session. 
Clients of HS-UART will close and - * open the port during hibernation. - */ - port->setup = false; - } - return ret; -} - static const struct qcom_geni_device_data qcom_geni_console_data = { .console = true, .mode = GENI_SE_FIFO, @@ -1824,12 +1822,7 @@ static const struct qcom_geni_device_data qcom_geni_uart_data = { }; static const struct dev_pm_ops qcom_geni_serial_pm_ops = { - .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend), - .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume), - .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend), - .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend), - .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume), - .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume), + SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_suspend, qcom_geni_serial_resume) }; static const struct of_device_id qcom_geni_serial_match_table[] = { diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index cd87e3d1291e..96842ce817af 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4726,7 +4726,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) return -EINVAL; if (op->data) { - font.data = kvmalloc(max_font_size, GFP_KERNEL); + font.data = kvzalloc(max_font_size, GFP_KERNEL); if (!font.data) return -ENOMEM; } else diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 8402151330fe..dba935c712d6 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -539,7 +539,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) struct scsi_cmnd *cmd = lrbp->cmd; struct ufs_hw_queue *hwq; void __iomem *reg, *opr_sqd_base; - u32 nexus, id, val; + u32 nexus, id, val, rtc; int err; if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) @@ -569,17 +569,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id); writel(nexus, opr_sqd_base + REG_SQCTI); - /* SQRTCy.ICU = 1 */ - writel(SQ_ICU, opr_sqd_base + REG_SQRTC); + /* Initiate Cleanup */ + writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU, + opr_sqd_base + REG_SQRTC); /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ reg = opr_sqd_base + REG_SQRTS; err = read_poll_timeout(readl, val, val & SQ_CUS, 20, MCQ_POLL_US, false, reg); - if (err) - dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", - __func__, id, task_tag, - FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); + rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)); + if (err || rtc) + dev_err(hba->dev, "%s: failed. 
hwq=%d, tag=%d err=%d RTC=%d\n", + __func__, id, task_tag, err, rtc); if (ufshcd_mcq_sq_start(hba, hwq)) err = -ETIMEDOUT; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index fc55fdab526b..abbe7135a977 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2933,9 +2933,8 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + i * ufshcd_get_ucd_size(hba); - u16 response_offset = offsetof(struct utp_transfer_cmd_desc, - response_upiu); - u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); + u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset); + u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset); lrb->utr_descriptor_ptr = utrdlp + i; lrb->utrd_dma_addr = hba->utrdl_dma_addr + @@ -5417,10 +5416,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, } break; case OCS_ABORTED: - result |= DID_ABORT << 16; - break; case OCS_INVALID_COMMAND_STATUS: result |= DID_REQUEUE << 16; + dev_warn(hba->dev, + "OCS %s from controller for tag %d\n", + (ocs == OCS_ABORTED ? "aborted" : "invalid"), + lrbp->task_tag); break; case OCS_INVALID_CMD_TABLE_ATTR: case OCS_INVALID_PRDT_ATTR: @@ -6466,26 +6467,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) struct scsi_device *sdev = cmd->device; struct Scsi_Host *shost = sdev->host; struct ufs_hba *hba = shost_priv(shost); - struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - struct ufs_hw_queue *hwq; - unsigned long flags; *ret = ufshcd_try_to_abort_task(hba, tag); dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, *ret ? "failed" : "succeeded"); - /* Release cmd in MCQ mode if abort succeeds */ - if (hba->mcq_enabled && (*ret == 0)) { - hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); - if (!hwq) - return 0; - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - } - return *ret == 0; } @@ -8232,7 +8219,7 @@ static void ufshcd_update_rtc(struct ufs_hba *hba) err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, &val); - ufshcd_rpm_put_sync(hba); + ufshcd_rpm_put(hba); if (err) dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err); @@ -8649,6 +8636,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba) ufshcd_init_clk_scaling_sysfs(hba); } + /* + * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev + * pointer and hence must only be started after the WLUN pointer has + * been initialized by ufshcd_scsi_add_wlus(). 
+ */ + schedule_delayed_work(&hba->ufs_rtc_update_work, + msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); + ufs_bsg_probe(hba); scsi_scan_host(hba->host); @@ -8808,8 +8803,6 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ufshcd_force_reset_auto_bkops(hba); ufshcd_set_timestamp_attr(hba); - schedule_delayed_work(&hba->ufs_rtc_update_work, - msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); /* Gear up to HS gear if supported */ if (hba->max_pwr_info.is_valid) { @@ -10210,7 +10203,9 @@ static void ufshcd_wl_shutdown(struct device *dev) shost_for_each_device(sdev, hba->host) { if (sdev == hba->ufs_device_wlun) continue; - scsi_device_quiesce(sdev); + mutex_lock(&sdev->state_mutex); + scsi_device_set_state(sdev, SDEV_OFFLINE); + mutex_unlock(&sdev->state_mutex); } __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 21585ed89ef8..03c22114214b 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -170,11 +170,11 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0); - if (IS_ERR(nhi_fwnode)) + if (IS_ERR(nhi_fwnode) || !nhi_fwnode->dev) return 0; link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev, - DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME); if (!link) { diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 68226defdc60..4d73fae80b12 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -23,7 +23,6 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg) p->max_transfer_size = 65535; p->max_packet_count = 511; p->ahbcfg = 0x10; - p->no_clock_gating = true; } static void dwc2_set_his_params(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 9eb085f359ce..98114c2827c0 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -544,6 +544,7 @@ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length) int dwc3_event_buffers_setup(struct dwc3 *dwc) { struct dwc3_event_buffer *evt; + u32 reg; if (!dwc->ev_buf) return 0; @@ -556,8 +557,10 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc) upper_32_bits(evt->dma)); dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_SIZE(evt->length)); - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0); + /* Clear any stale event */ + reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); return 0; } @@ -584,7 +587,10 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0); dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(0)); - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0); + + /* Clear any stale event */ + reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg); } static void dwc3_core_num_eps(struct dwc3 *dwc) @@ -2336,6 +2342,19 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) u32 reg; int i; + if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { + dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & + DWC3_GUSB2PHYCFG_SUSPHY) || + (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & + DWC3_GUSB3PIPECTL_SUSPHY); + /* + * TI AM62 platform requires SUSPHY to be + * enabled for system suspend to work. 
+ */ + if (!dwc->susphy_state) + dwc3_enable_susphy(dwc, true); + } + switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: if (pm_runtime_suspended(dwc->dev)) @@ -2454,6 +2473,11 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) break; } + if (!PMSG_IS_AUTO(msg)) { + /* restore SUSPHY state to that before system suspend. */ + dwc3_enable_susphy(dwc, dwc->susphy_state); + } + return 0; } @@ -2499,7 +2523,11 @@ static int dwc3_runtime_resume(struct device *dev) switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: - dwc3_gadget_process_pending_events(dwc); + if (dwc->pending_events) { + pm_runtime_put(dwc->dev); + dwc->pending_events = false; + enable_irq(dwc->irq_gadget); + } break; case DWC3_GCTL_PRTCAP_HOST: default: @@ -2552,7 +2580,7 @@ static int dwc3_suspend(struct device *dev) static int dwc3_resume(struct device *dev) { struct dwc3 *dwc = dev_get_drvdata(dev); - int ret; + int ret = 0; pinctrl_pm_select_default_state(dev); @@ -2560,14 +2588,12 @@ static int dwc3_resume(struct device *dev) pm_runtime_set_active(dev); ret = dwc3_resume_common(dwc, PMSG_RESUME); - if (ret) { + if (ret) pm_runtime_set_suspended(dev); - return ret; - } pm_runtime_enable(dev); - return 0; + return ret; } static void dwc3_complete(struct device *dev) @@ -2589,6 +2615,12 @@ static void dwc3_complete(struct device *dev) static const struct dev_pm_ops dwc3_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) .complete = dwc3_complete, + + /* + * Runtime suspend halts the controller on disconnection. It relies on + * platforms with custom connection notification to start the controller + * again. + */ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, dwc3_runtime_idle) }; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index c71240e8f7c7..eab81dfdcc35 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1150,6 +1150,8 @@ struct dwc3_scratchpad_array { * @sys_wakeup: set if the device may do system wakeup. * @wakeup_configured: set if the device is configured for remote wakeup. * @suspended: set to track suspend event due to U3/L2. + * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY + * before PM suspend. * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. * @max_cfg_eps: current max number of IN eps used across all USB configs. 
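The SUSPHY handling introduced for dwc3 is spread across the suspend and resume hunks above, so here is a consolidated, self-contained sketch of the pattern they implement, using simplified example_* names and a plain bool in place of the GUSB2PHYCFG/GUSB3PIPECTL register bits: cache whether the suspend-PHY bits were already set before a system (non-autosuspend) suspend, force them on for the suspend itself (required on TI AM62), and restore the cached state on resume. This is an illustration of the flow, not the driver code:

    #include <stdbool.h>

    struct example_ctrl {
            bool susphy_enabled;            /* stand-in for the SUSPHY register bits */
            unsigned int susphy_state:1;    /* state cached before system suspend */
    };

    static void example_enable_susphy(struct example_ctrl *c, bool enable)
    {
            c->susphy_enabled = enable;     /* would set/clear the PHY suspend bits */
    }

    static void example_system_suspend(struct example_ctrl *c, bool is_auto)
    {
            if (!is_auto) {
                    /* remember whether SUSPHY was already on, then force it on */
                    c->susphy_state = c->susphy_enabled;
                    if (!c->susphy_state)
                            example_enable_susphy(c, true);
            }
    }

    static void example_system_resume(struct example_ctrl *c, bool is_auto)
    {
            if (!is_auto)
                    /* restore whatever the state was before system suspend */
                    example_enable_susphy(c, c->susphy_state);
    }

Keeping the cached value in a one-bit field matches the new @susphy_state member documented in the core.h hunk above; runtime (auto) suspend paths are deliberately left untouched, as in the patch.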
@@ -1382,6 +1384,7 @@ struct dwc3 { unsigned sys_wakeup:1; unsigned wakeup_configured:1; unsigned suspended:1; + unsigned susphy_state:1; u16 imod_interval; @@ -1675,7 +1678,6 @@ static inline void dwc3_otg_host_init(struct dwc3 *dwc) #if !IS_ENABLED(CONFIG_USB_DWC3_HOST) int dwc3_gadget_suspend(struct dwc3 *dwc); int dwc3_gadget_resume(struct dwc3 *dwc); -void dwc3_gadget_process_pending_events(struct dwc3 *dwc); #else static inline int dwc3_gadget_suspend(struct dwc3 *dwc) { @@ -1687,9 +1689,6 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc) return 0; } -static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc) -{ -} #endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */ #if IS_ENABLED(CONFIG_USB_DWC3_ULPI) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 291bc549935b..4959c26d3b71 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -438,6 +438,10 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, dwc3_gadget_ep_get_transfer_index(dep); } + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER && + !(cmd & DWC3_DEPCMD_CMDIOC)) + mdelay(1); + if (saved_config) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg |= saved_config; @@ -1715,12 +1719,10 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int WARN_ON_ONCE(ret); dep->resource_index = 0; - if (!interrupt) { - mdelay(1); + if (!interrupt) dep->flags &= ~DWC3_EP_TRANSFER_STARTED; - } else if (!ret) { + else if (!ret) dep->flags |= DWC3_EP_END_TRANSFER_PENDING; - } dep->flags &= ~DWC3_EP_DELAY_STOP; return ret; @@ -4728,14 +4730,3 @@ int dwc3_gadget_resume(struct dwc3 *dwc) return dwc3_gadget_soft_connect(dwc); } - -void dwc3_gadget_process_pending_events(struct dwc3 *dwc) -{ - if (dwc->pending_events) { - dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); - dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf); - pm_runtime_put(dwc->dev); - dwc->pending_events = false; - enable_irq(dwc->irq_gadget); - } -} diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 1cdda44455b3..ce5b77f89190 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -2061,7 +2061,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ struct f_uac2_opts *opts = to_f_uac2_opts(item); \ - int ret = 0; \ + int ret = len; \ \ mutex_lock(&opts->lock); \ if (opts->refcnt) { \ @@ -2072,8 +2072,8 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \ if (len && page[len - 1] == '\n') \ len--; \ \ - ret = scnprintf(opts->name, min(sizeof(opts->name), len + 1), \ - "%s", page); \ + scnprintf(opts->name, min(sizeof(opts->name), len + 1), \ + "%s", page); \ \ end: \ mutex_unlock(&opts->lock); \ diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index cf6478f97f4a..a6f46364be65 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1696,6 +1696,7 @@ int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver, driver->driver.bus = &gadget_bus_type; driver->driver.owner = owner; driver->driver.mod_name = mod_name; + driver->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; ret = driver_register(&driver->driver); if (ret) { pr_warn("%s: driver registration failed: %d\n", diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 8820d9924448..081ac7683c0b 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ 
b/drivers/usb/gadget/udc/dummy_hcd.c @@ -254,6 +254,7 @@ struct dummy_hcd { u32 stream_en_ep; u8 num_stream[30 / 2]; + unsigned timer_pending:1; unsigned active:1; unsigned old_active:1; unsigned resuming:1; @@ -1303,9 +1304,11 @@ static int dummy_urb_enqueue( urb->error_count = 1; /* mark as a new urb */ /* kick the scheduler, it'll do the rest */ - if (!hrtimer_active(&dum_hcd->timer)) + if (!dum_hcd->timer_pending) { + dum_hcd->timer_pending = 1; hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL_SOFT); + } done: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); @@ -1324,9 +1327,10 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) spin_lock_irqsave(&dum_hcd->dum->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); - if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING && - !list_empty(&dum_hcd->urbp_list)) + if (rc == 0 && !dum_hcd->timer_pending) { + dum_hcd->timer_pending = 1; hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT); + } spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return rc; @@ -1813,6 +1817,7 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t) /* look at each urb queued by the host side driver */ spin_lock_irqsave(&dum->lock, flags); + dum_hcd->timer_pending = 0; if (!dum_hcd->udev) { dev_err(dummy_dev(dum_hcd), @@ -1994,8 +1999,10 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t) if (list_empty(&dum_hcd->urbp_list)) { usb_put_dev(dum_hcd->udev); dum_hcd->udev = NULL; - } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) { + } else if (!dum_hcd->timer_pending && + dum_hcd->rh_state == DUMMY_RH_RUNNING) { /* want a 1 msec delay here */ + dum_hcd->timer_pending = 1; hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL_SOFT); } @@ -2390,8 +2397,10 @@ static int dummy_bus_resume(struct usb_hcd *hcd) } else { dum_hcd->rh_state = DUMMY_RH_RUNNING; set_link_state(dum_hcd); - if (!list_empty(&dum_hcd->urbp_list)) + if (!list_empty(&dum_hcd->urbp_list)) { + dum_hcd->timer_pending = 1; hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT); + } hcd->state = HC_STATE_RUNNING; } spin_unlock_irq(&dum_hcd->dum->lock); @@ -2522,6 +2531,7 @@ static void dummy_stop(struct usb_hcd *hcd) struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd); hrtimer_cancel(&dum_hcd->timer); + dum_hcd->timer_pending = 0; device_remove_file(dummy_dev(dum_hcd), &dev_attr_urbs); dev_info(dummy_dev(dum_hcd), "stopped\n"); } diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 8ec813b6e9fd..9dc8f4d8077c 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -110,6 +110,7 @@ struct dbc_port { struct tasklet_struct push; struct list_head write_pool; + unsigned int tx_boundary; bool registered; }; diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index b8e78867e25a..d719c16ea30b 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -24,6 +24,29 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc) return dbc->priv; } +static unsigned int +dbc_kfifo_to_req(struct dbc_port *port, char *packet) +{ + unsigned int len; + + len = kfifo_len(&port->port.xmit_fifo); + + if (len == 0) + return 0; + + len = min(len, DBC_MAX_PACKET); + + if (port->tx_boundary) + len = min(port->tx_boundary, len); + + len = kfifo_out(&port->port.xmit_fifo, packet, len); + + if (port->tx_boundary) + port->tx_boundary -= len; + + return len; +} + static int 
dbc_start_tx(struct dbc_port *port) __releases(&port->port_lock) __acquires(&port->port_lock) @@ -36,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port) while (!list_empty(pool)) { req = list_entry(pool->next, struct dbc_request, list_pool); - len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET); + len = dbc_kfifo_to_req(port, req->buf); if (len == 0) break; do_tty_wake = true; @@ -200,14 +223,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf, { struct dbc_port *port = tty->driver_data; unsigned long flags; + unsigned int written = 0; spin_lock_irqsave(&port->port_lock, flags); - if (count) - count = kfifo_in(&port->port.xmit_fifo, buf, count); - dbc_start_tx(port); + + /* + * Treat tty write as one usb transfer. Make sure the writes are turned + * into TRB request having the same size boundaries as the tty writes. + * Don't add data to kfifo before previous write is turned into TRBs + */ + if (port->tx_boundary) { + spin_unlock_irqrestore(&port->port_lock, flags); + return 0; + } + + if (count) { + written = kfifo_in(&port->port.xmit_fifo, buf, count); + + if (written == count) + port->tx_boundary = kfifo_len(&port->port.xmit_fifo); + + dbc_start_tx(port); + } + spin_unlock_irqrestore(&port->port_lock, flags); - return count; + return written; } static int dbc_tty_put_char(struct tty_struct *tty, u8 ch) @@ -241,6 +282,10 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty) spin_lock_irqsave(&port->port_lock, flags); room = kfifo_avail(&port->port.xmit_fifo); + + if (port->tx_boundary) + room = 0; + spin_unlock_irqrestore(&port->port_lock, flags); return room; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 91dccd25a551..cb07cee9ed0c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -79,6 +79,7 @@ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 +#define PCI_DEVICE_ID_ASMEDIA_3042_XHCI 0x3042 #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 #define PCI_DEVICE_ID_CADENCE 0x17CD @@ -451,6 +452,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == PCI_DEVICE_ID_ASMEDIA_3042_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; @@ -635,7 +640,7 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id) pm_runtime_put_noidle(&dev->dev); if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0) - pm_runtime_forbid(&dev->dev); + pm_runtime_get(&dev->dev); else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) pm_runtime_allow(&dev->dev); @@ -678,7 +683,9 @@ void xhci_pci_remove(struct pci_dev *dev) xhci->xhc_state |= XHCI_STATE_REMOVING; - if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) + if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0) + pm_runtime_put(&dev->dev); + else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) pm_runtime_forbid(&dev->dev); if (xhci->shared_hcd) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 4d664ba53fe9..928b93ad1ee8 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1023,7 +1023,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) td_to_noop(xhci, ring, cached_td, false); 
cached_td->cancel_status = TD_CLEARED; } - + td_to_noop(xhci, ring, td, false); td->cancel_status = TD_CLEARING_CACHE; cached_td = td; break; @@ -1718,6 +1718,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); + cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); + + /* If CMD ring stopped we own the trbs between enqueue and dequeue */ + if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { + complete_all(&xhci->cmd_ring_stop_completion); + return; + } + cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); /* @@ -1734,14 +1742,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cancel_delayed_work(&xhci->cmd_timer); - cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); - - /* If CMD ring stopped we own the trbs between enqueue and dequeue */ - if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { - complete_all(&xhci->cmd_ring_stop_completion); - return; - } - if (cmd->command_trb != xhci->cmd_ring->dequeue) { xhci_err(xhci, "Command completion event does not match command\n"); @@ -2775,6 +2775,29 @@ static int handle_tx_event(struct xhci_hcd *xhci, return 0; } + /* + * xhci 4.10.2 states isoc endpoints should continue + * processing the next TD if there was an error mid TD. + * So hosts like NEC don't generate an event for the last + * isoc TRB even if the IOC flag is set. + * xhci 4.9.1 states that if there are errors in multi-TRB + * TDs xHC should generate an error for that TRB, and if xHC + * proceeds to the next TD it should generate an event for + * any TRB with IOC flag on the way. Other hosts follow this. + * + * We wait for the final IOC event, but if we get an event + * anywhere outside this TD, just give it back already. + */ + td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list); + + if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) { + xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + inc_deq(xhci, ep_ring); + xhci_td_cleanup(xhci, td, ep_ring, td->status); + } + if (list_empty(&ep_ring->td_list)) { /* * Don't print wanings if ring is empty due to a stopped endpoint generating an @@ -2836,44 +2859,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, return 0; } - /* - * xhci 4.10.2 states isoc endpoints should continue - * processing the next TD if there was an error mid TD. - * So host like NEC don't generate an event for the last - * isoc TRB even if the IOC flag is set. - * xhci 4.9.1 states that if there are errors in mult-TRB - * TDs xHC should generate an error for that TRB, and if xHC - * proceeds to the next TD it should genete an event for - * any TRB with IOC flag on the way. Other host follow this. - * So this event might be for the next TD. - */ - if (td->error_mid_td && - !list_is_last(&td->td_list, &ep_ring->td_list)) { - struct xhci_td *td_next = list_next_entry(td, td_list); + /* HC is busted, give up!
*/ + xhci_err(xhci, + "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n", + ep_index, trb_comp_code); + trb_in_td(xhci, td, ep_trb_dma, true); - ep_seg = trb_in_td(xhci, td_next, ep_trb_dma, false); - if (ep_seg) { - /* give back previous TD, start handling new */ - xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); - ep_ring->dequeue = td->last_trb; - ep_ring->deq_seg = td->last_trb_seg; - inc_deq(xhci, ep_ring); - xhci_td_cleanup(xhci, td, ep_ring, td->status); - td = td_next; - } - } - - if (!ep_seg) { - /* HC is busted, give up! */ - xhci_err(xhci, - "ERROR Transfer event TRB DMA ptr not " - "part of current TD ep_index %d " - "comp_code %u\n", ep_index, - trb_comp_code); - trb_in_td(xhci, td, ep_trb_dma, true); - - return -ESHUTDOWN; - } + return -ESHUTDOWN; } if (ep->skip) { diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 6246d5ad1468..76f228e7443c 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -2183,7 +2183,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime) goto out; } - for (i = 0; i < tegra->num_usb_phys; i++) { + for (i = 0; i < xhci->usb2_rhub.num_ports; i++) { if (!xhci->usb2_rhub.ports[i]) continue; portsc = readl(xhci->usb2_rhub.ports[i]->addr); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 620502de971a..f0fb696d5619 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1001,7 +1001,7 @@ enum xhci_setup_dev { /* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */ #define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) #define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) -#define SCT_FOR_TRB(p) (((p) << 1) & 0x7) +#define SCT_FOR_TRB(p) (((p) & 0x7) << 1) /* Link TRB specific fields */ #define TRB_TC (1<<1) diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 50b86d531701..6497c4e81e95 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -331,3 +331,15 @@ config USB_ONBOARD_DEV this config will enable the driver and it will automatically match the state of the USB subsystem. If this driver is a module it will be called onboard_usb_dev. + +config USB_ONBOARD_DEV_USB5744 + bool "Onboard USB Microchip usb5744 hub with SMBus support" + depends on (USB_ONBOARD_DEV && I2C=y) || (USB_ONBOARD_DEV=m && I2C=m) + help + Say Y here if you want to support the onboard USB Microchip usb5744 + hub that requires SMBus initialization. + + This option enables the usb5744 i2c default initialization sequence + during the hub start-up configuration stage. It must be enabled on + the AMD Kria KR260 Robotics Starter Kit, as this hub is connected to + the USB-SD converter which mounts the root filesystem.
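For reference on the new Kconfig entry above: the dependency expression ties the bool option to the build type of both the onboard-dev driver and the I2C core, so the SMBus initialization code can never end up built into the kernel while the I2C core is a module. A minimal sketch of the same pattern, using hypothetical symbols FOO and FOO_SMBUS rather than anything from this series:

config FOO_SMBUS
	bool "SMBus setup for the foo driver"
	depends on (FOO && I2C=y) || (FOO=m && I2C=m)
	help
	  Example only. With FOO=y this option requires I2C=y; with FOO=m
	  it is selectable when I2C is either y or m.

The USB_ONBOARD_DEV_USB5744 entry follows the same shape, which is why the onboard_usb_dev.c hunk below can guard its i2c calls with IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) alone.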
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index 560591e02d6a..75dfdca04ff1 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -311,7 +311,7 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work) static int onboard_dev_5744_i2c_init(struct i2c_client *client) { -#if IS_ENABLED(CONFIG_I2C) +#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) struct device *dev = &client->dev; int ret; @@ -394,9 +394,11 @@ static int onboard_dev_probe(struct platform_device *pdev) i2c_node = of_parse_phandle(pdev->dev.of_node, "i2c-bus", 0); if (i2c_node) { - struct i2c_client *client; + struct i2c_client *client = NULL; +#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) client = of_find_i2c_device_by_node(i2c_node); +#endif of_node_put(i2c_node); if (!client) { diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 4a9859e03f6b..6aebc736a80c 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -34,8 +34,6 @@ #define YUREX_BUF_SIZE 8 #define YUREX_WRITE_TIMEOUT (HZ*2) -#define MAX_S64_STRLEN 20 /* {-}922337203685477580{7,8} */ - /* table of devices that work with this driver */ static struct usb_device_id yurex_table[] = { { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) }, @@ -402,8 +400,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; - int len = 0; - char in_buffer[MAX_S64_STRLEN]; + int len; + char in_buffer[20]; + unsigned long flags; dev = file->private_data; @@ -413,16 +412,14 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, return -ENODEV; } - if (WARN_ON_ONCE(dev->bbu > S64_MAX || dev->bbu < S64_MIN)) { - mutex_unlock(&dev->io_mutex); - return -EIO; - } - - spin_lock_irq(&dev->lock); - scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu); - spin_unlock_irq(&dev->lock); + spin_lock_irqsave(&dev->lock, flags); + len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); + spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->io_mutex); + if (WARN_ON_ONCE(len >= sizeof(in_buffer))) + return -EIO; + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index d54283fd026b..05b6e7e52e02 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -293,8 +293,6 @@ static int sunxi_musb_exit(struct musb *musb) if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) sunxi_sram_release(musb->controller->parent); - devm_usb_put_phy(glue->dev, glue->xceiv); - return 0; } diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index 06e0fb23566c..06f789097989 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -628,7 +628,7 @@ void devm_usb_put_phy(struct device *dev, struct usb_phy *phy) { int r; - r = devres_destroy(dev, devm_usb_phy_release, devm_usb_phy_match, phy); + r = devres_release(dev, devm_usb_phy_release, devm_usb_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } EXPORT_SYMBOL_GPL(devm_usb_put_phy); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index c7d6b5e3f898..28c71d99e857 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -770,11 +770,12 @@ static void edge_bulk_out_data_callback(struct urb *urb) static void edge_bulk_out_cmd_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; + struct device *dev = &urb->dev->dev; int status = urb->status; 
atomic_dec(&CmdUrbs); - dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n", - __func__, urb, atomic_read(&CmdUrbs)); + dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb, + atomic_read(&CmdUrbs)); /* clean up the transfer buffer */ @@ -784,8 +785,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb) usb_free_urb(urb); if (status) { - dev_dbg(&urb->dev->dev, - "%s - nonzero write bulk status received: %d\n", + dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); return; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index eb0731992ca9..9ba5584061c8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_RG650V 0x0122 #define QUECTEL_PRODUCT_EM061K_LTA 0x0123 #define QUECTEL_PRODUCT_EM061K_LMS 0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 @@ -279,6 +280,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EG912Y 0x6001 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200A 0x6005 +#define QUECTEL_PRODUCT_EG916Q 0x6007 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 #define QUECTEL_PRODUCT_EM061K_LCN 0x6009 #define QUECTEL_PRODUCT_EC200T 0x6026 @@ -1270,7 +1272,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1380,10 +1385,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ + .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -2312,6 +2323,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* 
Fibocom FG150 AT */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x30) }, /* Fibocom FG132 Diag */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x40) }, /* Fibocom FG132 AT */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0, 0) }, /* Fibocom FG132 NMEA */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */ .driver_info = RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c7de9585feb2..13c664317a05 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */ + {DEVICE_SWI(0x1199, 0x90e4)}, /* Sierra Wireless EM86xx QDL*/ + {DEVICE_SWI(0x1199, 0x90e5)}, /* Sierra Wireless EM86xx */ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index fd68204374f2..e5ad23d86833 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2423,6 +2423,17 @@ UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE), +/* + * Reported by Icenowy Zheng + * This is an interface for vendor-specific cryptic commands instead + * of real USB storage device. 
+ */ +UNUSUAL_DEV( 0xe5b7, 0x0811, 0x0100, 0x0100, + "ZhuHai JieLi Technology", + "JieLi BR21", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE), + /* Reported by Andrew Simmons */ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, "DataStor", diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 9262fcd4144f..58f40156de56 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -519,6 +519,7 @@ static void typec_altmode_release(struct device *dev) typec_altmode_put_partner(alt); altmode_id_remove(alt->adev.dev.parent, alt->id); + put_device(alt->adev.dev.parent); kfree(alt); } @@ -568,6 +569,8 @@ typec_register_altmode(struct device *parent, alt->adev.dev.type = &typec_altmode_dev_type; dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id); + get_device(alt->adev.dev.parent); + /* Link partners and plugs with the ports */ if (!is_port) typec_altmode_set_partner(alt); @@ -2290,7 +2293,7 @@ void typec_port_register_altmodes(struct typec_port *port, const struct typec_altmode_ops *ops, void *drvdata, struct typec_altmode **altmodes, size_t n) { - struct fwnode_handle *altmodes_node, *child; + struct fwnode_handle *child; struct typec_altmode_desc desc; struct typec_altmode *alt; size_t index = 0; @@ -2298,7 +2301,9 @@ void typec_port_register_altmodes(struct typec_port *port, u32 vdo; int ret; - altmodes_node = device_get_named_child_node(&port->dev, "altmodes"); + struct fwnode_handle *altmodes_node __free(fwnode_handle) = + device_get_named_child_node(&port->dev, "altmodes"); + if (!altmodes_node) return; /* No altmodes specified */ diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c index 501eddb294e4..b80eb2d78d88 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c @@ -93,8 +93,10 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) return -EINVAL; bridge_dev = devm_drm_dp_hpd_bridge_alloc(tcpm->dev, to_of_node(tcpm->tcpc.fwnode)); - if (IS_ERR(bridge_dev)) - return PTR_ERR(bridge_dev); + if (IS_ERR(bridge_dev)) { + ret = PTR_ERR(bridge_dev); + goto fwnode_remove; + } tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc); if (IS_ERR(tcpm->tcpm_port)) { @@ -123,7 +125,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) port_unregister: tcpm_unregister_port(tcpm->tcpm_port); fwnode_remove: - fwnode_remove_software_node(tcpm->tcpc.fwnode); + fwnode_handle_put(tcpm->tcpc.fwnode); return ret; } @@ -135,7 +137,7 @@ static void qcom_pmic_typec_remove(struct platform_device *pdev) tcpm->pdphy_stop(tcpm); tcpm->port_stop(tcpm); tcpm_unregister_port(tcpm->tcpm_port); - fwnode_remove_software_node(tcpm->tcpc.fwnode); + fwnode_handle_put(tcpm->tcpc.fwnode); } static const struct pmic_typec_resources pm8150b_typec_res = { diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c index 5b7f52b74a40..726423684bae 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c @@ -227,6 +227,10 @@ qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd spin_lock_irqsave(&pmic_typec_pdphy->lock, flags); + hdr_len = sizeof(msg->header); + txbuf_len = pd_header_cnt_le(msg->header) * 4; + txsize_len = hdr_len + txbuf_len - 1; + ret = regmap_read(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, &val); @@ -244,10 +248,6 @@ 
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd if (ret) goto done; - hdr_len = sizeof(msg->header); - txbuf_len = pd_header_cnt_le(msg->header) * 4; - txsize_len = hdr_len + txbuf_len - 1; - /* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */ ret = regmap_bulk_write(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG, diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c index a747baa29784..c37dede62e12 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c @@ -432,7 +432,6 @@ static int qcom_pmic_typec_port_get_cc(struct tcpc_dev *tcpc, val = TYPEC_CC_RP_DEF; break; } - val = TYPEC_CC_RP_DEF; } if (misc & CC_ORIENTATION) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index fc619478200f..7ae341a40342 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4515,7 +4515,8 @@ static inline enum tcpm_state hard_reset_state(struct tcpm_port *port) return ERROR_RECOVERY; if (port->pwr_role == TYPEC_SOURCE) return SRC_UNATTACHED; - if (port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) + if (port->state == SNK_WAIT_CAPABILITIES || + port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) return SNK_READY; return SNK_UNATTACHED; } @@ -5043,8 +5044,11 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, SNK_SOFT_RESET, PD_T_SINK_WAIT_CAP); } else { - tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT, - PD_T_SINK_WAIT_CAP); + if (!port->self_powered) + upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT; + else + upcoming_state = hard_reset_state(port); + tcpm_set_state(port, upcoming_state, PD_T_SINK_WAIT_CAP); } break; case SNK_WAIT_CAPABILITIES_TIMEOUT: diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index ba58d11907bc..bccfc03b5986 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -482,6 +482,8 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc, port = uc->orig; new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd); + if (new_cam >= ARRAY_SIZE(uc->updated)) + return; new_port = &uc->updated[new_cam]; cam = new_port->linked_idx; enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd); diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c index 11bd76ae18cf..1d4767b33315 100644 --- a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c +++ b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c @@ -475,11 +475,11 @@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev) dev_err(dev, "Incomplete PCI capabilities"); return -EIO; } - dev_info(dev, "common cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->common_cfg); - dev_info(dev, "device cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->dev_cfg); - dev_info(dev, "isr cfg mapped at: 0x%016llx\n", (u64)(uintptr_t)oct_hw->isr); - dev_info(dev, "notify base: 0x%016llx, notify off multiplier: %u\n", - (u64)(uintptr_t)oct_hw->notify_base, oct_hw->notify_off_multiplier); + dev_info(dev, "common cfg mapped at: %p\n", oct_hw->common_cfg); + dev_info(dev, "device cfg mapped at: %p\n", oct_hw->dev_cfg); + dev_info(dev, "isr cfg mapped at: %p\n", oct_hw->isr); + dev_info(dev, "notify base: %p, notify off multiplier: %u\n", + oct_hw->notify_base, oct_hw->notify_off_multiplier); oct_hw->config_size = octep_get_config_size(oct_hw); oct_hw->features = octep_hw_get_dev_features(oct_hw); @@ -511,7 +511,7 
@@ int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev) } mbox = octep_get_mbox(oct_hw); octep_mbox_init(mbox); - dev_info(dev, "mbox mapped at: 0x%016llx\n", (u64)(uintptr_t)mbox); + dev_info(dev, "mbox mapped at: %p\n", mbox); return 0; } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 7db9bbdfb038..718fa4e0b31e 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1029,20 +1029,23 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, /* virtio-scsi spec requires byte 0 of the lun to be 1 */ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); } else { - struct vhost_scsi_tpg **vs_tpg, *tpg; + struct vhost_scsi_tpg **vs_tpg, *tpg = NULL; - vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */ - - tpg = READ_ONCE(vs_tpg[*vc->target]); - if (unlikely(!tpg)) { - vq_err(vq, "Target 0x%x does not exist\n", *vc->target); - } else { - if (tpgp) - *tpgp = tpg; - ret = 0; + if (vc->target) { + /* validated at handler entry */ + vs_tpg = vhost_vq_get_backend(vq); + tpg = READ_ONCE(vs_tpg[*vc->target]); + if (unlikely(!tpg)) { + vq_err(vq, "Target 0x%x does not exist\n", *vc->target); + goto out; + } } - } + if (tpgp) + *tpgp = tpg; + ret = 0; + } +out: return ret; } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index ea36c6956bf3..de035071fedb 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1236,7 +1236,6 @@ config FB_3DFX_I2C config FB_VOODOO1 tristate "3Dfx Voodoo Graphics (sst1) support" depends on FB && PCI - depends on FB_DEVICE select FB_IOMEM_HELPERS help Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or @@ -1374,6 +1373,7 @@ config FB_VT8500 config FB_WM8505 bool "Wondermedia WM8xxx-series frame buffer support" depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST) + select FB_IOMEM_FOPS select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS) select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS) select FB_SYS_IMAGEBLIT @@ -1660,19 +1660,6 @@ config FB_SH7760 and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for panels <= 320 pixel horizontal resolution. -config FB_DA8XX - tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support" - depends on FB && HAVE_CLK && HAS_IOMEM - depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST - select FB_CFB_REV_PIXELS_IN_BYTE - select FB_IOMEM_HELPERS - select FB_MODE_HELPERS - select VIDEOMODE_HELPERS - help - This is the frame buffer device driver for the TI LCD controller - found on DA8xx/OMAP-L1xx/AM335x SoCs. - If unsure, say N. - config FB_VIRTUAL tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" depends on FB diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 3eecd51267fa..b3d12f977c06 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -121,7 +121,6 @@ obj-$(CONFIG_FB_VESA) += vesafb.o obj-$(CONFIG_FB_EFI) += efifb.o obj-$(CONFIG_FB_VGA16) += vga16fb.o obj-$(CONFIG_FB_OF) += offb.o -obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o obj-$(CONFIG_FB_SIMPLE) += simplefb.o diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index 132638240521..1116a0789ca4 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -3774,8 +3774,8 @@ static void __exit amifb_remove(struct platform_device *pdev) * triggers a section mismatch warning. 
*/ static struct platform_driver amifb_driver __refdata = { - .remove_new = __exit_p(amifb_remove), - .driver = { + .remove = __exit_p(amifb_remove), + .driver = { .name = "amiga-video", }, }; diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index b2408543277c..b807cf07522d 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -548,7 +548,7 @@ static void arcfb_remove(struct platform_device *dev) static struct platform_driver arcfb_driver = { .probe = arcfb_probe, - .remove_new = arcfb_remove, + .remove = arcfb_remove, .driver = { .name = "arcfb", }, diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 5574fb0361ee..e13f53965a0d 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1299,7 +1299,7 @@ static int atmel_lcdfb_resume(struct platform_device *pdev) static struct platform_driver atmel_lcdfb_driver = { .probe = atmel_lcdfb_probe, - .remove_new = atmel_lcdfb_remove, + .remove = atmel_lcdfb_remove, .suspend = atmel_lcdfb_suspend, .resume = atmel_lcdfb_resume, .driver = { diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 08109ce535cd..840f22160763 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -588,7 +588,7 @@ static struct platform_driver au1100fb_driver = { .name = "au1100-lcd", }, .probe = au1100fb_drv_probe, - .remove_new = au1100fb_drv_remove, + .remove = au1100fb_drv_remove, .suspend = au1100fb_drv_suspend, .resume = au1100fb_drv_resume, }; diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index e718fea63662..ed770222660b 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1833,7 +1833,7 @@ static struct platform_driver au1200fb_driver = { .pm = AU1200FB_PMOPS, }, .probe = au1200fb_drv_probe, - .remove_new = au1200fb_drv_remove, + .remove = au1200fb_drv_remove, }; module_platform_driver(au1200fb_driver); diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index e857b15e9f5d..c8ba098a8c42 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -1151,7 +1151,7 @@ static void broadsheetfb_remove(struct platform_device *dev) static struct platform_driver broadsheetfb_driver = { .probe = broadsheetfb_probe, - .remove_new = broadsheetfb_remove, + .remove = broadsheetfb_remove, .driver = { .name = "broadsheetfb", }, diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index eaab51be74f8..e757462af0a6 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -147,7 +147,7 @@ bw2_blank(int blank, struct fb_info *info) return 0; } -static struct sbus_mmap_map bw2_mmap_map[] = { +static const struct sbus_mmap_map bw2_mmap_map[] = { { .size = SBUS_MMAP_FBSIZE(1) }, @@ -372,7 +372,7 @@ static struct platform_driver bw2_driver = { .of_match_table = bw2_match, }, .probe = bw2_probe, - .remove_new = bw2_remove, + .remove = bw2_remove, }; static int __init bw2_init(void) diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index c161b2af8933..5389f8f07346 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -360,7 +360,7 @@ static void cg14_init_fix(struct fb_info *info, int linebytes, info->fix.accel = FB_ACCEL_SUN_CG14; } -static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = { +static const struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] = { { .voff = CG14_REGS, .poff = 0x80000000, @@ -590,7 
+590,7 @@ static struct platform_driver cg14_driver = { .of_match_table = cg14_match, }, .probe = cg14_probe, - .remove_new = cg14_remove, + .remove = cg14_remove, }; static int __init cg14_init(void) diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 5e1f1b9a81b6..a58a483014e6 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -209,7 +209,7 @@ static int cg3_blank(int blank, struct fb_info *info) return 0; } -static struct sbus_mmap_map cg3_mmap_map[] = { +static const struct sbus_mmap_map cg3_mmap_map[] = { { .voff = CG3_MMAP_OFFSET, .poff = CG3_RAM_OFFSET, @@ -458,7 +458,7 @@ static struct platform_driver cg3_driver = { .of_match_table = cg3_match, }, .probe = cg3_probe, - .remove_new = cg3_remove, + .remove = cg3_remove, }; static int __init cg3_init(void) diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c index 69d3ce50948d..56d74468040a 100644 --- a/drivers/video/fbdev/cg6.c +++ b/drivers/video/fbdev/cg6.c @@ -545,7 +545,7 @@ static int cg6_blank(int blank, struct fb_info *info) return 0; } -static struct sbus_mmap_map cg6_mmap_map[] = { +static const struct sbus_mmap_map cg6_mmap_map[] = { { .voff = CG6_FBC, .poff = CG6_FBC_OFFSET, @@ -858,7 +858,7 @@ static struct platform_driver cg6_driver = { .of_match_table = cg6_match, }, .probe = cg6_probe, - .remove_new = cg6_remove, + .remove = cg6_remove, }; static int __init cg6_init(void) diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index 6171a98a48fd..0d0ba617b4aa 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -371,7 +371,7 @@ static struct platform_driver clps711x_fb_driver = { .of_match_table = clps711x_fb_dt_ids, }, .probe = clps711x_fb_probe, - .remove_new = clps711x_fb_remove, + .remove = clps711x_fb_remove, }; module_platform_driver(clps711x_fb_driver); diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index c2b8f894799c..308967b5096a 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -344,7 +344,7 @@ static void cobalt_lcdfb_remove(struct platform_device *dev) static struct platform_driver cobalt_lcdfb_driver = { .probe = cobalt_lcdfb_probe, - .remove_new = cobalt_lcdfb_remove, + .remove = cobalt_lcdfb_remove, .driver = { .name = "cobalt-lcd", }, diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c deleted file mode 100644 index 4ca70a1bdd3b..000000000000 --- a/drivers/video/fbdev/da8xx-fb.c +++ /dev/null @@ -1,1665 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2008-2009 MontaVista Software Inc. - * Copyright (C) 2008-2009 Texas Instruments Inc - * - * Based on the LCD driver for TI Avalanche processors written by - * Ajay Singh and Shalom Hai. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include