Merge 6.13-rc3 into staging-next
We need the gpib build fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit f9e7f3f962
@@ -76,7 +76,7 @@ Description:
 		timeout when the pretimeout interrupt is delivered. Pretimeout
 		is an optional feature.

-What:		/sys/class/watchdog/watchdogn/pretimeout_avaialable_governors
+What:		/sys/class/watchdog/watchdogn/pretimeout_available_governors
 Date:		February 2017
 Contact:	Wim Van Sebroeck <wim@iguana.be>
 Description:
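The attribute renamed above is an ordinary sysfs text file, so the corrected name is directly visible from userspace. A minimal read sketch, assuming a watchdog0 instance (the instance name is illustrative, not part of the diff):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        ssize_t n;
        /* Path follows the ABI entry above; watchdog0 is an example instance. */
        int fd = open("/sys/class/watchdog/watchdog0/pretimeout_available_governors",
                      O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("available pretimeout governors: %s", buf);
        }
        close(fd);
        return 0;
    }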
@@ -4822,6 +4822,11 @@
 			can be preempted anytime. Tasks will also yield
 			contended spinlocks (if the critical section isn't
 			explicitly preempt disabled beyond the lock itself).
+			lazy - Scheduler controlled. Similar to full but instead
+			       of preempting the task immediately, the task gets
+			       one HZ tick time to yield itself before the
+			       preemption will be forced. One preemption point is
+			       when the task returns to user space.

 	print-fatal-signals=
 			[KNL] debug: print fatal signals
@@ -255,8 +255,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
-| Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
-|                | ,11} SMMU PMCG  |                 |                             |
+| Hisilicon      | Hip{08,09,09A,10| #162001900      | N/A                         |
+|                | ,10C,11}        |                 |                             |
+|                | SMMU PMCG       |                 |                             |
 +----------------+-----------------+-----------------+-----------------------------+
 | Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
 +----------------+-----------------+-----------------+-----------------------------+
@@ -68,7 +68,7 @@ is to define the default namespace in the ``Makefile`` of the subsystem. E.g. to
 export all symbols defined in usb-common into the namespace USB_COMMON, add a
 line like this to drivers/usb/common/Makefile::

-	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
+	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'

 That will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. A
 symbol exported with EXPORT_SYMBOL_NS() while this definition is present, will
@@ -79,7 +79,7 @@ A second option to define the default namespace is directly in the compilation
 unit as preprocessor statement. The above example would then read::

 	#undef  DEFAULT_SYMBOL_NAMESPACE
-	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
+	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

 within the corresponding compilation unit before any EXPORT_SYMBOL macro is
 used.
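To make the two variants above concrete, here is a minimal sketch of an exporting compilation unit; the helper name usb_common_example() is hypothetical, while the quoted-string form of DEFAULT_SYMBOL_NAMESPACE matches the convention this diff switches to:

    /* Must be visible before linux/export.h is pulled in, like the -D flag. */
    #undef  DEFAULT_SYMBOL_NAMESPACE
    #define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

    #include <linux/export.h>
    #include <linux/module.h>

    /* Hypothetical helper; every EXPORT_SYMBOL*() below lands in USB_COMMON. */
    int usb_common_example(void)
    {
        return 0;
    }
    EXPORT_SYMBOL_GPL(usb_common_example);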
@@ -106,7 +106,7 @@ inspected with modinfo::
   [...]


-It is advisable to add the MODULE_IMPORT_NS("") statement close to other module
+It is advisable to add the MODULE_IMPORT_NS() statement close to other module
 metadata definitions like MODULE_AUTHOR() or MODULE_LICENSE(). Refer to section
 5. for a way to create missing import statements automatically.

@@ -128,7 +128,7 @@ enable loading regardless, but will emit a warning.
 Missing namespace imports can easily be detected at build time. In fact,
 modpost will emit a warning if a module uses a symbol from a namespace
 without importing it.
-MODULE_IMPORT_NS("") statements will usually be added at a definite location
+MODULE_IMPORT_NS() statements will usually be added at a definite location
 (along with other module meta data). To make the life of module authors (and
 subsystem maintainers) easier, a script and make target are available to fix up
 missing imports. Fixing missing imports can be done with::
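On the consumer side, a module that uses a symbol exported into USB_COMMON must import the namespace, or modpost warns at build time as described above (the make target the text refers to is nsdeps). A hedged sketch with a hypothetical consumer:

    #include <linux/module.h>

    /* Hypothetical symbol living in the USB_COMMON namespace. */
    extern int usb_common_example(void);

    static int __init consumer_init(void)
    {
        return usb_common_example();
    }
    module_init(consumer_init);

    MODULE_LICENSE("GPL");
    /* String-literal form, matching this tree after the conversion. */
    MODULE_IMPORT_NS("USB_COMMON");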
@@ -113,11 +113,8 @@ allOf:
         maxItems: 1

   - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - fsl,imx95-usb-phy
+      required:
+        - orientation-switch
     then:
       $ref: /schemas/usb/usb-switch.yaml#

@@ -55,6 +55,10 @@ patternProperties:
       patternProperties:
         "^power-domain@[0-9a-f]+$":
           $ref: "#/$defs/power-domain-node"
+          patternProperties:
+            "^power-domain@[0-9a-f]+$":
+              $ref: "#/$defs/power-domain-node"
+              unevaluatedProperties: false
           unevaluatedProperties: false
       unevaluatedProperties: false
     unevaluatedProperties: false
@@ -18,6 +18,7 @@ properties:
   compatible:
     enum:
       - qcom,qca6390-pmu
+      - qcom,wcn6750-pmu
       - qcom,wcn6855-pmu
       - qcom,wcn7850-pmu

@@ -27,6 +28,9 @@ properties:
   vddaon-supply:
     description: VDD_AON supply regulator handle

+  vddasd-supply:
+    description: VDD_ASD supply regulator handle
+
   vdddig-supply:
     description: VDD_DIG supply regulator handle

@@ -42,6 +46,9 @@ properties:
   vddio1p2-supply:
     description: VDD_IO_1P2 supply regulator handle

+  vddrfa0p8-supply:
+    description: VDD_RFA_0P8 supply regulator handle
+
   vddrfa0p95-supply:
     description: VDD_RFA_0P95 supply regulator handle

@@ -51,12 +58,18 @@ properties:
   vddrfa1p3-supply:
     description: VDD_RFA_1P3 supply regulator handle

+  vddrfa1p7-supply:
+    description: VDD_RFA_1P7 supply regulator handle
+
   vddrfa1p8-supply:
     description: VDD_RFA_1P8 supply regulator handle

   vddrfa1p9-supply:
     description: VDD_RFA_1P9 supply regulator handle

+  vddrfa2p2-supply:
+    description: VDD_RFA_2P2 supply regulator handle
+
   vddpcie1p3-supply:
     description: VDD_PCIE_1P3 supply regulator handle

@@ -119,6 +132,20 @@ allOf:
         - vddpcie1p3-supply
         - vddpcie1p9-supply
         - vddio-supply

+  - if:
+      properties:
+        compatible:
+          contains:
+            const: qcom,wcn6750-pmu
+    then:
+      required:
+        - vddaon-supply
+        - vddasd-supply
+        - vddpmu-supply
+        - vddrfa0p8-supply
+        - vddrfa1p2-supply
+        - vddrfa1p7-supply
+        - vddrfa2p2-supply
+
   - if:
       properties:
         compatible:
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/airoha,en7581-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha EN7581 Watchdog Timer
+
+maintainers:
+  - Christian Marangi <ansuelsmth@gmail.com>
+
+allOf:
+  - $ref: watchdog.yaml#
+
+properties:
+  compatible:
+    const: airoha,en7581-wdt
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    description: BUS clock (timer ticks at half the BUS clock)
+    maxItems: 1
+
+  clock-names:
+    const: bus
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/en7523-clk.h>
+
+    watchdog@1fbf0100 {
+        compatible = "airoha,en7581-wdt";
+        reg = <0x1fbf0100 0x3c>;
+
+        clocks = <&scuclk EN7523_CLK_BUS>;
+        clock-names = "bus";
+    };
@@ -48,6 +48,8 @@ properties:
   clocks:
     maxItems: 1

+  big-endian: true
+
   fsl,ext-reset-output:
     $ref: /schemas/types.yaml#/definitions/flag
     description: |
@@ -93,6 +95,18 @@ allOf:
       properties:
         fsl,suspend-in-wait: false

+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              enum:
+                - fsl,ls1012a-wdt
+                - fsl,ls1043a-wdt
+    then:
+      properties:
+        big-endian: false
+
 unevaluatedProperties: false

 examples:
@@ -26,6 +26,8 @@ properties:
           - qcom,apss-wdt-msm8994
           - qcom,apss-wdt-qcm2290
           - qcom,apss-wdt-qcs404
+          - qcom,apss-wdt-qcs615
+          - qcom,apss-wdt-qcs8300
           - qcom,apss-wdt-sa8255p
           - qcom,apss-wdt-sa8775p
           - qcom,apss-wdt-sc7180
@@ -26,6 +26,7 @@ properties:
           - samsung,exynos7-wdt                # for Exynos7
           - samsung,exynos850-wdt              # for Exynos850
           - samsung,exynosautov9-wdt           # for Exynosautov9
+          - samsung,exynosautov920-wdt         # for Exynosautov920
       - items:
           - enum:
               - tesla,fsd-wdt
@@ -77,6 +78,7 @@ allOf:
               - samsung,exynos7-wdt
               - samsung,exynos850-wdt
               - samsung,exynosautov9-wdt
+              - samsung,exynosautov920-wdt
     then:
       required:
         - samsung,syscon-phandle
@@ -88,6 +90,7 @@ allOf:
               - google,gs101-wdt
               - samsung,exynos850-wdt
               - samsung,exynosautov9-wdt
+              - samsung,exynosautov920-wdt
     then:
       properties:
         clocks:
@@ -6,16 +6,17 @@ Bare UDP Tunnelling Module Documentation

 There are various L3 encapsulation standards using UDP being discussed to
 leverage the UDP based load balancing capability of different networks.
-MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
+MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them.

 The Bareudp tunnel module provides a generic L3 encapsulation support for
 tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.

 Special Handling
 ----------------

 The bareudp device supports special handling for MPLS & IP as they can have
 multiple ethertypes.
-MPLS procotcol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
+The MPLS protocol can have ethertypes ETH_P_MPLS_UC (unicast) &
+ETH_P_MPLS_MC (multicast).
 IP protocol can have ethertypes ETH_P_IP (v4) & ETH_P_IPV6 (v6).
 This special handling can be enabled only for ethertypes ETH_P_IP & ETH_P_MPLS_UC
 with a flag called multiproto mode.
|
|||||||
3) Device Usage
|
3) Device Usage
|
||||||
|
|
||||||
The bareudp device could be used along with OVS or flower filter in TC.
|
The bareudp device could be used along with OVS or flower filter in TC.
|
||||||
The OVS or TC flower layer must set the tunnel information in SKB dst field before
|
The OVS or TC flower layer must set the tunnel information in the SKB dst field before
|
||||||
sending packet buffer to the bareudp device for transmission. On reception the
|
sending the packet buffer to the bareudp device for transmission. On reception, the
|
||||||
bareudp device extracts and stores the tunnel information in SKB dst field before
|
bareUDP device extracts and stores the tunnel information in the SKB dst field before
|
||||||
passing the packet buffer to the network stack.
|
passing the packet buffer to the network stack.
|
||||||
|
@@ -2170,6 +2170,12 @@ nexthop_compat_mode - BOOLEAN
 	understands the new API, this sysctl can be disabled to achieve full
 	performance benefits of the new API by disabling the nexthop expansion
 	and extraneous notifications.
+
+	Note that as a backward-compatible mode, dumping of modern features
+	might be incomplete or wrong. For example, resilient groups will not
+	be shown as such, but rather as just a list of next hops. Also
+	weights that do not fit into 8 bits will show incorrectly.
+
 	Default: true (backward compat mode)

 fib_notify_on_flag_change - INTEGER
@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:

 `int pm_runtime_resume_and_get(struct device *dev);`
     - run pm_runtime_resume(dev) and if successful, increment the device's
-      usage counter; return the result of pm_runtime_resume
+      usage counter; returns 0 on success (whether or not the device's
+      runtime PM status was already 'active') or the error code from
+      pm_runtime_resume() on failure

 `int pm_request_idle(struct device *dev);`
     - submit a request to execute the subsystem-level idle callback for the
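A typical caller pattern for the return convention clarified above, as a sketch (my_dev_do_io() is a hypothetical driver function):

    #include <linux/pm_runtime.h>

    static int my_dev_do_io(struct device *dev)
    {
        int ret;

        /*
         * 0 on success, negative errno on failure; on success the usage
         * counter has been incremented and must be dropped afterwards.
         */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
            return ret;

        /* ... access the device while it is guaranteed to be active ... */

        pm_runtime_put(dev);
        return 0;
    }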
@@ -69,7 +69,7 @@ For example, to export all the symbols defined in usb-common into the
 USB_COMMON namespace, add the following line to
 drivers/usb/common/Makefile::

-	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
+	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'

 This will change all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() macros. A symbol
 exported with EXPORT_SYMBOL_NS(), however, will not be changed and the symbol
@@ -79,7 +79,7 @@ A second option is to define the preprocessor symbol directly in the
 files to be compiled. The previous example would become::

 	#undef  DEFAULT_SYMBOL_NAMESPACE
-	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
+	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

 This must come before any use of EXPORT_SYMBOL.

@@ -107,7 +107,7 @@ modinfo::
 	[...]


-It is advisable to place the MODULE_IMPORT_NS("") declaration close to the
+It is advisable to place the MODULE_IMPORT_NS() declaration close to the
 module metadata such as MODULE_AUTHOR() or MODULE_LICENSE(). Refer to
 section 5. for creating the missing imports automatically.
@@ -131,7 +131,7 @@ will emit a warning.
 A missing import can easily be detected at build time. In fact, modpost
 will emit a warning if the module uses a symbol from a namespace that has
 not been imported.
-The MODULE_IMPORT_NS("") declaration is usually added in a well-defined
+The MODULE_IMPORT_NS() declaration is usually added in a well-defined
 place (together with the other module metadata). To make life easier for
 module authors (and subsystem maintainers), a script and a make target
 exist to fix the missing imports. This can
@@ -66,7 +66,7 @@
 define the default namespace in the subsystem's ``Makefile``. For example, to
 export all symbols defined in usb-common into the USB_COMMON namespace, add a
 line like this to drivers/usb/common/Makefile::

-	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
+	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'

 This will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. When
 this definition is present, a symbol exported with EXPORT_SYMBOL_NS() will
 still be exported into the namespace passed as the namespace argument,
@@ -76,7 +76,7 @@
 read::

 	#undef  DEFAULT_SYMBOL_NAMESPACE
-	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
+	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

 This must be placed before any EXPORT_SYMBOL macro in the compilation unit
 concerned.

@@ -99,7 +99,7 @@
 	[...]


-It is advisable to add the MODULE_IMPORT_NS("") statement close to other
+It is advisable to add the MODULE_IMPORT_NS() statement close to other
 module metadata definitions such as MODULE_AUTHOR() or MODULE_LICENSE().
 For a way to create the missing import statements automatically, refer
 to section 5.
@@ -118,7 +118,7 @@ modules that do not satisfy this precondition will fail with EINVAL.

 Missing namespace imports can easily be detected at build time. In fact,
 modpost will emit a warning if a module uses a symbol from a namespace
 without importing it.
-MODULE_IMPORT_NS("") statements will usually be added at a definite location
+MODULE_IMPORT_NS() statements will usually be added at a definite location
 (along with other module metadata). To make the life of module authors (and
 subsystem maintainers) easier, a script and make target are provided to fix
 missing imports. Fixing missing imports can be done with::

@@ -120,16 +120,6 @@ coh901327_wdt:

 -------------------------------------------------

-cpu5wdt:
-    port:
-	base address of watchdog card, default is 0x91
-    verbose:
-	be verbose, default is 0 (no)
-    ticks:
-	count down ticks, default is 10000
-
--------------------------------------------------
-
 cpwd:
     wd0_timeout:
 	Default watchdog0 timeout in 1/10secs
MAINTAINERS
@@ -3376,6 +3376,8 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
 F:	Documentation/arch/arm64/
 F:	arch/arm64/
+F:	drivers/virt/coco/arm-cca-guest/
+F:	drivers/virt/coco/pkvm-guest/
 F:	tools/testing/selftests/arm64/
 X:	arch/arm64/boot/dts/

|
|||||||
F: drivers/net/hamradio/baycom*
|
F: drivers/net/hamradio/baycom*
|
||||||
|
|
||||||
BCACHE (BLOCK LAYER CACHE)
|
BCACHE (BLOCK LAYER CACHE)
|
||||||
M: Coly Li <colyli@suse.de>
|
M: Coly Li <colyli@kernel.org>
|
||||||
M: Kent Overstreet <kent.overstreet@linux.dev>
|
M: Kent Overstreet <kent.overstreet@linux.dev>
|
||||||
L: linux-bcache@vger.kernel.org
|
L: linux-bcache@vger.kernel.org
|
||||||
S: Maintained
|
S: Maintained
|
||||||
@@ -15343,7 +15345,7 @@ M:	Daniel Machon <daniel.machon@microchip.com>
 M:	UNGLinuxDriver@microchip.com
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	drivers/net/ethernet/microchip/lan969x/*
+F:	drivers/net/ethernet/microchip/sparx5/lan969x/*

 MICROCHIP LCDFB DRIVER
 M:	Nicolas Ferre <nicolas.ferre@microchip.com>
@@ -16267,6 +16269,7 @@ F:	Documentation/devicetree/bindings/net/
 F:	Documentation/networking/net_cachelines/net_device.rst
 F:	drivers/connector/
 F:	drivers/net/
+F:	drivers/ptp/
 F:	include/dt-bindings/net/
 F:	include/linux/cn_proc.h
 F:	include/linux/etherdevice.h
|
|||||||
F: Documentation/networking/net_cachelines/
|
F: Documentation/networking/net_cachelines/
|
||||||
F: Documentation/process/maintainer-netdev.rst
|
F: Documentation/process/maintainer-netdev.rst
|
||||||
F: Documentation/userspace-api/netlink/
|
F: Documentation/userspace-api/netlink/
|
||||||
|
F: include/linux/ethtool.h
|
||||||
F: include/linux/framer/framer-provider.h
|
F: include/linux/framer/framer-provider.h
|
||||||
F: include/linux/framer/framer.h
|
F: include/linux/framer/framer.h
|
||||||
F: include/linux/in.h
|
F: include/linux/in.h
|
||||||
@ -16348,6 +16352,7 @@ F: include/linux/rtnetlink.h
|
|||||||
F: include/linux/seq_file_net.h
|
F: include/linux/seq_file_net.h
|
||||||
F: include/linux/skbuff*
|
F: include/linux/skbuff*
|
||||||
F: include/net/
|
F: include/net/
|
||||||
|
F: include/uapi/linux/ethtool.h
|
||||||
F: include/uapi/linux/genetlink.h
|
F: include/uapi/linux/genetlink.h
|
||||||
F: include/uapi/linux/hsr_netlink.h
|
F: include/uapi/linux/hsr_netlink.h
|
||||||
F: include/uapi/linux/in.h
|
F: include/uapi/linux/in.h
|
||||||
@ -22407,7 +22412,7 @@ F: drivers/char/hw_random/jh7110-trng.c
|
|||||||
|
|
||||||
STARFIVE WATCHDOG DRIVER
|
STARFIVE WATCHDOG DRIVER
|
||||||
M: Xingyu Wu <xingyu.wu@starfivetech.com>
|
M: Xingyu Wu <xingyu.wu@starfivetech.com>
|
||||||
M: Samin Guo <samin.guo@starfivetech.com>
|
M: Ziv Xu <ziv.xu@starfivetech.com>
|
||||||
S: Supported
|
S: Supported
|
||||||
F: Documentation/devicetree/bindings/watchdog/starfive*
|
F: Documentation/devicetree/bindings/watchdog/starfive*
|
||||||
F: drivers/watchdog/starfive-wdt.c
|
F: drivers/watchdog/starfive-wdt.c
|
||||||
|
Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -297,7 +297,6 @@ config ARC_PAGE_SIZE_16K
 config ARC_PAGE_SIZE_4K
 	bool "4KB"
 	select HAVE_PAGE_SIZE_4KB
-	depends on ARC_MMU_V3 || ARC_MMU_V4

 endchoice

|
|||||||
|
|
||||||
config ARC_HAS_PAE40
|
config ARC_HAS_PAE40
|
||||||
bool "Support for the 40-bit Physical Address Extension"
|
bool "Support for the 40-bit Physical Address Extension"
|
||||||
depends on ISA_ARCV2
|
depends on ARC_MMU_V4
|
||||||
|
depends on !ARC_PAGE_SIZE_4K
|
||||||
select HIGHMEM
|
select HIGHMEM
|
||||||
select PHYS_ADDR_T_64BIT
|
select PHYS_ADDR_T_64BIT
|
||||||
help
|
help
|
||||||
|
@@ -6,7 +6,7 @@
 KBUILD_DEFCONFIG := haps_hs_smp_defconfig

 ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
+CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
 endif

 cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
@@ -54,7 +54,7 @@ ictl_intc: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <30>;
+	ngpios = <30>;
 	reg = <0>;
 	interrupt-controller;
 	#interrupt-cells = <2>;
@@ -62,7 +62,7 @@ ictl_intc: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <30>;
+	ngpios = <30>;
 	reg = <0>;
 	interrupt-controller;
 	#interrupt-cells = <2>;
@@ -69,7 +69,7 @@ ictl_intc: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <30>;
+	ngpios = <30>;
 	reg = <0>;
 	interrupt-controller;
 	#interrupt-cells = <2>;
@@ -250,7 +250,7 @@ gpio0_banka: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <32>;
+	ngpios = <32>;
 	reg = <0>;
 };

@@ -258,7 +258,7 @@ gpio0_bankb: gpio-controller@1 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <8>;
+	ngpios = <8>;
 	reg = <1>;
 };

@@ -266,7 +266,7 @@ gpio0_bankc: gpio-controller@2 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <8>;
+	ngpios = <8>;
 	reg = <2>;
 };
 };
@@ -281,7 +281,7 @@ gpio1_banka: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <30>;
+	ngpios = <30>;
 	reg = <0>;
 };

@@ -289,7 +289,7 @@ gpio1_bankb: gpio-controller@1 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <10>;
+	ngpios = <10>;
 	reg = <1>;
 };

@@ -297,7 +297,7 @@ gpio1_bankc: gpio-controller@2 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <8>;
+	ngpios = <8>;
 	reg = <2>;
 };
 };
@@ -308,7 +308,7 @@ gpio_port_a: gpio-controller@0 {
 	compatible = "snps,dw-apb-gpio-port";
 	gpio-controller;
 	#gpio-cells = <2>;
-	snps,nr-gpios = <24>;
+	ngpios = <24>;
 	reg = <0>;
 };
 };
@@ -146,7 +146,7 @@

 #ifndef __ASSEMBLY__

-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>

 /* Helpers */
 #define TO_KB(bytes)	((bytes) >> 10)
@@ -48,7 +48,7 @@
 									\
	switch(sizeof((_p_))) {						\
	case 1:								\
-		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
+		_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_);	\
		break;							\
	case 4:								\
		_prev_ = __cmpxchg(_p_, _o_, _n_);			\
@@ -9,7 +9,7 @@
 #ifndef _ASM_ARC_MMU_ARCV2_H
 #define _ASM_ARC_MMU_ARCV2_H

-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>

 /*
  * TLB Management regs
@@ -2916,7 +2916,7 @@ bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond)
	addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal;
	disp = get_displacement(curr_off + addendum, targ_off);

-	if (ARC_CC_AL)
+	if (cond == ARC_CC_AL)
		return is_valid_far_disp(disp);
	else
		return is_valid_near_disp(disp);
@@ -44,6 +44,8 @@ cpucap_is_possible(const unsigned int cap)
		return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
	case ARM64_HAS_S1POE:
		return IS_ENABLED(CONFIG_ARM64_POE);
+	case ARM64_HAS_GCS:
+		return IS_ENABLED(CONFIG_ARM64_GCS);
	case ARM64_UNMAP_KERNEL_AT_EL0:
		return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
	case ARM64_WORKAROUND_843419:
@@ -847,8 +847,7 @@ static inline bool system_supports_poe(void)

 static inline bool system_supports_gcs(void)
 {
-	return IS_ENABLED(CONFIG_ARM64_GCS) &&
-		alternative_has_cap_unlikely(ARM64_HAS_GCS);
+	return alternative_has_cap_unlikely(ARM64_HAS_GCS);
 }

 static inline bool system_supports_haft(void)
@@ -87,7 +87,7 @@
		  1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
 .Lskip_spe_el2_\@:
-	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+	mov	x0, #MDCR_EL2_E2PB_MASK
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

|
|||||||
and x0, x0, TRBIDR_EL1_P
|
and x0, x0, TRBIDR_EL1_P
|
||||||
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
|
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
|
||||||
|
|
||||||
mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
|
mov x0, #MDCR_EL2_E2TB_MASK
|
||||||
orr x2, x2, x0 // allow the EL1&0 translation
|
orr x2, x2, x0 // allow the EL1&0 translation
|
||||||
// to own it.
|
// to own it.
|
||||||
|
|
||||||
|
@@ -7,6 +7,7 @@
 #ifndef BUILD_VDSO
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/hugetlb.h>
 #include <linux/shmem_fs.h>
 #include <linux/types.h>

|
|||||||
if (system_supports_mte()) {
|
if (system_supports_mte()) {
|
||||||
if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
|
if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
|
||||||
return VM_MTE_ALLOWED;
|
return VM_MTE_ALLOWED;
|
||||||
if (shmem_file(file))
|
if (shmem_file(file) || is_file_hugepages(file))
|
||||||
return VM_MTE_ALLOWED;
|
return VM_MTE_ALLOWED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -114,8 +114,8 @@ SYM_CODE_START_LOCAL(__finalise_el2)

	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
-	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
-	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+	bic	x0, x0, #MDCR_EL2_E2PB_MASK
+	bic	x0, x0, #MDCR_EL2_E2TB_MASK
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
@@ -30,20 +30,17 @@ static bool is_image_text(unsigned long addr)

 static void __kprobes *patch_map(void *addr, int fixmap)
 {
-	unsigned long uintaddr = (uintptr_t) addr;
-	bool image = is_image_text(uintaddr);
-	struct page *page;
-
-	if (image)
-		page = phys_to_page(__pa_symbol(addr));
-	else if (IS_ENABLED(CONFIG_EXECMEM))
-		page = vmalloc_to_page(addr);
-	else
-		return addr;
-
-	BUG_ON(!page);
-	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
-			(uintaddr & ~PAGE_MASK));
+	phys_addr_t phys;
+
+	if (is_image_text((unsigned long)addr)) {
+		phys = __pa_symbol(addr);
+	} else {
+		struct page *page = vmalloc_to_page(addr);
+		BUG_ON(!page);
+		phys = page_to_phys(page) + offset_in_page(addr);
+	}
+
+	return (void *)set_fixmap_offset(fixmap, phys);
 }

 static void __kprobes patch_unmap(int fixmap)
@@ -720,6 +720,8 @@ static int fpmr_set(struct task_struct *target, const struct user_regset *regset
	if (!system_supports_fpmr())
		return -EINVAL;

+	fpmr = target->thread.uw.fpmr;
+
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;
@@ -1427,7 +1429,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target,
 {
	long ctrl = get_tagged_addr_ctrl(target);

-	if (IS_ERR_VALUE(ctrl))
+	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
@@ -1441,6 +1443,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
	int ret;
	long ctrl;

+	ctrl = get_tagged_addr_ctrl(target);
+	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
+		return ctrl;
+
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;
@@ -1472,6 +1478,8 @@ static int poe_set(struct task_struct *target, const struct
	if (!system_supports_poe())
		return -EINVAL;

+	ctrl = target->thread.por_el0;
+
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;
@@ -1483,6 +1491,22 @@ static int poe_set(struct task_struct *target, const struct
 #endif

 #ifdef CONFIG_ARM64_GCS
+static void task_gcs_to_user(struct user_gcs *user_gcs,
+			     const struct task_struct *target)
+{
+	user_gcs->features_enabled = target->thread.gcs_el0_mode;
+	user_gcs->features_locked = target->thread.gcs_el0_locked;
+	user_gcs->gcspr_el0 = target->thread.gcspr_el0;
+}
+
+static void task_gcs_from_user(struct task_struct *target,
+			       const struct user_gcs *user_gcs)
+{
+	target->thread.gcs_el0_mode = user_gcs->features_enabled;
+	target->thread.gcs_el0_locked = user_gcs->features_locked;
+	target->thread.gcspr_el0 = user_gcs->gcspr_el0;
+}
+
 static int gcs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
@@ -1495,9 +1519,7 @@ static int gcs_get(struct task_struct *target,
	if (target == current)
		gcs_preserve_current_state();

-	user_gcs.features_enabled = target->thread.gcs_el0_mode;
-	user_gcs.features_locked = target->thread.gcs_el0_locked;
-	user_gcs.gcspr_el0 = target->thread.gcspr_el0;
+	task_gcs_to_user(&user_gcs, target);

	return membuf_write(&to, &user_gcs, sizeof(user_gcs));
 }
@@ -1513,6 +1535,8 @@ static int gcs_set(struct task_struct *target, const struct
	if (!system_supports_gcs())
		return -EINVAL;

+	task_gcs_to_user(&user_gcs, target);
+
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
	if (ret)
		return ret;
@@ -1520,9 +1544,7 @@ static int gcs_set(struct task_struct *target, const struct
	if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

-	target->thread.gcs_el0_mode = user_gcs.features_enabled;
-	target->thread.gcs_el0_locked = user_gcs.features_locked;
-	target->thread.gcspr_el0 = user_gcs.gcspr_el0;
+	task_gcs_from_user(target, &user_gcs);

	return 0;
 }
@@ -1462,10 +1462,33 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
 {
	__sigrestore_t sigtramp;
+	int err;
+
+	if (ksig->ka.sa.sa_flags & SA_RESTORER)
+		sigtramp = ksig->ka.sa.sa_restorer;
+	else
+		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
+
+	err = gcs_signal_entry(sigtramp, ksig);
+	if (err)
+		return err;
+
+	/*
+	 * We must not fail from this point onwards. We are going to update
+	 * registers, including SP, in order to invoke the signal handler. If
+	 * we failed and attempted to deliver a nested SIGSEGV to a handler
+	 * after that point, the subsequent sigreturn would end up restoring
+	 * the (partial) state for the original signal handler.
+	 */

	regs->regs[0] = usig;
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+		regs->regs[1] = (unsigned long)&user->sigframe->info;
+		regs->regs[2] = (unsigned long)&user->sigframe->uc;
+	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
+	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
@@ -1506,14 +1529,7 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
		sme_smstop();
	}

-	if (ksig->ka.sa.sa_flags & SA_RESTORER)
-		sigtramp = ksig->ka.sa.sa_restorer;
-	else
-		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
-
-	regs->regs[30] = (unsigned long)sigtramp;
-
-	return gcs_signal_entry(sigtramp, ksig);
+	return 0;
 }

 static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
@@ -1537,14 +1553,16 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
-	if (err == 0) {
-		err = setup_return(regs, ksig, &user, usig);
-		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
-			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-			regs->regs[1] = (unsigned long)&frame->info;
-			regs->regs[2] = (unsigned long)&frame->uc;
-		}
-	}
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+	if (err == 0)
+		err = setup_return(regs, ksig, &user, usig);
+
+	/*
+	 * We must not fail if setup_return() succeeded - see comment at the
+	 * beginning of setup_return().
+	 */
+
	if (err == 0)
		set_handler_user_access_state();
@@ -26,7 +26,6 @@ enum kunwind_source {
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
-	KUNWIND_SOURCE_REGS_LR,
 };

 union unwind_flags {
@@ -138,8 +137,10 @@ kunwind_recover_return_address(struct kunwind_state *state)
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
-		if (WARN_ON_ONCE(state->common.pc == orig_pc))
+		if (state->common.pc == orig_pc) {
+			WARN_ON_ONCE(state->task == current);
			return -EINVAL;
+		}
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
@@ -178,23 +179,8 @@ int kunwind_next_regs_pc(struct kunwind_state *state)
	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
-	state->source = KUNWIND_SOURCE_REGS_PC;
-	return 0;
-}
-
-static __always_inline int
-kunwind_next_regs_lr(struct kunwind_state *state)
-{
-	/*
-	 * The stack for the regs was consumed by kunwind_next_regs_pc(), so we
-	 * cannot consume that again here, but we know the regs are safe to
-	 * access.
-	 */
-	state->common.pc = state->regs->regs[30];
-	state->common.fp = state->regs->regs[29];
	state->regs = NULL;
-	state->source = KUNWIND_SOURCE_REGS_LR;
+	state->source = KUNWIND_SOURCE_REGS_PC;

	return 0;
 }

@@ -215,12 +201,12 @@ kunwind_next_frame_record_meta(struct kunwind_state *state)
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
-		WARN_ON_ONCE(1);
+		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	}
 }
@@ -274,11 +260,8 @@ kunwind_next(struct kunwind_state *state)
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
-	case KUNWIND_SOURCE_REGS_LR:
-		err = kunwind_next_frame_record(state);
-		break;
	case KUNWIND_SOURCE_REGS_PC:
-		err = kunwind_next_regs_lr(state);
+		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
@@ -436,7 +419,6 @@ static const char *state_source_string(const struct kunwind_state *state)
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
-	case KUNWIND_SOURCE_REGS_LR:	return "L";
	default:			return "U";
	}
 }
@@ -739,8 +739,15 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
			final_attr = s1_parattr;
			break;
		default:
-			/* MemAttr[2]=0, Device from S2 */
-			final_attr = s2_memattr & GENMASK(1,0) << 2;
+			/*
+			 * MemAttr[2]=0, Device from S2.
+			 *
+			 * FWB does not influence the way that stage 1
+			 * memory types and attributes are combined
+			 * with stage 2 Device type and attributes.
+			 */
+			final_attr = min(s2_memattr_to_attr(s2_memattr),
+					 s1_parattr);
		}
	} else {
		/* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
@@ -126,7 +126,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
-		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+		mdcr_clear |= MDCR_EL2_E2PB_MASK;
	}

	/* Trap Trace Filter */
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)

	/* Trap External Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+		mdcr_clear |= MDCR_EL2_E2TB_MASK;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
@@ -2618,7 +2618,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
					ID_AA64MMFR0_EL1_TGRAN4_2 |
					ID_AA64MMFR0_EL1_TGRAN64_2 |
-					ID_AA64MMFR0_EL1_TGRAN16_2)),
+					ID_AA64MMFR0_EL1_TGRAN16_2 |
+					ID_AA64MMFR0_EL1_ASIDBITS)),
	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
					ID_AA64MMFR1_EL1_HCX |
					ID_AA64MMFR1_EL1_TWED |
@@ -608,12 +608,22 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
	lockdep_assert_held(&its->its_lock);
	vgic_get_irq_kref(irq);

+	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+
+	/*
+	 * Put the reference taken on @irq if the store fails. Intentionally do
+	 * not return the error as the translation cache is best effort.
+	 */
+	if (xa_is_err(old)) {
+		vgic_put_irq(kvm, irq);
+		return;
+	}
+
	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, ensure we don't leak a
	 * reference if that is the case.
	 */
-	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
	if (old)
		vgic_put_irq(kvm, old);
 }
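The fix above hinges on the XArray error convention: xa_store() signals failure through an error entry, never through a NULL return, so the result must be tested with xa_is_err() before it is reused as the displaced pointer. A generic sketch of the pattern (cache_store() and the reference handling are illustrative):

    #include <linux/xarray.h>

    static int cache_store(struct xarray *xa, unsigned long index, void *entry)
    {
        void *old = xa_store(xa, index, entry, GFP_KERNEL);

        /* On failure xa_store() returns an error entry, never NULL. */
        if (xa_is_err(old))
            return xa_err(old);    /* e.g. -ENOMEM */

        if (old) {
            /* @entry displaced @old: drop the old reference here. */
        }
        return 0;
    }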
@@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
 static unsigned long *pinned_asid_map;

 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
-#define ASID_FIRST_VERSION	(1UL << asid_bits)
+#define ASID_FIRST_VERSION	(1UL << 16)

-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+#define NUM_USER_ASIDS		(1UL << asid_bits)
 #define ctxid2asid(asid)	((asid) & ~ASID_MASK)
 #define asid2ctxid(asid, genid)	((asid) | (genid))

@@ -30,11 +30,13 @@ void copy_highpage(struct page *to, struct page *from)
	if (!system_supports_mte())
		return;

-	if (folio_test_hugetlb(src) &&
-	    folio_test_hugetlb_mte_tagged(src)) {
-		if (!folio_try_hugetlb_mte_tagging(dst))
+	if (folio_test_hugetlb(src)) {
+		if (!folio_test_hugetlb_mte_tagged(src) ||
+		    from != folio_page(src, 0))
			return;

+		WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+
		/*
		 * Populate tags for all subpages.
		 *
@@ -117,15 +117,6 @@ static void __init arch_reserve_crashkernel(void)

 static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
 {
-	/**
-	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
-	 * bus constraints. Devices using DMA might have their own limitations.
-	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
-	 * DMA zone on platforms that have RAM there.
-	 */
-	if (memblock_start_of_DRAM() < U32_MAX)
-		zone_limit = min(zone_limit, U32_MAX);
-
	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
 }

|
|||||||
acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
|
acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
|
||||||
dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
|
dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
|
||||||
zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
|
zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
|
||||||
|
/*
|
||||||
|
* Information we get from firmware (e.g. DT dma-ranges) describe DMA
|
||||||
|
* bus constraints. Devices using DMA might have their own limitations.
|
||||||
|
* Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
|
||||||
|
* DMA zone on platforms that have RAM there.
|
||||||
|
*/
|
||||||
|
if (memblock_start_of_DRAM() < U32_MAX)
|
||||||
|
zone_dma_limit = min(zone_dma_limit, U32_MAX);
|
||||||
arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
|
arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
|
||||||
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
|
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
|
||||||
#endif
|
#endif
|
||||||
|
@ -24,6 +24,16 @@ static inline int prepare_hugepage_range(struct file *file,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define __HAVE_ARCH_HUGE_PTE_CLEAR
|
||||||
|
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||||
|
pte_t *ptep, unsigned long sz)
|
||||||
|
{
|
||||||
|
pte_t clear;
|
||||||
|
|
||||||
|
pte_val(clear) = (unsigned long)invalid_pte_table;
|
||||||
|
set_pte_at(mm, addr, ptep, clear);
|
||||||
|
}
|
||||||
|
|
||||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||||
unsigned long addr, pte_t *ptep)
|
unsigned long addr, pte_t *ptep)
|
||||||
|
@ -683,7 +683,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op)
|
|||||||
DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
|
DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
|
||||||
DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
|
DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
|
||||||
DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
|
DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
|
||||||
DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
|
|
||||||
|
static inline void emit_jirl(union loongarch_instruction *insn,
|
||||||
|
enum loongarch_gpr rd,
|
||||||
|
enum loongarch_gpr rj,
|
||||||
|
int offset)
|
||||||
|
{
|
||||||
|
insn->reg2i16_format.opcode = jirl_op;
|
||||||
|
insn->reg2i16_format.immediate = offset;
|
||||||
|
insn->reg2i16_format.rd = rd;
|
||||||
|
insn->reg2i16_format.rj = rj;
|
||||||
|
}
|
||||||
|
|
||||||
#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \
|
#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \
|
||||||
static inline void emit_##NAME(union loongarch_instruction *insn, \
|
static inline void emit_##NAME(union loongarch_instruction *insn, \
|
||||||
|
@ -95,7 +95,7 @@ static void __init init_screen_info(void)
|
|||||||
memset(si, 0, sizeof(*si));
|
memset(si, 0, sizeof(*si));
|
||||||
early_memunmap(si, sizeof(*si));
|
early_memunmap(si, sizeof(*si));
|
||||||
|
|
||||||
memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
|
memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init efi_init(void)
|
void __init efi_init(void)
|
||||||
|
@ -332,7 +332,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
|
|||||||
return INSN_BREAK;
|
return INSN_BREAK;
|
||||||
}
|
}
|
||||||
|
|
||||||
emit_jirl(&insn, rj, rd, imm >> 2);
|
emit_jirl(&insn, rd, rj, imm >> 2);
|
||||||
|
|
||||||
return insn.word;
|
return insn.word;
|
||||||
}
|
}
|
||||||
|
@ -82,7 +82,7 @@ void show_ipi_list(struct seq_file *p, int prec)
|
|||||||
for (i = 0; i < NR_IPI; i++) {
|
for (i = 0; i < NR_IPI; i++) {
|
||||||
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
|
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
|
||||||
for_each_online_cpu(cpu)
|
for_each_online_cpu(cpu)
|
||||||
seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
|
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
|
||||||
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
|
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -156,7 +156,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
|
|||||||
|
|
||||||
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int ret;
|
int idx, ret;
|
||||||
unsigned long *val;
|
unsigned long *val;
|
||||||
u32 addr, rd, rj, opcode;
|
u32 addr, rd, rj, opcode;
|
||||||
|
|
||||||
@ -167,7 +167,6 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||||||
rj = inst.reg2_format.rj;
|
rj = inst.reg2_format.rj;
|
||||||
opcode = inst.reg2_format.opcode;
|
opcode = inst.reg2_format.opcode;
|
||||||
addr = vcpu->arch.gprs[rj];
|
addr = vcpu->arch.gprs[rj];
|
||||||
ret = EMULATE_DO_IOCSR;
|
|
||||||
run->iocsr_io.phys_addr = addr;
|
run->iocsr_io.phys_addr = addr;
|
||||||
run->iocsr_io.is_write = 0;
|
run->iocsr_io.is_write = 0;
|
||||||
val = &vcpu->arch.gprs[rd];
|
val = &vcpu->arch.gprs[rd];
|
||||||
@ -207,20 +206,28 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (run->iocsr_io.is_write) {
|
if (run->iocsr_io.is_write) {
|
||||||
if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
|
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
|
if (ret == 0)
|
||||||
ret = EMULATE_DONE;
|
ret = EMULATE_DONE;
|
||||||
else
|
else {
|
||||||
|
ret = EMULATE_DO_IOCSR;
|
||||||
/* Save data and let user space to write it */
|
/* Save data and let user space to write it */
|
||||||
memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
|
memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
|
||||||
|
}
|
||||||
trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
|
trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
|
||||||
} else {
|
} else {
|
||||||
if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
|
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
|
if (ret == 0)
|
||||||
ret = EMULATE_DONE;
|
ret = EMULATE_DONE;
|
||||||
else
|
else {
|
||||||
|
ret = EMULATE_DO_IOCSR;
|
||||||
/* Save register id for iocsr read completion */
|
/* Save register id for iocsr read completion */
|
||||||
vcpu->arch.io_gpr = rd;
|
vcpu->arch.io_gpr = rd;
|
||||||
|
}
|
||||||
trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
|
trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -359,7 +366,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
|
|||||||
|
|
||||||
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
|
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||||
{
|
{
|
||||||
int ret;
|
int idx, ret;
|
||||||
unsigned int op8, opcode, rd;
|
unsigned int op8, opcode, rd;
|
||||||
struct kvm_run *run = vcpu->run;
|
struct kvm_run *run = vcpu->run;
|
||||||
|
|
||||||
@ -464,8 +471,10 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
|
|||||||
* it need not return to user space to handle the mmio
|
* it need not return to user space to handle the mmio
|
||||||
* exception.
|
* exception.
|
||||||
*/
|
*/
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
|
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
|
||||||
run->mmio.len, &vcpu->arch.gprs[rd]);
|
run->mmio.len, &vcpu->arch.gprs[rd]);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
update_pc(&vcpu->arch);
|
update_pc(&vcpu->arch);
|
||||||
vcpu->mmio_needed = 0;
|
vcpu->mmio_needed = 0;
|
||||||
@ -531,7 +540,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||||||
|
|
||||||
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
|
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||||
{
|
{
|
||||||
int ret;
|
int idx, ret;
|
||||||
unsigned int rd, op8, opcode;
|
unsigned int rd, op8, opcode;
|
||||||
unsigned long curr_pc, rd_val = 0;
|
unsigned long curr_pc, rd_val = 0;
|
||||||
struct kvm_run *run = vcpu->run;
|
struct kvm_run *run = vcpu->run;
|
||||||
@ -631,7 +640,9 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
|
|||||||
* it need not return to user space to handle the mmio
|
* it need not return to user space to handle the mmio
|
||||||
* exception.
|
* exception.
|
||||||
*/
|
*/
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
|
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
return EMULATE_DONE;
|
return EMULATE_DONE;
|
||||||
|
|
||||||
|
@ -98,7 +98,7 @@ static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int
|
|||||||
|
|
||||||
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
||||||
{
|
{
|
||||||
int i, ret;
|
int i, idx, ret;
|
||||||
uint32_t val = 0, mask = 0;
|
uint32_t val = 0, mask = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -107,7 +107,9 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||||||
*/
|
*/
|
||||||
if ((data >> 27) & 0xf) {
|
if ((data >> 27) & 0xf) {
|
||||||
/* Read the old val */
|
/* Read the old val */
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
if (unlikely(ret)) {
|
if (unlikely(ret)) {
|
||||||
kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
|
kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
|
||||||
return ret;
|
return ret;
|
||||||
@ -121,7 +123,9 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||||||
val &= mask;
|
val &= mask;
|
||||||
}
|
}
|
||||||
val |= ((uint32_t)(data >> 32) & ~mask);
|
val |= ((uint32_t)(data >> 32) & ~mask);
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
|
kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
|
||||||
|
|
||||||
|
@ -240,7 +240,7 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
|
|||||||
*/
|
*/
|
||||||
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
|
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int ret;
|
int idx, ret;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check conditions before entering the guest
|
* Check conditions before entering the guest
|
||||||
@ -249,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
ret = kvm_check_requests(vcpu);
|
ret = kvm_check_requests(vcpu);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -181,13 +181,13 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
|
|||||||
/* Set return value */
|
/* Set return value */
|
||||||
emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
|
emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
|
||||||
/* Return to the caller */
|
/* Return to the caller */
|
||||||
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
|
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* Call the next bpf prog and skip the first instruction
|
* Call the next bpf prog and skip the first instruction
|
||||||
* of TCC initialization.
|
* of TCC initialization.
|
||||||
*/
|
*/
|
||||||
emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
|
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -904,7 +904,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
move_addr(ctx, t1, func_addr);
|
move_addr(ctx, t1, func_addr);
|
||||||
emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
|
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
|
||||||
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
|
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
@ -239,6 +239,8 @@ handler: ;\
|
|||||||
|
|
||||||
/* =====================================================[ exceptions] === */
|
/* =====================================================[ exceptions] === */
|
||||||
|
|
||||||
|
__REF
|
||||||
|
|
||||||
/* ---[ 0x100: RESET exception ]----------------------------------------- */
|
/* ---[ 0x100: RESET exception ]----------------------------------------- */
|
||||||
|
|
||||||
EXCEPTION_ENTRY(_tng_kernel_start)
|
EXCEPTION_ENTRY(_tng_kernel_start)
|
||||||
|
@ -357,6 +357,8 @@
|
|||||||
|
|
||||||
/* =====================================================[ exceptions] === */
|
/* =====================================================[ exceptions] === */
|
||||||
|
|
||||||
|
__HEAD
|
||||||
|
|
||||||
/* ---[ 0x100: RESET exception ]----------------------------------------- */
|
/* ---[ 0x100: RESET exception ]----------------------------------------- */
|
||||||
.org 0x100
|
.org 0x100
|
||||||
/* Jump to .init code at _start which lives in the .head section
|
/* Jump to .init code at _start which lives in the .head section
|
||||||
@ -394,7 +396,7 @@ _dispatch_do_ipage_fault:
|
|||||||
.org 0x500
|
.org 0x500
|
||||||
EXCEPTION_HANDLE(_timer_handler)
|
EXCEPTION_HANDLE(_timer_handler)
|
||||||
|
|
||||||
/* ---[ 0x600: Alignment exception ]-------------------------------------- */
|
/* ---[ 0x600: Alignment exception ]------------------------------------- */
|
||||||
.org 0x600
|
.org 0x600
|
||||||
EXCEPTION_HANDLE(_alignment_handler)
|
EXCEPTION_HANDLE(_alignment_handler)
|
||||||
|
|
||||||
@ -424,7 +426,7 @@ _dispatch_do_ipage_fault:
|
|||||||
.org 0xc00
|
.org 0xc00
|
||||||
EXCEPTION_HANDLE(_sys_call_handler)
|
EXCEPTION_HANDLE(_sys_call_handler)
|
||||||
|
|
||||||
/* ---[ 0xd00: Floating point exception ]--------------------------------- */
|
/* ---[ 0xd00: Floating point exception ]-------------------------------- */
|
||||||
.org 0xd00
|
.org 0xd00
|
||||||
EXCEPTION_HANDLE(_fpe_trap_handler)
|
EXCEPTION_HANDLE(_fpe_trap_handler)
|
||||||
|
|
||||||
@ -506,10 +508,10 @@ _dispatch_do_ipage_fault:
|
|||||||
|
|
||||||
/* .text*/
|
/* .text*/
|
||||||
|
|
||||||
/* This early stuff belongs in HEAD, but some of the functions below definitely
|
/* This early stuff belongs in the .init.text section, but some of the functions below definitely
|
||||||
* don't... */
|
* don't... */
|
||||||
|
|
||||||
__HEAD
|
__INIT
|
||||||
.global _start
|
.global _start
|
||||||
_start:
|
_start:
|
||||||
/* Init r0 to zero as per spec */
|
/* Init r0 to zero as per spec */
|
||||||
@ -816,7 +818,7 @@ secondary_start:
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* ========================================[ cache ]=== */
|
/* ==========================================================[ cache ]=== */
|
||||||
|
|
||||||
/* alignment here so we don't change memory offsets with
|
/* alignment here so we don't change memory offsets with
|
||||||
* memory controller defined
|
* memory controller defined
|
||||||
|
@ -50,6 +50,7 @@ SECTIONS
|
|||||||
.text : AT(ADDR(.text) - LOAD_OFFSET)
|
.text : AT(ADDR(.text) - LOAD_OFFSET)
|
||||||
{
|
{
|
||||||
_stext = .;
|
_stext = .;
|
||||||
|
HEAD_TEXT
|
||||||
TEXT_TEXT
|
TEXT_TEXT
|
||||||
SCHED_TEXT
|
SCHED_TEXT
|
||||||
LOCK_TEXT
|
LOCK_TEXT
|
||||||
@ -83,8 +84,6 @@ SECTIONS
|
|||||||
. = ALIGN(PAGE_SIZE);
|
. = ALIGN(PAGE_SIZE);
|
||||||
__init_begin = .;
|
__init_begin = .;
|
||||||
|
|
||||||
HEAD_TEXT_SECTION
|
|
||||||
|
|
||||||
/* Page aligned */
|
/* Page aligned */
|
||||||
INIT_TEXT_SECTION(PAGE_SIZE)
|
INIT_TEXT_SECTION(PAGE_SIZE)
|
||||||
|
|
||||||
|
@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
|
|||||||
else
|
else
|
||||||
set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
|
set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
|
||||||
|
|
||||||
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
preempt_disable();
|
||||||
|
local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
||||||
|
preempt_enable();
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -36,9 +36,15 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
|
|||||||
insn = RISCV_INSN_NOP;
|
insn = RISCV_INSN_NOP;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (early_boot_irqs_disabled) {
|
||||||
|
riscv_patch_in_stop_machine = 1;
|
||||||
|
patch_insn_write(addr, &insn, sizeof(insn));
|
||||||
|
riscv_patch_in_stop_machine = 0;
|
||||||
|
} else {
|
||||||
mutex_lock(&text_mutex);
|
mutex_lock(&text_mutex);
|
||||||
patch_insn_write(addr, &insn, sizeof(insn));
|
patch_insn_write(addr, &insn, sizeof(insn));
|
||||||
mutex_unlock(&text_mutex);
|
mutex_unlock(&text_mutex);
|
||||||
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -227,7 +227,7 @@ static void __init init_resources(void)
|
|||||||
static void __init parse_dtb(void)
|
static void __init parse_dtb(void)
|
||||||
{
|
{
|
||||||
/* Early scan of device tree from init memory */
|
/* Early scan of device tree from init memory */
|
||||||
if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
|
if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
|
||||||
const char *name = of_flat_dt_get_machine_name();
|
const char *name = of_flat_dt_get_machine_name();
|
||||||
|
|
||||||
if (name) {
|
if (name) {
|
||||||
|
@ -590,7 +590,7 @@ void kvm_riscv_aia_enable(void)
|
|||||||
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
|
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
|
||||||
/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
|
/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
|
||||||
if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
|
if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
|
||||||
csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
|
csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
|
||||||
}
|
}
|
||||||
|
|
||||||
void kvm_riscv_aia_disable(void)
|
void kvm_riscv_aia_disable(void)
|
||||||
|
@ -1566,7 +1566,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
|
|||||||
pmd_clear(pmd);
|
pmd_clear(pmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
|
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
|
||||||
{
|
{
|
||||||
struct page *page = pud_page(*pud);
|
struct page *page = pud_page(*pud);
|
||||||
struct ptdesc *ptdesc = page_ptdesc(page);
|
struct ptdesc *ptdesc = page_ptdesc(page);
|
||||||
@ -1579,6 +1579,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!is_vmemmap)
|
||||||
pagetable_pmd_dtor(ptdesc);
|
pagetable_pmd_dtor(ptdesc);
|
||||||
if (PageReserved(page))
|
if (PageReserved(page))
|
||||||
free_reserved_page(page);
|
free_reserved_page(page);
|
||||||
@ -1703,7 +1704,7 @@ static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, un
|
|||||||
remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
|
remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
|
||||||
|
|
||||||
if (pgtable_l4_enabled)
|
if (pgtable_l4_enabled)
|
||||||
free_pmd_table(pmd_base, pudp);
|
free_pmd_table(pmd_base, pudp, is_vmemmap);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7135,6 +7135,7 @@ __init int intel_pmu_init(void)
|
|||||||
|
|
||||||
case INTEL_METEORLAKE:
|
case INTEL_METEORLAKE:
|
||||||
case INTEL_METEORLAKE_L:
|
case INTEL_METEORLAKE_L:
|
||||||
|
case INTEL_ARROWLAKE_U:
|
||||||
intel_pmu_init_hybrid(hybrid_big_small);
|
intel_pmu_init_hybrid(hybrid_big_small);
|
||||||
|
|
||||||
x86_pmu.pebs_latency_data = cmt_latency_data;
|
x86_pmu.pebs_latency_data = cmt_latency_data;
|
||||||
|
@ -1489,7 +1489,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
|
|||||||
* hence we need to drain when changing said
|
* hence we need to drain when changing said
|
||||||
* size.
|
* size.
|
||||||
*/
|
*/
|
||||||
intel_pmu_drain_large_pebs(cpuc);
|
intel_pmu_drain_pebs_buffer();
|
||||||
adaptive_pebs_record_size_update();
|
adaptive_pebs_record_size_update();
|
||||||
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
|
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
|
||||||
cpuc->active_pebs_data_cfg = pebs_data_cfg;
|
cpuc->active_pebs_data_cfg = pebs_data_cfg;
|
||||||
|
@ -36,10 +36,12 @@
|
|||||||
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
|
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
|
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
|
||||||
|
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
|
||||||
#else
|
#else
|
||||||
/* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
|
/* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
|
||||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit */
|
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
|
||||||
|
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* If _PAGE_BIT_PRESENT is clear, we use these: */
|
/* If _PAGE_BIT_PRESENT is clear, we use these: */
|
||||||
@ -139,6 +141,8 @@
|
|||||||
|
|
||||||
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
|
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
|
||||||
|
|
||||||
|
#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Set of bits not changed in pte_modify. The pte's
|
* Set of bits not changed in pte_modify. The pte's
|
||||||
* protection key is treated like _PAGE_RW, for
|
* protection key is treated like _PAGE_RW, for
|
||||||
|
@ -1065,7 +1065,7 @@ static void init_amd(struct cpuinfo_x86 *c)
|
|||||||
*/
|
*/
|
||||||
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
||||||
cpu_has(c, X86_FEATURE_AUTOIBRS))
|
cpu_has(c, X86_FEATURE_AUTOIBRS))
|
||||||
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
|
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);
|
||||||
|
|
||||||
/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
|
/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
|
||||||
clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
|
clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
|
||||||
|
@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
|
|||||||
struct amd_northbridge *nb;
|
struct amd_northbridge *nb;
|
||||||
};
|
};
|
||||||
|
|
||||||
static unsigned short num_cache_leaves;
|
|
||||||
|
|
||||||
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
||||||
information to the user. This makes some assumptions about the machine:
|
information to the user. This makes some assumptions about the machine:
|
||||||
L2 not shared, no SMT etc. that is currently true on AMD CPUs.
|
L2 not shared, no SMT etc. that is currently true on AMD CPUs.
|
||||||
@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
|
|||||||
|
|
||||||
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
||||||
{
|
{
|
||||||
|
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||||
|
|
||||||
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
||||||
num_cache_leaves = find_num_cache_leaves(c);
|
ci->num_leaves = find_num_cache_leaves(c);
|
||||||
} else if (c->extended_cpuid_level >= 0x80000006) {
|
} else if (c->extended_cpuid_level >= 0x80000006) {
|
||||||
if (cpuid_edx(0x80000006) & 0xf000)
|
if (cpuid_edx(0x80000006) & 0xf000)
|
||||||
num_cache_leaves = 4;
|
ci->num_leaves = 4;
|
||||||
else
|
else
|
||||||
num_cache_leaves = 3;
|
ci->num_leaves = 3;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
|
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
|
||||||
{
|
{
|
||||||
num_cache_leaves = find_num_cache_leaves(c);
|
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||||
|
|
||||||
|
ci->num_leaves = find_num_cache_leaves(c);
|
||||||
}
|
}
|
||||||
|
|
||||||
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||||
@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|||||||
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
||||||
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
||||||
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
||||||
|
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||||
|
|
||||||
if (c->cpuid_level > 3) {
|
if (c->cpuid_level > 3) {
|
||||||
static int is_initialized;
|
/*
|
||||||
|
* There should be at least one leaf. A non-zero value means
|
||||||
if (is_initialized == 0) {
|
* that the number of leaves has been initialized.
|
||||||
/* Init num_cache_leaves from boot CPU */
|
*/
|
||||||
num_cache_leaves = find_num_cache_leaves(c);
|
if (!ci->num_leaves)
|
||||||
is_initialized++;
|
ci->num_leaves = find_num_cache_leaves(c);
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Whenever possible use cpuid(4), deterministic cache
|
* Whenever possible use cpuid(4), deterministic cache
|
||||||
* parameters cpuid leaf to find the cache details
|
* parameters cpuid leaf to find the cache details
|
||||||
*/
|
*/
|
||||||
for (i = 0; i < num_cache_leaves; i++) {
|
for (i = 0; i < ci->num_leaves; i++) {
|
||||||
struct _cpuid4_info_regs this_leaf = {};
|
struct _cpuid4_info_regs this_leaf = {};
|
||||||
int retval;
|
int retval;
|
||||||
|
|
||||||
@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|||||||
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
|
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
|
||||||
* trace cache
|
* trace cache
|
||||||
*/
|
*/
|
||||||
if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
|
if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
|
||||||
/* supports eax=2 call */
|
/* supports eax=2 call */
|
||||||
int j, n;
|
int j, n;
|
||||||
unsigned int regs[4];
|
unsigned int regs[4];
|
||||||
unsigned char *dp = (unsigned char *)regs;
|
unsigned char *dp = (unsigned char *)regs;
|
||||||
int only_trace = 0;
|
int only_trace = 0;
|
||||||
|
|
||||||
if (num_cache_leaves != 0 && c->x86 == 15)
|
if (ci->num_leaves && c->x86 == 15)
|
||||||
only_trace = 1;
|
only_trace = 1;
|
||||||
|
|
||||||
/* Number of times to iterate */
|
/* Number of times to iterate */
|
||||||
@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
|
|||||||
|
|
||||||
int init_cache_level(unsigned int cpu)
|
int init_cache_level(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
|
||||||
|
|
||||||
if (!num_cache_leaves)
|
/* There should be at least one leaf. */
|
||||||
|
if (!ci->num_leaves)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
if (!this_cpu_ci)
|
|
||||||
return -EINVAL;
|
|
||||||
this_cpu_ci->num_levels = 3;
|
|
||||||
this_cpu_ci->num_leaves = num_cache_leaves;
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -555,7 +555,9 @@ static void init_intel(struct cpuinfo_x86 *c)
|
|||||||
c->x86_vfm == INTEL_WESTMERE_EX))
|
c->x86_vfm == INTEL_WESTMERE_EX))
|
||||||
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
|
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
|
||||||
|
|
||||||
if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
|
if (boot_cpu_has(X86_FEATURE_MWAIT) &&
|
||||||
|
(c->x86_vfm == INTEL_ATOM_GOLDMONT ||
|
||||||
|
c->x86_vfm == INTEL_LUNARLAKE_M))
|
||||||
set_cpu_bug(c, X86_BUG_MONITOR);
|
set_cpu_bug(c, X86_BUG_MONITOR);
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
|
@ -428,8 +428,8 @@ void __init topology_apply_cmdline_limits_early(void)
|
|||||||
{
|
{
|
||||||
unsigned int possible = nr_cpu_ids;
|
unsigned int possible = nr_cpu_ids;
|
||||||
|
|
||||||
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
|
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' */
|
||||||
if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
|
if (!setup_max_cpus || apic_is_disabled)
|
||||||
possible = 1;
|
possible = 1;
|
||||||
|
|
||||||
/* 'possible_cpus=N' */
|
/* 'possible_cpus=N' */
|
||||||
@ -443,7 +443,7 @@ void __init topology_apply_cmdline_limits_early(void)
|
|||||||
|
|
||||||
static __init bool restrict_to_up(void)
|
static __init bool restrict_to_up(void)
|
||||||
{
|
{
|
||||||
if (!smp_found_config || ioapic_is_disabled)
|
if (!smp_found_config)
|
||||||
return true;
|
return true;
|
||||||
/*
|
/*
|
||||||
* XEN PV is special as it does not advertise the local APIC
|
* XEN PV is special as it does not advertise the local APIC
|
||||||
|
@ -63,16 +63,6 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Update the value of PKRU register that was already pushed onto the signal frame.
|
|
||||||
*/
|
|
||||||
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
|
|
||||||
{
|
|
||||||
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
|
||||||
return 0;
|
|
||||||
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Signal frame handlers.
|
* Signal frame handlers.
|
||||||
*/
|
*/
|
||||||
@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
|
|||||||
|
|
||||||
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
|
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||||
{
|
{
|
||||||
int err = 0;
|
if (use_xsave())
|
||||||
|
return xsave_to_user_sigframe(buf, pkru);
|
||||||
if (use_xsave()) {
|
|
||||||
err = xsave_to_user_sigframe(buf);
|
|
||||||
if (!err)
|
|
||||||
err = update_pkru_in_sigframe(buf, pkru);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (use_fxsr())
|
if (use_fxsr())
|
||||||
return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
|
return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
|
||||||
|
@ -69,6 +69,28 @@ static inline u64 xfeatures_mask_independent(void)
|
|||||||
return fpu_kernel_cfg.independent_features;
|
return fpu_kernel_cfg.independent_features;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update the value of PKRU register that was already pushed onto the signal frame.
|
||||||
|
*/
|
||||||
|
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
|
||||||
|
{
|
||||||
|
u64 xstate_bv;
|
||||||
|
int err;
|
||||||
|
|
||||||
|
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* Mark PKRU as in-use so that it is restored correctly. */
|
||||||
|
xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
|
||||||
|
|
||||||
|
err = __put_user(xstate_bv, &buf->header.xfeatures);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
/* Update PKRU value in the userspace xsave buffer. */
|
||||||
|
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
||||||
|
}
|
||||||
|
|
||||||
/* XSAVE/XRSTOR wrapper functions */
|
/* XSAVE/XRSTOR wrapper functions */
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
@ -256,7 +278,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
|
|||||||
* The caller has to zero buf::header before calling this because XSAVE*
|
* The caller has to zero buf::header before calling this because XSAVE*
|
||||||
* does not touch the reserved fields in the header.
|
* does not touch the reserved fields in the header.
|
||||||
*/
|
*/
|
||||||
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Include the features which are not xsaved/rstored by the kernel
|
* Include the features which are not xsaved/rstored by the kernel
|
||||||
@ -281,6 +303,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
|||||||
XSTATE_OP(XSAVE, buf, lmask, hmask, err);
|
XSTATE_OP(XSAVE, buf, lmask, hmask, err);
|
||||||
clac();
|
clac();
|
||||||
|
|
||||||
|
if (!err)
|
||||||
|
err = update_pkru_in_sigframe(buf, mask, pkru);
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -13,6 +13,7 @@
|
|||||||
#include <asm/pgtable_types.h>
|
#include <asm/pgtable_types.h>
|
||||||
#include <asm/nospec-branch.h>
|
#include <asm/nospec-branch.h>
|
||||||
#include <asm/unwind_hints.h>
|
#include <asm/unwind_hints.h>
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Must be relocatable PIC code callable as a C function, in particular
|
* Must be relocatable PIC code callable as a C function, in particular
|
||||||
@ -242,6 +243,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
|
|||||||
movq CR0(%r8), %r8
|
movq CR0(%r8), %r8
|
||||||
movq %rax, %cr3
|
movq %rax, %cr3
|
||||||
movq %r8, %cr0
|
movq %r8, %cr0
|
||||||
|
|
||||||
|
#ifdef CONFIG_KEXEC_JUMP
|
||||||
|
/* Saved in save_processor_state. */
|
||||||
|
movq $saved_context, %rax
|
||||||
|
lgdt saved_context_gdt_desc(%rax)
|
||||||
|
#endif
|
||||||
|
|
||||||
movq %rbp, %rax
|
movq %rbp, %rax
|
||||||
|
|
||||||
popf
|
popf
|
||||||
|
@ -36,6 +36,26 @@
|
|||||||
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
|
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
|
||||||
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
|
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
|
||||||
|
|
||||||
|
struct cpuid_xstate_sizes {
|
||||||
|
u32 eax;
|
||||||
|
u32 ebx;
|
||||||
|
u32 ecx;
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;
|
||||||
|
|
||||||
|
void __init kvm_init_xstate_sizes(void)
|
||||||
|
{
|
||||||
|
u32 ign;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
|
||||||
|
struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
|
||||||
|
|
||||||
|
cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
u32 xstate_required_size(u64 xstate_bv, bool compacted)
|
u32 xstate_required_size(u64 xstate_bv, bool compacted)
|
||||||
{
|
{
|
||||||
int feature_bit = 0;
|
int feature_bit = 0;
|
||||||
@ -44,14 +64,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
|
|||||||
xstate_bv &= XFEATURE_MASK_EXTEND;
|
xstate_bv &= XFEATURE_MASK_EXTEND;
|
||||||
while (xstate_bv) {
|
while (xstate_bv) {
|
||||||
if (xstate_bv & 0x1) {
|
if (xstate_bv & 0x1) {
|
||||||
u32 eax, ebx, ecx, edx, offset;
|
struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
|
||||||
cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
|
u32 offset;
|
||||||
|
|
||||||
/* ECX[1]: 64B alignment in compacted form */
|
/* ECX[1]: 64B alignment in compacted form */
|
||||||
if (compacted)
|
if (compacted)
|
||||||
offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
|
offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
|
||||||
else
|
else
|
||||||
offset = ebx;
|
offset = xs->ebx;
|
||||||
ret = max(ret, offset + eax);
|
ret = max(ret, offset + xs->eax);
|
||||||
}
|
}
|
||||||
|
|
||||||
xstate_bv >>= 1;
|
xstate_bv >>= 1;
|
||||||
|
@ -31,6 +31,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
|
|||||||
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
|
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
|
||||||
u32 *ecx, u32 *edx, bool exact_only);
|
u32 *ecx, u32 *edx, bool exact_only);
|
||||||
|
|
||||||
|
void __init kvm_init_xstate_sizes(void);
|
||||||
u32 xstate_required_size(u64 xstate_bv, bool compacted);
|
u32 xstate_required_size(u64 xstate_bv, bool compacted);
|
||||||
|
|
||||||
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
|
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
|
||||||
|
@ -13997,6 +13997,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);
|
|||||||
|
|
||||||
static int __init kvm_x86_init(void)
|
static int __init kvm_x86_init(void)
|
||||||
{
|
{
|
||||||
|
kvm_init_xstate_sizes();
|
||||||
|
|
||||||
kvm_mmu_x86_module_init();
|
kvm_mmu_x86_module_init();
|
||||||
mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
|
mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -174,7 +174,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
|
|||||||
if (result)
|
if (result)
|
||||||
return result;
|
return result;
|
||||||
|
|
||||||
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
|
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -218,14 +218,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
|
|||||||
if (result)
|
if (result)
|
||||||
return result;
|
return result;
|
||||||
if (pgtable_l5_enabled()) {
|
if (pgtable_l5_enabled()) {
|
||||||
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
|
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* With p4d folded, pgd is equal to p4d.
|
* With p4d folded, pgd is equal to p4d.
|
||||||
* The pgd entry has to point to the pud page table in this case.
|
* The pgd entry has to point to the pud page table in this case.
|
||||||
*/
|
*/
|
||||||
pud_t *pud = pud_offset(p4d, 0);
|
pud_t *pud = pud_offset(p4d, 0);
|
||||||
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
|
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
|
|||||||
* Top-level entries added to init_mm's usermode pgd after boot
|
* Top-level entries added to init_mm's usermode pgd after boot
|
||||||
* will not be automatically propagated to other mms.
|
* will not be automatically propagated to other mms.
|
||||||
*/
|
*/
|
||||||
if (!pgdp_maps_userspace(pgdp))
|
if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
|
||||||
return pgd;
|
return pgd;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1171,7 +1171,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__bio_release_pages);
|
EXPORT_SYMBOL_GPL(__bio_release_pages);
|
||||||
|
|
||||||
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
|
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
|
||||||
{
|
{
|
||||||
WARN_ON_ONCE(bio->bi_max_vecs);
|
WARN_ON_ONCE(bio->bi_max_vecs);
|
||||||
|
|
||||||
|
@ -1324,10 +1324,14 @@ void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
|
|||||||
struct blkcg *blkcg = css_to_blkcg(blkcg_css);
|
struct blkcg *blkcg = css_to_blkcg(blkcg_css);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
|
struct blkcg *parent;
|
||||||
|
|
||||||
if (!refcount_dec_and_test(&blkcg->online_pin))
|
if (!refcount_dec_and_test(&blkcg->online_pin))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
parent = blkcg_parent(blkcg);
|
||||||
blkcg_destroy_blkgs(blkcg);
|
blkcg_destroy_blkgs(blkcg);
|
||||||
blkcg = blkcg_parent(blkcg);
|
blkcg = parent;
|
||||||
} while (blkcg);
|
} while (blkcg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1098,7 +1098,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
|
|||||||
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
|
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
|
||||||
iocg->child_active_sum);
|
iocg->child_active_sum);
|
||||||
} else {
|
} else {
|
||||||
inuse = clamp_t(u32, inuse, 1, active);
|
/*
|
||||||
|
* It may be tempting to turn this into a clamp expression with
|
||||||
|
* a lower limit of 1 but active may be 0, which cannot be used
|
||||||
|
* as an upper limit in that situation. This expression allows
|
||||||
|
* active to clamp inuse unless it is 0, in which case inuse
|
||||||
|
* becomes 1.
|
||||||
|
*/
|
||||||
|
inuse = min(inuse, active) ?: 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
iocg->last_inuse = iocg->inuse;
|
iocg->last_inuse = iocg->inuse;
|
||||||
|
@ -574,7 +574,7 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
|
|||||||
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
|
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
|
||||||
if (!bio)
|
if (!bio)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
|
bio_iov_bvec_set(bio, iter);
|
||||||
|
|
||||||
/* check that the data layout matches the hardware restrictions */
|
/* check that the data layout matches the hardware restrictions */
|
||||||
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
|
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
|
||||||
|
@ -275,15 +275,13 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
|
|||||||
struct blk_mq_hw_ctx *hctx;
|
struct blk_mq_hw_ctx *hctx;
|
||||||
unsigned long i;
|
unsigned long i;
|
||||||
|
|
||||||
mutex_lock(&q->sysfs_dir_lock);
|
lockdep_assert_held(&q->sysfs_dir_lock);
|
||||||
|
|
||||||
if (!q->mq_sysfs_init_done)
|
if (!q->mq_sysfs_init_done)
|
||||||
goto unlock;
|
return;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i)
|
queue_for_each_hw_ctx(q, hctx, i)
|
||||||
blk_mq_unregister_hctx(hctx);
|
blk_mq_unregister_hctx(hctx);
|
||||||
|
|
||||||
unlock:
|
|
||||||
mutex_unlock(&q->sysfs_dir_lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
||||||
@ -292,9 +290,10 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
|||||||
unsigned long i;
|
unsigned long i;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
mutex_lock(&q->sysfs_dir_lock);
|
lockdep_assert_held(&q->sysfs_dir_lock);
|
||||||
|
|
||||||
if (!q->mq_sysfs_init_done)
|
if (!q->mq_sysfs_init_done)
|
||||||
goto unlock;
|
return ret;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i) {
|
||||||
ret = blk_mq_register_hctx(hctx);
|
ret = blk_mq_register_hctx(hctx);
|
||||||
@ -302,8 +301,5 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
unlock:
|
|
||||||
mutex_unlock(&q->sysfs_dir_lock);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
143
block/blk-mq.c
143
block/blk-mq.c
@ -43,6 +43,7 @@
|
|||||||
|
|
||||||
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
|
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
|
||||||
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
|
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
|
||||||
|
static DEFINE_MUTEX(blk_mq_cpuhp_lock);
|
||||||
|
|
||||||
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
|
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
|
||||||
static void blk_mq_request_bypass_insert(struct request *rq,
|
static void blk_mq_request_bypass_insert(struct request *rq,
|
||||||
@ -1543,20 +1544,18 @@ static void blk_mq_requeue_work(struct work_struct *work)
|
|||||||
|
|
||||||
while (!list_empty(&rq_list)) {
|
while (!list_empty(&rq_list)) {
|
||||||
rq = list_entry(rq_list.next, struct request, queuelist);
|
rq = list_entry(rq_list.next, struct request, queuelist);
|
||||||
|
list_del_init(&rq->queuelist);
|
||||||
/*
|
/*
|
||||||
* If RQF_DONTPREP ist set, the request has been started by the
|
* If RQF_DONTPREP is set, the request has been started by the
|
||||||
* driver already and might have driver-specific data allocated
|
* driver already and might have driver-specific data allocated
|
||||||
* already. Insert it into the hctx dispatch list to avoid
|
* already. Insert it into the hctx dispatch list to avoid
|
||||||
* block layer merges for the request.
|
* block layer merges for the request.
|
||||||
*/
|
*/
|
||||||
if (rq->rq_flags & RQF_DONTPREP) {
|
if (rq->rq_flags & RQF_DONTPREP)
|
||||||
list_del_init(&rq->queuelist);
|
|
||||||
blk_mq_request_bypass_insert(rq, 0);
|
blk_mq_request_bypass_insert(rq, 0);
|
||||||
} else {
|
else
|
||||||
list_del_init(&rq->queuelist);
|
|
||||||
blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
|
blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
while (!list_empty(&flush_list)) {
|
while (!list_empty(&flush_list)) {
|
||||||
rq = list_entry(flush_list.next, struct request, queuelist);
|
rq = list_entry(flush_list.next, struct request, queuelist);
|
||||||
@ -3739,13 +3738,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||||
|
|
||||||
|
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||||
|
!hlist_unhashed(&hctx->cpuhp_online)) {
|
||||||
cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||||
&hctx->cpuhp_online);
|
&hctx->cpuhp_online);
|
||||||
|
INIT_HLIST_NODE(&hctx->cpuhp_online);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!hlist_unhashed(&hctx->cpuhp_dead)) {
|
||||||
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||||
&hctx->cpuhp_dead);
|
&hctx->cpuhp_dead);
|
||||||
|
INIT_HLIST_NODE(&hctx->cpuhp_dead);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||||
|
{
|
||||||
|
mutex_lock(&blk_mq_cpuhp_lock);
|
||||||
|
__blk_mq_remove_cpuhp(hctx);
|
||||||
|
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||||
|
{
|
||||||
|
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||||
|
|
||||||
|
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||||
|
hlist_unhashed(&hctx->cpuhp_online))
|
||||||
|
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||||
|
&hctx->cpuhp_online);
|
||||||
|
|
||||||
|
if (hlist_unhashed(&hctx->cpuhp_dead))
|
||||||
|
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||||
|
&hctx->cpuhp_dead);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __blk_mq_remove_cpuhp_list(struct list_head *head)
|
||||||
|
{
|
||||||
|
struct blk_mq_hw_ctx *hctx;
|
||||||
|
|
||||||
|
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||||
|
|
||||||
|
list_for_each_entry(hctx, head, hctx_list)
|
||||||
|
__blk_mq_remove_cpuhp(hctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Unregister cpuhp callbacks from exited hw queues
|
||||||
|
*
|
||||||
|
* Safe to call if this `request_queue` is live
|
||||||
|
*/
|
||||||
|
static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
|
||||||
|
{
|
||||||
|
LIST_HEAD(hctx_list);
|
||||||
|
|
||||||
|
spin_lock(&q->unused_hctx_lock);
|
||||||
|
list_splice_init(&q->unused_hctx_list, &hctx_list);
|
||||||
|
spin_unlock(&q->unused_hctx_lock);
|
||||||
|
|
||||||
|
mutex_lock(&blk_mq_cpuhp_lock);
|
||||||
|
__blk_mq_remove_cpuhp_list(&hctx_list);
|
||||||
|
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||||
|
|
||||||
|
spin_lock(&q->unused_hctx_lock);
|
||||||
|
list_splice(&hctx_list, &q->unused_hctx_list);
|
||||||
|
spin_unlock(&q->unused_hctx_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Register cpuhp callbacks from all hw queues
|
||||||
|
*
|
||||||
|
* Safe to call if this `request_queue` is live
|
||||||
|
*/
|
||||||
|
static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
|
||||||
|
{
|
||||||
|
struct blk_mq_hw_ctx *hctx;
|
||||||
|
unsigned long i;
|
||||||
|
|
||||||
|
mutex_lock(&blk_mq_cpuhp_lock);
|
||||||
|
queue_for_each_hw_ctx(q, hctx, i)
|
||||||
|
__blk_mq_add_cpuhp(hctx);
|
||||||
|
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -3796,8 +3873,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
|
|||||||
if (set->ops->exit_hctx)
|
if (set->ops->exit_hctx)
|
||||||
set->ops->exit_hctx(hctx, hctx_idx);
|
set->ops->exit_hctx(hctx, hctx_idx);
|
||||||
|
|
||||||
blk_mq_remove_cpuhp(hctx);
|
|
||||||
|
|
||||||
xa_erase(&q->hctx_table, hctx_idx);
|
xa_erase(&q->hctx_table, hctx_idx);
|
||||||
|
|
||||||
spin_lock(&q->unused_hctx_lock);
|
spin_lock(&q->unused_hctx_lock);
|
||||||
@ -3814,6 +3889,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
|
|||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i) {
|
||||||
if (i == nr_queue)
|
if (i == nr_queue)
|
||||||
break;
|
break;
|
||||||
|
blk_mq_remove_cpuhp(hctx);
|
||||||
blk_mq_exit_hctx(q, set, hctx, i);
|
blk_mq_exit_hctx(q, set, hctx, i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -3824,16 +3900,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||||||
{
|
{
|
||||||
hctx->queue_num = hctx_idx;
|
hctx->queue_num = hctx_idx;
|
||||||
|
|
||||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
|
||||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
|
||||||
&hctx->cpuhp_online);
|
|
||||||
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
|
|
||||||
|
|
||||||
hctx->tags = set->tags[hctx_idx];
|
hctx->tags = set->tags[hctx_idx];
|
||||||
|
|
||||||
if (set->ops->init_hctx &&
|
if (set->ops->init_hctx &&
|
||||||
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
|
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
|
||||||
goto unregister_cpu_notifier;
|
goto fail;
|
||||||
|
|
||||||
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
|
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
|
||||||
hctx->numa_node))
|
hctx->numa_node))
|
||||||
@ -3850,8 +3921,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||||||
exit_hctx:
|
exit_hctx:
|
||||||
if (set->ops->exit_hctx)
|
if (set->ops->exit_hctx)
|
||||||
set->ops->exit_hctx(hctx, hctx_idx);
|
set->ops->exit_hctx(hctx, hctx_idx);
|
||||||
unregister_cpu_notifier:
|
fail:
|
||||||
blk_mq_remove_cpuhp(hctx);
 
 	return -1;
 }
 
@@ -3877,6 +3947,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
+	INIT_HLIST_NODE(&hctx->cpuhp_dead);
+	INIT_HLIST_NODE(&hctx->cpuhp_online);
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
 
@@ -4381,7 +4453,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
+
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4414,7 +4487,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
-	mutex_unlock(&q->sysfs_lock);
+
+	/* unregister cpuhp callbacks for exited hctxs */
+	blk_mq_remove_hw_queues_cpuhp(q);
+
+	/* register cpuhp for new initialized hctxs */
+	blk_mq_add_hw_queues_cpuhp(q);
 }
 
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -4440,10 +4518,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
 	xa_init(&q->hctx_table);
 
+	mutex_lock(&q->sysfs_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
+	mutex_unlock(&q->sysfs_lock);
+
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4462,6 +4544,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	return 0;
 
 err_hctxs:
+	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
@@ -4842,12 +4925,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	mutex_lock(&q->sysfs_lock);
+	lockdep_assert_held(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto unlock;
+		goto out;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4857,9 +4940,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-unlock:
-	mutex_unlock(&q->sysfs_lock);
-
+out:
 	return true;
 }
 
@@ -4888,11 +4969,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
 	list_del(&qe->node);
 	kfree(qe);
 
-	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
-	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4912,8 +4991,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		mutex_lock(&q->sysfs_dir_lock);
+		mutex_lock(&q->sysfs_lock);
 		blk_mq_freeze_queue(q);
+	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -4969,8 +5051,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list)
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_unfreeze_queue(q);
+		mutex_unlock(&q->sysfs_lock);
+		mutex_unlock(&q->sysfs_dir_lock);
+	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
@@ -263,7 +263,7 @@ static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
 
 static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
 {
-	return queue_var_show(blk_queue_passthrough_stat(disk->queue), page);
+	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
 }
 
 static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
@@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	if (entry->load_module)
 		entry->load_module(disk, page, length);
 
-	blk_mq_freeze_queue(q);
 	mutex_lock(&q->sysfs_lock);
+	blk_mq_freeze_queue(q);
 	res = entry->store(disk, page, length);
-	mutex_unlock(&q->sysfs_lock);
 	blk_mq_unfreeze_queue(q);
+	mutex_unlock(&q->sysfs_lock);
 	return res;
 }
 
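Note: the queue_attr_store() reordering above is the rule the whole series enforces: q->sysfs_lock is now always taken before the queue is frozen, and released after it is unfrozen, matching the order used in __blk_mq_update_nr_hw_queues(). A minimal sketch of the resulting pattern follows; the attribute and its store body are illustrative, not part of the patch.

	/* Hypothetical store path following the new lock-then-freeze order. */
	static ssize_t example_attr_store(struct gendisk *disk, const char *page,
					  size_t length)
	{
		struct request_queue *q = disk->queue;
		ssize_t res;

		mutex_lock(&q->sysfs_lock);	/* 1: serialize attribute updates */
		blk_mq_freeze_queue(q);		/* 2: drain in-flight requests */

		res = length;			/* apply the new attribute value here */

		blk_mq_unfreeze_queue(q);	/* 3: let I/O resume */
		mutex_unlock(&q->sysfs_lock);	/* 4: release in reverse order */
		return res;
	}

Because every path now nests the freeze inside sysfs_lock, the store path and the nr_hw_queues update path can no longer deadlock against each other.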
@@ -41,7 +41,6 @@ static const char *const zone_cond_name[] = {
 /*
  * Per-zone write plug.
  * @node: hlist_node structure for managing the plug using a hash table.
- * @link: To list the plug in the zone write plug error list of the disk.
  * @ref: Zone write plug reference counter. A zone write plug reference is
  *	 always at least 1 when the plug is hashed in the disk plug hash table.
  *	 The reference is incremented whenever a new BIO needing plugging is
@@ -63,7 +62,6 @@ static const char *const zone_cond_name[] = {
  */
 struct blk_zone_wplug {
 	struct hlist_node node;
-	struct list_head link;
 	refcount_t ref;
 	spinlock_t lock;
 	unsigned int flags;
@@ -80,8 +78,8 @@ struct blk_zone_wplug {
  *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
  *    that is, that write BIOs are being throttled due to a write BIO already
  *    being executed or the zone write plug bio list is not empty.
- *  - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
- *    recovered with a report zone to update the zone write pointer offset.
+ *  - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
+ *    write pointer offset and need to update it.
 *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
 *    from the disk hash table and that the initial reference to the zone
 *    write plug set when the plug was first added to the hash table has been
@@ -91,11 +89,9 @@ struct blk_zone_wplug {
 *    freed once all remaining references from BIOs or functions are dropped.
 */
 #define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
-#define BLK_ZONE_WPLUG_ERROR		(1U << 1)
+#define BLK_ZONE_WPLUG_NEED_WP_UPDATE	(1U << 1)
 #define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)
 
-#define BLK_ZONE_WPLUG_BUSY	(BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)
-
 /**
  * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
  * @zone_cond: BLK_ZONE_COND_XXX.
@@ -115,6 +111,30 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
 }
 EXPORT_SYMBOL_GPL(blk_zone_cond_str);
 
+struct disk_report_zones_cb_args {
+	struct gendisk *disk;
+	report_zones_cb user_cb;
+	void *user_data;
+};
+
+static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
+					   struct blk_zone *zone);
+
+static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
+				void *data)
+{
+	struct disk_report_zones_cb_args *args = data;
+	struct gendisk *disk = args->disk;
+
+	if (disk->zone_wplugs_hash)
+		disk_zone_wplug_sync_wp_offset(disk, zone);
+
+	if (!args->user_cb)
+		return 0;
+
+	return args->user_cb(zone, idx, args->user_data);
+}
+
 /**
  * blkdev_report_zones - Get zones information
  * @bdev:	Target block device
@@ -139,6 +159,11 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 {
 	struct gendisk *disk = bdev->bd_disk;
 	sector_t capacity = get_capacity(disk);
+	struct disk_report_zones_cb_args args = {
+		.disk = disk,
+		.user_cb = cb,
+		.user_data = data,
+	};
 
 	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
 		return -EOPNOTSUPP;
@@ -146,7 +171,8 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 	if (!nr_zones || sector >= capacity)
 		return 0;
 
-	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
+	return disk->fops->report_zones(disk, sector, nr_zones,
+					disk_report_zones_cb, &args);
 }
 EXPORT_SYMBOL_GPL(blkdev_report_zones);
 
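Note: with the wrapper installed, every report executed through blkdev_report_zones() also resynchronizes the write pointer offset of any zone write plug that needs it. A hedged caller-side sketch using only the exported API from the hunk above; the callback and its purpose are illustrative.

	#include <linux/blkdev.h>

	/* Illustrative callback: count how many reported zones are full. */
	static int count_full_zones_cb(struct blk_zone *zone, unsigned int idx,
				       void *data)
	{
		unsigned int *nr_full = data;

		if (zone->cond == BLK_ZONE_COND_FULL)
			(*nr_full)++;
		return 0;
	}

	static int count_full_zones(struct block_device *bdev, unsigned int *nr_full)
	{
		*nr_full = 0;
		/* The callback runs through disk_report_zones_cb(), which first
		 * syncs the zone write plug write pointer offset. */
		return blkdev_report_zones(bdev, 0, UINT_MAX,
					   count_full_zones_cb, nr_full);
	}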
@@ -427,7 +453,7 @@ static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
 {
 	if (refcount_dec_and_test(&zwplug->ref)) {
 		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
-		WARN_ON_ONCE(!list_empty(&zwplug->link));
+		WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
 		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
 
 		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
@@ -441,8 +467,8 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
 	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
 		return false;
 
-	/* If the zone write plug is still busy, it cannot be removed. */
-	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
+	/* If the zone write plug is still plugged, it cannot be removed. */
+	if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
 		return false;
 
 	/*
@@ -525,12 +551,11 @@ static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
 		return NULL;
 
 	INIT_HLIST_NODE(&zwplug->node);
-	INIT_LIST_HEAD(&zwplug->link);
 	refcount_set(&zwplug->ref, 2);
 	spin_lock_init(&zwplug->lock);
 	zwplug->flags = 0;
 	zwplug->zone_no = zno;
-	zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
+	zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
 	bio_list_init(&zwplug->bio_list);
 	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
 	zwplug->disk = disk;
@@ -574,115 +599,22 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
 }
 
 /*
- * Abort (fail) all plugged BIOs of a zone write plug that are not aligned
- * with the assumed write pointer location of the zone when the BIO will
- * be unplugged.
- */
-static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
-					    struct blk_zone_wplug *zwplug)
-{
-	unsigned int wp_offset = zwplug->wp_offset;
-	struct bio_list bl = BIO_EMPTY_LIST;
-	struct bio *bio;
-
-	while ((bio = bio_list_pop(&zwplug->bio_list))) {
-		if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
-		    (bio_op(bio) != REQ_OP_ZONE_APPEND &&
-		     bio_offset_from_zone_start(bio) != wp_offset)) {
-			blk_zone_wplug_bio_io_error(zwplug, bio);
-			continue;
-		}
-
-		wp_offset += bio_sectors(bio);
-		bio_list_add(&bl, bio);
-	}
-
-	bio_list_merge(&zwplug->bio_list, &bl);
-}
-
-static inline void disk_zone_wplug_set_error(struct gendisk *disk,
-					     struct blk_zone_wplug *zwplug)
-{
-	unsigned long flags;
-
-	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
-		return;
-
-	/*
-	 * At this point, we already have a reference on the zone write plug.
-	 * However, since we are going to add the plug to the disk zone write
-	 * plugs work list, increase its reference count. This reference will
-	 * be dropped in disk_zone_wplugs_work() once the error state is
-	 * handled, or in disk_zone_wplug_clear_error() if the zone is reset or
-	 * finished.
-	 */
-	zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
-	refcount_inc(&zwplug->ref);
-
-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-	list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-}
-
-static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
-					       struct blk_zone_wplug *zwplug)
-{
-	unsigned long flags;
-
-	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
-		return;
-
-	/*
-	 * We are racing with the error handling work which drops the reference
-	 * on the zone write plug after handling the error state. So remove the
-	 * plug from the error list and drop its reference count only if the
-	 * error handling has not yet started, that is, if the zone write plug
-	 * is still listed.
-	 */
-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-	if (!list_empty(&zwplug->link)) {
-		list_del_init(&zwplug->link);
-		zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
-		disk_put_zone_wplug(zwplug);
-	}
-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-}
-
-/*
- * Set a zone write plug write pointer offset to either 0 (zone reset case)
- * or to the zone size (zone finish case). This aborts all plugged BIOs, which
- * is fine to do as doing a zone reset or zone finish while writes are in-flight
- * is a mistake from the user which will most likely cause all plugged BIOs to
- * fail anyway.
+ * Set a zone write plug write pointer offset to the specified value.
+ * This aborts all plugged BIOs, which is fine as this function is called for
+ * a zone reset operation, a zone finish operation or if the zone needs a wp
+ * update from a report zone after a write error.
  */
 static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
 					  struct blk_zone_wplug *zwplug,
 					  unsigned int wp_offset)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&zwplug->lock, flags);
-
-	/*
-	 * Make sure that a BIO completion or another zone reset or finish
-	 * operation has not already removed the plug from the hash table.
-	 */
-	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
-		spin_unlock_irqrestore(&zwplug->lock, flags);
-		return;
-	}
+	lockdep_assert_held(&zwplug->lock);
 
 	/* Update the zone write pointer and abort all plugged BIOs. */
+	zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
 	zwplug->wp_offset = wp_offset;
 	disk_zone_wplug_abort(zwplug);
 
-	/*
-	 * Updating the write pointer offset puts back the zone
-	 * in a good state. So clear the error flag and decrement the
-	 * error count if we were in error state.
-	 */
-	disk_zone_wplug_clear_error(disk, zwplug);
-
 	/*
 	 * The zone write plug now has no BIO plugged: remove it from the
 	 * hash table so that it cannot be seen. The plug will be freed
@@ -690,8 +622,58 @@ static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
 	 */
 	if (disk_should_remove_zone_wplug(disk, zwplug))
 		disk_remove_zone_wplug(disk, zwplug);
+}
+
+static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
+{
+	switch (zone->cond) {
+	case BLK_ZONE_COND_IMP_OPEN:
+	case BLK_ZONE_COND_EXP_OPEN:
+	case BLK_ZONE_COND_CLOSED:
+		return zone->wp - zone->start;
+	case BLK_ZONE_COND_FULL:
+		return zone->len;
+	case BLK_ZONE_COND_EMPTY:
+		return 0;
+	case BLK_ZONE_COND_NOT_WP:
+	case BLK_ZONE_COND_OFFLINE:
+	case BLK_ZONE_COND_READONLY:
+	default:
+		/*
+		 * Conventional, offline and read-only zones do not have a valid
+		 * write pointer.
+		 */
+		return UINT_MAX;
+	}
+}
+
+static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
+					   struct blk_zone *zone)
+{
+	struct blk_zone_wplug *zwplug;
+	unsigned long flags;
+
+	zwplug = disk_get_zone_wplug(disk, zone->start);
+	if (!zwplug)
+		return;
 
+	spin_lock_irqsave(&zwplug->lock, flags);
+	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
+		disk_zone_wplug_set_wp_offset(disk, zwplug,
+					      blk_zone_wp_offset(zone));
 	spin_unlock_irqrestore(&zwplug->lock, flags);
+
+	disk_put_zone_wplug(zwplug);
+}
+
+static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
+{
+	struct disk_report_zones_cb_args args = {
+		.disk = disk,
+	};
+
+	return disk->fops->report_zones(disk, sector, 1,
+					disk_report_zones_cb, &args);
 }
 
 static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
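Note: blk_zone_wp_offset() folds the device-reported zone condition into a single offset relative to the zone start. A worked example with illustrative sector values (512-byte units):

	#include <linux/blkzoned.h>

	/*
	 * An implicitly open zone starting at sector 524288 whose write
	 * pointer sits at sector 526336 has a write pointer offset of
	 * 526336 - 524288 = 2048 sectors (1 MiB already written). A FULL
	 * zone reports zone->len, an EMPTY zone reports 0, and conditions
	 * without a write pointer report UINT_MAX.
	 */
	static const struct blk_zone example_zone = {
		.start	= 524288,
		.len	= 524288,
		.wp	= 526336,
		.cond	= BLK_ZONE_COND_IMP_OPEN,
	};
	/* blk_zone_wp_offset(&example_zone) would return 2048. */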
@@ -700,6 +682,7 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	sector_t sector = bio->bi_iter.bi_sector;
 	struct blk_zone_wplug *zwplug;
+	unsigned long flags;
 
 	/* Conventional zones cannot be reset nor finished. */
 	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
@@ -707,6 +690,15 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
 		return true;
 	}
 
+	/*
+	 * No-wait reset or finish BIOs do not make much sense as the callers
+	 * issue these as blocking operations in most cases. To avoid issues
+	 * with the BIO execution potentially failing with BLK_STS_AGAIN, warn
+	 * about REQ_NOWAIT being set and ignore that flag.
+	 */
+	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
+		bio->bi_opf &= ~REQ_NOWAIT;
+
 	/*
 	 * If we have a zone write plug, set its write pointer offset to 0
 	 * (reset case) or to the zone size (finish case). This will abort all
@@ -716,7 +708,9 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
 	 */
 	zwplug = disk_get_zone_wplug(disk, sector);
 	if (zwplug) {
+		spin_lock_irqsave(&zwplug->lock, flags);
 		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
+		spin_unlock_irqrestore(&zwplug->lock, flags);
 		disk_put_zone_wplug(zwplug);
 	}
 
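Note: since disk_zone_wplug_set_wp_offset() now only asserts that the plug lock is held, every caller carries the locking itself, as the reset and reset-all hunks show. A condensed sketch of the caller pattern they all follow:

	/* Sketch: callers wrap disk_zone_wplug_set_wp_offset() in the plug lock. */
	static void example_set_wp_offset(struct gendisk *disk,
					  struct blk_zone_wplug *zwplug,
					  unsigned int wp_offset)
	{
		unsigned long flags;

		spin_lock_irqsave(&zwplug->lock, flags);
		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
		spin_unlock_irqrestore(&zwplug->lock, flags);
	}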
@@ -727,6 +721,7 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	struct blk_zone_wplug *zwplug;
+	unsigned long flags;
 	sector_t sector;
 
 	/*
@@ -738,7 +733,9 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
 	     sector += disk->queue->limits.chunk_sectors) {
 		zwplug = disk_get_zone_wplug(disk, sector);
 		if (zwplug) {
+			spin_lock_irqsave(&zwplug->lock, flags);
 			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
+			spin_unlock_irqrestore(&zwplug->lock, flags);
 			disk_put_zone_wplug(zwplug);
 		}
 	}
@@ -746,9 +743,25 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
 	return false;
 }
 
-static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
+static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
+					      struct blk_zone_wplug *zwplug)
+{
+	/*
+	 * Take a reference on the zone write plug and schedule the submission
+	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
+	 * reference we take here.
+	 */
+	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
+	refcount_inc(&zwplug->ref);
+	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
+}
+
+static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
+					   struct blk_zone_wplug *zwplug,
 					  struct bio *bio, unsigned int nr_segs)
 {
+	bool schedule_bio_work = false;
+
 	/*
 	 * Grab an extra reference on the BIO request queue usage counter.
 	 * This reference will be reused to submit a request for the BIO for
@@ -764,6 +777,16 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
 	 */
 	bio_clear_polled(bio);
 
+	/*
+	 * REQ_NOWAIT BIOs are always handled using the zone write plug BIO
+	 * work, which can block. So clear the REQ_NOWAIT flag and schedule the
+	 * work if this is the first BIO we are plugging.
+	 */
+	if (bio->bi_opf & REQ_NOWAIT) {
+		schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
+		bio->bi_opf &= ~REQ_NOWAIT;
+	}
+
 	/*
 	 * Reuse the poll cookie field to store the number of segments when
 	 * split to the hardware limits.
@@ -777,6 +800,11 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
 	 * at the tail of the list to preserve the sequential write order.
 	 */
 	bio_list_add(&zwplug->bio_list, bio);
+
+	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
+
+	if (schedule_bio_work)
+		disk_zone_wplug_schedule_bio_work(disk, zwplug);
 }
 
 /*
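Note: the plugging helper above encodes the new REQ_NOWAIT policy: a no-wait write is always queued and then submitted from the plug's BIO work, which is allowed to block. A condensed sketch of the one decision it adds (the flag names are the real ones from this patch; the helper itself is illustrative):

	/*
	 * Sketch of the REQ_NOWAIT decision in disk_zone_wplug_add_bio():
	 * queue the BIO unconditionally, and kick the BIO work only if the
	 * plug was not already plugged (otherwise the work is already due).
	 */
	static bool nowait_needs_bio_work(unsigned int plug_flags, blk_opf_t opf)
	{
		return (opf & REQ_NOWAIT) &&
			!(plug_flags & BLK_ZONE_WPLUG_PLUGGED);
	}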
@@ -889,13 +917,23 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
 {
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 
+	/*
+	 * If we lost track of the zone write pointer due to a write error,
+	 * the user must either execute a report zones, reset the zone or
+	 * finish the zone to recover a reliable write pointer position.
+	 * Fail BIOs if the user did not do that as we cannot handle emulated
+	 * zone append otherwise.
+	 */
+	if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
+		return false;
+
 	/*
 	 * Check that the user is not attempting to write to a full zone.
 	 * We know such BIO will fail, and that would potentially overflow our
 	 * write pointer offset beyond the end of the zone.
 	 */
 	if (disk_zone_wplug_is_full(disk, zwplug))
-		goto err;
+		return false;
 
 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
 		/*
@@ -914,24 +952,18 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
 		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
 	} else {
 		/*
-		 * Check for non-sequential writes early because we avoid a
-		 * whole lot of error handling trouble if we don't send it off
-		 * to the driver.
+		 * Check for non-sequential writes early as we know that BIOs
+		 * with a start sector not aligned to the zone write pointer
+		 * will fail.
 		 */
 		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
-			goto err;
+			return false;
 	}
 
 	/* Advance the zone write pointer offset. */
 	zwplug->wp_offset += bio_sectors(bio);
 
 	return true;
-
-err:
-	/* We detected an invalid write BIO: schedule error recovery. */
-	disk_zone_wplug_set_error(disk, zwplug);
-	kblockd_schedule_work(&disk->zone_wplugs_work);
-	return false;
 }
 
 static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
@@ -970,6 +1002,9 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 
 	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
 	if (!zwplug) {
-		bio_io_error(bio);
+		if (bio->bi_opf & REQ_NOWAIT)
+			bio_wouldblock_error(bio);
+		else
+			bio_io_error(bio);
 		return true;
 	}
@@ -978,18 +1013,20 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
 
 	/*
-	 * If the zone is already plugged or has a pending error, add the BIO
-	 * to the plug BIO list. Otherwise, plug and let the BIO execute.
+	 * If the zone is already plugged, add the BIO to the plug BIO list.
+	 * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
+	 * BLK_STS_AGAIN failure if we let the BIO execute.
+	 * Otherwise, plug and let the BIO execute.
 	 */
-	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
+	if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
+	    (bio->bi_opf & REQ_NOWAIT))
 		goto plug;
 
-	/*
-	 * If an error is detected when preparing the BIO, add it to the BIO
-	 * list so that error recovery can deal with it.
-	 */
-	if (!blk_zone_wplug_prepare_bio(zwplug, bio))
-		goto plug;
+	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
+		spin_unlock_irqrestore(&zwplug->lock, flags);
+		bio_io_error(bio);
+		return true;
+	}
 
 	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
 
@@ -998,8 +1035,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 	return false;
 
 plug:
-	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
-	blk_zone_wplug_add_bio(zwplug, bio, nr_segs);
+	disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
 
 	spin_unlock_irqrestore(&zwplug->lock, flags);
 
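Note: putting these hunks together, the write path now has exactly three outcomes: queue the BIO behind the plug (already plugged, or REQ_NOWAIT), fail it immediately if it cannot be prepared, or let the caller execute it directly. A condensed sketch of that classification (the enum and helper are illustrative, not kernel code):

	/* Sketch of the three outcomes of blk_zone_wplug_handle_write(). */
	enum zone_write_outcome { ZW_EXECUTE, ZW_PLUG, ZW_FAIL };

	static enum zone_write_outcome classify_zone_write(unsigned int plug_flags,
							   blk_opf_t opf,
							   bool prepare_ok)
	{
		if ((plug_flags & BLK_ZONE_WPLUG_PLUGGED) || (opf & REQ_NOWAIT))
			return ZW_PLUG;		/* queued; the BIO work submits it */
		if (!prepare_ok)
			return ZW_FAIL;		/* bio_io_error(), no recovery work */
		return ZW_EXECUTE;		/* caller submits it directly */
	}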
@@ -1083,19 +1119,6 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
 }
 EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
 
-static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
-					      struct blk_zone_wplug *zwplug)
-{
-	/*
-	 * Take a reference on the zone write plug and schedule the submission
-	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
-	 * reference we take here.
-	 */
-	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
-	refcount_inc(&zwplug->ref);
-	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
-}
-
 static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
 				       struct blk_zone_wplug *zwplug)
 {
@@ -1103,16 +1126,6 @@ static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
 
 	spin_lock_irqsave(&zwplug->lock, flags);
 
-	/*
-	 * If we had an error, schedule error recovery. The recovery work
-	 * will restart submission of plugged BIOs.
-	 */
-	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
-		spin_unlock_irqrestore(&zwplug->lock, flags);
-		kblockd_schedule_work(&disk->zone_wplugs_work);
-		return;
-	}
-
 	/* Schedule submission of the next plugged BIO if we have one. */
 	if (!bio_list_empty(&zwplug->bio_list)) {
 		disk_zone_wplug_schedule_bio_work(disk, zwplug);
@@ -1155,12 +1168,13 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
 	}
 
 	/*
-	 * If the BIO failed, mark the plug as having an error to trigger
-	 * recovery.
+	 * If the BIO failed, abort all plugged BIOs and mark the plug as
+	 * needing a write pointer update.
 	 */
 	if (bio->bi_status != BLK_STS_OK) {
 		spin_lock_irqsave(&zwplug->lock, flags);
-		disk_zone_wplug_set_error(disk, zwplug);
+		disk_zone_wplug_abort(zwplug);
+		zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
 		spin_unlock_irqrestore(&zwplug->lock, flags);
 	}
 
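Note: with the error list and the recovery work gone, a failed write simply aborts everything queued and flags the plug; recovery is deferred until the zone is next reported, reset or finished. A hedged sketch of how a resync can be triggered from kernel code (it relies on the new NULL-callback handling in disk_report_zones_cb() shown earlier; a zone reset would work as well):

	/*
	 * Sketch: force a write pointer resync after a write error. The
	 * report runs disk_report_zones_cb(), which calls
	 * disk_zone_wplug_sync_wp_offset() for a plug flagged with
	 * BLK_ZONE_WPLUG_NEED_WP_UPDATE.
	 */
	static int resync_zone_wp(struct block_device *bdev, sector_t zone_start)
	{
		return blkdev_report_zones(bdev, zone_start, 1, NULL, NULL);
	}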
@@ -1216,6 +1230,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
 	 */
 	spin_lock_irqsave(&zwplug->lock, flags);
 
+again:
 	bio = bio_list_pop(&zwplug->bio_list);
 	if (!bio) {
 		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
@@ -1224,10 +1239,8 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
 	}
 
 	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
-		/* Error recovery will decide what to do with the BIO. */
-		bio_list_add_head(&zwplug->bio_list, bio);
-		spin_unlock_irqrestore(&zwplug->lock, flags);
-		goto put_zwplug;
+		blk_zone_wplug_bio_io_error(zwplug, bio);
+		goto again;
 	}
 
 	spin_unlock_irqrestore(&zwplug->lock, flags);
@@ -1249,120 +1262,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
 	disk_put_zone_wplug(zwplug);
 }
 
-static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
-{
-	switch (zone->cond) {
-	case BLK_ZONE_COND_IMP_OPEN:
-	case BLK_ZONE_COND_EXP_OPEN:
-	case BLK_ZONE_COND_CLOSED:
-		return zone->wp - zone->start;
-	case BLK_ZONE_COND_FULL:
-		return zone->len;
-	case BLK_ZONE_COND_EMPTY:
-		return 0;
-	case BLK_ZONE_COND_NOT_WP:
-	case BLK_ZONE_COND_OFFLINE:
-	case BLK_ZONE_COND_READONLY:
-	default:
-		/*
-		 * Conventional, offline and read-only zones do not have a valid
-		 * write pointer.
-		 */
-		return UINT_MAX;
-	}
-}
-
-static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
-					 unsigned int idx, void *data)
-{
-	struct blk_zone *zonep = data;
-
-	*zonep = *zone;
-	return 0;
-}
-
-static void disk_zone_wplug_handle_error(struct gendisk *disk,
-					 struct blk_zone_wplug *zwplug)
-{
-	sector_t zone_start_sector =
-		bdev_zone_sectors(disk->part0) * zwplug->zone_no;
-	unsigned int noio_flag;
-	struct blk_zone zone;
-	unsigned long flags;
-	int ret;
-
-	/* Get the current zone information from the device. */
-	noio_flag = memalloc_noio_save();
-	ret = disk->fops->report_zones(disk, zone_start_sector, 1,
-				       blk_zone_wplug_report_zone_cb, &zone);
-	memalloc_noio_restore(noio_flag);
-
-	spin_lock_irqsave(&zwplug->lock, flags);
-
-	/*
-	 * A zone reset or finish may have cleared the error already. In such
-	 * case, do nothing as the report zones may have seen the "old" write
-	 * pointer value before the reset/finish operation completed.
-	 */
-	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
-		goto unlock;
-
-	zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
-
-	if (ret != 1) {
-		/*
-		 * We failed to get the zone information, meaning that something
-		 * is likely really wrong with the device. Abort all remaining
-		 * plugged BIOs as otherwise we could endup waiting forever on
-		 * plugged BIOs to complete if there is a queue freeze on-going.
-		 */
-		disk_zone_wplug_abort(zwplug);
-		goto unplug;
-	}
-
-	/* Update the zone write pointer offset. */
-	zwplug->wp_offset = blk_zone_wp_offset(&zone);
-	disk_zone_wplug_abort_unaligned(disk, zwplug);
-
-	/* Restart BIO submission if we still have any BIO left. */
-	if (!bio_list_empty(&zwplug->bio_list)) {
-		disk_zone_wplug_schedule_bio_work(disk, zwplug);
-		goto unlock;
-	}
-
-unplug:
-	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
-	if (disk_should_remove_zone_wplug(disk, zwplug))
-		disk_remove_zone_wplug(disk, zwplug);
-
-unlock:
-	spin_unlock_irqrestore(&zwplug->lock, flags);
-}
-
-static void disk_zone_wplugs_work(struct work_struct *work)
-{
-	struct gendisk *disk =
-		container_of(work, struct gendisk, zone_wplugs_work);
-	struct blk_zone_wplug *zwplug;
-	unsigned long flags;
-
-	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-
-	while (!list_empty(&disk->zone_wplugs_err_list)) {
-		zwplug = list_first_entry(&disk->zone_wplugs_err_list,
-					  struct blk_zone_wplug, link);
-		list_del_init(&zwplug->link);
-		spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-
-		disk_zone_wplug_handle_error(disk, zwplug);
-		disk_put_zone_wplug(zwplug);
-
-		spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-}
-
 static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
 {
 	return 1U << disk->zone_wplugs_hash_bits;
@@ -1371,8 +1270,6 @@ static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
 void disk_init_zone_resources(struct gendisk *disk)
 {
 	spin_lock_init(&disk->zone_wplugs_lock);
-	INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
-	INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
 }
 
 /*
@@ -1471,8 +1368,6 @@ void disk_free_zone_resources(struct gendisk *disk)
 	if (!disk->zone_wplugs_pool)
 		return;
 
-	cancel_work_sync(&disk->zone_wplugs_work);
-
 	if (disk->zone_wplugs_wq) {
 		destroy_workqueue(disk->zone_wplugs_wq);
 		disk->zone_wplugs_wq = NULL;
@@ -1669,6 +1564,8 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
 	if (!disk->zone_wplugs_hash)
 		return 0;
 
+	disk_zone_wplug_sync_wp_offset(disk, zone);
+
 	wp_offset = blk_zone_wp_offset(zone);
 	if (!wp_offset || wp_offset >= zone->capacity)
 		return 0;
@@ -1799,6 +1696,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 		memalloc_noio_restore(noio_flag);
 		return ret;
 	}
 
 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
 				       blk_revalidate_zone_cb, &args);
 	if (!ret) {
@@ -1835,6 +1733,48 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 }
 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
 
+/**
+ * blk_zone_issue_zeroout - zero-fill a block range in a zone
+ * @bdev:	blockdev to write
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to write
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *  Zero-fill a block range in a zone (@sector must be equal to the zone write
+ *  pointer), handling potential errors due to the (initially unknown) lack of
+ *  hardware offload (See blkdev_issue_zeroout()).
+ */
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask)
+{
+	int ret;
+
+	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
+		return -EIO;
+
+	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
+				   BLKDEV_ZERO_NOFALLBACK);
+	if (ret != -EOPNOTSUPP)
+		return ret;
+
+	/*
+	 * The failed call to blkdev_issue_zeroout() advanced the zone write
+	 * pointer. Undo this using a report zone to update the zone write
+	 * pointer to the correct current value.
+	 */
+	ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
+	if (ret != 1)
+		return ret < 0 ? ret : -EIO;
+
+	/*
+	 * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
+	 * regular write with zero-pages.
+	 */
+	return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
+}
+EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
+
 #ifdef CONFIG_BLK_DEBUG_FS
 
 int queue_zone_wplugs_show(void *data, struct seq_file *m)
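Note: blk_zone_issue_zeroout() first tries the WRITE ZEROES offload and only falls back to writing zero pages after resyncing the write pointer that the failed attempt advanced. A hedged usage sketch; the size is illustrative and @sector must be the zone's current write pointer:

	#include <linux/blkdev.h>

	/* Zero 256 KiB at a zone's current write pointer (512-byte sectors). */
	static int zero_256k_at_wp(struct block_device *bdev, sector_t wp)
	{
		return blk_zone_issue_zeroout(bdev, wp, 512, GFP_KERNEL);
	}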
@@ -698,8 +698,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		list_add(&rq->queuelist, &per_prio->dispatch);
 		rq->fifo_time = jiffies;
 	} else {
-		struct list_head *insert_before;
-
 		deadline_add_rq_rb(per_prio, rq);
 
 		if (rq_mergeable(rq)) {
@@ -712,8 +710,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		 * set expire time and add to fifo list
 		 */
 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-		insert_before = &per_prio->fifo_list[data_dir];
-		list_add_tail(&rq->queuelist, insert_before);
+		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
 	}
 }
 
@@ -163,10 +163,6 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
 	struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
 	const struct hash_prefix *hash_prefix = ictx->hash_prefix;
 	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
-	unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
-	struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
-	struct scatterlist in_sg[3], out_sg;
-	struct crypto_wait cwait;
 	unsigned int pad_len;
 	unsigned int ps_end;
 	unsigned int len;
@@ -187,37 +183,25 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
 
 	pad_len = ctx->key_size - slen - hash_prefix->size - 1;
 
-	child_req = kmalloc(sizeof(*child_req) + child_reqsize + pad_len,
-			    GFP_KERNEL);
-	if (!child_req)
-		return -ENOMEM;
-
 	/* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */
-	in_buf = (u8 *)(child_req + 1) + child_reqsize;
+	in_buf = dst;
+	memmove(in_buf + pad_len + hash_prefix->size, src, slen);
+	memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size);
+
 	ps_end = pad_len - 1;
 	in_buf[0] = 0x01;
 	memset(in_buf + 1, 0xff, ps_end - 1);
 	in_buf[ps_end] = 0x00;
 
-	/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
-	crypto_init_wait(&cwait);
-	sg_init_table(in_sg, 3);
-	sg_set_buf(&in_sg[0], in_buf, pad_len);
-	sg_set_buf(&in_sg[1], hash_prefix->data, hash_prefix->size);
-	sg_set_buf(&in_sg[2], src, slen);
-	sg_init_one(&out_sg, dst, dlen);
-	akcipher_request_set_tfm(child_req, ctx->child);
-	akcipher_request_set_crypt(child_req, in_sg, &out_sg,
-				   ctx->key_size - 1, dlen);
-	akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
-				      crypto_req_done, &cwait);
-
-	err = crypto_akcipher_decrypt(child_req);
-	err = crypto_wait_req(err, &cwait);
-	if (err)
+	/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
+	err = crypto_akcipher_sync_decrypt(ctx->child, in_buf,
+					   ctx->key_size - 1, in_buf,
+					   ctx->key_size);
+	if (err < 0)
 		return err;
 
-	len = child_req->dst_len;
+	len = err;
 	pad_len = ctx->key_size - len;
 
 	/* Four billion to one */
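Note: the rewritten sign path assembles the EMSA-PKCS1-v1_5 encoded message directly in the destination buffer and runs the RSA operation in place; the digest copy uses memmove() rather than memcpy(), presumably because src may point into dst when a caller signs in place (an assumption, not stated by the patch). A sketch of the layout and the size rule it relies on (the helper below is illustrative):

	/*
	 * EMSA-PKCS1-v1_5 layout built in dst (the leading 0x00 octet of the
	 * encoded message is implicit, hence the key_size - 1 input length):
	 *
	 *   in_buf[0]                        = 0x01
	 *   in_buf[1 .. ps_end - 1]          = 0xff padding (PS)
	 *   in_buf[ps_end]                   = 0x00 separator
	 *   in_buf[pad_len ..]               = hash_prefix (DigestInfo)
	 *   in_buf[pad_len + prefix_size ..] = digest (src, slen bytes)
	 *
	 * RFC 8017 requires at least 8 octets of PS, giving this size check:
	 */
	static bool emsa_pkcs1_v1_5_fits(unsigned int key_size, unsigned int slen,
					 unsigned int prefix_size)
	{
		return key_size >= slen + prefix_size + 11;
	}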
@@ -239,8 +223,8 @@ static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
 	struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
 	unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
 	struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
-	struct scatterlist in_sg, out_sg;
 	struct crypto_wait cwait;
+	struct scatterlist sg;
 	unsigned int dst_len;
 	unsigned int pos;
 	u8 *out_buf;
@@ -259,13 +243,12 @@ static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
 		return -ENOMEM;
 
 	out_buf = (u8 *)(child_req + 1) + child_reqsize;
+	memcpy(out_buf, src, slen);
 
 	crypto_init_wait(&cwait);
-	sg_init_one(&in_sg, src, slen);
-	sg_init_one(&out_sg, out_buf, ctx->key_size);
+	sg_init_one(&sg, out_buf, slen);
 	akcipher_request_set_tfm(child_req, ctx->child);
-	akcipher_request_set_crypt(child_req, &in_sg, &out_sg,
-				   slen, ctx->key_size);
+	akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen);
 	akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      crypto_req_done, &cwait);
 
@@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
 
 			/* Now we can delete the handler object */
 
-			acpi_os_release_mutex(handler_obj->address_space.
-					      context_mutex);
 			acpi_ut_remove_reference(handler_obj);
 			goto unlock_and_exit;
 		}
@@ -1716,6 +1716,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
 	/* HiSilicon Hip09 Platform */
 	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+	{"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
 	/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
 	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
 	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
 
-	if (cmd == ND_CMD_CALL)
+	if (cmd == ND_CMD_CALL) {
+		if (!buf || buf_len < sizeof(*call_pkg))
+			return -EINVAL;
+
 		call_pkg = buf;
+	}
 
 	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
 	if (func < 0)
 		return func;
@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
 	switch (addr->resource_type) {
 	case ACPI_MEMORY_RANGE:
 		acpi_dev_memresource_flags(res, len, wp);
+
+		if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+			res->flags |= IORESOURCE_PREFETCH;
 		break;
 	case ACPI_IO_RANGE:
 		acpi_dev_ioresource_flags(res, len, iodec,
@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
 	if (addr->producer_consumer == ACPI_PRODUCER)
 		res->flags |= IORESOURCE_WINDOW;
 
-	if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
-		res->flags |= IORESOURCE_PREFETCH;
-
 	return !(res->flags & IORESOURCE_DISABLED);
 }
 
@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
 			phy_nodes[phy] = phy_data.np;
 			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
 			if (cphy_base[phy] == NULL) {
+				of_node_put(phy_data.np);
 				return 0;
 			}
 			phy_count += 1;
|
@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
|
|||||||
{
|
{
|
||||||
int nid;
|
int nid;
|
||||||
|
|
||||||
|
/* Check the validity of the memblock/node mapping */
|
||||||
|
if (!memblock_validate_numa_coverage(0))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
/* Finally register nodes. */
|
/* Finally register nodes. */
|
||||||
for_each_node_mask(nid, numa_nodes_parsed) {
|
for_each_node_mask(nid, numa_nodes_parsed) {
|
||||||
unsigned long start_pfn, end_pfn;
|
unsigned long start_pfn, end_pfn;
|
||||||
|
@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
 {
 	struct cacheinfo *llc;
 
-	if (!cache_leaves(cpu))
+	if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
 		return false;
 
 	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -458,11 +458,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
 	return -ENOENT;
 }
 
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
 {
-	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
-					 sizeof(struct cacheinfo), GFP_ATOMIC);
+	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
 	if (!per_cpu_cacheinfo(cpu)) {
 		cache_leaves(cpu) = 0;
 		return -ENOMEM;
@@ -534,7 +532,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
 	 */
 	ci_cacheinfo(cpu)->early_ci_levels = false;
 
-	if (cache_leaves(cpu) <= early_leaves)
+	/*
+	 * Some architectures (e.g., x86) do not use early initialization.
+	 * Allocate memory now in such case.
+	 */
+	if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
 		return 0;
 
 	kfree(per_cpu_cacheinfo(cpu));
@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(regmap_attach_dev);
|
EXPORT_SYMBOL_GPL(regmap_attach_dev);
|
||||||
|
|
||||||
|
static int dev_get_regmap_match(struct device *dev, void *res, void *data);
|
||||||
|
|
||||||
|
static int regmap_detach_dev(struct device *dev, struct regmap *map)
|
||||||
|
{
|
||||||
|
if (!dev)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return devres_release(dev, dev_get_regmap_release,
|
||||||
|
dev_get_regmap_match, (void *)map->name);
|
||||||
|
}
|
||||||
|
|
||||||
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
|
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
|
||||||
const struct regmap_config *config)
|
const struct regmap_config *config)
|
||||||
{
|
{
|
||||||
@@ -1052,13 +1063,13 @@ struct regmap *__regmap_init(struct device *dev,
 
 		/* Sanity check */
 		if (range_cfg->range_max < range_cfg->range_min) {
-			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
 				range_cfg->range_max, range_cfg->range_min);
 			goto err_range;
 		}
 
 		if (range_cfg->range_max > map->max_register) {
-			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
 				range_cfg->range_max, map->max_register);
 			goto err_range;
 		}
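The range bounds are unsigned, so %u is the correct specifier; %d would render any value above INT_MAX as negative in the error message. A small userspace sketch of the difference:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0x80000000u;	/* above INT_MAX */

		printf("%d\n", (int)reg);	/* misleading: -2147483648 */
		printf("%u\n", reg);		/* correct: 2147483648 */
		return 0;
	}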
@@ -1445,6 +1456,7 @@ void regmap_exit(struct regmap *map)
 {
 	struct regmap_async *async;
 
+	regmap_detach_dev(map->dev, map);
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
@@ -28,6 +28,7 @@
     type: NullBlkModule,
     name: "rnull_mod",
    author: "Andreas Hindborg",
+    description: "Rust implementation of the C null block driver",
     license: "GPL v2",
 }
@@ -1586,9 +1586,12 @@ static void virtblk_remove(struct virtio_device *vdev)
 static int virtblk_freeze(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
+	struct request_queue *q = vblk->disk->queue;
 
 	/* Ensure no requests in virtqueues before deleting vqs. */
-	blk_mq_freeze_queue(vblk->disk->queue);
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue_nowait(q);
+	blk_mq_unfreeze_queue(q);
 
 	/* Ensure we don't receive any more interrupts */
 	virtio_reset_device(vdev);
@@ -1612,8 +1615,8 @@ static int virtblk_restore(struct virtio_device *vdev)
 		return ret;
 
 	virtio_device_ready(vdev);
+	blk_mq_unquiesce_queue(vblk->disk->queue);
 
-	blk_mq_unfreeze_queue(vblk->disk->queue);
 	return 0;
 }
 #endif
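Taken together, the two virtio_blk hunks above swap "stay frozen across suspend" for "quiesce across suspend": freeze drains in-flight requests, quiesce then blocks new dispatches, and the queue is unfrozen again before the device sleeps, so restore only needs to unquiesce. A condensed sketch of the pairing, where my_suspend/my_resume are hypothetical wrappers and the blk-mq calls are the real API:

	#include <linux/blk-mq.h>

	static int my_suspend(struct request_queue *q)
	{
		blk_mq_freeze_queue(q);		/* drain in-flight requests */
		blk_mq_quiesce_queue_nowait(q);	/* stop new dispatches... */
		blk_mq_unfreeze_queue(q);	/* ...without staying frozen */
		return 0;
	}

	static void my_resume(struct request_queue *q)
	{
		blk_mq_unquiesce_queue(q);	/* dispatch may proceed again */
	}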
@@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct btmtk_data *data = hci_get_priv(hdev);
 	int err;
+	bool complete = false;
 
 	if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
 		kfree_skb(skb);
@@ -416,16 +417,19 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
 		fallthrough;
 	case HCI_DEVCOREDUMP_ACTIVE:
 	default:
+		/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
+		if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
+		    skb->len > MTK_COREDUMP_END_LEN)
+			if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+				    MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
+				complete = true;
+
 		err = hci_devcd_append(hdev, skb);
 		if (err < 0)
 			break;
 		data->cd_info.cnt++;
 
-		/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
-		if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
-		    skb->len > MTK_COREDUMP_END_LEN)
-			if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
-				    MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
-				bt_dev_info(hdev, "Mediatek coredump end");
-				hci_devcd_complete(hdev);
-			}
+		if (complete) {
+			bt_dev_info(hdev, "Mediatek coredump end");
+			hci_devcd_complete(hdev);
+		}
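The rework tests for the Mediatek end marker before hci_devcd_append() consumes the skb, since the buffer can no longer be inspected afterwards; the result is latched in 'complete'. A self-contained sketch of the tail-marker comparison, with hypothetical stand-ins for the MTK_COREDUMP_* constants:

	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	#define END_MARKER	"coredump end"
	#define END_LEN		sizeof(END_MARKER)	/* includes the NUL */

	/* true if buf ends with the marker (NUL excluded, as in the driver) */
	static bool ends_with_marker(const unsigned char *buf, size_t len)
	{
		if (len <= END_LEN)
			return false;
		return memcmp(&buf[len - END_LEN], END_MARKER, END_LEN - 1) == 0;
	}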
@@ -5,7 +5,7 @@
 # Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 #
 
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
 
 obj-$(CONFIG_CDX_BUS) += cdx.o controller/
 
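The extra quoting turns the namespace into a C string literal, matching the kernel's switch to string-literal symbol namespaces; EXPORT_SYMBOL*() picks the default up from ccflags, and consumers import it by the same string. A sketch under that assumption, with a hypothetical my_cdx_helper():

	#include <linux/export.h>
	#include <linux/module.h>

	int my_cdx_helper(void)
	{
		return 0;
	}
	/* lands in "CDX_BUS" via -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"' */
	EXPORT_SYMBOL_GPL(my_cdx_helper);

A module elsewhere would then need the matching import before it can link against the symbol:

	MODULE_IMPORT_NS("CDX_BUS");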
Some files were not shown because too many files have changed in this diff.