ASoC: Fixes for v6.13

Merge tag 'asoc-fix-v6.12-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

A small pile of driver specific fixes, all quite small and not particularly major.
This commit is contained in commit 7b26bc6582.
@ -76,7 +76,7 @@ Description:
		timeout when the pretimeout interrupt is delivered. Pretimeout
		is an optional feature.

What:		/sys/class/watchdog/watchdogn/pretimeout_avaialable_governors
What:		/sys/class/watchdog/watchdogn/pretimeout_available_governors
Date:		February 2017
Contact:	Wim Van Sebroeck <wim@iguana.be>
Description:
@ -255,8 +255,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
|                | ,11} SMMU PMCG  |                 |                             |
| Hisilicon      | Hip{08,09,09A,10| #162001900      | N/A                         |
|                | ,10C,11}        |                 |                             |
|                | SMMU PMCG       |                 |                             |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
+----------------+-----------------+-----------------+-----------------------------+
@ -46,7 +46,7 @@ Please note that due to macro expansion that argument needs to be a
preprocessor symbol. E.g. to export the symbol ``usb_stor_suspend`` into the
namespace ``USB_STORAGE``, use::

	EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
	EXPORT_SYMBOL_NS(usb_stor_suspend, "USB_STORAGE");

The corresponding ksymtab entry struct ``kernel_symbol`` will have the member
``namespace`` set accordingly. A symbol that is exported without a namespace will
@ -68,7 +68,7 @@ is to define the default namespace in the ``Makefile`` of the subsystem. E.g. to
export all symbols defined in usb-common into the namespace USB_COMMON, add a
line like this to drivers/usb/common/Makefile::

	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
	ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'

That will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. A
symbol exported with EXPORT_SYMBOL_NS() while this definition is present, will
@ -79,7 +79,7 @@ A second option to define the default namespace is directly in the compilation
unit as preprocessor statement. The above example would then read::

	#undef DEFAULT_SYMBOL_NAMESPACE
	#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
	#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"

within the corresponding compilation unit before any EXPORT_SYMBOL macro is
used.
@ -94,7 +94,7 @@ for the namespaces it uses symbols from. E.g. a module using the
usb_stor_suspend symbol from above, needs to import the namespace USB_STORAGE
using a statement like::

	MODULE_IMPORT_NS(USB_STORAGE);
	MODULE_IMPORT_NS("USB_STORAGE");

This will create a ``modinfo`` tag in the module for each imported namespace.
This has the side effect, that the imported namespaces of a module can be
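Taken together, the hunks above (and the many MODULE_IMPORT_NS/EXPORT_SYMBOL_NS hunks later in this commit) only change how the namespace argument is spelled: a quoted string literal instead of a bare preprocessor symbol. As a rough, self-contained sketch of the new form (the FOO_NS namespace and foo_do_work() symbol are made-up names, not taken from this patch), an exporting module would now read::

	/* exporting module, illustrative names only */
	#include <linux/export.h>
	#include <linux/module.h>

	int foo_do_work(void)
	{
		return 0;
	}
	/* the namespace is now a C string literal */
	EXPORT_SYMBOL_NS_GPL(foo_do_work, "FOO_NS");

	MODULE_DESCRIPTION("Quoted-namespace export sketch");
	MODULE_LICENSE("GPL");

and a consuming module would declare MODULE_IMPORT_NS("FOO_NS"); before calling foo_do_work(), matching the MODULE_IMPORT_NS hunk above.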
@ -55,6 +55,10 @@ patternProperties:
patternProperties:
"^power-domain@[0-9a-f]+$":
$ref: "#/$defs/power-domain-node"
patternProperties:
"^power-domain@[0-9a-f]+$":
$ref: "#/$defs/power-domain-node"
unevaluatedProperties: false
unevaluatedProperties: false
unevaluatedProperties: false
unevaluatedProperties: false
@ -0,0 +1,47 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/watchdog/airoha,en7581-wdt.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Airoha EN7581 Watchdog Timer

maintainers:
  - Christian Marangi <ansuelsmth@gmail.com>

allOf:
  - $ref: watchdog.yaml#

properties:
  compatible:
    const: airoha,en7581-wdt

  reg:
    maxItems: 1

  clocks:
    description: BUS clock (timer ticks at half the BUS clock)
    maxItems: 1

  clock-names:
    const: bus

required:
  - compatible
  - reg
  - clocks
  - clock-names

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/clock/en7523-clk.h>

    watchdog@1fbf0100 {
        compatible = "airoha,en7581-wdt";
        reg = <0x1fbf0100 0x3c>;

        clocks = <&scuclk EN7523_CLK_BUS>;
        clock-names = "bus";
    };
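For context only, a hedged sketch of how a driver probe might consume the clock this binding describes; the function and variable names below are hypothetical and this is not the actual Airoha watchdog driver, it merely assumes the "bus" clock-names entry and the half-rate tick note above::

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_en7581_wdt_probe(struct platform_device *pdev)
	{
		struct clk *bus_clk;
		unsigned long tick_hz;

		/* "bus" matches the clock-names constant required above */
		bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
		if (IS_ERR(bus_clk))
			return PTR_ERR(bus_clk);

		/* the watchdog counter ticks at half the BUS clock rate */
		tick_hz = clk_get_rate(bus_clk) / 2;
		dev_info(&pdev->dev, "watchdog tick rate: %lu Hz\n", tick_hz);

		return 0;
	}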
@ -48,6 +48,8 @@ properties:
  clocks:
    maxItems: 1

  big-endian: true

  fsl,ext-reset-output:
    $ref: /schemas/types.yaml#/definitions/flag
    description: |
@ -93,6 +95,18 @@ allOf:
        properties:
          fsl,suspend-in-wait: false

  - if:
      not:
        properties:
          compatible:
            contains:
              enum:
                - fsl,ls1012a-wdt
                - fsl,ls1043a-wdt
    then:
      properties:
        big-endian: false

unevaluatedProperties: false

examples:
@ -26,6 +26,8 @@ properties:
          - qcom,apss-wdt-msm8994
          - qcom,apss-wdt-qcm2290
          - qcom,apss-wdt-qcs404
          - qcom,apss-wdt-qcs615
          - qcom,apss-wdt-qcs8300
          - qcom,apss-wdt-sa8255p
          - qcom,apss-wdt-sa8775p
          - qcom,apss-wdt-sc7180
@ -26,6 +26,7 @@ properties:
      - samsung,exynos7-wdt                # for Exynos7
      - samsung,exynos850-wdt              # for Exynos850
      - samsung,exynosautov9-wdt           # for Exynosautov9
      - samsung,exynosautov920-wdt         # for Exynosautov920
  - items:
      - enum:
          - tesla,fsd-wdt
@ -77,6 +78,7 @@ allOf:
              - samsung,exynos7-wdt
              - samsung,exynos850-wdt
              - samsung,exynosautov9-wdt
              - samsung,exynosautov920-wdt
    then:
      required:
        - samsung,syscon-phandle
@ -88,6 +90,7 @@ allOf:
              - google,gs101-wdt
              - samsung,exynos850-wdt
              - samsung,exynosautov9-wdt
              - samsung,exynosautov920-wdt
    then:
      properties:
        clocks:
@ -6,16 +6,17 @@ Bare UDP Tunnelling Module Documentation

There are various L3 encapsulation standards using UDP being discussed to
leverage the UDP based load balancing capability of different networks.
MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them.

The Bareudp tunnel module provides a generic L3 encapsulation support for
tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.

Special Handling
----------------

The bareudp device supports special handling for MPLS & IP as they can have
multiple ethertypes.
MPLS procotcol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
The MPLS protocol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
IP protocol can have ethertypes ETH_P_IP (v4) & ETH_P_IPV6 (v6).
This special handling can be enabled only for ethertypes ETH_P_IP & ETH_P_MPLS_UC
with a flag called multiproto mode.
@ -52,7 +53,7 @@ be enabled explicitly with the "multiproto" flag.
3) Device Usage

The bareudp device could be used along with OVS or flower filter in TC.
The OVS or TC flower layer must set the tunnel information in SKB dst field before
sending packet buffer to the bareudp device for transmission. On reception the
bareudp device extracts and stores the tunnel information in SKB dst field before
The OVS or TC flower layer must set the tunnel information in the SKB dst field before
sending the packet buffer to the bareudp device for transmission. On reception, the
bareUDP device extracts and stores the tunnel information in the SKB dst field before
passing the packet buffer to the network stack.
@ -43,7 +43,7 @@ Tenete presente che per via dell'espansione delle macro questo argomento deve
|
||||
essere un simbolo di preprocessore. Per esempio per esportare il
|
||||
simbolo ``usb_stor_suspend`` nello spazio dei nomi ``USB_STORAGE`` usate::
|
||||
|
||||
EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
|
||||
EXPORT_SYMBOL_NS(usb_stor_suspend, "USB_STORAGE");
|
||||
|
||||
Di conseguenza, nella tabella dei simboli del kernel ci sarà una voce
|
||||
rappresentata dalla struttura ``kernel_symbol`` che avrà il campo
|
||||
@ -69,7 +69,7 @@ Per esempio per esportare tutti i simboli definiti in usb-common nello spazio
|
||||
dei nomi USB_COMMON, si può aggiungere la seguente linea in
|
||||
drivers/usb/common/Makefile::
|
||||
|
||||
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
|
||||
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'
|
||||
|
||||
Questo cambierà tutte le macro EXPORT_SYMBOL() ed EXPORT_SYMBOL_GPL(). Invece,
|
||||
un simbolo esportato con EXPORT_SYMBOL_NS() non verrà cambiato e il simbolo
|
||||
@ -79,7 +79,7 @@ Una seconda possibilità è quella di definire il simbolo di preprocessore
|
||||
direttamente nei file da compilare. L'esempio precedente diventerebbe::
|
||||
|
||||
#undef DEFAULT_SYMBOL_NAMESPACE
|
||||
#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
|
||||
#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"
|
||||
|
||||
Questo va messo prima di un qualsiasi uso di EXPORT_SYMBOL.
|
||||
|
||||
@ -94,7 +94,7 @@ dei nomi che contiene i simboli desiderati. Per esempio un modulo che
|
||||
usa il simbolo usb_stor_suspend deve importare lo spazio dei nomi
|
||||
USB_STORAGE usando la seguente dichiarazione::
|
||||
|
||||
MODULE_IMPORT_NS(USB_STORAGE);
|
||||
MODULE_IMPORT_NS("USB_STORAGE");
|
||||
|
||||
Questo creerà un'etichetta ``modinfo`` per ogni spazio dei nomi
|
||||
importato. Un risvolto di questo fatto è che gli spazi dei
|
||||
|
@ -48,7 +48,7 @@
|
||||
要是一个预处理器符号。例如,要把符号 ``usb_stor_suspend`` 导出到命名空间 ``USB_STORAGE``,
|
||||
请使用::
|
||||
|
||||
EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
|
||||
EXPORT_SYMBOL_NS(usb_stor_suspend, "USB_STORAGE");
|
||||
|
||||
相应的 ksymtab 条目结构体 ``kernel_symbol`` 将有相应的成员 ``命名空间`` 集。
|
||||
导出时未指明命名空间的符号将指向 ``NULL`` 。如果没有定义命名空间,则默认没有。
|
||||
@ -66,7 +66,7 @@
|
||||
子系统的 ``Makefile`` 中定义默认命名空间。例如,如果要将usb-common中定义的所有符号导
|
||||
出到USB_COMMON命名空间,可以在drivers/usb/common/Makefile中添加这样一行::
|
||||
|
||||
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
|
||||
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"USB_COMMON"'
|
||||
|
||||
这将影响所有 EXPORT_SYMBOL() 和 EXPORT_SYMBOL_GPL() 语句。当这个定义存在时,
|
||||
用EXPORT_SYMBOL_NS()导出的符号仍然会被导出到作为命名空间参数传递的命名空间中,
|
||||
@ -76,7 +76,7 @@
|
||||
成::
|
||||
|
||||
#undef DEFAULT_SYMBOL_NAMESPACE
|
||||
#define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
|
||||
#define DEFAULT_SYMBOL_NAMESPACE "USB_COMMON"
|
||||
|
||||
应置于相关编译单元中任何 EXPORT_SYMBOL 宏之前
|
||||
|
||||
@ -88,7 +88,7 @@
|
||||
表示它所使用的命名空间的符号。例如,一个使用usb_stor_suspend符号的
|
||||
模块,需要使用如下语句导入命名空间USB_STORAGE::
|
||||
|
||||
MODULE_IMPORT_NS(USB_STORAGE);
|
||||
MODULE_IMPORT_NS("USB_STORAGE");
|
||||
|
||||
这将在模块中为每个导入的命名空间创建一个 ``modinfo`` 标签。这也顺带
|
||||
使得可以用modinfo检查模块已导入的命名空间::
|
||||
|
@ -120,16 +120,6 @@ coh901327_wdt:

-------------------------------------------------

cpu5wdt:
    port:
        base address of watchdog card, default is 0x91
    verbose:
        be verbose, default is 0 (no)
    ticks:
        count down ticks, default is 10000

-------------------------------------------------

cpwd:
    wd0_timeout:
        Default watchdog0 timeout in 1/10secs
@ -3376,6 +3376,8 @@ S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
F:	Documentation/arch/arm64/
F:	arch/arm64/
F:	drivers/virt/coco/arm-cca-guest/
F:	drivers/virt/coco/pkvm-guest/
F:	tools/testing/selftests/arm64/
X:	arch/arm64/boot/dts/

@ -16267,6 +16269,7 @@ F:	Documentation/devicetree/bindings/net/
F:	Documentation/networking/net_cachelines/net_device.rst
F:	drivers/connector/
F:	drivers/net/
F:	drivers/ptp/
F:	include/dt-bindings/net/
F:	include/linux/cn_proc.h
F:	include/linux/etherdevice.h
@ -22407,7 +22410,7 @@ F:	drivers/char/hw_random/jh7110-trng.c

STARFIVE WATCHDOG DRIVER
M:	Xingyu Wu <xingyu.wu@starfivetech.com>
M:	Samin Guo <samin.guo@starfivetech.com>
M:	Ziv Xu <ziv.xu@starfivetech.com>
S:	Supported
F:	Documentation/devicetree/bindings/watchdog/starfive*
F:	drivers/watchdog/starfive-wdt.c
Makefile

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@ -516,7 +516,7 @@ static void locomo_remove(struct platform_device *dev)
|
||||
*/
|
||||
static struct platform_driver locomo_device_driver = {
|
||||
.probe = locomo_probe,
|
||||
.remove_new = locomo_remove,
|
||||
.remove = locomo_remove,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = locomo_suspend,
|
||||
.resume = locomo_resume,
|
||||
|
@ -1154,7 +1154,7 @@ static struct dev_pm_ops sa1111_pm_ops = {
|
||||
*/
|
||||
static struct platform_driver sa1111_device_driver = {
|
||||
.probe = sa1111_probe,
|
||||
.remove_new = sa1111_remove,
|
||||
.remove = sa1111_remove,
|
||||
.driver = {
|
||||
.name = "sa1111",
|
||||
.pm = &sa1111_pm_ops,
|
||||
|
@ -250,7 +250,7 @@ static void scoop_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver scoop_driver = {
|
||||
.probe = scoop_probe,
|
||||
.remove_new = scoop_remove,
|
||||
.remove = scoop_remove,
|
||||
.suspend = scoop_suspend,
|
||||
.resume = scoop_resume,
|
||||
.driver = {
|
||||
|
@ -596,7 +596,7 @@ static struct platform_driver imx_mmdc_driver = {
|
||||
.of_match_table = imx_mmdc_dt_ids,
|
||||
},
|
||||
.probe = imx_mmdc_probe,
|
||||
.remove_new = imx_mmdc_remove,
|
||||
.remove = imx_mmdc_remove,
|
||||
};
|
||||
|
||||
static int __init imx_mmdc_init(void)
|
||||
|
@ -832,7 +832,7 @@ static void omap_system_dma_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver omap_system_dma_driver = {
|
||||
.probe = omap_system_dma_probe,
|
||||
.remove_new = omap_system_dma_remove,
|
||||
.remove = omap_system_dma_remove,
|
||||
.driver = {
|
||||
.name = "omap_dma_system"
|
||||
},
|
||||
|
@ -919,7 +919,7 @@ static void sharpsl_pm_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver sharpsl_pm_driver = {
|
||||
.probe = sharpsl_pm_probe,
|
||||
.remove_new = sharpsl_pm_remove,
|
||||
.remove = sharpsl_pm_remove,
|
||||
.suspend = sharpsl_pm_suspend,
|
||||
.resume = sharpsl_pm_resume,
|
||||
.driver = {
|
||||
|
@ -188,7 +188,7 @@ static void jornada_ssp_remove(struct platform_device *dev)
|
||||
|
||||
struct platform_driver jornadassp_driver = {
|
||||
.probe = jornada_ssp_probe,
|
||||
.remove_new = jornada_ssp_remove,
|
||||
.remove = jornada_ssp_remove,
|
||||
.driver = {
|
||||
.name = "jornada_ssp",
|
||||
},
|
||||
|
@ -423,7 +423,7 @@ static const struct dev_pm_ops neponset_pm_ops = {
|
||||
|
||||
static struct platform_driver neponset_device_driver = {
|
||||
.probe = neponset_probe,
|
||||
.remove_new = neponset_remove,
|
||||
.remove = neponset_remove,
|
||||
.driver = {
|
||||
.name = "neponset",
|
||||
.pm = PM_OPS,
|
||||
|
@ -18,7 +18,7 @@
|
||||
|
||||
#include "aes-ce-setkey.h"
|
||||
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
||||
static int num_rounds(struct crypto_aes_ctx *ctx)
|
||||
{
|
||||
|
@ -1048,7 +1048,7 @@ static int __init aes_init(void)
|
||||
|
||||
#ifdef USE_V8_CRYPTO_EXTENSIONS
|
||||
module_cpu_feature_match(AES, aes_init);
|
||||
EXPORT_SYMBOL_NS(ce_aes_mac_update, CRYPTO_INTERNAL);
|
||||
EXPORT_SYMBOL_NS(ce_aes_mac_update, "CRYPTO_INTERNAL");
|
||||
#else
|
||||
module_init(aes_init);
|
||||
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
|
||||
|
@ -44,6 +44,8 @@ cpucap_is_possible(const unsigned int cap)
|
||||
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
|
||||
case ARM64_HAS_S1POE:
|
||||
return IS_ENABLED(CONFIG_ARM64_POE);
|
||||
case ARM64_HAS_GCS:
|
||||
return IS_ENABLED(CONFIG_ARM64_GCS);
|
||||
case ARM64_UNMAP_KERNEL_AT_EL0:
|
||||
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
|
||||
case ARM64_WORKAROUND_843419:
|
||||
|
@ -847,8 +847,7 @@ static inline bool system_supports_poe(void)
|
||||
|
||||
static inline bool system_supports_gcs(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_GCS) &&
|
||||
alternative_has_cap_unlikely(ARM64_HAS_GCS);
|
||||
return alternative_has_cap_unlikely(ARM64_HAS_GCS);
|
||||
}
|
||||
|
||||
static inline bool system_supports_haft(void)
|
||||
|
@ -7,6 +7,7 @@
|
||||
#ifndef BUILD_VDSO
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/shmem_fs.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
@ -44,7 +45,7 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
|
||||
if (system_supports_mte()) {
|
||||
if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
|
||||
return VM_MTE_ALLOWED;
|
||||
if (shmem_file(file))
|
||||
if (shmem_file(file) || is_file_hugepages(file))
|
||||
return VM_MTE_ALLOWED;
|
||||
}
|
||||
|
||||
|
@ -30,20 +30,17 @@ static bool is_image_text(unsigned long addr)
|
||||
|
||||
static void __kprobes *patch_map(void *addr, int fixmap)
|
||||
{
|
||||
unsigned long uintaddr = (uintptr_t) addr;
|
||||
bool image = is_image_text(uintaddr);
|
||||
struct page *page;
|
||||
phys_addr_t phys;
|
||||
|
||||
if (image)
|
||||
page = phys_to_page(__pa_symbol(addr));
|
||||
else if (IS_ENABLED(CONFIG_EXECMEM))
|
||||
page = vmalloc_to_page(addr);
|
||||
else
|
||||
return addr;
|
||||
if (is_image_text((unsigned long)addr)) {
|
||||
phys = __pa_symbol(addr);
|
||||
} else {
|
||||
struct page *page = vmalloc_to_page(addr);
|
||||
BUG_ON(!page);
|
||||
phys = page_to_phys(page) + offset_in_page(addr);
|
||||
}
|
||||
|
||||
BUG_ON(!page);
|
||||
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
|
||||
(uintaddr & ~PAGE_MASK));
|
||||
return (void *)set_fixmap_offset(fixmap, phys);
|
||||
}
|
||||
|
||||
static void __kprobes patch_unmap(int fixmap)
|
||||
|
@ -720,6 +720,8 @@ static int fpmr_set(struct task_struct *target, const struct user_regset *regset
|
||||
if (!system_supports_fpmr())
|
||||
return -EINVAL;
|
||||
|
||||
fpmr = target->thread.uw.fpmr;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1427,7 +1429,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target,
|
||||
{
|
||||
long ctrl = get_tagged_addr_ctrl(target);
|
||||
|
||||
if (IS_ERR_VALUE(ctrl))
|
||||
if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
|
||||
return ctrl;
|
||||
|
||||
return membuf_write(&to, &ctrl, sizeof(ctrl));
|
||||
@ -1441,6 +1443,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
|
||||
int ret;
|
||||
long ctrl;
|
||||
|
||||
ctrl = get_tagged_addr_ctrl(target);
|
||||
if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
|
||||
return ctrl;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1472,6 +1478,8 @@ static int poe_set(struct task_struct *target, const struct
|
||||
if (!system_supports_poe())
|
||||
return -EINVAL;
|
||||
|
||||
ctrl = target->thread.por_el0;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1483,6 +1491,22 @@ static int poe_set(struct task_struct *target, const struct
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64_GCS
|
||||
static void task_gcs_to_user(struct user_gcs *user_gcs,
|
||||
const struct task_struct *target)
|
||||
{
|
||||
user_gcs->features_enabled = target->thread.gcs_el0_mode;
|
||||
user_gcs->features_locked = target->thread.gcs_el0_locked;
|
||||
user_gcs->gcspr_el0 = target->thread.gcspr_el0;
|
||||
}
|
||||
|
||||
static void task_gcs_from_user(struct task_struct *target,
|
||||
const struct user_gcs *user_gcs)
|
||||
{
|
||||
target->thread.gcs_el0_mode = user_gcs->features_enabled;
|
||||
target->thread.gcs_el0_locked = user_gcs->features_locked;
|
||||
target->thread.gcspr_el0 = user_gcs->gcspr_el0;
|
||||
}
|
||||
|
||||
static int gcs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
struct membuf to)
|
||||
@ -1495,9 +1519,7 @@ static int gcs_get(struct task_struct *target,
|
||||
if (target == current)
|
||||
gcs_preserve_current_state();
|
||||
|
||||
user_gcs.features_enabled = target->thread.gcs_el0_mode;
|
||||
user_gcs.features_locked = target->thread.gcs_el0_locked;
|
||||
user_gcs.gcspr_el0 = target->thread.gcspr_el0;
|
||||
task_gcs_to_user(&user_gcs, target);
|
||||
|
||||
return membuf_write(&to, &user_gcs, sizeof(user_gcs));
|
||||
}
|
||||
@ -1513,6 +1535,8 @@ static int gcs_set(struct task_struct *target, const struct
|
||||
if (!system_supports_gcs())
|
||||
return -EINVAL;
|
||||
|
||||
task_gcs_to_user(&user_gcs, target);
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1520,9 +1544,7 @@ static int gcs_set(struct task_struct *target, const struct
|
||||
if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
|
||||
return -EINVAL;
|
||||
|
||||
target->thread.gcs_el0_mode = user_gcs.features_enabled;
|
||||
target->thread.gcs_el0_locked = user_gcs.features_locked;
|
||||
target->thread.gcspr_el0 = user_gcs.gcspr_el0;
|
||||
task_gcs_from_user(target, &user_gcs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
|
||||
static unsigned long *pinned_asid_map;
|
||||
|
||||
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
|
||||
#define ASID_FIRST_VERSION (1UL << asid_bits)
|
||||
#define ASID_FIRST_VERSION (1UL << 16)
|
||||
|
||||
#define NUM_USER_ASIDS ASID_FIRST_VERSION
|
||||
#define NUM_USER_ASIDS (1UL << asid_bits)
|
||||
#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
|
||||
#define asid2ctxid(asid, genid) ((asid) | (genid))
|
||||
|
||||
|
@ -30,11 +30,13 @@ void copy_highpage(struct page *to, struct page *from)
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
if (folio_test_hugetlb(src) &&
|
||||
folio_test_hugetlb_mte_tagged(src)) {
|
||||
if (!folio_try_hugetlb_mte_tagging(dst))
|
||||
if (folio_test_hugetlb(src)) {
|
||||
if (!folio_test_hugetlb_mte_tagged(src) ||
|
||||
from != folio_page(src, 0))
|
||||
return;
|
||||
|
||||
WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
|
||||
|
||||
/*
|
||||
* Populate tags for all subpages.
|
||||
*
|
||||
|
@ -117,15 +117,6 @@ static void __init arch_reserve_crashkernel(void)
|
||||
|
||||
static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
|
||||
{
|
||||
/**
|
||||
* Information we get from firmware (e.g. DT dma-ranges) describe DMA
|
||||
* bus constraints. Devices using DMA might have their own limitations.
|
||||
* Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
|
||||
* DMA zone on platforms that have RAM there.
|
||||
*/
|
||||
if (memblock_start_of_DRAM() < U32_MAX)
|
||||
zone_limit = min(zone_limit, U32_MAX);
|
||||
|
||||
return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
|
||||
}
|
||||
|
||||
@ -141,6 +132,14 @@ static void __init zone_sizes_init(void)
|
||||
acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
|
||||
dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
|
||||
zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
|
||||
/*
|
||||
* Information we get from firmware (e.g. DT dma-ranges) describe DMA
|
||||
* bus constraints. Devices using DMA might have their own limitations.
|
||||
* Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
|
||||
* DMA zone on platforms that have RAM there.
|
||||
*/
|
||||
if (memblock_start_of_DRAM() < U32_MAX)
|
||||
zone_dma_limit = min(zone_dma_limit, U32_MAX);
|
||||
arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
|
||||
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
|
||||
#endif
|
||||
|
@ -24,6 +24,16 @@ static inline int prepare_hugepage_range(struct file *file,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTE_CLEAR
|
||||
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
pte_t clear;
|
||||
|
||||
pte_val(clear) = (unsigned long)invalid_pte_table;
|
||||
set_pte_at(mm, addr, ptep, clear);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
|
@ -683,7 +683,17 @@ DEF_EMIT_REG2I16_FORMAT(blt, blt_op)
|
||||
DEF_EMIT_REG2I16_FORMAT(bge, bge_op)
|
||||
DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op)
|
||||
DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op)
|
||||
DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op)
|
||||
|
||||
static inline void emit_jirl(union loongarch_instruction *insn,
|
||||
enum loongarch_gpr rd,
|
||||
enum loongarch_gpr rj,
|
||||
int offset)
|
||||
{
|
||||
insn->reg2i16_format.opcode = jirl_op;
|
||||
insn->reg2i16_format.immediate = offset;
|
||||
insn->reg2i16_format.rd = rd;
|
||||
insn->reg2i16_format.rj = rj;
|
||||
}
|
||||
|
||||
#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \
|
||||
static inline void emit_##NAME(union loongarch_instruction *insn, \
|
||||
|
@ -95,7 +95,7 @@ static void __init init_screen_info(void)
|
||||
memset(si, 0, sizeof(*si));
|
||||
early_memunmap(si, sizeof(*si));
|
||||
|
||||
memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
|
||||
memblock_reserve(__screen_info_lfb_base(&screen_info), screen_info.lfb_size);
|
||||
}
|
||||
|
||||
void __init efi_init(void)
|
||||
|
@ -332,7 +332,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
|
||||
return INSN_BREAK;
|
||||
}
|
||||
|
||||
emit_jirl(&insn, rj, rd, imm >> 2);
|
||||
emit_jirl(&insn, rd, rj, imm >> 2);
|
||||
|
||||
return insn.word;
|
||||
}
|
||||
|
@ -82,7 +82,7 @@ void show_ipi_list(struct seq_file *p, int prec)
|
||||
for (i = 0; i < NR_IPI; i++) {
|
||||
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
|
||||
for_each_online_cpu(cpu)
|
||||
seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
|
||||
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
|
||||
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
|
||||
}
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||
|
||||
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
int idx, ret;
|
||||
unsigned long *val;
|
||||
u32 addr, rd, rj, opcode;
|
||||
|
||||
@ -167,7 +167,6 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
rj = inst.reg2_format.rj;
|
||||
opcode = inst.reg2_format.opcode;
|
||||
addr = vcpu->arch.gprs[rj];
|
||||
ret = EMULATE_DO_IOCSR;
|
||||
run->iocsr_io.phys_addr = addr;
|
||||
run->iocsr_io.is_write = 0;
|
||||
val = &vcpu->arch.gprs[rd];
|
||||
@ -207,20 +206,28 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
if (run->iocsr_io.is_write) {
|
||||
if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (ret == 0)
|
||||
ret = EMULATE_DONE;
|
||||
else
|
||||
else {
|
||||
ret = EMULATE_DO_IOCSR;
|
||||
/* Save data and let user space to write it */
|
||||
memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
|
||||
|
||||
}
|
||||
trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
|
||||
} else {
|
||||
if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (ret == 0)
|
||||
ret = EMULATE_DONE;
|
||||
else
|
||||
else {
|
||||
ret = EMULATE_DO_IOCSR;
|
||||
/* Save register id for iocsr read completion */
|
||||
vcpu->arch.io_gpr = rd;
|
||||
|
||||
}
|
||||
trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
|
||||
}
|
||||
|
||||
@ -359,7 +366,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
|
||||
|
||||
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||
{
|
||||
int ret;
|
||||
int idx, ret;
|
||||
unsigned int op8, opcode, rd;
|
||||
struct kvm_run *run = vcpu->run;
|
||||
|
||||
@ -464,8 +471,10 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||
* it need not return to user space to handle the mmio
|
||||
* exception.
|
||||
*/
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
|
||||
run->mmio.len, &vcpu->arch.gprs[rd]);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (!ret) {
|
||||
update_pc(&vcpu->arch);
|
||||
vcpu->mmio_needed = 0;
|
||||
@ -531,7 +540,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
|
||||
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||
{
|
||||
int ret;
|
||||
int idx, ret;
|
||||
unsigned int rd, op8, opcode;
|
||||
unsigned long curr_pc, rd_val = 0;
|
||||
struct kvm_run *run = vcpu->run;
|
||||
@ -631,7 +640,9 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
|
||||
* it need not return to user space to handle the mmio
|
||||
* exception.
|
||||
*/
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (!ret)
|
||||
return EMULATE_DONE;
|
||||
|
||||
|
@ -98,7 +98,7 @@ static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int
|
||||
|
||||
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
||||
{
|
||||
int i, ret;
|
||||
int i, idx, ret;
|
||||
uint32_t val = 0, mask = 0;
|
||||
|
||||
/*
|
||||
@ -107,7 +107,9 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
||||
*/
|
||||
if ((data >> 27) & 0xf) {
|
||||
/* Read the old val */
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret)) {
|
||||
kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
|
||||
return ret;
|
||||
@ -121,7 +123,9 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
||||
val &= mask;
|
||||
}
|
||||
val |= ((uint32_t)(data >> 32) & ~mask);
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret))
|
||||
kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
|
||||
|
||||
|
@ -240,7 +240,7 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
int idx, ret;
|
||||
|
||||
/*
|
||||
* Check conditions before entering the guest
|
||||
@ -249,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_check_requests(vcpu);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -181,13 +181,13 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
|
||||
/* Set return value */
|
||||
emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
|
||||
/* Return to the caller */
|
||||
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
|
||||
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
|
||||
} else {
|
||||
/*
|
||||
* Call the next bpf prog and skip the first instruction
|
||||
* of TCC initialization.
|
||||
*/
|
||||
emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
|
||||
emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -904,7 +904,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
|
||||
return ret;
|
||||
|
||||
move_addr(ctx, t1, func_addr);
|
||||
emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
|
||||
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
|
||||
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
|
||||
break;
|
||||
|
||||
|
@ -749,7 +749,7 @@ static void bridge_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver bridge_driver = {
|
||||
.probe = bridge_probe,
|
||||
.remove_new = bridge_remove,
|
||||
.remove = bridge_remove,
|
||||
.driver = {
|
||||
.name = "xtalk-bridge",
|
||||
}
|
||||
|
@ -74,4 +74,4 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
|
||||
"support on Power 8");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.0.0");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
@ -1168,4 +1168,4 @@ MODULE_ALIAS_CRYPTO("aes-all");
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
@ -110,7 +110,7 @@ static void switch_drv_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver switch_driver = {
|
||||
.probe = switch_drv_probe,
|
||||
.remove_new = switch_drv_remove,
|
||||
.remove = switch_drv_remove,
|
||||
.driver = {
|
||||
.name = DRV_NAME,
|
||||
},
|
||||
|
@ -243,7 +243,7 @@ static struct platform_driver ecpp_driver = {
|
||||
.of_match_table = ecpp_match,
|
||||
},
|
||||
.probe = ecpp_probe,
|
||||
.remove_new = ecpp_remove,
|
||||
.remove = ecpp_remove,
|
||||
};
|
||||
|
||||
static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
|
||||
|
@ -814,7 +814,7 @@ static struct platform_driver us3mc_driver = {
|
||||
.of_match_table = us3mc_match,
|
||||
},
|
||||
.probe = us3mc_probe,
|
||||
.remove_new = us3mc_remove,
|
||||
.remove = us3mc_remove,
|
||||
};
|
||||
|
||||
static inline bool us3mc_platform(void)
|
||||
|
@ -176,7 +176,7 @@ static void uml_rtc_remove(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver uml_rtc_driver = {
|
||||
.probe = uml_rtc_probe,
|
||||
.remove_new = uml_rtc_remove,
|
||||
.remove = uml_rtc_remove,
|
||||
.driver = {
|
||||
.name = "uml-rtc",
|
||||
},
|
||||
|
@ -1465,7 +1465,7 @@ static int virtio_uml_resume(struct platform_device *pdev)
|
||||
|
||||
static struct platform_driver virtio_uml_driver = {
|
||||
.probe = virtio_uml_probe,
|
||||
.remove_new = virtio_uml_remove,
|
||||
.remove = virtio_uml_remove,
|
||||
.driver = {
|
||||
.name = "virtio-uml",
|
||||
.of_match_table = virtio_uml_match,
|
||||
|
@ -36,10 +36,12 @@
|
||||
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
|
||||
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
|
||||
#else
|
||||
/* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
|
||||
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
|
||||
#endif
|
||||
|
||||
/* If _PAGE_BIT_PRESENT is clear, we use these: */
|
||||
@ -139,6 +141,8 @@
|
||||
|
||||
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
|
||||
|
||||
#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)
|
||||
|
||||
/*
|
||||
* Set of bits not changed in pte_modify. The pte's
|
||||
* protection key is treated like _PAGE_RW, for
|
||||
|
@ -1065,7 +1065,7 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
*/
|
||||
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
||||
cpu_has(c, X86_FEATURE_AUTOIBRS))
|
||||
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
|
||||
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);
|
||||
|
||||
/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
|
||||
clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
|
||||
|
@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
|
||||
struct amd_northbridge *nb;
|
||||
};
|
||||
|
||||
static unsigned short num_cache_leaves;
|
||||
|
||||
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
||||
information to the user. This makes some assumptions about the machine:
|
||||
L2 not shared, no SMT etc. that is currently true on AMD CPUs.
|
||||
@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
|
||||
|
||||
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
||||
{
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
} else if (c->extended_cpuid_level >= 0x80000006) {
|
||||
if (cpuid_edx(0x80000006) & 0xf000)
|
||||
num_cache_leaves = 4;
|
||||
ci->num_leaves = 4;
|
||||
else
|
||||
num_cache_leaves = 3;
|
||||
ci->num_leaves = 3;
|
||||
}
|
||||
}
|
||||
|
||||
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
|
||||
{
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
}
|
||||
|
||||
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||
@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
||||
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
||||
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
if (c->cpuid_level > 3) {
|
||||
static int is_initialized;
|
||||
|
||||
if (is_initialized == 0) {
|
||||
/* Init num_cache_leaves from boot CPU */
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
is_initialized++;
|
||||
}
|
||||
/*
|
||||
* There should be at least one leaf. A non-zero value means
|
||||
* that the number of leaves has been initialized.
|
||||
*/
|
||||
if (!ci->num_leaves)
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
|
||||
/*
|
||||
* Whenever possible use cpuid(4), deterministic cache
|
||||
* parameters cpuid leaf to find the cache details
|
||||
*/
|
||||
for (i = 0; i < num_cache_leaves; i++) {
|
||||
for (i = 0; i < ci->num_leaves; i++) {
|
||||
struct _cpuid4_info_regs this_leaf = {};
|
||||
int retval;
|
||||
|
||||
@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
|
||||
* trace cache
|
||||
*/
|
||||
if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
|
||||
if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
|
||||
/* supports eax=2 call */
|
||||
int j, n;
|
||||
unsigned int regs[4];
|
||||
unsigned char *dp = (unsigned char *)regs;
|
||||
int only_trace = 0;
|
||||
|
||||
if (num_cache_leaves != 0 && c->x86 == 15)
|
||||
if (ci->num_leaves && c->x86 == 15)
|
||||
only_trace = 1;
|
||||
|
||||
/* Number of times to iterate */
|
||||
@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
|
||||
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
|
||||
|
||||
if (!num_cache_leaves)
|
||||
/* There should be at least one leaf. */
|
||||
if (!ci->num_leaves)
|
||||
return -ENOENT;
|
||||
if (!this_cpu_ci)
|
||||
return -EINVAL;
|
||||
this_cpu_ci->num_levels = 3;
|
||||
this_cpu_ci->num_leaves = num_cache_leaves;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -555,7 +555,9 @@ static void init_intel(struct cpuinfo_x86 *c)
|
||||
c->x86_vfm == INTEL_WESTMERE_EX))
|
||||
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
|
||||
if (boot_cpu_has(X86_FEATURE_MWAIT) &&
|
||||
(c->x86_vfm == INTEL_ATOM_GOLDMONT ||
|
||||
c->x86_vfm == INTEL_LUNARLAKE_M))
|
||||
set_cpu_bug(c, X86_BUG_MONITOR);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
@ -428,8 +428,8 @@ void __init topology_apply_cmdline_limits_early(void)
|
||||
{
|
||||
unsigned int possible = nr_cpu_ids;
|
||||
|
||||
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
|
||||
if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
|
||||
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' */
|
||||
if (!setup_max_cpus || apic_is_disabled)
|
||||
possible = 1;
|
||||
|
||||
/* 'possible_cpus=N' */
|
||||
@ -443,7 +443,7 @@ void __init topology_apply_cmdline_limits_early(void)
|
||||
|
||||
static __init bool restrict_to_up(void)
|
||||
{
|
||||
if (!smp_found_config || ioapic_is_disabled)
|
||||
if (!smp_found_config)
|
||||
return true;
|
||||
/*
|
||||
* XEN PV is special as it does not advertise the local APIC
|
||||
|
@ -63,16 +63,6 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the value of PKRU register that was already pushed onto the signal frame.
|
||||
*/
|
||||
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
||||
return 0;
|
||||
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
||||
}
|
||||
|
||||
/*
|
||||
* Signal frame handlers.
|
||||
*/
|
||||
@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
|
||||
|
||||
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (use_xsave()) {
|
||||
err = xsave_to_user_sigframe(buf);
|
||||
if (!err)
|
||||
err = update_pkru_in_sigframe(buf, pkru);
|
||||
return err;
|
||||
}
|
||||
if (use_xsave())
|
||||
return xsave_to_user_sigframe(buf, pkru);
|
||||
|
||||
if (use_fxsr())
|
||||
return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
|
||||
|
@ -69,6 +69,28 @@ static inline u64 xfeatures_mask_independent(void)
|
||||
return fpu_kernel_cfg.independent_features;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the value of PKRU register that was already pushed onto the signal frame.
|
||||
*/
|
||||
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
|
||||
{
|
||||
u64 xstate_bv;
|
||||
int err;
|
||||
|
||||
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
||||
return 0;
|
||||
|
||||
/* Mark PKRU as in-use so that it is restored correctly. */
|
||||
xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
|
||||
|
||||
err = __put_user(xstate_bv, &buf->header.xfeatures);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Update PKRU value in the userspace xsave buffer. */
|
||||
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
||||
}
|
||||
|
||||
/* XSAVE/XRSTOR wrapper functions */
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
@ -256,7 +278,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
|
||||
* The caller has to zero buf::header before calling this because XSAVE*
|
||||
* does not touch the reserved fields in the header.
|
||||
*/
|
||||
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
||||
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
/*
|
||||
* Include the features which are not xsaved/rstored by the kernel
|
||||
@ -281,6 +303,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
||||
XSTATE_OP(XSAVE, buf, lmask, hmask, err);
|
||||
clac();
|
||||
|
||||
if (!err)
|
||||
err = update_pkru_in_sigframe(buf, mask, pkru);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -242,6 +242,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
|
||||
movq CR0(%r8), %r8
|
||||
movq %rax, %cr3
|
||||
movq %r8, %cr0
|
||||
|
||||
#ifdef CONFIG_KEXEC_JUMP
|
||||
/* Saved in save_processor_state. */
|
||||
movq $saved_context, %rax
|
||||
lgdt saved_context_gdt_desc(%rax)
|
||||
#endif
|
||||
|
||||
movq %rbp, %rax
|
||||
|
||||
popf
|
||||
|
@ -174,7 +174,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
|
||||
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -218,14 +218,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
|
||||
if (result)
|
||||
return result;
|
||||
if (pgtable_l5_enabled()) {
|
||||
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
|
||||
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
} else {
|
||||
/*
|
||||
* With p4d folded, pgd is equal to p4d.
|
||||
* The pgd entry has to point to the pud page table in this case.
|
||||
*/
|
||||
pud_t *pud = pud_offset(p4d, 0);
|
||||
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
|
||||
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -354,7 +354,7 @@ bool cpu_cache_has_invalidate_memregion(void)
|
||||
{
|
||||
return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, DEVMEM);
|
||||
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
|
||||
|
||||
int cpu_cache_invalidate_memregion(int res_desc)
|
||||
{
|
||||
@ -363,7 +363,7 @@ int cpu_cache_invalidate_memregion(int res_desc)
|
||||
wbinvd_on_all_cpus();
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, DEVMEM);
|
||||
EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
|
||||
#endif
|
||||
|
||||
static void __cpa_flush_all(void *arg)
|
||||
|
@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
|
||||
* Top-level entries added to init_mm's usermode pgd after boot
|
||||
* will not be automatically propagated to other mms.
|
||||
*/
|
||||
if (!pgdp_maps_userspace(pgdp))
|
||||
if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
|
||||
return pgd;
|
||||
|
||||
/*
|
||||
|
108
block/blk-mq.c
108
block/blk-mq.c
@ -43,6 +43,7 @@
|
||||
|
||||
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
|
||||
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
|
||||
static DEFINE_MUTEX(blk_mq_cpuhp_lock);
|
||||
|
||||
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
|
||||
static void blk_mq_request_bypass_insert(struct request *rq,
|
||||
@ -3739,13 +3740,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||
!hlist_unhashed(&hctx->cpuhp_online)) {
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_online);
|
||||
}
|
||||
|
||||
if (!hlist_unhashed(&hctx->cpuhp_dead)) {
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_dead);
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
__blk_mq_remove_cpuhp(hctx);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
}
|
||||
|
||||
static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||
hlist_unhashed(&hctx->cpuhp_online))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
|
||||
if (hlist_unhashed(&hctx->cpuhp_dead))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
}
|
||||
|
||||
static void __blk_mq_remove_cpuhp_list(struct list_head *head)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
list_for_each_entry(hctx, head, hctx_list)
|
||||
__blk_mq_remove_cpuhp(hctx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Unregister cpuhp callbacks from exited hw queues
|
||||
*
|
||||
* Safe to call if this `request_queue` is live
|
||||
*/
|
||||
static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
|
||||
{
|
||||
LIST_HEAD(hctx_list);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
list_splice_init(&q->unused_hctx_list, &hctx_list);
|
||||
spin_unlock(&q->unused_hctx_lock);
|
||||
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
__blk_mq_remove_cpuhp_list(&hctx_list);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
list_splice(&hctx_list, &q->unused_hctx_list);
|
||||
spin_unlock(&q->unused_hctx_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Register cpuhp callbacks from all hw queues
|
||||
*
|
||||
* Safe to call if this `request_queue` is live
|
||||
*/
|
||||
static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
unsigned long i;
|
||||
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
__blk_mq_add_cpuhp(hctx);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3796,8 +3875,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
|
||||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
|
||||
xa_erase(&q->hctx_table, hctx_idx);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
@ -3814,6 +3891,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (i == nr_queue)
|
||||
break;
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
blk_mq_exit_hctx(q, set, hctx, i);
|
||||
}
|
||||
}
|
||||
@ -3824,16 +3902,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
{
|
||||
hctx->queue_num = hctx_idx;
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
|
||||
|
||||
hctx->tags = set->tags[hctx_idx];
|
||||
|
||||
if (set->ops->init_hctx &&
|
||||
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
|
||||
goto unregister_cpu_notifier;
|
||||
goto fail;
|
||||
|
||||
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
|
||||
hctx->numa_node))
|
||||
@ -3850,8 +3923,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
exit_hctx:
|
||||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
unregister_cpu_notifier:
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
fail:
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -3877,6 +3949,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
|
||||
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
|
||||
spin_lock_init(&hctx->lock);
|
||||
INIT_LIST_HEAD(&hctx->dispatch);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_online);
|
||||
hctx->queue = q;
|
||||
hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
|
||||
|
||||
@ -4415,6 +4489,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
||||
xa_for_each_start(&q->hctx_table, j, hctx, j)
|
||||
blk_mq_exit_hctx(q, set, hctx, j);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
/* unregister cpuhp callbacks for exited hctxs */
|
||||
blk_mq_remove_hw_queues_cpuhp(q);
|
||||
|
||||
/* register cpuhp for new initialized hctxs */
|
||||
blk_mq_add_hw_queues_cpuhp(q);
|
||||
}
|
||||
|
||||
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
|
@ -646,4 +646,4 @@ MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("adiantum");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
@ -471,4 +471,4 @@ subsys_initcall(prng_mod_init);
|
||||
module_exit(prng_mod_fini);
|
||||
MODULE_ALIAS_CRYPTO("stdrng");
|
||||
MODULE_ALIAS_CRYPTO("ansi_cprng");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
@ -949,4 +949,4 @@ MODULE_ALIAS_CRYPTO("ccm_base");
|
||||
MODULE_ALIAS_CRYPTO("rfc4309");
|
||||
MODULE_ALIAS_CRYPTO("ccm");
|
||||
MODULE_ALIAS_CRYPTO("cbcmac");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
||||
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
|
||||
|
@ -53,7 +53,7 @@ int crypto_cipher_setkey(struct crypto_cipher *tfm,
|
||||
|
||||
return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL");
|
||||
|
||||
static inline void cipher_crypt_one(struct crypto_cipher *tfm,
|
||||
u8 *dst, const u8 *src, bool enc)
|
||||
@ -81,14 +81,14 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
|
||||
{
|
||||
cipher_crypt_one(tfm, dst, src, true);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL);
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL");
|
||||
|
||||
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
|
||||
u8 *dst, const u8 *src)
|
||||
{
|
||||
cipher_crypt_one(tfm, dst, src, false);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);
|
||||
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL");
|
||||
|
||||
struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
|
||||
{
|
||||
|
@ -313,4 +313,4 @@ module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("cmac");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -357,4 +357,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -2151,4 +2151,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
                   CRYPTO_DRBG_HMAC_STRING
                   CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -225,4 +225,4 @@ module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ecb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -649,4 +649,4 @@ module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("essiv");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -576,4 +576,4 @@ module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("hctr2");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -317,4 +317,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -192,4 +192,4 @@ module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -1085,4 +1085,4 @@ EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -39,7 +39,7 @@

#include "internal.h"

MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

static bool notests;
module_param(notests, bool, 0644);

@ -693,4 +693,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -261,4 +261,4 @@ module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("xcbc");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -188,4 +188,4 @@ module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("xctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

@ -472,5 +472,5 @@ module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_SOFTDEP("pre: ecb");
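The crypto hunks above all make the same conversion: the symbol-namespace argument of MODULE_IMPORT_NS() and EXPORT_SYMBOL_NS_GPL() changes from a bare preprocessor token (CRYPTO_INTERNAL) to a string literal ("CRYPTO_INTERNAL"). As a rough sketch of how the exporter/importer pair looks after the conversion — the module, symbol, and namespace names below are invented for illustration and are not taken from this diff:

    /* provider.c: export a symbol into a named namespace (string form) */
    #include <linux/module.h>

    int example_do_work(void)
    {
        return 42;
    }
    EXPORT_SYMBOL_NS_GPL(example_do_work, "EXAMPLE_INTERNAL");

    MODULE_DESCRIPTION("Hypothetical namespace provider");
    MODULE_LICENSE("GPL");

    /* consumer.c: a module must import the namespace before it may link
     * against example_do_work; otherwise modpost reports a violation. */
    #include <linux/errno.h>
    #include <linux/module.h>

    MODULE_IMPORT_NS("EXAMPLE_INTERNAL");

    int example_do_work(void);    /* normally declared in a shared header */

    static int __init example_consumer_init(void)
    {
        return example_do_work() == 42 ? 0 : -EINVAL;
    }
    module_init(example_consumer_init);

    static void __exit example_consumer_exit(void)
    {
    }
    module_exit(example_consumer_exit);

    MODULE_DESCRIPTION("Hypothetical namespace consumer");
    MODULE_LICENSE("GPL");

In each hunk the removed line shows the older token form and the kept line the quoted form; the namespace itself is unchanged.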
@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>

MODULE_IMPORT_NS(DMA_BUF);
MODULE_IMPORT_NS("DMA_BUF");

#define HL_MMU_DEBUG 0

@ -32,7 +32,7 @@
#include "qaic_timesync.h"
#include "sahara.h"

MODULE_IMPORT_NS(DMA_BUF);
MODULE_IMPORT_NS("DMA_BUF");

#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
@ -45,7 +45,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)

    return 0;
}
EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, CXL);
EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, "CXL");

static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
{

@ -83,7 +83,7 @@ int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
    return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0),
                                     0, 0);
}
EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, CXL);
EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, "CXL");

int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
{

@ -104,10 +104,10 @@ int einj_cxl_inject_error(struct pci_dev *dport, u64 type)

    return einj_error_inject(type, 0x4, 0, 0, 0, param4);
}
EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, CXL);
EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, "CXL");

bool einj_cxl_is_initialized(void)
{
    return einj_initialized;
}
EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, CXL);
EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, "CXL");

@ -726,7 +726,7 @@ int cxl_cper_register_work(struct work_struct *work)
    cxl_cper_work = work;
    return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");

int cxl_cper_unregister_work(struct work_struct *work)
{

@ -737,13 +737,13 @@ int cxl_cper_unregister_work(struct work_struct *work)
    cxl_cper_work = NULL;
    return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");

int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
    return kfifo_get(&cxl_cper_fifo, wd);
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");

static bool ghes_do_proc(struct ghes *ghes,
                         const struct acpi_hest_generic_status *estatus)
@ -1716,6 +1716,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
    /* HiSilicon Hip09 Platform */
    {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
     "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
    {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
     "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
    /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
    {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
     "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
@ -151,7 +151,7 @@ int acpi_get_genport_coordinates(u32 uid,

    return 0;
}
EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{

@ -1082,7 +1082,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);

MODULE_IMPORT_NS(ACPI_THERMAL);
MODULE_IMPORT_NS("ACPI_THERMAL");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
@ -53,25 +53,25 @@ int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)

    return acpi_trip_temp(adev, obj_name, ret_temp);
}
EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, "ACPI_THERMAL");

int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
    return acpi_trip_temp(adev, "_PSV", ret_temp);
}
EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL");

int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
    return acpi_trip_temp(adev, "_HOT", ret_temp);
}
EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, "ACPI_THERMAL");

int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
    return acpi_trip_temp(adev, "_CRT", ret_temp);
}
EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, "ACPI_THERMAL");

static int thermal_temp(int error, int temp_decik, int *ret_temp)
{
@ -2569,7 +2569,7 @@ static struct platform_driver fore200e_sba_driver = {
        .of_match_table = fore200e_sba_match,
    },
    .probe = fore200e_sba_probe,
    .remove_new = fore200e_sba_remove,
    .remove = fore200e_sba_remove,
};
#endif
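This hunk, and the matching driver hunks below, drop the transitional .remove_new field name in favour of .remove, which now points at a void-returning callback. A minimal sketch of the resulting shape of a platform driver — the driver and function names here are made up for illustration and are not part of this diff:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
        /* acquire resources; devm_* helpers keep the remove path short */
        return 0;
    }

    /* the remove callback returns void, so there is no error to propagate */
    static void foo_remove(struct platform_device *pdev)
    {
        /* release anything not handled by devm_* */
    }

    static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .remove = foo_remove,    /* formerly .remove_new */
        .driver = {
            .name = "foo",
        },
    };
    module_platform_driver(foo_driver);

    MODULE_DESCRIPTION("Hypothetical driver showing the void .remove callback");
    MODULE_LICENSE("GPL");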
@ -108,7 +108,7 @@ static void cfag12864bfb_remove(struct platform_device *device)

static struct platform_driver cfag12864bfb_driver = {
    .probe = cfag12864bfb_probe,
    .remove_new = cfag12864bfb_remove,
    .remove = cfag12864bfb_remove,
    .driver = {
        .name = CFAG12864BFB_NAME,
    },

@ -339,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);

static struct platform_driver hd44780_driver = {
    .probe = hd44780_probe,
    .remove_new = hd44780_remove,
    .remove = hd44780_remove,
    .driver = {
        .name = "hd44780",
        .of_match_table = hd44780_of_match,

@ -780,5 +780,5 @@ module_i2c_driver(ht16k33_driver);

MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(LINEDISP);
MODULE_IMPORT_NS("LINEDISP");
MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");

@ -291,11 +291,11 @@ static struct platform_driver img_ascii_lcd_driver = {
        .of_match_table = img_ascii_lcd_matches,
    },
    .probe = img_ascii_lcd_probe,
    .remove_new = img_ascii_lcd_remove,
    .remove = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);

MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(LINEDISP);
MODULE_IMPORT_NS("LINEDISP");

@ -381,7 +381,7 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
    put_device(&linedisp->dev);
    return err;
}
EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");

/**
 * linedisp_unregister - unregister a character line display

@ -394,7 +394,7 @@ void linedisp_unregister(struct linedisp *linedisp)
    del_timer_sync(&linedisp->timer);
    put_device(&linedisp->dev);
}
EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");

MODULE_DESCRIPTION("Character line display core support");
MODULE_LICENSE("GPL");

@ -191,4 +191,4 @@ module_i2c_driver(max6959_i2c_driver);
MODULE_DESCRIPTION("MAX6958/6959 7-segment LED controller");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(LINEDISP);
MODULE_IMPORT_NS("LINEDISP");

@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);

static struct platform_driver seg_led_driver = {
    .probe = seg_led_probe,
    .remove_new = seg_led_remove,
    .remove = seg_led_remove,
    .driver = {
        .name = "seg-led-gpio",
        .of_match_table = seg_led_of_match,

@ -108,4 +108,4 @@ module_platform_driver(seg_led_driver);
MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
MODULE_DESCRIPTION("7 segment LED driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(LINEDISP);
MODULE_IMPORT_NS("LINEDISP");
@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
{
    int nid;

    /* Check the validity of the memblock/node mapping */
    if (!memblock_validate_numa_coverage(0))
        return -EINVAL;

    /* Finally register nodes. */
    for_each_node_mask(nid, numa_nodes_parsed) {
        unsigned long start_pfn, end_pfn;
@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
    struct cacheinfo *llc;

    if (!cache_leaves(cpu))
    if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
        return false;

    llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

@ -458,11 +458,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
    return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
static inline int allocate_cache_info(int cpu)
{
    per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                     sizeof(struct cacheinfo), GFP_ATOMIC);
    per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
    if (!per_cpu_cacheinfo(cpu)) {
        cache_leaves(cpu) = 0;
        return -ENOMEM;

@ -534,7 +532,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
     */
    ci_cacheinfo(cpu)->early_ci_levels = false;

    if (cache_leaves(cpu) <= early_leaves)
    /*
     * Some architectures (e.g., x86) do not use early initialization.
     * Allocate memory now in such case.
     */
    if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
        return 0;

    kfree(per_cpu_cacheinfo(cpu));
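The cacheinfo hunks above harden two paths against a missing per-CPU cacheinfo array: the validity check now also requires per_cpu_cacheinfo(cpu) to be non-NULL, and the allocation path only returns early when the array already exists. The underlying pattern is simply to validate both the element count and the backing pointer before indexing; a small stand-alone sketch of that guard, with types and names invented for illustration (not kernel code):

    #include <stdbool.h>

    struct leaf_info {
        unsigned int level;
    };

    struct cpu_cache_state {
        unsigned int nr_leaves;
        struct leaf_info *leaves;    /* may still be NULL before allocation */
    };

    /* Mirrors the shape of the kernel change: do not trust the count alone. */
    static bool last_leaf_is_valid(const struct cpu_cache_state *st)
    {
        if (!st->nr_leaves || !st->leaves)
            return false;

        return st->leaves[st->nr_leaves - 1].level != 0;
    }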
@ -61,7 +61,7 @@ bool firmware_request_builtin(struct firmware *fw, const char *name)

    return false;
}
EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");

/**
 * firmware_request_builtin_buf() - load builtin firmware into optional buffer

@ -22,7 +22,7 @@ struct firmware_fallback_config fw_fallback_config = {
    .loading_timeout = 60,
    .old_timeout = 60,
};
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");

#ifdef CONFIG_SYSCTL
static struct ctl_table firmware_config_table[] = {

@ -56,13 +56,13 @@ int register_firmware_config_sysctl(void)
        return -ENOMEM;
    return 0;
}
EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");

void unregister_firmware_config_sysctl(void)
{
    unregister_sysctl_table(firmware_config_sysct_table_header);
    firmware_config_sysct_table_header = NULL;
}
EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");

#endif /* CONFIG_SYSCTL */

@ -6,7 +6,7 @@

#include "firmware.h"

MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
MODULE_IMPORT_NS("FIRMWARE_LOADER_PRIVATE");

extern struct firmware_fallback_config fw_fallback_config;
extern struct device_attribute dev_attr_loading;
Some files were not shown because too many files have changed in this diff.