irqchip updates for 6.3
Merge tag 'irqchip-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

- New and improved irqdomain locking, closing a number of races that
  became apparent now that we are able to probe drivers in parallel

- A bunch of OF node refcounting bugs have been fixed

- We now have a new IPI mux, lifted from the Apple AIC code and made
  common. It is expected that riscv will eventually benefit from it

- Two small fixes for the Broadcom L2 drivers

- Various cleanups and minor bug fixes

Link: https://lore.kernel.org/r/20230218143452.3817627-1-maz@kernel.org
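For context on the IPI mux mentioned above: it gives an irqchip driver with a single hardware IPI per CPU a generic way to multiplex several logical IPIs on top of it. A minimal sketch of how a driver might use the ipi_mux_create()/ipi_mux_process() API introduced by this series — the my_*() names and register write are hypothetical placeholders::

    #include <linux/irq.h>
    #include <linux/interrupt.h>

    #define MY_NR_IPIS	4	/* number of logical IPIs to multiplex */

    /* Kick the one real hardware IPI for @cpu (hardware-specific). */
    static void my_hw_ipi_send(unsigned int cpu)
    {
    	/* e.g. write to the controller's software-generated-IRQ register */
    }

    /* Handler for the single hardware IPI: demultiplex to the virtual IPIs. */
    static irqreturn_t my_hw_ipi_handler(int irq, void *data)
    {
    	ipi_mux_process();
    	return IRQ_HANDLED;
    }

    static int my_driver_init(void)
    {
    	/* Returns the first of MY_NR_IPIS muxed virqs, or a negative errno. */
    	int first_virq = ipi_mux_create(MY_NR_IPIS, my_hw_ipi_send);

    	if (first_virq < 0)
    		return first_virq;
    	/* first_virq .. first_virq + MY_NR_IPIS - 1 can now be handed to smp code */
    	return 0;
    }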
This commit is contained in commit 6f3ee0e22b.

 .mailmap | 1 +
@@ -422,6 +422,7 @@ Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tudor Ambarus <tudor.ambarus@linaro.org> <tudor.ambarus@microchip.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
@@ -31,6 +31,12 @@ def have_command(cmd):
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
+#
+# Warn about older versions that we don't want to support for much
+# longer.
+#
+if (major < 2) or (major == 2 and minor < 4):
+    print('WARNING: support for Sphinx < 2.4 will be removed soon.')
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -339,7 +345,11 @@ html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
-html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']}
+# Note that the RTD theme ignores this
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
+
+# about.html is available for alabaster theme. Add it at the front.
+if html_theme == 'alabaster':
+    html_sidebars['**'].insert(0, 'about.html')
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'TheLinuxKerneldoc'
@@ -54,6 +54,17 @@ properties:
       - const: xo
       - const: alternate
 
+  interrupts:
+    minItems: 1
+    maxItems: 3
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: dcvsh-irq-0
+      - const: dcvsh-irq-1
+      - const: dcvsh-irq-2
+
   '#freq-domain-cells':
     const: 1
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Advanced Encryption Standard (AES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Secure Hash Algorithm (SHA) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Triple Data Encryption Standard (TDES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
@@ -32,7 +32,7 @@ properties:
-      - description: Display byte clock
+      - description: Display byte interface clock
       - description: Display pixel clock
       - description: Display escape clock
       - description: Display core clock
       - description: Display AHB clock
       - description: Display AXI clock
@@ -137,8 +137,6 @@ required:
   - phys
-  - assigned-clocks
-  - assigned-clock-parents
   - power-domains
   - operating-points-v2
   - ports
 
 additionalProperties: false
@@ -69,7 +69,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vdds-supply
 
 unevaluatedProperties: false
@@ -39,7 +39,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vcca-supply
 
 unevaluatedProperties: false
@@ -34,6 +34,10 @@ properties:
   vddio-supply:
     description: Phandle to vdd-io regulator device node.
 
+  qcom,dsi-phy-regulator-ldo-mode:
+    type: boolean
+    description: Indicates if the LDO mode PHY regulator is wanted.
+
 required:
   - compatible
   - reg
@@ -72,7 +72,7 @@ examples:
     #include <dt-bindings/interconnect/qcom,qcm2290.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,qcm2290-mdss";
@@ -62,7 +62,7 @@ examples:
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
        #address-cells = <1>;
        #size-cells = <1>;
        compatible = "qcom,sm6115-mdss";
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+      - mediatek,mt8186-mt6366-rt5682s-max98360-sound
 
   mediatek,platform:
     $ref: "/schemas/types.yaml#/definitions/phandle"
@@ -30,7 +30,9 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    oneOf:
+      - maxItems: 3
+      - maxItems: 5
 
   clock-names:
     oneOf:
@@ -9,9 +9,6 @@ title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
 maintainers:
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
-allOf:
-  - $ref: dai-common.yaml#
-
 properties:
   compatible:
     enum:
@@ -30,15 +27,12 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    minItems: 5
+    maxItems: 6
 
   clock-names:
-    items:
-      - const: mclk
-      - const: npl
-      - const: macro
-      - const: dcodec
-      - const: fsgen
+    minItems: 5
+    maxItems: 6
 
   clock-output-names:
     maxItems: 1
@@ -55,10 +49,51 @@ required:
   - reg
   - "#sound-dai-cells"
 
+allOf:
+  - $ref: dai-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-lpass-wsa-macro
+            - qcom,sm8450-lpass-wsa-macro
+            - qcom,sc8280xp-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: fsgen
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm8250-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: va
+            - const: fsgen
+
 unevaluatedProperties: false
 
 examples:
   - |
     #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
     #include <dt-bindings/sound/qcom,q6afe.h>
     codec@3240000 {
       compatible = "qcom,sm8250-lpass-wsa-macro";
@@ -69,7 +104,8 @@ examples:
                <&audiocc 0>,
                <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+               <&aoncc LPASS_CDC_VA_MCLK>,
                <&vamacro>;
-      clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+      clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
       clock-output-names = "mclk";
     };
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel SPI device
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Quad Serial Peripheral Interface (QSPI)
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
@@ -44,9 +44,9 @@ properties:
     description:
       Maximum SPI clocking speed of the device in Hz.
 
-  spi-cs-setup-ns:
+  spi-cs-setup-delay-ns:
     description:
-      Delay in nanosecods to be introduced by the controller after CS is
+      Delay in nanoseconds to be introduced by the controller after CS is
       asserted.
 
   spi-rx-bus-width:
@@ -880,8 +880,8 @@ The kernel interface functions are as follows:
 
     notify_end_rx can be NULL or it can be used to specify a function to be
     called when the call changes state to end the Tx phase. This function is
-    called with the call-state spinlock held to prevent any reply or final ACK
-    from being delivered first.
+    called with a spinlock held to prevent the last DATA packet from being
+    transmitted until the function returns.
 
  (#) Receive data from a call::
@@ -3,7 +3,7 @@
 
 import os
 import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding
 
 # ------------------------------------------------------------------------------
 def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
             config['__file__'] = config_file
-            execfile_(config_file, config)
+            with open(config_file, 'rb') as f:
+                code = compile(f.read(), fs_encoding, 'exec')
+                exec(code, config)
             del config['__file__']
             namespace.update(config)
         else:
@@ -1354,6 +1354,14 @@ the memory region are automatically reflected into the guest. For example, an
 mmap() that affects the region will be made visible immediately. Another
 example is madvise(MADV_DROP).
 
+Note: On arm64, a write generated by the page-table walker (to update
+the Access and Dirty flags, for example) never results in a
+KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
+is because KVM cannot provide the data that would be written by the
+page-table walker, making it impossible to emulate the access.
+Instead, an abort (data abort if the cause of the page-table update
+was a load or a store, instruction abort if it was an instruction
+fetch) is injected in the guest.
+
 4.36 KVM_SET_TSS_ADDR
 ---------------------
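For context, KVM_MEM_READONLY is requested by userspace when it registers the memslot. A minimal userspace sketch of such a registration (illustrative only; the slot number, size, and guest address are made up, and availability of the flag should be checked via KVM_CAP_READONLY_MEM)::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    /* Map an anonymous buffer and expose it read-only at guest physical 0x0. */
    static int add_readonly_slot(int vm_fd)
    {
    	size_t size = 0x100000;
    	void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
    			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	struct kvm_userspace_memory_region region = {
    		.slot = 1,
    		.flags = KVM_MEM_READONLY,	/* guest writes are not applied */
    		.guest_phys_addr = 0x0,
    		.memory_size = size,
    		.userspace_addr = (unsigned long)backing,
    	};

    	if (backing == MAP_FAILED)
    		return -1;
    	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }

On most architectures a guest store to such a slot exits to userspace as MMIO; the note above documents that arm64 page-table-walker updates instead inject an abort into the guest.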
@@ -8310,6 +8318,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
 has enabled in-kernel emulation of the local APIC.
 
+CPU topology
+~~~~~~~~~~~~
+
+Several CPUID values include topology information for the host CPU:
+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems. Different
+versions of KVM return different values for this information and userspace
+should not rely on it. Currently they return all zeroes.
+
+If userspace wishes to set up a guest topology, it should be careful that
+the values of these three leaves differ for each CPU. In particular,
+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
+for 0x8000001e; the latter also encodes the core id and node id in bits
+7:0 of EBX and ECX respectively.
+
 Obsolete ioctls and capabilities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
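A small host-side illustration of where the APIC ID lives per the text above, reading leaf 0x0b subleaf 0 with the compiler's cpuid.h helper (this is plain host CPUID, not a KVM API)::

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int eax, ebx, ecx, edx;

    	/* Returns 0 if leaf 0x0b is not supported on this CPU. */
    	if (!__get_cpuid_count(0x0b, 0, &eax, &ebx, &ecx, &edx))
    		return 1;
    	printf("x2APIC ID (EDX of leaf 0x0b): %u\n", edx);
    	return 0;
    }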
@@ -24,21 +24,22 @@ The acquisition orders for mutexes are as follows:
 
 For SRCU:
 
-- ``synchronize_srcu(&kvm->srcu)`` is called _inside_
-  the kvm->slots_lock critical section, therefore kvm->slots_lock
-  cannot be taken inside a kvm->srcu read-side critical section.
-  Instead, kvm->slots_arch_lock is released before the call
-  to ``synchronize_srcu()`` and _can_ be taken inside a
-  kvm->srcu read-side critical section.
-
-- kvm->lock is taken inside kvm->srcu, therefore
-  ``synchronize_srcu(&kvm->srcu)`` cannot be called inside
-  a kvm->lock critical section. If you cannot delay the
-  call until after kvm->lock is released, use ``call_srcu``.
+- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+  for kvm->lock, vcpu->mutex and kvm->slots_lock. These locks _cannot_
+  be taken inside a kvm->srcu read-side critical section; that is, the
+  following is broken::
+
+      srcu_read_lock(&kvm->srcu);
+      mutex_lock(&kvm->slots_lock);
+
+- kvm->slots_arch_lock instead is released before the call to
+  ``synchronize_srcu()``.  It _can_ therefore be taken inside a
+  kvm->srcu read-side critical section, for example while processing
+  a vmexit.
 
 On x86:
 
-- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
 
 - kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
   kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
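A minimal kernel-style sketch of the ordering the corrected text allows — slots_arch_lock nesting inside an SRCU read side — purely illustrative, not taken from the patch::

    #include <linux/srcu.h>
    #include <linux/kvm_host.h>

    /* Allowed: kvm->slots_arch_lock may nest inside an srcu read-side
     * section, because synchronize_srcu() is never called with it held. */
    static void example_ok(struct kvm *kvm)
    {
    	int idx = srcu_read_lock(&kvm->srcu);

    	mutex_lock(&kvm->slots_arch_lock);
    	/* ... inspect memslot metadata ... */
    	mutex_unlock(&kvm->slots_arch_lock);
    	srcu_read_unlock(&kvm->srcu, idx);
    }

Taking kvm->slots_lock (or kvm->lock, or vcpu->mutex) in the same position would be the "broken" pattern shown in the document.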
 MAINTAINERS | 15
@@ -11358,9 +11358,9 @@ F:	virt/kvm/*
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M:	Marc Zyngier <maz@kernel.org>
 R:	James Morse <james.morse@arm.com>
 R:	Alexandru Elisei <alexandru.elisei@arm.com>
 R:	Suzuki K Poulose <suzuki.poulose@arm.com>
 R:	Oliver Upton <oliver.upton@linux.dev>
+R:	Zenghui Yu <yuzenghui@huawei.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.linux.dev
-L:	kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
@@ -13622,7 +13622,7 @@ F:	arch/microblaze/
 
 MICROCHIP AT91 DMA DRIVERS
 M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	dmaengine@vger.kernel.org
 S:	Supported
@@ -13667,7 +13667,7 @@ F:	Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
 F:	drivers/media/platform/microchip/microchip-csi2dc.c
 
 MICROCHIP ECC DRIVER
-M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@linaro.org>
 L:	linux-crypto@vger.kernel.org
 S:	Maintained
 F:	drivers/crypto/atmel-ecc.*
@@ -13764,7 +13764,7 @@ S:	Maintained
 F:	drivers/mmc/host/atmel-mci.c
 
 MICROCHIP NAND DRIVER
-M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@linaro.org>
 L:	linux-mtd@lists.infradead.org
 S:	Supported
 F:	Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -13816,7 +13816,7 @@ S:	Supported
 F:	drivers/power/reset/at91-sama5d2_shdwc.c
 
 MICROCHIP SPI DRIVER
-M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@linaro.org>
 S:	Supported
 F:	drivers/spi/spi-atmel.*
@@ -14921,7 +14921,8 @@ T:	git://git.infradead.org/nvme.git
 F:	Documentation/nvme/
 F:	drivers/nvme/host/
 F:	drivers/nvme/common/
-F:	include/linux/nvme*
+F:	include/linux/nvme.h
+F:	include/linux/nvme-*.h
 F:	include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS FABRICS AUTHENTICATION
@@ -19674,7 +19675,7 @@ F:	drivers/clk/spear/
 F:	drivers/pinctrl/spear/
 
 SPI NOR SUBSYSTEM
-M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+M:	Tudor Ambarus <tudor.ambarus@linaro.org>
 M:	Pratyush Yadav <pratyush@kernel.org>
 R:	Michael Walle <michael@walle.cc>
 L:	linux-mtd@lists.infradead.org
 Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -184,8 +184,6 @@ config ARM64
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
-		if $(cc-option,-fpatchable-function-entry=2)
-	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
-		if DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_2645198
+	bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+	  If a Cortex-A715 cpu sees a page mapping permissions change from executable
+	  to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+	  next instruction abort caused by permission fault.
+
+	  Only user-space does executable to non-executable permission transition via
+	  mprotect() system call. Workaround the problem by doing a break-before-make
+	  TLB invalidation, for all changes to executable user space mappings.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,			\
 	"	cbnz	%w0, 1b\n"					\
 	"	" #mb "\n"						\
 	"2:"								\
-	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)		\
 	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
 	: cl);								\
 									\
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,				\
 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
 	"	orr	%[old1], %[old1], %[old2]"			\
 	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
-	  [v] "+Q" (*(unsigned long *)ptr)				\
+	  [v] "+Q" (*(__uint128_t *)ptr)				\
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
 	: cl);								\
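These two hunks matter because the inline asm updates a full 16-byte object: a "+Q" constraint that only covers an unsigned long tells the compiler that just 8 bytes are read/written, so it may cache or reorder accesses to the second word around the cmpxchg. A standalone illustration of declaring the full access width, assuming an AArch64 target (not kernel code; the pointer must be 16-byte aligned)::

    #include <stdint.h>

    static inline void store_pair(uint64_t *p, uint64_t lo, uint64_t hi)
    {
    #if defined(__aarch64__)
    	/* Correct: the "+Q" operand covers all 16 bytes, so the compiler
    	 * knows both words are modified by the asm. */
    	asm volatile("stp %1, %2, %0"
    		     : "+Q" (*(unsigned __int128 *)p)
    		     : "r" (lo), "r" (hi));
    #else
    	p[0] = lo;
    	p[1] = hi;
    #endif
    }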
@@ -124,6 +124,8 @@
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO	0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX	0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX	0x029
+#define APPLE_CPU_PART_M2_BLIZZARD	0x032
+#define APPLE_CPU_PART_M2_AVALANCHE	0x033
 
 #define AMPERE_CPU_PART_AMPERE1	0xAC3
 
@@ -177,6 +179,8 @@
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
@@ -114,6 +114,15 @@
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
 #define ESR_ELx_FSC_PERM	(0x0C)
+#define ESR_ELx_FSC_SEA_TTW0	(0x14)
+#define ESR_ELx_FSC_SEA_TTW1	(0x15)
+#define ESR_ELx_FSC_SEA_TTW2	(0x16)
+#define ESR_ELx_FSC_SEA_TTW3	(0x17)
+#define ESR_ELx_FSC_SECC	(0x18)
+#define ESR_ELx_FSC_SECC_TTW0	(0x1c)
+#define ESR_ELx_FSC_SECC_TTW1	(0x1d)
+#define ESR_ELx_FSC_SECC_TTW2	(0x1e)
+#define ESR_ELx_FSC_SECC_TTW3	(0x1f)
 
 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT	(24)
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep,
+					 pte_t old_pte, pte_t new_pte);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
@@ -319,21 +319,6 @@
 			 BIT(18) |		\
 			 GENMASK(16, 15))
 
-/* For compatibility with fault code shared with 32-bit */
-#define FSC_FAULT	ESR_ELx_FSC_FAULT
-#define FSC_ACCESS	ESR_ELx_FSC_ACCESS
-#define FSC_PERM	ESR_ELx_FSC_PERM
-#define FSC_SEA		ESR_ELx_FSC_EXTABT
-#define FSC_SEA_TTW0	(0x14)
-#define FSC_SEA_TTW1	(0x15)
-#define FSC_SEA_TTW2	(0x16)
-#define FSC_SEA_TTW3	(0x17)
-#define FSC_SECC	(0x18)
-#define FSC_SECC_TTW0	(0x1c)
-#define FSC_SECC_TTW1	(0x1d)
-#define FSC_SECC_TTW2	(0x1e)
-#define FSC_SECC_TTW3	(0x1f)
-
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK	(~UL(0xf))
 /*
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
-	case FSC_SEA:
-	case FSC_SEA_TTW0:
-	case FSC_SEA_TTW1:
-	case FSC_SEA_TTW2:
-	case FSC_SEA_TTW3:
-	case FSC_SECC:
-	case FSC_SECC_TTW0:
-	case FSC_SECC_TTW1:
-	case FSC_SECC_TTW2:
-	case FSC_SECC_TTW3:
+	case ESR_ELx_FSC_EXTABT:
+	case ESR_ELx_FSC_SEA_TTW0:
+	case ESR_ELx_FSC_SEA_TTW1:
+	case ESR_ELx_FSC_SEA_TTW2:
+	case ESR_ELx_FSC_SEA_TTW3:
+	case ESR_ELx_FSC_SECC:
+	case ESR_ELx_FSC_SECC_TTW0:
+	case ESR_ELx_FSC_SECC_TTW1:
+	case ESR_ELx_FSC_SECC_TTW2:
+	case ESR_ELx_FSC_SECC_TTW3:
 		return true;
 	default:
 		return false;
@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
-	if (kvm_vcpu_abt_iss1tw(vcpu))
-		return true;
+	if (kvm_vcpu_abt_iss1tw(vcpu)) {
+		/*
+		 * Only a permission fault on a S1PTW should be
+		 * considered as a write. Otherwise, page tables baked
+		 * in a read-only memslot will result in an exception
+		 * being delivered in the guest.
+		 *
+		 * The drawback is that we end-up faulting twice if the
+		 * guest is using any of HW AF/DB: a translation fault
+		 * to map the page containing the PT (read only at
+		 * first), then a permission fault to allow the flags
+		 * to be set.
+		 */
+		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+		case ESR_ELx_FSC_PERM:
+			return true;
+		default:
+			return false;
+		}
+	}
 
 	if (kvm_vcpu_trap_is_iabt(vcpu))
 		return false;
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)		pte_valid(pud_pte(pud))
 #define pud_user(pud)		pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #else
 
 #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */
 
 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
 #define pmd_set_fixmap(addr)	NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-	return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-	return pud_leaf(pud) && pud_user(pud);
+	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
 }
 #endif
 
@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
 }
 
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep,
+				    pte_t old_pte, pte_t new_pte);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
@@ -16,7 +16,7 @@
 #define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
 #define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES
 
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
 
 struct arch_uprobe_task {
 };
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+	{
+		.desc = "ARM erratum 2645198",
+		.capability = ARM64_WORKAROUND_2645198,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2077057
 	{
 		.desc = "ARM erratum 2077057",
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 SYM_FUNC_START(__efi_rt_asm_wrapper)
 	stp	x29, x30, [sp, #-112]!
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)					\
+#define for_each_mte_vma(cprm, i, m)					\
 	if (system_supports_mte())					\
-		for_each_vma(vmi, vma)					\
-			if (vma->vm_flags & VM_MTE)
+		for (i = 0, m = cprm->vma_meta;				\
+		     i < cprm->vma_count;				\
+		     i++, m = cprm->vma_meta + i)			\
+			if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-	if (vma->vm_flags & VM_DONTDUMP)
-		return 0;
-
-	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-			      unsigned long start, unsigned long end)
+			      unsigned long start, unsigned long len)
 {
 	int ret = 1;
 	unsigned long addr;
 	void *tags = NULL;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
 		struct page *page = get_dump_page(addr);
 
 		/*
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
 		mte_save_page_tags(page_address(page), tags);
 		put_page(page);
 		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
-			mte_free_tag_storage(tags);
 			ret = 0;
 			break;
 		}
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
 	return ret;
 }
 
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	int vma_count = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
+	for_each_mte_vma(cprm, i, m)
 		vma_count++;
 
 	return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
+	for_each_mte_vma(cprm, i, m) {
 		struct elf_phdr phdr;
 
 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
 		phdr.p_offset = offset;
-		phdr.p_vaddr = vma->vm_start;
+		phdr.p_vaddr = m->start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = mte_vma_tag_dump_size(vma);
-		phdr.p_memsz = vma->vm_end - vma->vm_start;
+		phdr.p_filesz = mte_vma_tag_dump_size(m);
+		phdr.p_memsz = m->end - m->start;
 		offset += phdr.p_filesz;
 		phdr.p_flags = 0;
 		phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 	return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	size_t data_size = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
-		data_size += mte_vma_tag_dump_size(vma);
+	for_each_mte_vma(cprm, i, m)
+		data_size += mte_vma_tag_dump_size(m);
 
 	return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
-		if (vma->vm_flags & VM_DONTDUMP)
-			continue;
-
-		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+	for_each_mte_vma(cprm, i, m) {
+		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
 			return 0;
 	}
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
 	WARN_ON(!system_supports_fpsimd());
 	WARN_ON(!have_cpu_fpsimd_context());
 
-	if (system_supports_sve()) {
+	if (system_supports_sve() || system_supports_sme()) {
 		switch (current->thread.fp_type) {
 		case FP_STATE_FPSIMD:
 			/* Stop tracking SVE for this task until next use. */
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
 #ifdef CONFIG_ARM64_SVE
 	REGSET_SVE,
 #endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
 	REGSET_SSVE,
 	REGSET_ZA,
 #endif
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
 		vl = task_get_sme_vl(current);
 	} else {
-		if (!system_supports_sve())
+		/*
+		 * A SME only system use SVE for streaming mode so can
+		 * have a SVE formatted context with a zero VL and no
+		 * payload data.
+		 */
+		if (!system_supports_sve() && !system_supports_sme())
 			return -EINVAL;
 
 		vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 			return err;
 	}
 
-	if (system_supports_sve()) {
+	if (system_supports_sve() || system_supports_sme()) {
 		unsigned int vq = 0;
 
 		if (add_all || test_thread_flag(TIF_SVE) ||
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
 	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+	     (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
 	} else {
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		bool valid;
 
-		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
 			kvm_vcpu_dabt_isvalid(vcpu) &&
 			!kvm_vcpu_abt_issea(vcpu) &&
 			!kvm_vcpu_abt_iss1tw(vcpu);
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
 
-	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+	if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
 	}
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * only exception to this is when dirty logging is enabled at runtime
 	 * and a write fault needs to collapse a block entry into a table.
 	 */
-	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+	if (fault_status != ESR_ELx_FSC_PERM ||
+	    (logging_active && write_fault)) {
 		ret = kvm_mmu_topup_memory_cache(memcache,
 						 kvm_mmu_cache_min_pages(kvm));
 		if (ret)
@@ -1342,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * backed by a THP and thus use block mapping if possible.
 	 */
 	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-		if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+		if (fault_status == ESR_ELx_FSC_PERM &&
+		    fault_granule > PAGE_SIZE)
 			vma_pagesize = fault_granule;
 		else
 			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1350,7 +1352,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 								   &fault_ipa);
 	}
 
-	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+	if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
 		/* Check the VMM hasn't introduced a new disallowed VMA */
 		if (kvm_vma_mte_allowed(vma)) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
 	 * kvm_pgtable_stage2_map() should be called to change block size.
 	 */
-	if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
+	if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
 		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
 	else
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-	if (fault_status == FSC_FAULT) {
+	if (fault_status == ESR_ELx_FSC_FAULT) {
 		/* Beyond sanitised PARange (which is the IPA limit) */
 		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
 			kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
-	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-	    fault_status != FSC_ACCESS) {
+	if (fault_status != ESR_ELx_FSC_FAULT &&
+	    fault_status != ESR_ELx_FSC_PERM &&
+	    fault_status != ESR_ELx_FSC_ACCESS) {
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	/* Userspace should not be able to register out-of-bounds IPAs */
 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
-	if (fault_status == FSC_ACCESS) {
+	if (fault_status == ESR_ELx_FSC_ACCESS) {
 		handle_access_fault(vcpu, fault_ipa);
 		ret = 1;
 		goto out_unlock;
@@ -646,7 +646,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 		return;
 
 	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-	pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK;
+	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
 	if (!kvm_supports_32bit_el0())
 		pmcr |= ARMV8_PMU_PMCR_LC;
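The bug fixed here is that ARMV8_PMU_PMCR_N_MASK is an unshifted field mask, so masking the register value directly cleared PMCR_EL0.N (bits [15:11]) instead of preserving it. A standalone illustration of the field arithmetic, using the same macro values as that era's perf_event.h (shift 11, mask 0x1f)::

    #include <stdint.h>
    #include <stdio.h>

    #define ARMV8_PMU_PMCR_N_SHIFT	11
    #define ARMV8_PMU_PMCR_N_MASK	0x1f	/* field mask, not shifted */

    int main(void)
    {
    	uint32_t pmcr = 0x3000;	/* example value: N field = 6 */

    	/* Broken: keeps bits [4:0], which are not the N field at all. */
    	uint32_t broken = pmcr & ARMV8_PMU_PMCR_N_MASK;
    	/* Fixed: keeps the N field in place, bits [15:11]. */
    	uint32_t fixed = pmcr & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);

    	printf("broken=%#x fixed=%#x\n", broken, fixed);	/* 0 vs 0x3000 */
    	return 0;
    }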
@@ -616,6 +616,8 @@ static const struct midr_range broken_seis[] = {
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
 	{},
 };
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	return __hugetlb_valid_size(size);
 }
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+		/*
+		 * Break-before-make (BBM) is required for all user space mappings
+		 * when the permission changes from executable to non-executable
+		 * in cases where cpu is affected with errata #2645198.
+		 */
+		if (pte_user_exec(READ_ONCE(*ptep)))
+			return huge_ptep_clear_flush(vma, addr, ptep);
+	}
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+				  pte_t old_pte, pte_t pte)
+{
+	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
 }
 early_initcall(prevent_bootmem_remove_init);
 #endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+		/*
+		 * Break-before-make (BBM) is required for all user space mappings
+		 * when the permission changes from executable to non-executable
+		 * in cases where cpu is affected with errata #2645198.
+		 */
+		if (pte_user_exec(READ_ONCE(*ptep)))
+			return ptep_clear_flush(vma, addr, ptep);
+	}
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+			     pte_t old_pte, pte_t pte)
+{
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
@@ -71,6 +71,7 @@ WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
+WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf64_Half elf_core_extra_phdrs(void)
+Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
 	return GATE_EHDR->e_phnum;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
 	return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
 	const struct elf_phdr *const gate_phdrs =
 		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -210,6 +210,10 @@ ld_version()
 	gsub(".*version ", "");
 	gsub("-.*", "");
 	split($1,a, ".");
+	if( length(a[3]) == "8" )
+	# a[3] is probably a date of format yyyymmdd used for release snapshots. We
+	# can assume it to be zero as it does not signify a new version as such.
+		a[3] = 0;
 	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
 	exit
 	}'
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-	struct mutex lock;
+	spinlock_t lock;
 	unsigned int id;
 	int refc;
 };
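This struct change drives the rest of the powerpc/imc-pmu diff below: perf's pmu add/del and event->destroy callbacks can run in contexts that must not sleep, so a sleeping mutex around the reference counts is invalid and every lock/unlock site is converted to a spinlock. A freestanding sketch of the resulting pattern — the demo_* names are hypothetical, only the locking idiom mirrors the patch::

    #include <linux/spinlock.h>

    struct demo_ref {
    	spinlock_t lock;
    	unsigned int id;
    	int refc;
    };

    static void demo_get(struct demo_ref *ref)
    {
    	/* Safe even with interrupts disabled; mutex_lock() would not be. */
    	spin_lock(&ref->lock);
    	ref->refc++;
    	spin_unlock(&ref->lock);
    }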
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
 	int next_cpu;
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
@@ -21,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_SPINLOCK(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
|
||||
* core and trace-imc
|
||||
*/
|
||||
static struct imc_pmu_ref imc_global_refc = {
|
||||
.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
|
||||
.lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
|
||||
.id = 0,
|
||||
.refc = 0,
|
||||
};
|
||||
@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
|
||||
get_hard_smp_processor_id(cpu));
|
||||
/*
|
||||
* If this is the last cpu in this chip then, skip the reference
|
||||
* count mutex lock and make the reference count on this chip zero.
|
||||
* count lock and make the reference count on this chip zero.
|
||||
*/
|
||||
ref = get_nest_pmu_ref(cpu);
|
||||
if (!ref)
|
||||
@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
|
||||
/*
|
||||
* See if we need to disable the nest PMU.
|
||||
* If no events are currently in use, then we have to take a
|
||||
* mutex to ensure that we don't race with another task doing
|
||||
* lock to ensure that we don't race with another task doing
|
||||
* enable or disable the nest counters.
|
||||
*/
|
||||
ref = get_nest_pmu_ref(event->cpu);
|
||||
if (!ref)
|
||||
return;
|
||||
|
||||
/* Take the mutex lock for this node and then decrement the reference count */
|
||||
mutex_lock(&ref->lock);
|
||||
/* Take the lock for this node and then decrement the reference count */
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
/*
|
||||
* The scenario where this is true is, when perf session is
|
||||
@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
||||
* an OPAL call to disable the engine in that node.
|
||||
*
|
||||
*/
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
return;
|
||||
}
|
||||
ref->refc--;
|
||||
@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
||||
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
|
||||
get_hard_smp_processor_id(event->cpu));
|
||||
if (rc) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
|
||||
return;
|
||||
}
|
||||
@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
|
||||
WARN(1, "nest-imc: Invalid event reference count\n");
|
||||
ref->refc = 0;
|
||||
}
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
}
|
||||
|
||||
static int nest_imc_event_init(struct perf_event *event)
|
||||
@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
|
||||
|
||||
/*
|
||||
* Get the imc_pmu_ref struct for this node.
|
||||
* Take the mutex lock and then increment the count of nest pmu events
|
||||
* inited.
|
||||
* Take the lock and then increment the count of nest pmu events inited.
|
||||
*/
|
||||
ref = get_nest_pmu_ref(event->cpu);
|
||||
if (!ref)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
|
||||
get_hard_smp_processor_id(event->cpu));
|
||||
if (rc) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("nest-imc: Unable to start the counters for node %d\n",
|
||||
node_id);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
++ref->refc;
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
|
||||
event->destroy = nest_imc_counters_release;
|
||||
return 0;
|
||||
@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
|
||||
return -ENOMEM;
|
||||
mem_info->vbase = page_address(page);
|
||||
|
||||
/* Init the mutex */
|
||||
core_imc_refc[core_id].id = core_id;
|
||||
mutex_init(&core_imc_refc[core_id].lock);
|
||||
spin_lock_init(&core_imc_refc[core_id].lock);
|
||||
|
||||
rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
|
||||
__pa((void *)mem_info->vbase),
|
||||
@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
|
||||
perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
|
||||
} else {
|
||||
/*
|
||||
* If this is the last cpu in this core then, skip taking refernce
|
||||
* count mutex lock for this core and directly zero "refc" for
|
||||
* this core.
|
||||
* If this is the last cpu in this core then skip taking reference
|
||||
* count lock for this core and directly zero "refc" for this core.
|
||||
*/
|
||||
opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
|
||||
get_hard_smp_processor_id(cpu));
|
||||
@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
|
||||
* last cpu in this core and core-imc event running
|
||||
* in this cpu.
|
||||
*/
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == IMC_DOMAIN_CORE)
|
||||
imc_global_refc.refc--;
|
||||
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
|
||||
|
||||
static void reset_global_refc(struct perf_event *event)
|
||||
{
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
imc_global_refc.refc--;
|
||||
|
||||
/*
|
||||
@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
|
||||
imc_global_refc.refc = 0;
|
||||
imc_global_refc.id = 0;
|
||||
}
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
}
|
||||
|
||||
static void core_imc_counters_release(struct perf_event *event)
|
||||
@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
|
||||
/*
|
||||
* See if we need to disable the IMC PMU.
|
||||
* If no events are currently in use, then we have to take a
|
||||
* mutex to ensure that we don't race with another task doing
|
||||
* lock to ensure that we don't race with another task doing
|
||||
* enable or disable the core counters.
|
||||
*/
|
||||
core_id = event->cpu / threads_per_core;
|
||||
|
||||
/* Take the mutex lock and decrement the refernce count for this core */
|
||||
/* Take the lock and decrement the refernce count for this core */
|
||||
ref = &core_imc_refc[core_id];
|
||||
if (!ref)
|
||||
return;
|
||||
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
/*
|
||||
* The scenario where this is true is, when perf session is
|
||||
@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
|
||||
* an OPAL call to disable the engine in that core.
|
||||
*
|
||||
*/
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
return;
|
||||
}
|
||||
ref->refc--;
|
||||
@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
|
||||
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
|
||||
get_hard_smp_processor_id(event->cpu));
|
||||
if (rc) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
|
||||
return;
|
||||
}
|
||||
@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
|
||||
WARN(1, "core-imc: Invalid event reference count\n");
|
||||
ref->refc = 0;
|
||||
}
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
|
||||
reset_global_refc(event);
|
||||
}
|
||||
@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
|
||||
if ((!pcmi->vbase))
|
||||
return -ENODEV;
|
||||
|
||||
/* Get the core_imc mutex for this core */
|
||||
ref = &core_imc_refc[core_id];
|
||||
if (!ref)
|
||||
return -EINVAL;
|
||||
@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
|
||||
/*
|
||||
* Core pmu units are enabled only when it is used.
|
||||
* See if this is triggered for the first time.
|
||||
* If yes, take the mutex lock and enable the core counters.
|
||||
* If yes, take the lock and enable the core counters.
|
||||
* If not, just increment the count in core_imc_refc struct.
|
||||
*/
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
|
||||
get_hard_smp_processor_id(event->cpu));
|
||||
if (rc) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("core-imc: Unable to start the counters for core %d\n",
|
||||
core_id);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
++ref->refc;
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
|
||||
/*
|
||||
* Since the system can run either in accumulation or trace-mode
|
||||
@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
|
||||
* to know whether any other trace/thread imc
|
||||
* events are running.
|
||||
*/
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
|
||||
/*
|
||||
* No other trace/thread imc events are running in
|
||||
@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
|
||||
imc_global_refc.id = IMC_DOMAIN_CORE;
|
||||
imc_global_refc.refc++;
|
||||
} else {
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
|
||||
event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
|
||||
event->destroy = core_imc_counters_release;
|
||||
@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
|
||||
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
|
||||
|
||||
/* Reduce the refc if thread-imc event running on this cpu */
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == IMC_DOMAIN_THREAD)
|
||||
imc_global_refc.refc--;
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
|
||||
if (!target)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
/*
|
||||
* Check if any other trace/core imc events are running in the
|
||||
* system, if not set the global id to thread-imc.
|
||||
@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
|
||||
imc_global_refc.id = IMC_DOMAIN_THREAD;
|
||||
imc_global_refc.refc++;
|
||||
} else {
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
|
||||
event->pmu->task_ctx_nr = perf_sw_context;
|
||||
event->destroy = reset_global_refc;
|
||||
@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
|
||||
/*
|
||||
* imc pmus are enabled only when it is used.
|
||||
* See if this is triggered for the first time.
|
||||
* If yes, take the mutex lock and enable the counters.
|
||||
* If yes, take the lock and enable the counters.
|
||||
* If not, just increment the count in ref count struct.
|
||||
*/
|
||||
ref = &core_imc_refc[core_id];
|
||||
if (!ref)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
|
||||
get_hard_smp_processor_id(smp_processor_id()))) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("thread-imc: Unable to start the counter\
|
||||
for core %d\n", core_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
++ref->refc;
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 		return;
 	}
 
-	mutex_lock(&ref->lock);
+	spin_lock(&ref->lock);
 	ref->refc--;
 	if (ref->refc == 0) {
 		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
 		    get_hard_smp_processor_id(smp_processor_id()))) {
-			mutex_unlock(&ref->lock);
+			spin_unlock(&ref->lock);
 			pr_err("thread-imc: Unable to stop the counters\
 						for core %d\n", core_id);
 			return;
@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
 	} else if (ref->refc < 0) {
 		ref->refc = 0;
 	}
-	mutex_unlock(&ref->lock);
+	spin_unlock(&ref->lock);
 
 	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
 	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
|
||||
}
|
||||
}
|
||||
|
||||
/* Init the mutex, if not already */
|
||||
trace_imc_refc[core_id].id = core_id;
|
||||
mutex_init(&trace_imc_refc[core_id].lock);
|
||||
spin_lock_init(&trace_imc_refc[core_id].lock);
|
||||
|
||||
mtspr(SPRN_LDBAR, 0);
|
||||
return 0;
|
||||
@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
|
||||
* Reduce the refc if any trace-imc event running
|
||||
* on this cpu.
|
||||
*/
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == IMC_DOMAIN_TRACE)
|
||||
imc_global_refc.refc--;
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
|
||||
}
|
||||
|
||||
mtspr(SPRN_LDBAR, ldbar_value);
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
if (ref->refc == 0) {
|
||||
if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
|
||||
get_hard_smp_processor_id(smp_processor_id()))) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
++ref->refc;
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&ref->lock);
|
||||
spin_lock(&ref->lock);
|
||||
ref->refc--;
|
||||
if (ref->refc == 0) {
|
||||
if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
|
||||
get_hard_smp_processor_id(smp_processor_id()))) {
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
|
||||
return;
|
||||
}
|
||||
} else if (ref->refc < 0) {
|
||||
ref->refc = 0;
|
||||
}
|
||||
mutex_unlock(&ref->lock);
|
||||
spin_unlock(&ref->lock);
|
||||
|
||||
trace_imc_event_stop(event, flags);
|
||||
}
|
||||
@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
|
||||
* no other thread is running any core/thread imc
|
||||
* events
|
||||
*/
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
spin_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
|
||||
/*
|
||||
* No core/thread imc events are running in the
|
||||
@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
|
||||
imc_global_refc.id = IMC_DOMAIN_TRACE;
|
||||
imc_global_refc.refc++;
|
||||
} else {
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
spin_unlock(&imc_global_refc.lock);
|
||||
|
||||
event->hw.idx = -1;
|
||||
|
||||
@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
|
||||
i = 0;
|
||||
for_each_node(nid) {
|
||||
/*
|
||||
* Mutex lock to avoid races while tracking the number of
|
||||
* Take the lock to avoid races while tracking the number of
|
||||
* sessions using the chip's nest pmu units.
|
||||
*/
|
||||
mutex_init(&nest_imc_refc[i].lock);
|
||||
spin_lock_init(&nest_imc_refc[i].lock);
|
||||
|
||||
/*
|
||||
* Loop to init the "id" with the node_id. Variable "i" initialized to
|
||||
@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
|
||||
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
|
||||
{
|
||||
if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
|
||||
mutex_lock(&nest_init_lock);
|
||||
spin_lock(&nest_init_lock);
|
||||
if (nest_pmus == 1) {
|
||||
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
|
||||
kfree(nest_imc_refc);
|
||||
@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
|
||||
|
||||
if (nest_pmus > 0)
|
||||
nest_pmus--;
|
||||
mutex_unlock(&nest_init_lock);
|
||||
spin_unlock(&nest_init_lock);
|
||||
}
|
||||
|
||||
/* Free core_imc memory */
|
||||
@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
|
||||
* rest. To handle the cpuhotplug callback unregister, we track
|
||||
* the number of nest pmus in "nest_pmus".
|
||||
*/
|
||||
mutex_lock(&nest_init_lock);
|
||||
spin_lock(&nest_init_lock);
|
||||
if (nest_pmus == 0) {
|
||||
ret = init_nest_pmu_ref();
|
||||
if (ret) {
|
||||
mutex_unlock(&nest_init_lock);
|
||||
spin_unlock(&nest_init_lock);
|
||||
kfree(per_nest_pmu_arr);
|
||||
per_nest_pmu_arr = NULL;
|
||||
goto err_free_mem;
|
||||
@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
|
||||
/* Register for cpu hotplug notification. */
|
||||
ret = nest_pmu_cpumask_init();
|
||||
if (ret) {
|
||||
mutex_unlock(&nest_init_lock);
|
||||
spin_unlock(&nest_init_lock);
|
||||
kfree(nest_imc_refc);
|
||||
kfree(per_nest_pmu_arr);
|
||||
per_nest_pmu_arr = NULL;
|
||||
@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
|
||||
}
|
||||
}
|
||||
nest_pmus++;
|
||||
mutex_unlock(&nest_init_lock);
|
||||
spin_unlock(&nest_init_lock);
|
||||
break;
|
||||
case IMC_DOMAIN_CORE:
|
||||
ret = core_imc_pmu_cpumask_init();
|
||||
|
@@ -23,9 +23,9 @@
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))

#ifdef CONFIG_KERNEL_BZIP2
#if defined(CONFIG_KERNEL_BZIP2)
#define BOOT_HEAP_SIZE 0x400000
#elif CONFIG_KERNEL_ZSTD
#elif defined(CONFIG_KERNEL_ZSTD)
#define BOOT_HEAP_SIZE 0x30000
#else
#define BOOT_HEAP_SIZE 0x10000

@@ -190,7 +190,6 @@ CONFIG_NFT_CT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
@@ -569,6 +568,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
# CONFIG_LEGACY_TIOCSTI is not set
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
@@ -660,6 +660,7 @@ CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
@@ -705,6 +706,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_INTEGRITY_PLATFORM_KEYRING=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
@@ -781,6 +783,7 @@ CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
@@ -848,7 +851,6 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
@@ -870,7 +872,6 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y

@@ -181,7 +181,6 @@ CONFIG_NFT_CT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
@@ -559,6 +558,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
# CONFIG_LEGACY_TIOCSTI is not set
CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
@@ -645,6 +645,7 @@ CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
@@ -688,6 +689,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_INTEGRITY_PLATFORM_KEYRING=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
@@ -766,6 +768,7 @@ CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
@@ -798,7 +801,6 @@ CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m

@@ -13,7 +13,6 @@ CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_RELOCATABLE is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
@@ -50,6 +49,7 @@ CONFIG_ZFCP=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_TIOCSTI is not set
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
# CONFIG_HMC_DRV is not set

@@ -131,19 +131,21 @@ struct hws_combined_entry {
struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
} __packed;

struct hws_trailer_entry {
union {
struct {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
unsigned int :29; /* 3 - 31: Reserved */
unsigned int bsdes:16; /* 32-47: size of basic SDE */
unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
};
unsigned long long flags; /* 0 - 63: All indicators */
union hws_trailer_header {
struct {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
unsigned int :29; /* 3 - 31: Reserved */
unsigned int bsdes:16; /* 32-47: size of basic SDE */
unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
unsigned long long overflow; /* 64 - Overflow Count */
};
unsigned long long overflow; /* 64 - sample Overflow count */
__uint128_t val;
};

struct hws_trailer_entry {
union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count */
unsigned char timestamp[16]; /* 16 - 31 timestamp */
unsigned long long reserved1; /* 32 -Reserved */
unsigned long long reserved2; /* */
@@ -290,14 +292,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
return USEC_PER_SEC * qsi->cpu_speed / rate;
}

#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL
#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL

/* Return TOD timestamp contained in an trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
/* TOD in STCKE format */
if (te->t)
if (te->header.t)
return *((unsigned long long *) &te->timestamp[1]);

/* TOD in STCK format */

@@ -4,8 +4,8 @@
*
* Copyright IBM Corp. 1999, 2020
*/
#ifndef DEBUG_H
#define DEBUG_H
#ifndef _ASM_S390_DEBUG_H
#define _ASM_S390_DEBUG_H

#include <linux/string.h>
#include <linux/spinlock.h>
@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);

#endif /* MODULE */

#endif /* DEBUG_H */
#endif /* _ASM_S390_DEBUG_H */

@@ -31,7 +31,7 @@
pcp_op_T__ *ptr__; \
preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
prev__ = READ_ONCE(*ptr__); \
do { \
old__ = prev__; \
new__ = old__ op (val); \

@@ -187,8 +187,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,

data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;

ptr = (void *)ipl_cert_list_addr;
end = ptr + ipl_cert_list_size;
@@ -225,6 +223,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
*lc_ipl_parmblock_ptr = (__u32)buf.mem;

if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;

ret = kexec_add_buffer(&buf);
out:
return ret;

@@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb)

static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
{
unsigned long sdb, *trailer;
struct hws_trailer_entry *te;
unsigned long sdb;

/* Allocate and initialize sample-data-block */
sdb = get_zeroed_page(gfp_flags);
if (!sdb)
return -ENOMEM;
trailer = trailer_entry_ptr(sdb);
*trailer = SDB_TE_ALERT_REQ_MASK;
te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
te->header.a = 1;

/* Link SDB into the sample-data-block-table */
*sdbt = sdb;
@@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
"%s: Found unknown"
" sampling data entry: te->f %i"
" basic.def %#4x (%p)\n", __func__,
te->f, sample->def, sample);
te->header.f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
* that are not full. Stop processing if the first
* invalid format was detected.
*/
if (!te->f)
if (!te->header.f)
break;
}

@@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
}
}

static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new)
{
asm volatile(
" cdsg %[old],%[new],%[ptr]\n"
: [old] "+d" (old), [ptr] "+QS" (*ptr)
: [new] "d" (new)
: "memory", "cc");
return old;
}

/* hw_perf_event_update() - Process sampling buffer
* @event: The perf event
* @flush_all: Flag to also flush partially filled sample-data-blocks
@@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
*/
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
unsigned long long event_overflow, sampl_overflow, num_sdb;
union hws_trailer_header old, prev, new;
struct hw_perf_event *hwc = &event->hw;
struct hws_trailer_entry *te;
unsigned long *sdbt;
unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
int done;

/*
@@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);

/* Leave loop if no more work to do (block full indicator) */
if (!te->f) {
if (!te->header.f) {
done = 1;
if (!flush_all)
break;
}

/* Check the sample overflow count */
if (te->overflow)
if (te->header.overflow)
/* Account sample overflows and, if a particular limit
* is reached, extend the sampling buffer.
* For details, see sfb_account_overflows().
*/
sampl_overflow += te->overflow;
sampl_overflow += te->header.overflow;

/* Timestamps are valid for full sample-data-blocks only */
debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
"overflow %llu timestamp %#llx\n",
__func__, (unsigned long)sdbt, te->overflow,
(te->f) ? trailer_timestamp(te) : 0ULL);
__func__, (unsigned long)sdbt, te->header.overflow,
(te->header.f) ? trailer_timestamp(te) : 0ULL);

/* Collect all samples from a single sample-data-block and
* flag if an (perf) event overflow happened. If so, the PMU
@@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
num_sdb++;

/* Reset trailer (using compare-double-and-swap) */
/* READ_ONCE() 16 byte header */
prev.val = __cdsg(&te->header.val, 0, 0);
do {
te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
te_flags |= SDB_TE_ALERT_REQ_MASK;
} while (!cmpxchg_double(&te->flags, &te->overflow,
te->flags, te->overflow,
te_flags, 0ULL));
old.val = prev.val;
new.val = prev.val;
new.f = 0;
new.a = 1;
new.overflow = 0;
prev.val = __cdsg(&te->header.val, old.val, new.val);
} while (prev.val != old.val);

/* Advance to next sample-data-block */
sdbt++;
@@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle)
range_scan = AUX_SDB_NUM_ALERT(aux);
for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
if (!te->header.f)
break;
}
/* i is num of SDBs which are full */
@@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle)

/* Remove alert indicators in the buffer */
te = aux_sdb_trailer(aux, aux->alert_mark);
te->flags &= ~SDB_TE_ALERT_REQ_MASK;
te->header.a = 0;

debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
__func__, i, range_scan, aux->head);
@@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle,
idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
SDB_TE_ALERT_REQ_MASK);
te->overflow = 0;
te->header.f = 0;
te->header.a = 0;
te->header.overflow = 0;
}
/* Save the position of empty SDBs */
aux->empty_mark = aux->head + range - 1;
@@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
/* Set alert indicator */
aux->alert_mark = aux->head + range/2 - 1;
te = aux_sdb_trailer(aux, aux->alert_mark);
te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
te->header.a = 1;

/* Reset hardware buffer head */
head = AUX_SDB_INDEX(aux, aux->head);
@@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle,
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
unsigned long long *overflow)
{
unsigned long long orig_overflow, orig_flags, new_flags;
union hws_trailer_header old, prev, new;
struct hws_trailer_entry *te;

te = aux_sdb_trailer(aux, alert_index);
/* READ_ONCE() 16 byte header */
prev.val = __cdsg(&te->header.val, 0, 0);
do {
orig_flags = te->flags;
*overflow = orig_overflow = te->overflow;
if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
old.val = prev.val;
new.val = prev.val;
*overflow = old.overflow;
if (old.f) {
/*
* SDB is already set by hardware.
* Abort and try to set somewhere
@@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
*/
return false;
}
new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
} while (!cmpxchg_double(&te->flags, &te->overflow,
orig_flags, orig_overflow,
new_flags, 0ULL));
new.a = 1;
new.overflow = 0;
prev.val = __cdsg(&te->header.val, old.val, new.val);
} while (prev.val != old.val);
return true;
}

@@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
unsigned long long *overflow)
{
unsigned long long orig_overflow, orig_flags, new_flags;
unsigned long i, range_scan, idx, idx_old;
union hws_trailer_header old, prev, new;
unsigned long long orig_overflow;
struct hws_trailer_entry *te;

debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
@@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
idx_old = idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
/* READ_ONCE() 16 byte header */
prev.val = __cdsg(&te->header.val, 0, 0);
do {
orig_flags = te->flags;
orig_overflow = te->overflow;
new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
old.val = prev.val;
new.val = prev.val;
orig_overflow = old.overflow;
new.f = 0;
new.overflow = 0;
if (idx == aux->alert_mark)
new_flags |= SDB_TE_ALERT_REQ_MASK;
new.a = 1;
else
new_flags &= ~SDB_TE_ALERT_REQ_MASK;
} while (!cmpxchg_double(&te->flags, &te->overflow,
orig_flags, orig_overflow,
new_flags, 0ULL));
new.a = 0;
prev.val = __cdsg(&te->header.val, old.val, new.val);
} while (prev.val != old.val);
*overflow += orig_overflow;
}


@@ -17,6 +17,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA

#define RUNTIME_DISCARD_EXIT

#define EMITS_PT_NOTE

#include <asm-generic/vmlinux.lds.h>
@@ -79,6 +81,7 @@ SECTIONS
_end_amode31_refs = .;
}

. = ALIGN(PAGE_SIZE);
_edata = .; /* End of data section */

/* will be freed after init */
@@ -193,6 +196,7 @@ SECTIONS

BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)

. = ALIGN(PAGE_SIZE);
_end = . ;

/*

@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
union esca_sigp_ctrl new_val = {0}, old_val;

old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
union bsca_sigp_ctrl new_val = {0}, old_val;

old_val = READ_ONCE(*sigp_ctrl);
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl old = *sigp_ctrl;
union esca_sigp_ctrl old;

old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl old = *sigp_ctrl;
union bsca_sigp_ctrl old;

old = READ_ONCE(*sigp_ctrl);
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}

@@ -28,7 +28,7 @@
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

typedef struct {
typedef union {
struct {
unsigned long pmd_low;
unsigned long pmd_high;

@@ -32,7 +32,7 @@ intcall:
movw %dx, %si
movw %sp, %di
movw $11, %cx
rep; movsd
rep; movsl

/* Pop full state from the stack */
popal
@@ -67,7 +67,7 @@ intcall:
jz 4f
movw %sp, %si
movw $11, %cx
rep; movsd
rep; movsl
4: addw $44, %sp

/* Restore state and return */

@@ -41,6 +41,7 @@
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
* Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
* MTL
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
@@ -51,50 +52,50 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
* TGL,TNT,RKL,ADL,RPL,SPR
* TGL,TNT,RKL,ADL,RPL,SPR,MTL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
* Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
* ICL,TGL,RKL,ADL,RPL
* ICL,TGL,RKL,ADL,RPL,MTL
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
* RPL,SPR
* RPL,SPR,MTL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
* GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
* ADL,RPL
* ADL,RPL,MTL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
* TGL,TNT,RKL,ADL,RPL,SPR
* TGL,TNT,RKL,ADL,RPL,SPR,MTL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
* KBL,CML,ICL,TGL,RKL,ADL,RPL
* KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
* Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
* ADL,RPL
* ADL,RPL,MTL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
* Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
* ADL,RPL
* ADL,RPL,MTL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
* TNT,RKL,ADL,RPL
* TNT,RKL,ADL,RPL,MTL
* Scope: Package (physical package)
*
*/
@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &adl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
{},
};

@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_EMERALDRAPIDS_X:

case INTEL_FAM6_ATOM_SILVERMONT:
case INTEL_FAM6_ATOM_SILVERMONT_D:
@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P:
case INTEL_FAM6_RAPTORLAKE_S:
case INTEL_FAM6_METEORLAKE:
case INTEL_FAM6_METEORLAKE_L:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;

@@ -1111,6 +1111,7 @@ struct msr_bitmap_range {

/* Xen emulation context */
struct kvm_xen {
struct mutex xen_lock;
u32 xen_version;
bool long_mode;
bool runstate_update_flag;

@@ -2364,9 +2364,8 @@ static int mp_irqdomain_create(int ioapic)
return -ENODEV;
}

ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
(void *)(long)ioapic);

ip->irqdomain = irq_domain_create_hierarchy(parent, 0, hwirqs, fn, cfg->ops,
(void *)(long)ioapic);
if (!ip->irqdomain) {
/* Release fw handle if it was allocated above */
if (!cfg->dev)
@@ -2374,8 +2373,6 @@ static int mp_irqdomain_create(int ioapic)
return -ENOMEM;
}

ip->irqdomain->parent = parent;

if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
cfg->type == IOAPIC_DOMAIN_STRICT)
ioapic_dynirq_base = max(ioapic_dynirq_base,

@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
return entry;
}

static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
u64 msr_val;

/*
* As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
* with a valid event code for supported resource type and the bits
* IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
* IA32_QM_CTR.data (bits 61:0) reports the monitored data.
* IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
* are error bits.
*/
wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
rdmsrl(MSR_IA32_QM_CTR, msr_val);

if (msr_val & RMID_VAL_ERROR)
return -EIO;
if (msr_val & RMID_VAL_UNAVAIL)
return -EINVAL;

*val = msr_val;
return 0;
}

static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
u32 rmid,
enum resctrl_event_id eventid)
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
struct arch_mbm_state *am;

am = get_arch_mbm_state(hw_dom, rmid, eventid);
if (am)
if (am) {
memset(am, 0, sizeof(*am));

/* Record any initial, non-zero count value. */
__rmid_read(rmid, eventid, &am->prev_msr);
}
}

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
struct arch_mbm_state *am;
u64 msr_val, chunks;
int ret;

if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
return -EINVAL;

/*
* As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
* with a valid event code for supported resource type and the bits
* IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
* IA32_QM_CTR.data (bits 61:0) reports the monitored data.
* IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
* are error bits.
*/
wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
rdmsrl(MSR_IA32_QM_CTR, msr_val);

if (msr_val & RMID_VAL_ERROR)
return -EIO;
if (msr_val & RMID_VAL_UNAVAIL)
return -EINVAL;
ret = __rmid_read(rmid, eventid, &msr_val);
if (ret)
return ret;

am = get_arch_mbm_state(hw_dom, rmid, eventid);
if (am) {

@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
/*
* Ensure the task's closid and rmid are written before determining if
* the task is current that will decide if it will be interrupted.
* This pairs with the full barrier between the rq->curr update and
* resctrl_sched_in() during context switch.
*/
barrier();
smp_mb();

/*
* By now, the task's closid and rmid are set. If the task is current
@@ -2401,6 +2403,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
WRITE_ONCE(t->closid, to->closid);
WRITE_ONCE(t->rmid, to->mon.rmid);

/*
* Order the closid/rmid stores above before the loads
* in task_curr(). This pairs with the full barrier
* between the rq->curr update and resctrl_sched_in()
* during context switch.
*/
smp_mb();

/*
* If the task is on a CPU, set the CPU in the mask.
* The detection is inaccurate as tasks might move or

@@ -770,15 +770,21 @@ struct kvm_cpuid_array {
int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
u32 function, u32 index)
static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
struct kvm_cpuid_entry2 *entry;

if (array->nent >= array->maxnent)
return NULL;

entry = &array->entries[array->nent++];
return &array->entries[array->nent++];
}

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
u32 function, u32 index)
{
struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

if (!entry)
return NULL;

memset(entry, 0, sizeof(*entry));
entry->function = function;
@@ -956,22 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = edx.full;
break;
}
/*
* Per Intel's SDM, the 0x1f is a superset of 0xb,
* thus they can be handled by common code.
*/
case 0x1f:
case 0xb:
/*
* Populate entries until the level type (ECX[15:8]) of the
* previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is
* the starting entry, filled by the primary do_host_cpuid().
* No topology; a valid topology is indicated by the presence
* of subleaf 1.
*/
for (i = 1; entry->ecx & 0xff00; ++i) {
entry = do_host_cpuid(array, function, i);
if (!entry)
goto out;
}
entry->eax = entry->ebx = entry->ecx = 0;
break;
case 0xd: {
u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
@@ -1202,6 +1199,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x8000001e:
/* Do not return host topology information. */
entry->eax = entry->ebx = entry->ecx = 0;
entry->edx = 0; /* reserved */
break;
case 0x8000001F:
if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {

@@ -138,15 +138,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
c->intercepts[i] = h->intercepts[i];

if (g->int_ctl & V_INTR_MASKING_MASK) {
/* We only want the cr8 intercept bits of L1 */
vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

/*
* Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
* affect any interrupt we may want to inject; therefore,
* interrupt window vmexits are irrelevant to L0.
* Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8
* does not affect any interrupt we may want to inject;
* therefore, writes to CR8 are irrelevant to L0, as are
* interrupt window vmexits.
*/
vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
vmcb_clr_intercept(c, INTERCEPT_VINTR);
}


@@ -271,7 +271,15 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
* Attempt to obtain the GPC lock on *both* (if there are two)
* gfn_to_pfn caches that cover the region.
*/
read_lock_irqsave(&gpc1->lock, flags);
if (atomic) {
local_irq_save(flags);
if (!read_trylock(&gpc1->lock)) {
local_irq_restore(flags);
return;
}
} else {
read_lock_irqsave(&gpc1->lock, flags);
}
while (!kvm_gpc_check(gpc1, user_len1)) {
read_unlock_irqrestore(&gpc1->lock, flags);

@@ -304,9 +312,18 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
* The guest's runstate_info is split across two pages and we
* need to hold and validate both GPCs simultaneously. We can
* declare a lock ordering GPC1 > GPC2 because nothing else
* takes them more than one at a time.
* takes them more than one at a time. Set a subclass on the
* gpc1 lock to make lockdep shut up about it.
*/
read_lock(&gpc2->lock);
lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
if (atomic) {
if (!read_trylock(&gpc2->lock)) {
read_unlock_irqrestore(&gpc1->lock, flags);
return;
}
} else {
read_lock(&gpc2->lock);
}

if (!kvm_gpc_check(gpc2, user_len2)) {
read_unlock(&gpc2->lock);
@@ -590,26 +607,26 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
r = -EINVAL;
} else {
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
kvm->arch.xen.long_mode = !!data->u.long_mode;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
r = 0;
}
break;

case KVM_XEN_ATTR_TYPE_SHARED_INFO:
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
break;

case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
if (data->u.vector && data->u.vector < 0x10)
r = -EINVAL;
else {
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
kvm->arch.xen.upcall_vector = data->u.vector;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
r = 0;
}
break;
@@ -619,9 +636,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;

case KVM_XEN_ATTR_TYPE_XEN_VERSION:
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
kvm->arch.xen.xen_version = data->u.xen_version;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
r = 0;
break;

@@ -630,9 +647,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
r = -EOPNOTSUPP;
break;
}
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
r = 0;
break;

@@ -647,7 +664,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
int r = -ENOENT;

mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);

switch (data->type) {
case KVM_XEN_ATTR_TYPE_LONG_MODE:
@@ -686,7 +703,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
}

mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
return r;
}

@@ -694,7 +711,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
int idx, r = -ENOENT;

mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
idx = srcu_read_lock(&vcpu->kvm->srcu);

switch (data->type) {
@@ -922,7 +939,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
}

srcu_read_unlock(&vcpu->kvm->srcu, idx);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
return r;
}

@@ -930,7 +947,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
int r = -ENOENT;

mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.xen.xen_lock);

switch (data->type) {
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
@@ -1013,7 +1030,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}

mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
return r;
}

@@ -1106,7 +1123,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
xhc->blob_size_32 || xhc->blob_size_64))
return -EINVAL;

mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);

if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
static_branch_inc(&kvm_xen_enabled.key);
@@ -1115,7 +1132,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)

memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
return 0;
}

@@ -1658,15 +1675,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
mm_borrowed = true;
}

/*
* For the irqfd workqueue, using the main kvm->lock mutex is
* fine since this function is invoked from kvm_set_irq() with
* no other lock held, no srcu. In future if it will be called
* directly from a vCPU thread (e.g. on hypercall for an IPI)
* then it may need to switch to using a leaf-node mutex for
* serializing the shared_info mapping.
*/
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);

/*
* It is theoretically possible for the page to be unmapped
@@ -1695,7 +1704,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, idx);
} while(!rc);

mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);

if (mm_borrowed)
kthread_unuse_mm(kvm->mm);
@@ -1811,7 +1820,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
int ret;

/* Protect writes to evtchnfd as well as the idr lookup. */
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);

ret = -ENOENT;
@@ -1842,7 +1851,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
}
ret = 0;
out_unlock:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
return ret;
}

@@ -1905,10 +1914,10 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
}

mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
GFP_KERNEL);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
if (ret >= 0)
return 0;

@@ -1926,9 +1935,9 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
struct evtchnfd *evtchnfd;

mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);
evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);

if (!evtchnfd)
return -ENOENT;
@@ -1946,7 +1955,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
int i;
int n = 0;

mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.xen.xen_lock);

/*
* Because synchronize_srcu() cannot be called inside the
@@ -1958,7 +1967,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)

all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
if (!all_evtchnfds) {
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);
return -ENOMEM;
}

@@ -1967,7 +1976,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
all_evtchnfds[n++] = evtchnfd;
idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
}
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.xen.xen_lock);

synchronize_srcu(&kvm->srcu);

@@ -2069,6 +2078,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)

void kvm_xen_init_vm(struct kvm *kvm)
{
mutex_init(&kvm->arch.xen.xen_lock);
idr_init(&kvm->arch.xen.evtchn_ports);
kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}

@@ -26,6 +26,7 @@
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>

/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -804,6 +805,9 @@ void __init poking_init(void)
poking_mm = mm_alloc();
BUG_ON(!poking_mm);

/* Xen PV guests need the PGD to be pinned. */
paravirt_arch_dup_mmap(NULL, poking_mm);

/*
* Randomize the poking address, but make sure that the following page
* will be mapped at the same PMD. We need 2 pages, so find space for 3,

@@ -387,7 +387,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
u8 mtrr_type, uniform;

mtrr_type = mtrr_type_lookup(start, end, &uniform);
if (mtrr_type != MTRR_TYPE_WRBACK)
if (mtrr_type != MTRR_TYPE_WRBACK &&
mtrr_type != MTRR_TYPE_INVALID)
return _PAGE_CACHE_MODE_UC_MINUS;

return _PAGE_CACHE_MODE_WB;

@@ -12,6 +12,7 @@
*/

#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitmap.h>
@@ -442,17 +443,42 @@ static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
return mcfg_res.flags;
}

static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
{
#ifdef CONFIG_EFI
efi_memory_desc_t *md;
u64 size, mmio_start, mmio_end;

for_each_efi_memory_desc(md) {
if (md->type == EFI_MEMORY_MAPPED_IO) {
size = md->num_pages << EFI_PAGE_SHIFT;
mmio_start = md->phys_addr;
mmio_end = mmio_start + size;

/*
* N.B. Caller supplies (start, start + size),
* so to match, mmio_end is the first address
* *past* the EFI_MEMORY_MAPPED_IO area.
*/
if (mmio_start <= start && end <= mmio_end)
return true;
}
}
#endif

return false;
}

typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);

static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
struct pci_mmcfg_region *cfg,
struct device *dev, int with_e820)
struct device *dev, const char *method)
{
u64 addr = cfg->res.start;
u64 size = resource_size(&cfg->res);
u64 old_size = size;
int num_buses;
char *method = with_e820 ? "E820" : "ACPI motherboard resources";

while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
size >>= 1;
@@ -464,10 +490,10 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
return false;

if (dev)
dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
&cfg->res, method);
else
pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
&cfg->res, method);

if (old_size != size) {
@@ -500,7 +526,8 @@ static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
if (!early && !acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
"ACPI motherboard resource"))
return true;

if (dev)
@@ -513,6 +540,10 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
"MMCONFIG at %pR not reserved in "
"ACPI motherboard resources\n",
&cfg->res);

if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
"EfiMemoryMappedIO"))
return true;
}

/*
@@ -527,7 +558,8 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
/* Don't try to do this check unless configuration
type 1 is available. how about type 2 ?*/
if (raw_pci_ops)
return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);
return is_mmconf_reserved(e820__mapped_all, cfg, dev,
"E820 entry");

return false;
}

@@ -166,10 +166,9 @@ static struct irq_domain *uv_get_irq_domain(void)
if (!fn)
goto out;

uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
if (uv_domain)
uv_domain->parent = x86_vector_domain;
else
uv_domain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, fn,
&uv_domain_ops, NULL);
if (!uv_domain)
irq_domain_free_fwnode(fn);
out:
mutex_unlock(&uv_lock);

@@ -7,7 +7,7 @@
#include <asm/elf.h>


Elf32_Half elf_core_extra_phdrs(void)
Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
}
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
return 1;
}

size_t elf_core_extra_data_size(void)
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
if ( vsyscall_ehdr ) {
const struct elfhdr *const ehdrp =

@@ -134,11 +134,6 @@ static inline unsigned p2m_mid_index(unsigned long pfn)
return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
return pfn % P2M_PER_PAGE;
}

static void p2m_top_mfn_init(unsigned long *top)
{
unsigned i;

@@ -154,11 +154,6 @@ struct thread_struct {
unsigned long ra; /* kernel's a0: return address and window call size */
unsigned long sp; /* kernel's a1: stack pointer */

/* struct xtensa_cpuinfo info; */

unsigned long bad_vaddr; /* last user fault */
unsigned long bad_uaddr; /* last kernel fault accessing user space */
unsigned long error_code;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
@@ -176,10 +171,6 @@ struct thread_struct {
{ \
ra: 0, \
sp: sizeof(init_stack) + (long) &init_stack, \
/*info: {0}, */ \
bad_vaddr: 0, \
bad_uaddr: 0, \
error_code: 0, \
}


@@ -362,8 +362,6 @@ static void do_unaligned_user(struct pt_regs *regs)
__die_if_kernel("Unhandled unaligned exception in kernel",
regs, SIGKILL);

current->thread.bad_vaddr = regs->excvaddr;
current->thread.error_code = -3;
pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
"(pid = %d, pc = %#010lx)\n",
regs->excvaddr, current->comm,

@@ -206,8 +206,6 @@ void do_page_fault(struct pt_regs *regs)
bad_area:
mmap_read_unlock(mm);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
force_sig_fault(SIGSEGV, code, (void *) address);
return;
}
@@ -232,7 +230,6 @@ void do_page_fault(struct pt_regs *regs)
/* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
current->thread.bad_vaddr = address;
force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

/* Kernel mode? Handle exceptions or die */
@@ -252,7 +249,6 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
if ((entry = search_exception_tables(regs->pc)) != NULL) {
pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup);
current->thread.bad_uaddr = address;
regs->pc = entry->fixup;
return;
}

@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
*
* Decrements the refcount of the request_queue and free it when the refcount
* reaches 0.
*
* Context: Can sleep.
*/
void blk_put_queue(struct request_queue *q)
{
might_sleep();
if (refcount_dec_and_test(&q->refs))
blk_free_queue(q);
}

@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
|
||||
}
|
||||
|
||||
#define FIND_CHILD_MIN_SCORE 1
|
||||
#define FIND_CHILD_MAX_SCORE 2
|
||||
#define FIND_CHILD_MID_SCORE 2
|
||||
#define FIND_CHILD_MAX_SCORE 3
|
||||
|
||||
static int match_any(struct acpi_device *adev, void *not_used)
|
||||
{
|
||||
@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
 		return -ENODEV;
 
 	status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
-	if (status == AE_NOT_FOUND)
+	if (status == AE_NOT_FOUND) {
+		/*
+		 * Special case: backlight device objects without _STA are
+		 * preferred to other objects with the same _ADR value, because
+		 * it is more likely that they are actually useful.
+		 */
+		if (adev->pnp.type.backlight)
+			return FIND_CHILD_MID_SCORE;
+
 		return FIND_CHILD_MIN_SCORE;
+	}
 
 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
 		return -ENODEV;
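Together with the new FIND_CHILD_MID_SCORE above, the scoring for ACPI objects sharing one _ADR now has three levels. A condensed sketch of the resulting ranking (hypothetical helper, not the kernel's actual matching loop):

	/* Hypothetical condensation of find_child_checks() scoring:
	 * the caller keeps the candidate with the highest score. */
	static int score(bool has_sta, bool sta_enabled, bool is_backlight)
	{
		if (!has_sta)		/* no _STA method at all */
			return is_backlight ? FIND_CHILD_MID_SCORE   /* 2 */
					    : FIND_CHILD_MIN_SCORE;  /* 1 */
		return sta_enabled ? FIND_CHILD_MAX_SCORE /* 3 */ : -ENODEV;
	}

So a backlight object without _STA now beats a plain object without _STA, but still loses to any object whose _STA reports it enabled.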
@@ -432,6 +432,13 @@ static const struct dmi_system_id asus_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
 		},
 	},
+	{
+		.ident = "Asus ExpertBook B2402CBA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+		},
+	},
 	{
 		.ident = "Asus ExpertBook B2502",
 		.matches = {
@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
 		 * Some devices don't reliably have _HIDs & _CIDs, so add
 		 * synthetic HIDs to make sure drivers can find them.
 		 */
-		if (acpi_is_video_device(handle))
+		if (acpi_is_video_device(handle)) {
 			acpi_add_id(pnp, ACPI_VIDEO_HID);
-		else if (acpi_bay_match(handle))
+			pnp->type.backlight = 1;
+			break;
+		}
+		if (acpi_bay_match(handle))
 			acpi_add_id(pnp, ACPI_BAY_HID);
 		else if (acpi_dock_match(handle))
 			acpi_add_id(pnp, ACPI_DOCK_HID);
@@ -50,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
 		acpi_backlight_cmdline = acpi_backlight_video;
 	if (!strcmp("native", acpi_video_backlight_string))
 		acpi_backlight_cmdline = acpi_backlight_native;
+	if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
+		acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
+	if (!strcmp("apple_gmux", acpi_video_backlight_string))
+		acpi_backlight_cmdline = acpi_backlight_apple_gmux;
 	if (!strcmp("none", acpi_video_backlight_string))
 		acpi_backlight_cmdline = acpi_backlight_none;
 }
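The two new strcmp() branches wire additional values into the existing acpi_backlight= boot option alongside vendor, video, native and none; assuming the string is still fed in through the usual command-line hook, an affected machine can force the new handlers with, for example:

	acpi_backlight=nvidia_wmi_ec

on the kernel command line.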
@@ -640,6 +640,7 @@ config PATA_CS5530
 config PATA_CS5535
 	tristate "CS5535 PATA support (Experimental)"
 	depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
+	depends on !UML
 	help
 	  This option enables support for the NatSemi/AMD CS5535
 	  companion chip used with the Geode processor family.
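The extra "depends on !UML" line keeps this driver out of User Mode Linux compile-test builds, likely because the Geode companion-chip driver's low-level x86 accesses have no UML counterpart, so building it there yields only breakage rather than coverage.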
@@ -524,7 +524,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 	return 0;
 }
 
-static int xen_blkbk_remove(struct xenbus_device *dev)
+static void xen_blkbk_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -547,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		/* Put the reference we set in xen_blkif_alloc(). */
 		xen_blkif_put(be->blkif);
 	}
-
-	return 0;
 }
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -2467,7 +2467,7 @@ static void blkback_changed(struct xenbus_device *dev,
 	}
 }
 
-static int blkfront_remove(struct xenbus_device *xbdev)
+static void blkfront_remove(struct xenbus_device *xbdev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
 
@@ -2488,7 +2488,6 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 	}
 
 	kfree(info);
-	return 0;
 }
 
 static int blkfront_is_ready(struct xenbus_device *dev)
@@ -360,14 +360,13 @@ static int tpmfront_probe(struct xenbus_device *dev,
 	return tpm_chip_register(priv->chip);
 }
 
-static int tpmfront_remove(struct xenbus_device *dev)
+static void tpmfront_remove(struct xenbus_device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
 	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
 	tpm_chip_unregister(chip);
 	ring_free(priv);
 	dev_set_drvdata(&chip->dev, NULL);
-	return 0;
 }
 
 static int tpmfront_resume(struct xenbus_device *dev)
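The xen_blkbk_remove(), blkfront_remove() and tpmfront_remove() changes above are the same mechanical conversion: xenbus .remove() callbacks always returned 0 and the bus code had nothing useful to do with a failure, so the callback becomes void. A minimal sketch of the resulting driver shape (hypothetical names, assuming the 6.3 struct xenbus_driver where .remove returns void):

	/* Hypothetical xenbus driver skeleton after the conversion:
	 * teardown cannot fail, so remove() just releases resources. */
	static void examplefront_remove(struct xenbus_device *dev)
	{
		struct example_info *info = dev_get_drvdata(&dev->dev);

		example_disconnect(info);	/* hypothetical helper */
		kfree(info);
	}

	static struct xenbus_driver examplefront_driver = {
		.ids	= examplefront_ids,
		.probe	= examplefront_probe,
		.remove	= examplefront_remove,	/* was int, now void */
	};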
@@ -307,6 +307,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 		max_perf = min_perf;
 
 	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+	cpufreq_cpu_put(policy);
 }
 
 static int amd_get_min_freq(struct amd_cpudata *cpudata)
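The added cpufreq_cpu_put(policy) pairs with the cpufreq_cpu_get() that this function uses earlier to look up the policy, closing a reference leak on every invocation of the fast path. The general pattern, sketched:

	/* Balanced policy reference pattern (sketch): cpufreq_cpu_get()
	 * takes a reference on success and must be matched by a put. */
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;
	/* ... use policy->driver_data ... */
	cpufreq_cpu_put(policy);	/* drops the reference taken above */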
@@ -280,6 +280,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = transition_latency;
 	policy->dvfs_possible_from_any_cpu = true;
 	policy->fast_switch_possible = true;
+	policy->suspend_freq = freq_table[0].frequency;
 
 	if (policy_has_boost_freq(policy)) {
 		ret = cpufreq_enable_boost_support();
@@ -321,7 +322,6 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
 	.flags		= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
 			  CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
 	.verify		= cpufreq_generic_frequency_table_verify,
-	.attr		= cpufreq_generic_attr,
 	.get		= apple_soc_cpufreq_get_rate,
 	.init		= apple_soc_cpufreq_init,
 	.exit		= apple_soc_cpufreq_exit,
@@ -329,6 +329,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
 	.fast_switch	= apple_soc_cpufreq_fast_switch,
 	.register_em	= cpufreq_register_em_with_opp,
 	.attr		= apple_soc_cpufreq_hw_attr,
+	.suspend	= cpufreq_generic_suspend,
 };
 
 static int __init apple_soc_cpufreq_module_init(void)
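These two apple-soc hunks add suspend handling as a pair: the init hunk records a fixed table entry (freq_table[0]) in policy->suspend_freq, and .suspend = cpufreq_generic_suspend makes the core switch to it on system suspend. Paraphrasing what the generic helper does (condensed from the cpufreq core, pr_debug noise omitted):

	int cpufreq_generic_suspend(struct cpufreq_policy *policy)
	{
		if (!policy->suspend_freq)	/* driver opted out */
			return 0;

		/* pin the policy at the driver-chosen suspend frequency */
		return __cpufreq_driver_target(policy, policy->suspend_freq,
					       CPUFREQ_RELATION_H);
	}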
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
 		return -ENODEV;
 	}
 
-	clk = clk_get(cpu_dev, 0);
+	clk = clk_get(cpu_dev, NULL);
 	if (IS_ERR(clk)) {
 		dev_err(cpu_dev, "Cannot get clock for CPU0\n");
 		return PTR_ERR(clk);
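clk_get() takes a struct device * and a const char *con_id, so the second argument is a pointer: passing the literal 0 only works because it doubles as a null pointer constant, and it trips sparse's "Using plain integer as NULL pointer" warning. Spelling it NULL performs the same lookup (no connection id), stated correctly.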
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
 	cpu_data = policy->driver_data;
 	perf_caps = &cpu_data->perf_caps;
 	max_cap = arch_scale_cpu_capacity(cpu);
-	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+	min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+			  perf_caps->highest_perf);
 	if ((min_cap == 0) || (max_cap < min_cap))
 		return 0;
 	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
 	cpu_data = policy->driver_data;
 	perf_caps = &cpu_data->perf_caps;
 	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
-	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
-			  perf_caps->highest_perf);
-
-	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+	min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+			  perf_caps->highest_perf);
+	perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+			    max_cap);
 	min_step = min_cap / CPPC_EM_CAP_STEP;
 	max_step = max_cap / CPPC_EM_CAP_STEP;
 
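Both cppc hunks fix the same class of bug: on a 32-bit kernel, max_cap * perf_caps->lowest_perf (and CPPC_EM_CAP_STEP * highest_perf) multiply unsigned-long quantities that wrap at 2^32 before div_u64() ever sees the product, so one operand must be widened up front. A standalone demonstration with illustrative numbers (not real CPPC values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t max_cap = 1024;
		uint32_t lowest_perf = 0x500000, highest_perf = 0xa00000;

		/* 32-bit product wraps modulo 2^32 before the division */
		uint32_t wrapped = max_cap * lowest_perf;
		/* widening one operand promotes the whole multiplication */
		uint64_t correct = (uint64_t)max_cap * lowest_perf;

		printf("wrapped min_cap: %u\n", wrapped / highest_perf);
		printf("correct min_cap: %llu\n",
		       (unsigned long long)(correct / highest_perf));
		return 0;
	}

This prints 102 for the wrapped computation and 512 for the widened one.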
@@ -137,6 +137,7 @@ static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "nvidia,tegra30", },
 	{ .compatible = "nvidia,tegra124", },
 	{ .compatible = "nvidia,tegra210", },
+	{ .compatible = "nvidia,tegra234", },
 
 	{ .compatible = "qcom,apq8096", },
 	{ .compatible = "qcom,msm8996", },
@@ -150,6 +151,7 @@ static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "qcom,sdm845", },
 	{ .compatible = "qcom,sm6115", },
 	{ .compatible = "qcom,sm6350", },
+	{ .compatible = "qcom,sm6375", },
 	{ .compatible = "qcom,sm8150", },
 	{ .compatible = "qcom,sm8250", },
 	{ .compatible = "qcom,sm8350", },
Some files were not shown because too many files have changed in this diff.