Linux 6.10-rc6

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmaB0NweHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGkvwH/36UJRk/o6wvXnyH
 E6QjCSWo2226APyWks22NjtC3I/8Iqdvkneuh6wG0qL2sXAB078EMjUq5R81bF8H
 wWFBJwetjYTp8GEyLioMEb2wCH/J3R29dLFC4UYTplafXRGP6//xcpJaKmTxcgdR
 31IzvTPXbApZ7L3k1U6rA2bK9PNKcFCOvZlrNMUCuwMrabymHsDfOUt1DqXyg2xp
 zjqiWYBwlklozmgawSWt/mdEgkWuTcAbg+KyqDVQF59s9aj/OOwZ0j+HACq5V8CM
 quTPIAYL6CC9p7uxa69lGr/sgC0Is/BZLPX7RTZAwCgarGvnX+1HUsjDcaFCtrVg
 O6fPUV8=
 =pgUx
 -----END PGP SIGNATURE-----

Merge v6.10-rc6 into drm-next

The exynos-next pull is based on a newer -rc than drm-next, hence
backmerge first to make sure the unrelated conflicts we accumulated
don't end up randomly in the exynos merge pull, but are separated out.

Conflicts are all benign: adjacent changes in amdgpu and fbdev-dma
code, and a cherry-pick conflict in xe.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   2024-07-05 10:35:14 +02:00
Commit: 86634fa4e6
776 changed files with 9976 additions and 5472 deletions

View File

@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2

View File

@ -72,6 +72,8 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
Andy Shevchenko <andy@kernel.org> <andy@smile.org.ua>
Andy Shevchenko <andy@kernel.org> <ext-andriy.shevchenko@nokia.com>
Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
@ -217,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@ -605,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>

View File

@ -788,25 +788,6 @@
Documentation/networking/netconsole.rst for an
alternative.
<DEVNAME>:<n>.<n>[,options]
Use the specified serial port on the serial core bus.
The addressing uses DEVNAME of the physical serial port
device, followed by the serial core controller instance,
and the serial port instance. The options are the same
as documented for the ttyS addressing above.
The mapping of the serial ports to the tty instances
can be viewed with:
$ ls -d /sys/bus/serial-base/devices/*:*.*/tty/*
/sys/bus/serial-base/devices/00:04:0.0/tty/ttyS0
In the above example, the console can be addressed with
console=00:04:0.0. Note that a console addressed this
way will only get added when the related device driver
is ready. The use of an earlycon parameter in addition to
the console may be desired for console output early on.
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio16,<addr>[,options]
@ -2192,12 +2173,6 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
it was mlock'ed and not explicitly munlock'ed
afterwards.
Format: 0 | 1
Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can

View File

@ -59,8 +59,8 @@ properties:
- 3
dma-channels:
minItems: 1
maxItems: 64
minimum: 1
maximum: 64
clocks:
minItems: 1

View File

@ -77,7 +77,7 @@ required:
- clocks
allOf:
- $ref: i2c-controller.yaml
- $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:

View File

@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.
allOf:
- $ref: i2c-controller.yaml#
- $ref: /schemas/i2c/i2c-controller.yaml#
properties:
compatible:

View File

@ -139,7 +139,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0

View File

@ -128,7 +128,6 @@ required:
- cell-index
- reg
- fsl,fman-ports
- ptp-timer
dependencies:
pcs-handle-names:

View File

@ -29,7 +29,6 @@ properties:
- qcom,pm7325-gpio
- qcom,pm7550ba-gpio
- qcom,pm8005-gpio
- qcom,pm8008-gpio
- qcom,pm8018-gpio
- qcom,pm8019-gpio
- qcom,pm8038-gpio
@ -126,7 +125,6 @@ allOf:
compatible:
contains:
enum:
- qcom,pm8008-gpio
- qcom,pmi8950-gpio
- qcom,pmr735d-gpio
then:
@ -448,7 +446,6 @@ $defs:
- gpio1-gpio10 for pm7325
- gpio1-gpio8 for pm7550ba
- gpio1-gpio4 for pm8005
- gpio1-gpio2 for pm8008
- gpio1-gpio6 for pm8018
- gpio1-gpio12 for pm8038
- gpio1-gpio40 for pm8058

View File

@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true
required:
- peer-hub

View File

@ -571,6 +571,7 @@ encoded manner. The codes are the following:
um userfaultfd missing tracking
uw userfaultfd wr-protect tracking
ss shadow stack page
sl sealed
== =======================================
Note that there is no guarantee that every flag and associated mnemonic will

View File

@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<!-- Updated to inclusive terminology by Wolfram Sang -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
@ -1120,7 +1121,7 @@
<rect
style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect4424-3-2-9-7"
width="112.5"
width="134.5"
height="113.75008"
x="112.5"
y="471.11221"
@ -1133,15 +1134,15 @@
y="521.46259"
id="text4349"><tspan
sodipodi:role="line"
x="167.5354"
x="178.5354"
y="521.46259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1273">I2C</tspan><tspan
sodipodi:role="line"
x="167.5354"
x="178.5354"
y="552.71259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1285">Master</tspan></text>
id="tspan1285">Controller</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3"
@ -1171,7 +1172,7 @@
x="318.59131"
y="552.08752"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287">Slave</tspan></text>
id="tspan1287">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
@ -1233,7 +1234,7 @@
x="468.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287-6">Slave</tspan></text>
id="tspan1287-6">Target</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3-1"
@ -1258,7 +1259,7 @@
x="618.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287-9">Slave</tspan></text>
id="tspan1287-9">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
d="m 150,583.61221 v 93.75"


View File

@ -3,29 +3,27 @@ Introduction to I2C and SMBus
=============================
I²C (pronounce: I squared C and written I2C in the kernel documentation) is
a protocol developed by Philips. It is a slow two-wire protocol (variable
speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides
a protocol developed by Philips. It is a two-wire protocol with variable
speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides
an inexpensive bus for connecting many types of devices with infrequent or
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
The latest official I2C specification is the `"I2C-bus specification and user
manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
published by NXP Semiconductors. However, you need to log-in to the site to
access the PDF. An older version of the specification (revision 6) is archived
`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
The latest official I2C specification is the `"I²C-bus specification and user
manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
published by NXP Semiconductors, version 7 as of this writing.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
a subset of I2C protocols and signaling. Many I2C devices will work on an
SMBus, but some SMBus protocols add semantics beyond what is required to
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
devices connected through SMBus are RAM modules configured using I2C EEPROMs,
and hardware monitoring chips.
Because the SMBus is mostly a subset of the generalized I2C bus, we can
use its protocols on many I2C systems. However, there are systems that don't
use its protocols on many I2C systems. However, there are systems that don't
meet both SMBus and I2C electrical constraints; and others which can't
implement all the common SMBus protocol semantics or messages.
@ -33,29 +31,52 @@ implement all the common SMBus protocol semantics or messages.
Terminology
===========
Using the terminology from the official documentation, the I2C bus connects
one or more *master* chips and one or more *slave* chips.
The I2C bus connects one or more controller chips and one or more target chips.
.. kernel-figure:: i2c_bus.svg
:alt: Simple I2C bus with one master and 3 slaves
:alt: Simple I2C bus with one controller and 3 targets
Simple I2C bus
A **master** chip is a node that starts communications with slaves. In the
Linux kernel implementation it is called an **adapter** or bus. Adapter
drivers are in the ``drivers/i2c/busses/`` subdirectory.
A **controller** chip is a node that starts communications with targets. In the
Linux kernel implementation it is also called an "adapter" or "bus". Controller
drivers are usually in the ``drivers/i2c/busses/`` subdirectory.
An **algorithm** contains general code that can be used to implement a
whole class of I2C adapters. Each specific adapter driver either depends on
an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
its own implementation.
An **algorithm** contains general code that can be used to implement a whole
class of I2C controllers. Each specific controller driver either depends on an
algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its
own implementation.
A **slave** chip is a node that responds to communications when addressed
by the master. In Linux it is called a **client**. Client drivers are kept
in a directory specific to the feature they provide, for example
``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
A **target** chip is a node that responds to communications when addressed by a
controller. In the Linux kernel implementation it is also called a "client".
While targets are usually separate external chips, Linux can also act as a
target (needs hardware support) and respond to another controller on the bus.
This is then called a **local target**. In contrast, an external chip is called
a **remote target**.
Target drivers are kept in a directory specific to the feature they provide,
for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
video-related chips.
For the example configuration in figure, you will need a driver for your
I2C adapter, and drivers for your I2C devices (usually one driver for each
device).
For the example configuration in the figure above, you will need one driver for
the I2C controller, and drivers for your I2C targets. Usually one driver for
each target.
Synonyms
--------
As mentioned above, the Linux I2C implementation historically uses the terms
"adapter" for controller and "client" for target. A number of data structures
have these synonyms in their name. So, when discussing implementation details,
you should be aware of these terms as well. The official wording is preferred,
though.
Outdated terminology
--------------------
In earlier I2C specifications, controller was named "master" and target was
named "slave". These terms have been obsoleted with v7 of the specification and
their use is also discouraged by the Linux Kernel Code of Conduct. You may
still find them in references to documentation which has not been updated. The
general attitude, however, is to use the inclusive terms: controller and
target. Work to replace the old terminology in the Linux Kernel is on-going.
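
As a minimal sketch of how this terminology maps onto code (not taken from
the kernel tree or from this merge; the "foo" names and the single-byte read
of register 0x00 are placeholder assumptions, only well-known i2c core APIs
are used), a target ("client") driver skeleton might look like:

	// Illustrative I2C target ("client") driver skeleton.
	// "foo" is a placeholder device name, not a real driver.
	#include <linux/i2c.h>
	#include <linux/module.h>

	static int foo_probe(struct i2c_client *client)
	{
		/* Talk to the target chip: read one byte from register 0x00. */
		s32 val = i2c_smbus_read_byte_data(client, 0x00);

		if (val < 0)
			return val;

		dev_info(&client->dev, "register 0x00 reads 0x%02x\n", val);
		return 0;
	}

	static const struct i2c_device_id foo_id[] = {
		{ "foo" },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static struct i2c_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
		.probe = foo_probe,
		.id_table = foo_id,
	};
	module_i2c_driver(foo_driver);

	MODULE_DESCRIPTION("Placeholder I2C target driver skeleton");
	MODULE_LICENSE("GPL");

The controller ("adapter") driver for the bus the chip sits on is registered
separately, under drivers/i2c/busses/, and the i2c core matches this target
driver to the device instance by name or device tree compatible.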

View File

@ -128,7 +128,7 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
/lib/modules/<kernel_release>/extra/, but a prefix may
/lib/modules/<kernel_release>/updates/, but a prefix may
be added with INSTALL_MOD_PATH (discussed in section 5).
clean
@ -417,7 +417,7 @@ directory:
And external modules are installed in:
/lib/modules/$(KERNELRELEASE)/extra/
/lib/modules/$(KERNELRELEASE)/updates/
5.1 INSTALL_MOD_PATH
--------------------
@ -438,10 +438,10 @@ And external modules are installed in:
-------------------
External modules are by default installed to a directory under
/lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
/lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
locate modules for a specific functionality in a separate
directory. For this purpose, use INSTALL_MOD_DIR to specify an
alternative name to "extra."::
alternative name to "updates."::
$ make INSTALL_MOD_DIR=gandalf -C $KDIR \
M=$PWD modules_install

View File

@ -1603,7 +1603,7 @@ operations:
attributes:
- header
reply:
attributes: &pse
attributes:
- header
- podl-pse-admin-state
- podl-pse-admin-control
@ -1620,7 +1620,10 @@ operations:
do:
request:
attributes: *pse
attributes:
- header
- podl-pse-admin-control
- c33-pse-admin-control
-
name: rss-get
doc: Get RSS params.

View File

@ -123,8 +123,6 @@ operations:
doc: dump pending nfsd rpc
attribute-set: rpc-status
dump:
pre: nfsd-nl-rpc-status-get-start
post: nfsd-nl-rpc-status-get-done
reply:
attributes:
- xid

View File

@ -32,6 +32,7 @@ Security-related interfaces
seccomp_filter
landlock
lsm
mfd_noexec
spec_ctrl
tee

View File

@ -0,0 +1,86 @@
.. SPDX-License-Identifier: GPL-2.0
==================================
Introduction of non-executable mfd
==================================
:Author:
Daniel Verkamp <dverkamp@chromium.org>
Jeff Xu <jeffxu@chromium.org>
:Contributor:
Aleksa Sarai <cyphar@cyphar.com>
Since Linux introduced the memfd feature, memfds have always had their
execute bit set, and the memfd_create() syscall doesn't allow setting
it differently.
However, in a secure-by-default system, such as ChromeOS, (where all
executables should come from the rootfs, which is protected by verified
boot), this executable nature of memfd opens a door for NoExec bypass
and enables “confused deputy attack”. E.g, in VRP bug [1]: cros_vm
process created a memfd to share the content with an external process,
however the memfd is overwritten and used for executing arbitrary code
and root escalation. [2] lists more VRP of this kind.
On the other hand, executable memfd has its legit use: runc uses memfds
seal and executable feature to copy the contents of the binary then
execute them. For such a system, we need a solution to differentiate runc's
use of executable memfds and an attacker's [3].
To address those above:
- Let memfd_create() set X bit at creation time.
- Let memfd be sealed for modifying X bit when NX is set.
- Add a new pid namespace sysctl: vm.memfd_noexec to help applications in
migrating and enforcing non-executable MFD.
User API
========
``int memfd_create(const char *name, unsigned int flags)``
``MFD_NOEXEC_SEAL``
When MFD_NOEXEC_SEAL bit is set in the ``flags``, memfd is created
with NX. F_SEAL_EXEC is set and the memfd can't be modified to
add X later. MFD_ALLOW_SEALING is also implied.
This is the most common case for the application to use memfd.
``MFD_EXEC``
When MFD_EXEC bit is set in the ``flags``, memfd is created with X.
Note:
``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. In case that
an app doesn't want sealing, it can add F_SEAL_SEAL after creation.
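
As a minimal userspace sketch of the flags described above (not part of the
kernel documentation added here; the "example" name, the 4096-byte size and
the fallback #define are illustrative, assuming a libc that provides
memfd_create(), glibc 2.27 or later), creating an exec-sealed memfd might
look like:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>
	#include <unistd.h>

	#ifndef MFD_NOEXEC_SEAL          /* older libc headers may lack this flag */
	#define MFD_NOEXEC_SEAL 0x0008U  /* value from include/uapi/linux/memfd.h */
	#endif

	int main(void)
	{
		/* Create a memfd with the X bit cleared and F_SEAL_EXEC applied;
		 * MFD_ALLOW_SEALING is implied by MFD_NOEXEC_SEAL. */
		int fd = memfd_create("example", MFD_CLOEXEC | MFD_NOEXEC_SEAL);

		if (fd < 0) {
			perror("memfd_create");
			return 1;
		}

		/* The fd can be sized, mapped and written as usual, but it can
		 * no longer be made executable: F_SEAL_EXEC blocks adding the
		 * x bit, so execveat() on this fd fails with EACCES. */
		if (ftruncate(fd, 4096) < 0) {
			perror("ftruncate");
			return 1;
		}

		close(fd);
		return 0;
	}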
Sysctl:
========
``pid namespaced sysctl vm.memfd_noexec``
The new pid namespaced sysctl vm.memfd_noexec has 3 values:
- 0: MEMFD_NOEXEC_SCOPE_EXEC
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_EXEC was set.
- 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_NOEXEC_SEAL was set.
- 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
memfd_create() without MFD_NOEXEC_SEAL will be rejected.
The sysctl allows finer control of memfd_create for old software that
doesn't set the executable bit; for example, a container with
vm.memfd_noexec=1 means the old software will create non-executable memfd
by default while new software can create executable memfd by setting
MFD_EXEC.
The value of vm.memfd_noexec is passed to child namespace at creation
time. In addition, the setting is hierarchical, i.e. during memfd_create,
we will search from current ns to root ns and use the most restrictive
setting.
[1] https://crbug.com/1305267
[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1
[3] https://lwn.net/Articles/781013/

View File

@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
space code performs the same algorithm of reading the TSC and
applying the scale and offset to get the constant 10 MHz clock.
Linux clockevents are based on Hyper-V synthetic timer 0. While
Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
/proc/interrupts. Clockevents based on the virtualized PIT and
local APIC timer also work, but the Hyper-V synthetic timer is
preferred.
Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. In older versions of Hyper-V, an interrupt from stimer0
results in a VMBus control message that is demultiplexed by
vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
documentation. In newer versions of Hyper-V, stimer0 interrupts can
be mapped to an architectural interrupt, which is referred to as
"Direct Mode". Linux prefers to use Direct Mode when available. Since
x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
and explicitly codes it to call the stimer0 interrupt handler. Hence
interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
rather than being associated with a Linux IRQ. Clockevents based on the
virtualized PIT and local APIC timer also work, but Hyper-V stimer0
is preferred.
The driver for the Hyper-V synthetic system clock and timers is
drivers/clocksource/hyperv_timer.c.

View File

@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
arm64, these synthetic registers must be accessed using explicit
hypercalls.
* VMbus: VMbus is a higher-level software construct that is built on
* VMBus: VMBus is a higher-level software construct that is built on
the other 3 mechanisms. It is a message passing interface between
the Hyper-V host and the Linux guest. It uses memory that is shared
between Hyper-V and the guest, along with various signaling
@ -54,8 +54,8 @@ x86/x64 architecture only.
.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs
VMbus is not documented. This documentation provides a high-level
overview of VMbus and how it works, but the details can be discerned
VMBus is not documented. This documentation provides a high-level
overview of VMBus and how it works, but the details can be discerned
only from the code.
Sharing Memory
@ -74,7 +74,7 @@ follows:
physical address space. How Hyper-V is told about the GPA or list
of GPAs varies. In some cases, a single GPA is written to a
synthetic register. In other cases, a GPA or list of GPAs is sent
in a VMbus message.
in a VMBus message.
* Hyper-V translates the GPAs into "real" physical memory addresses,
and creates a virtual mapping that it can use to access the memory.
@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
any hot-add CPUs.
A Linux guest CPU may be taken offline using the normal Linux
mechanisms, provided no VMbus channel interrupts are assigned to
the CPU. See the section on VMbus Interrupts for more details
on how VMbus channel interrupts can be re-assigned to permit
mechanisms, provided no VMBus channel interrupts are assigned to
the CPU. See the section on VMBus Interrupts for more details
on how VMBus channel interrupts can be re-assigned to permit
taking a CPU offline.
32-bit and 64-bit
@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
via flags in synthetic MSRs that Hyper-V provides to the guest,
and the guest code tests these flags.
VMbus has its own protocol version that is negotiated during the
initial VMbus connection from the guest to Hyper-V. This version
VMBus has its own protocol version that is negotiated during the
initial VMBus connection from the guest to Hyper-V. This version
number is also output to dmesg during boot. This version number
is checked in a few places in the code to determine if specific
functionality is present.
Furthermore, each synthetic device on VMbus also has a protocol
version that is separate from the VMbus protocol version. Device
Furthermore, each synthetic device on VMBus also has a protocol
version that is separate from the VMBus protocol version. Device
drivers for these synthetic devices typically negotiate the device
protocol version, and may test that protocol version to determine
if specific device functionality is present.

View File

@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
VMbus
VMBus
=====
VMbus is a software construct provided by Hyper-V to guest VMs. It
VMBus is a software construct provided by Hyper-V to guest VMs. It
consists of a control path and common facilities used by synthetic
devices that Hyper-V presents to guest VMs. The control path is
used to offer synthetic devices to the guest VM and, in some cases,
@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
signaling primitives to allow Hyper-V and the guest to interrupt
each other.
VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
establishes the VMbus control path with the Hyper-V host, then
VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
establishes the VMBus control path with the Hyper-V host, then
registers itself as a Linux bus driver. It implements the standard
bus functions for adding and removing devices to/from the bus.
@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
the synthetic SCSI controller is "storvsc". These drivers contain
functions with names like "storvsc_connect_to_vsp".
VMbus channels
VMBus channels
--------------
An instance of a synthetic device uses VMbus channels to communicate
An instance of a synthetic device uses VMBus channels to communicate
between the VSP and the VSC. Channels are bi-directional and used
for passing messages. Most synthetic devices use a single channel,
but the synthetic SCSI controller and synthetic NIC may use multiple
@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
actual ring. The size of the ring is determined by the VSC in the
guest and is specific to each synthetic device. The list of GPAs
making up the ring is communicated to the Hyper-V host over the
VMbus control path as a GPA Descriptor List (GPADL). See function
VMBus control path as a GPA Descriptor List (GPADL). See function
vmbus_establish_gpadl().
Each ring buffer is mapped into contiguous Linux kernel virtual
@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
approximately 1280 Mbytes. For versions prior to Windows Server
2019, the limit is approximately 384 Mbytes.
VMbus messages
--------------
All VMbus messages have a standard header that includes the message
length, the offset of the message payload, some flags, and a
VMBus channel messages
----------------------
All messages sent in a VMBus channel have a standard header that includes
the message length, the offset of the message payload, some flags, and a
transactionID. The portion of the message after the header is
unique to each VSP/VSC pair.
@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
buffer. For example, the storvsc driver uses this approach to
specify the data buffers to/from which disk I/O is done.
Three functions exist to send VMbus messages:
Three functions exist to send VMBus channel messages:
1. vmbus_sendpacket(): Control-only messages and messages with
embedded data -- no GPAs
@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
and valid messages, and Linux drivers for synthetic devices did not
fully validate messages. With the introduction of processor
technologies that fully encrypt guest memory and that allow the
guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
the Hyper-V host is no longer a valid assumption. The drivers for
VMbus synthetic devices are being updated to fully validate any
VMBus synthetic devices are being updated to fully validate any
values read from memory that is shared with Hyper-V, which includes
messages from VMbus devices. To facilitate such validation,
messages from VMBus devices. To facilitate such validation,
messages read by the guest from the "in" ring buffer are copied to a
temporary buffer that is not shared with Hyper-V. Validation is
performed in this temporary buffer without the risk of Hyper-V
maliciously modifying the message after it is validated but before
it is used.
VMbus interrupts
Synthetic Interrupt Controller (synic)
--------------------------------------
Hyper-V provides each guest CPU with a synthetic interrupt controller
that is used by VMBus for host-guest communication. While each synic
defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
(VMBUS_MESSAGE_SINT). All interrupts related to communication between
the Hyper-V host and a guest CPU use that SINT.
The SINT is mapped to a single per-CPU architectural interrupt (i.e,
an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
each CPU in the guest has a synic and may receive VMBus interrupts,
they are best modeled in Linux as per-CPU interrupts. This model works
well on arm64 where a single per-CPU Linux IRQ is allocated for
VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
across all CPUs and explicitly coded to call vmbus_isr(). In this case,
there's no Linux IRQ, and the interrupts are visible in aggregate in
/proc/interrupts on the "HYP" line.
The synic provides the means to demultiplex the architectural interrupt into
one or more logical interrupts and route the logical interrupt to the proper
VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
related functions that access synic data structures.
The synic is not modeled in Linux as an irq chip or irq domain,
and the demultiplexed logical interrupts are not Linux IRQs. As such,
they don't appear in /proc/interrupts or /proc/irq. The CPU
affinity for one of these logical interrupts is controlled via an
entry under /sys/bus/vmbus as described below.
VMBus interrupts
----------------
VMbus provides a mechanism for the guest to interrupt the host when
VMBus provides a mechanism for the guest to interrupt the host when
the guest has queued new messages in a ring buffer. The host
expects that the guest will send an interrupt only when an "out"
ring buffer transitions from empty to non-empty. If the guest sends
@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
interrupts, the host may throttle that guest by suspending its
execution for a few seconds to prevent a denial-of-service attack.
Similarly, the host will interrupt the guest when it sends a new
message on the VMbus control path, or when a VMbus channel "in" ring
buffer transitions from empty to non-empty. Each CPU in the guest
may receive VMbus interrupts, so they are best modeled as per-CPU
interrupts in Linux. This model works well on arm64 where a single
per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
per-CPU IRQs, an x86 interrupt vector is statically allocated (see
HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
call the VMbus interrupt service routine. These interrupts are
visible in /proc/interrupts on the "HYP" line.
Similarly, the host will interrupt the guest via the synic when
it sends a new message on the VMBus control path, or when a VMBus
channel "in" ring buffer transitions from empty to non-empty due to
the host inserting a new VMBus channel message. The control message stream
and each VMBus channel "in" ring buffer are separate logical interrupts
that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
bitmap to determine which channels have pending interrupts on this CPU.
If multiple channels have pending interrupts for this CPU, they are
processed sequentially. When all channel interrupts have been processed,
vmbus_isr() checks for and processes any messages received on the VMBus
control path.
The guest CPU that a VMbus channel will interrupt is selected by the
The guest CPU that a VMBus channel will interrupt is selected by the
guest when the channel is created, and the host is informed of that
selection. VMbus devices are broadly grouped into two categories:
selection. VMBus devices are broadly grouped into two categories:
1. "Slow" devices that need only one VMbus channel. The devices
1. "Slow" devices that need only one VMBus channel. The devices
(such as keyboard, mouse, heartbeat, and timesync) generate
relatively few interrupts. Their VMbus channels are all
relatively few interrupts. Their VMBus channels are all
assigned to interrupt the VMBUS_CONNECT_CPU, which is always
CPU 0.
2. "High speed" devices that may use multiple VMbus channels for
2. "High speed" devices that may use multiple VMBus channels for
higher parallelism and performance. These devices include the
synthetic SCSI controller and synthetic NIC. Their VMbus
synthetic SCSI controller and synthetic NIC. Their VMBus
channels interrupts are assigned to CPUs that are spread out
among the available CPUs in the VM so that interrupts on
multiple channels can be processed in parallel.
The assignment of VMbus channel interrupts to CPUs is done in the
The assignment of VMBus channel interrupts to CPUs is done in the
function init_vp_index(). This assignment is done outside of the
normal Linux interrupt affinity mechanism, so the interrupts are
neither "unmanaged" nor "managed" interrupts.
The CPU that a VMbus channel will interrupt can be seen in
The CPU that a VMBus channel will interrupt can be seen in
/sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu.
When running on later versions of Hyper-V, the CPU can be changed
by writing a new value to this sysfs entry. Because the interrupt
assignment is done outside of the normal Linux affinity mechanism,
there are no entries in /proc/irq corresponding to individual
VMbus channel interrupts.
by writing a new value to this sysfs entry. Because VMBus channel
interrupts are not Linux IRQs, there are no entries in /proc/interrupts
or /proc/irq corresponding to individual VMBus channel interrupts.
An online CPU in a Linux guest may not be taken offline if it has
VMbus channel interrupts assigned to it. Any such channel
VMBus channel interrupts assigned to it. Any such channel
interrupts must first be manually reassigned to another CPU as
described above. When no channel interrupts are assigned to the
CPU, it can be taken offline.
When a guest CPU receives a VMbus interrupt from the host, the
function vmbus_isr() handles the interrupt. It first checks for
channel interrupts by calling vmbus_chan_sched(), which looks at a
bitmap setup by the host to determine which channels have pending
interrupts on this CPU. If multiple channels have pending
interrupts for this CPU, they are processed sequentially. When all
channel interrupts have been processed, vmbus_isr() checks for and
processes any message received on the VMbus control path.
The VMbus channel interrupt handling code is designed to work
The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
CPU assigned to the channel. Specifically, the code does not use
CPU-based exclusion for correctness. In normal operation, Hyper-V
@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
even if there is a time lag before Hyper-V starts interrupting the
new CPU. See comments in target_cpu_store().
VMbus device creation/deletion
VMBus device creation/deletion
------------------------------
Hyper-V and the Linux guest have a separate message-passing path
that is used for synthetic device creation and deletion. This
path does not use a VMbus channel. See vmbus_post_msg() and
path does not use a VMBus channel. See vmbus_post_msg() and
vmbus_on_msg_dpc().
The first step is for the guest to connect to the generic
Hyper-V VMbus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMbus protocol version they will
Hyper-V VMBus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMBus protocol version they will
use. This negotiation allows newer Linux kernels to run on older
Hyper-V versions, and vice versa.
The guest then tells Hyper-V to "send offers". Hyper-V sends an
offer message to the guest for each synthetic device that the VM
is configured to have. Each VMbus device type has a fixed GUID
known as the "class ID", and each VMbus device instance is also
is configured to have. Each VMBus device type has a fixed GUID
known as the "class ID", and each VMBus device instance is also
identified by a GUID. The offer message from Hyper-V contains
both GUIDs to uniquely (within the VM) identify the device.
There is one offer message for each device instance, so a VM with
@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
the device. Driver/device matching is performed using the standard
Linux mechanism.
The device driver probe function opens the primary VMbus channel to
The device driver probe function opens the primary VMBus channel to
the corresponding VSP. It allocates guest memory for the channel
ring buffers and shares the ring buffer with the Hyper-V host by
giving the host a list of GPAs for the ring buffer memory. See
@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
setup messages via the primary channel. These messages may include
negotiating the device protocol version to be used between the Linux
VSC and the VSP on the Hyper-V host. The setup messages may also
include creating additional VMbus channels, which are somewhat
include creating additional VMBus channels, which are somewhat
mis-named as "sub-channels" since they are functionally
equivalent to the primary channel once they are created.

View File

@ -1044,7 +1044,7 @@ M: Joerg Roedel <joro@8bytes.org>
R: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
L: iommu@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/amd/
F: include/linux/amd-iommu.h
@ -3980,7 +3980,7 @@ R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@kernel.org>
R: Stanislav Fomichev <sdf@google.com>
R: Stanislav Fomichev <sdf@fomichev.me>
R: Hao Luo <haoluo@google.com>
R: Jiri Olsa <jolsa@kernel.org>
L: bpf@vger.kernel.org
@ -4083,12 +4083,13 @@ F: kernel/bpf/ringbuf.c
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
M: KP Singh <kpsingh@kernel.org>
R: Matt Bobrowski <mattbobrowski@google.com>
M: Matt Bobrowski <mattbobrowski@google.com>
L: bpf@vger.kernel.org
S: Maintained
F: Documentation/bpf/prog_lsm.rst
F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
F: kernel/trace/bpf_trace.c
F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
@ -5295,7 +5296,7 @@ F: drivers/infiniband/hw/usnic/
CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
L: llvm@lists.linux.dev
S: Supported
@ -8217,7 +8218,7 @@ F: rust/kernel/net/phy.rs
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
R: Kees Cook <keescook@chromium.org>
R: Kees Cook <kees@kernel.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
@ -8618,7 +8619,7 @@ S: Maintained
F: drivers/net/ethernet/nvidia/*
FORTIFY_SOURCE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -9108,7 +9109,7 @@ F: include/linux/mfd/gsc.h
F: include/linux/platform_data/gsc_hwmon.h
GCC PLUGINS
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -9242,7 +9243,7 @@ S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c
GENERIC STRING LIBRARY
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@ -11042,6 +11043,7 @@ F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
M: Lucas De Marchi <lucas.demarchi@intel.com>
M: Thomas Hellström <thomas.hellstrom@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-xe@lists.freedesktop.org
S: Supported
W: https://drm.pages.freedesktop.org/intel-docs/
@ -11162,7 +11164,7 @@ M: David Woodhouse <dwmw2@infradead.org>
M: Lu Baolu <baolu.lu@linux.intel.com>
L: iommu@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/intel/
INTEL IPU3 CSI-2 CIO2 DRIVER
@ -11535,7 +11537,7 @@ IOMMU DMA-API LAYER
M: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
@ -11547,7 +11549,7 @@ M: Will Deacon <will@kernel.org>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: Documentation/devicetree/bindings/iommu/
F: Documentation/userspace-api/iommu.rst
F: drivers/iommu/
@ -11956,7 +11958,7 @@ F: scripts/package/
F: usr/
KERNEL HARDENING (not covered by other areas)
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Gustavo A. R. Silva <gustavoars@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@ -12388,7 +12390,6 @@ F: drivers/video/backlight/ktz8866.c
KVM PARAVIRT (KVM/paravirt)
M: Paolo Bonzini <pbonzini@redhat.com>
R: Wanpeng Li <wanpengli@tencent.com>
R: Vitaly Kuznetsov <vkuznets@redhat.com>
L: kvm@vger.kernel.org
S: Supported
@ -12484,7 +12485,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tycho Andersen <tycho@tycho.pizza>
R: Kees Cook <keescook@chromium.org>
R: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -12780,7 +12781,7 @@ F: arch/powerpc/platforms/8xx/
F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Maintained
F: drivers/misc/lkdtm/*
F: tools/testing/selftests/lkdtm/*
@ -12910,7 +12911,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/usb/dvb-usb-v2/lmedm04*
LOADPIN SECURITY MODULE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/LoadPin.rst
@ -15831,7 +15832,7 @@ F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
M: Trond Myklebust <trondmy@kernel.org>
M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
@ -17538,7 +17539,6 @@ F: include/linux/peci.h
PENSANDO ETHERNET DRIVERS
M: Shannon Nelson <shannon.nelson@amd.com>
M: Brett Creeley <brett.creeley@amd.com>
M: drivers@pensando.io
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@ -18002,7 +18002,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
M: Joel Granados <j.granados@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@ -18058,7 +18058,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
PSTORE FILESYSTEM
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Tony Luck <tony.luck@intel.com>
R: Guilherme G. Piccoli <gpiccoli@igalia.com>
L: linux-hardening@vger.kernel.org
@ -18216,6 +18216,7 @@ QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Banajit Goswami <bgoswami@quicinc.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-arm-msm@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
F: Documentation/devicetree/bindings/sound/qcom,*
@ -20064,7 +20065,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
F: drivers/media/cec/platform/seco/seco-cec.h
SECURE COMPUTING
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Andy Lutomirski <luto@amacapital.net>
R: Will Drewry <wad@chromium.org>
S: Supported
@ -22752,7 +22753,7 @@ M: Jarkko Sakkinen <jarkko@kernel.org>
R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
S: Maintained
W: https://gitlab.com/jarkkojs/linux-tpmdd-test
W: https://codeberg.org/jarkko/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
@ -22978,7 +22979,7 @@ F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
UBSAN
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Marco Elver <elver@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Andrey Ryabinin <ryabinin.a.a@gmail.com>
@ -23980,7 +23981,6 @@ VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
R: Christoph Hellwig <hch@infradead.org>
R: Lorenzo Stoakes <lstoakes@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@ -24816,7 +24816,7 @@ F: drivers/net/hamradio/yam*
F: include/linux/yam.h
YAMA SECURITY MODULE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/Yama.rst

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*

View File

@ -85,7 +85,7 @@ led-user {
};
};
panel {
panel_dpi: panel {
compatible = "sii,43wvf1g";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display_power>;

View File

@ -10,8 +10,6 @@
/plugin/;
&{/} {
/delete-node/ panel;
hdmi: connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
@ -82,6 +80,10 @@ sii9022_out: endpoint {
};
};
&panel_dpi {
status = "disabled";
};
&tve {
status = "disabled";
};

View File

@ -14,6 +14,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#ifdef CONFIG_EFI
void efi_init(void);
@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
#ifdef CONFIG_CPU_TTBR0_PAN
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({ \
unsigned int flags = uaccess_save_and_enable(); \
efi_status_t res = _Generic((p)->f(args), \
efi_status_t: (p)->f(args), \
default: ((p)->f(args), EFI_ABORTED)); \
uaccess_restore(flags); \
res; \
})
#endif
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
PSR_T_BIT | MODE_MASK)

View File

@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
err_out:
return;
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
/* FP points one word below parent's top of stack */
frame_pointer += 4;
/*
* Usually, the stack frames are contiguous in memory but cases
* have been observed where the next stack frame does not live
* at 'frame_pointer + 4' as this code used to assume.
*
* Instead, dereference the field in the stack frame that
* stores the SP of the calling frame: to avoid unbounded
* recursion, this cannot involve any ftrace instrumented
* functions, so use the __get_kernel_nofault() primitive
* directly.
*/
__get_kernel_nofault(&frame_pointer,
(unsigned long *)(frame_pointer - 8),
unsigned long, err_out);
} else {
struct stackframe frame = {
.fp = frame_pointer,

View File

@ -6,6 +6,7 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mm.dtsi"
#include "imx8mm-overdrive.dtsi"
/ {
chosen {
@ -935,7 +936,7 @@ pinctrl_gpio8: gpio8grp {
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
pinctrl_gpio_9_dsi: gpio9dsigrp {
fsl,pins =
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
};
/* Verdin GPIO_10_DSI (pulled-up as active-low) */

View File

@ -254,7 +254,7 @@ tc_bridge: bridge@f {
<&clk IMX8MP_CLK_CLKOUT2>,
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-rates = <13000000>, <13000000>, <156000000>;
assigned-clock-rates = <13000000>, <13000000>, <208000000>;
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";

View File

@ -219,7 +219,7 @@ &uart3 {
bluetooth {
compatible = "brcm,bcm4330-bt";
shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
};
};

View File

@ -36,7 +36,7 @@ reg_usdhc2_vmmc: usdhc2-vmmc {
regulator-name = "SD1_SPWR";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};

View File

@ -296,7 +296,6 @@ &usdhc2 {
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
status = "okay";
no-sdio;
no-mmc;
};

View File

@ -170,6 +170,7 @@
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52

View File

@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418

View File

@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>
@ -213,6 +214,7 @@ l: if (!p) {
return -ENOMEM;
}
kmemleak_not_leak(p);
efi_rt_stack_top = p + THREAD_SIZE;
return 0;
}

View File

@ -173,7 +173,7 @@ static void __init remap_idmap_for_lpa2(void)
* Don't bother with the FDT, we no longer need it after this.
*/
memset(init_idmap_pg_dir, 0,
(u64)init_idmap_pg_dir - (u64)init_idmap_pg_end);
(u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
create_init_idmap(init_idmap_pg_dir, mask);
dsb(ishst);

View File

@ -53,17 +53,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_set_return_value(current, regs, 0, ret);
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
* but not enough for arm64 stack utilization comfort. To keep
* reasonable stack head room, reduce the maximum offset to 9 bits.
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
* bits. The actual entropy will be further reduced by the compiler
* when applying stack alignment constraints: the AAPCS mandates a
* 16-byte aligned SP at function boundaries, which will remove the
* 4 low bits from any entropy chosen here.
*
* The actual entropy will be further reduced by the compiler when
* applying stack alignment constraints: the AAPCS mandates a
* 16-byte (i.e. 4-bit) aligned SP at function boundaries.
*
* The resulting 5 bits of entropy is seen in SP[8:4].
* The resulting 6 bits of entropy is seen in SP[9:4].
*/
choose_random_kstack_offset(get_random_u16() & 0x1FF);
choose_random_kstack_offset(get_random_u16());
}
static inline bool has_syscall_work(unsigned long flags)

View File

@ -177,6 +177,14 @@ static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
res);
}
static void ffa_rx_release(struct arm_smccc_res *res)
{
arm_smccc_1_1_smc(FFA_RX_RELEASE,
0, 0,
0, 0, 0, 0, 0,
res);
}
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
@ -543,16 +551,19 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
ffa_rx_release(res);
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
ffa_rx_release(res);
goto out_unlock;
}
buf = ffa_desc_buf.buf;
memcpy(buf, hyp_buffers.rx, fraglen);
ffa_rx_release(res);
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
@ -563,6 +574,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
fraglen = res->a3;
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
ffa_rx_release(res);
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);

View File

@ -391,7 +391,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
vgic_v3_free_redist_region(rdreg);
vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;

View File

@ -919,8 +919,19 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
return ret;
}
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
struct kvm_vcpu *vcpu;
unsigned long c;
lockdep_assert_held(&kvm->arch.config_lock);
/* Garbage collect the region */
kvm_for_each_vcpu(c, vcpu, kvm) {
if (vcpu->arch.vgic_cpu.rdreg == rdreg)
vcpu->arch.vgic_cpu.rdreg = NULL;
}
list_del(&rdreg->list);
kfree(rdreg);
}
@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}

View File

@ -316,7 +316,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

View File

@ -124,7 +124,8 @@ bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
PTE_SWBITS_MASK;
/* creating or taking down mappings is always safe */
if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))


@ -6,6 +6,7 @@
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>
#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)


@ -20,7 +20,7 @@ SYSCALL_DEFINE6(mmap2,
unsigned long, prot,
unsigned long, flags,
unsigned long, fd,
off_t, offset)
unsigned long, offset)
{
if (unlikely(offset & (~PAGE_MASK >> 12)))
return -EINVAL;


@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/syscalls.h>
asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
u32 a2, u32 a3, u32 a4, u32 a5);


@ -36,5 +36,6 @@
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>


@ -14,6 +14,13 @@
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
SC_ARG64(offset), SC_ARG64(len))
{
return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
}
#define sys_fadvise64_64 sys_hexagon_fadvise64_64
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};


@ -143,7 +143,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@ -261,6 +261,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)


@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for


@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
#define CTRL_PLV0_ENABLE 0x02
#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,


@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
u32 ctrl;
u32 ctrl, privilege;
int i, max_slots, enable;
struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (arch_check_bp_in_kernelspace(info))
privilege = CTRL_PLV0_ENABLE;
else
privilege = CTRL_PLV3_ENABLE;
/* Whether bp belongs to a task. */
if (bp->hw.target)
regs = task_pt_regs(bp->hw.target);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
if (bp->hw.target)
regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
} else {
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
}
if (bp->hw.target)
regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset)
int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
if (!ctrl.len)
return -EINVAL;
*offset = __ffs(ctrl.len);
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
u64 alignment_mask, offset;
u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
alignment_mask = 0x7;
else
if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
offset = hw->address & alignment_mask;
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
hw->address &= ~alignment_mask;
}
return 0;
}
@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
bp = slots[i];
if (bp == NULL)
continue;
perf_bp_event(bp, regs);
if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
bp = slots[i];
if (bp == NULL)
continue;
perf_bp_event(bp, regs);
csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
update_bp_registers(regs, 0, 0);
}
}
update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
wp = slots[i];
if (wp == NULL)
continue;
perf_bp_event(wp, regs);
if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
wp = slots[i];
if (wp == NULL)
continue;
perf_bp_event(wp, regs);
csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
update_bp_registers(regs, 0, 1);
}
}
update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);


@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
int err, len, type, offset;
int err, len, type;
err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
break;
case NT_LOONGARCH_HW_WATCH:
if ((type & HW_BREAKPOINT_RW) != type)
return -EINVAL;
break;
default:
return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
attr->bp_addr += offset;
return 0;
}
@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
return PTR_ERR(bp);
attr = bp->attr;
decode_ctrl_reg(uctrl, &ctrl);
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
if (err)
return err;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
break;
case NT_LOONGARCH_HW_WATCH:
decode_ctrl_reg(uctrl, &ctrl);
break;
default:
return -EINVAL;
}
if (uctrl & CTRL_PLV_ENABLE) {
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
if (err)
return err;
attr.disabled = 0;
} else {
attr.disabled = 1;
}
return modify_user_hw_breakpoint(bp, &attr);
}
@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);


@ -22,7 +22,7 @@
#define __SYSCALL(nr, call) [nr] = (call),
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
prot, unsigned long, flags, unsigned long, fd, off_t, offset)
prot, unsigned long, flags, unsigned long, fd, unsigned long, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;


@ -761,7 +761,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
default:
ret = KVM_HCALL_INVALID_CODE;
break;
};
}
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}


@ -35,7 +35,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
off_t, pgoff)
unsigned long, pgoff)
{
if (pgoff & ~PAGE_MASK)
return -EINVAL;


@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
!!BMIPS_GET_CBR();
}
static void bcm6368_quirks(void)


@ -322,7 +322,7 @@ static inline void ehb(void)
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
" mftc0 $1, " #rt ", " #sel " \n" \
" mftc0 %0, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \


@ -354,7 +354,7 @@
412 n32 utimensat_time64 sys_utimensat
413 n32 pselect6_time64 compat_sys_pselect6_time64
414 n32 ppoll_time64 compat_sys_ppoll_time64
416 n32 io_pgetevents_time64 sys_io_pgetevents
416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
418 n32 mq_timedsend_time64 sys_mq_timedsend
419 n32 mq_timedreceive_time64 sys_mq_timedreceive


@ -27,7 +27,7 @@
17 o32 break sys_ni_syscall
# 18 was sys_stat
18 o32 unused18 sys_ni_syscall
19 o32 lseek sys_lseek
19 o32 lseek sys_lseek compat_sys_lseek
20 o32 getpid sys_getpid
21 o32 mount sys_mount
22 o32 umount sys_oldumount
@ -403,7 +403,7 @@
412 o32 utimensat_time64 sys_utimensat sys_utimensat
413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive


@ -112,8 +112,8 @@ static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
* gives them time to settle
*/
if (where == PCI_VENDOR_ID) {
if (ret == 0xffffffff || ret == 0x00000000 ||
ret == 0x0000ffff || ret == 0xffff0000) {
if (*val == 0xffffffff || *val == 0x00000000 ||
*val == 0x0000ffff || *val == 0xffff0000) {
if (delay > 4)
return 0;
delay *= 2;
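
Below is a rough user-space sketch of the retry-and-settle loop above; fake_read_id(), the millisecond delays and the final ID value are invented for illustration and are not part of the MIPS PCI code:

#include <stdio.h>
#include <time.h>

static unsigned int fake_read_id(void)
{
	static int attempts;
	return (++attempts < 3) ? 0xffffffffu : 0x12348086u;	/* settles on 3rd read */
}

int main(void)
{
	unsigned int delay_ms = 1, val;

	for (;;) {
		val = fake_read_id();
		if (val != 0xffffffffu && val != 0x00000000u &&
		    val != 0x0000ffffu && val != 0xffff0000u)
			break;			/* looks like a real device */
		if (delay_ms > 4)
			break;			/* give up, keep what we read */
		struct timespec ts = { 0, delay_ms * 1000000L };
		nanosleep(&ts, NULL);		/* give the device time to settle */
		delay_ms *= 2;
	}
	printf("vendor/device id: 0x%08x\n", val);
	return 0;
}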


@ -16,6 +16,7 @@ config PARISC
select ARCH_HAS_UBSAN
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
select ARCH_SPLIT_ARG64 if !64BIT
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK


@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_kernel_dcache_page_addr(const void *addr);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */


@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
static inline pte_t ptep_get(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#define ptep_get ptep_get
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
if (!pte_young(*ptep))
return 0;
pte = *ptep;
pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
return 1;
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
old_pte = *ptep;
set_pte(ptep, __pte(0));
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME


@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
/*
* When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
* of page flushes done by flush_cache_page_if_present. There are some
* pros and cons in using this option. It may increase the risk of
* random segmentation faults.
*/
#define CONFIG_FLUSH_PAGE_ACCESSED 0
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
static void flush_kernel_dcache_page_addr(const void *addr);
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
/*
* The TLB is the engine of coherence on parisc. The CPU is
* entitled to speculate any page with a TLB mapping, so here
* we kill the mapping then flush the page along a special flush
* only alias mapping. This guarantees that the page is no-longer
* in the cache for any process and nor may it be speculatively
* read in (until the user or kernel specifically accesses it,
* of course).
*/
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
static void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
vmaddr &= PAGE_MASK;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);
flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);
/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
flush_dcache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
static void flush_kernel_icache_page_addr(const void *addr)
{
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent instruction cache move-in */
preempt_disable();
flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
flush_kernel_icache_page(kaddr);
flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
/*
* Walk page directory for MM to find PTEP pointer for address ADDR.
*/
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
/*
* Return user physical address. Returns 0 if page is not present.
*/
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
/* Save context */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
/* Set context for lpa_user */
switch_mm_irqs_off(NULL, mm, NULL);
pa = lpa_user(addr);
/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
return pa;
}
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
if (parisc_requires_coherency()) {
for (i = 0; i < nr; i++) {
pte_t *ptep = get_ptep(vma->vm_mm,
addr + i * PAGE_SIZE);
if (!ptep)
continue;
if (pte_needs_flush(*ptep))
flush_user_cache_page(vma,
addr + i * PAGE_SIZE);
/* Optimise accesses to the same table? */
pte_unmap(ptep);
}
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
for (i = 0; i < nr; i++)
flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
WARN_ON(++count == 4096);
}
@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;
flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
pte_t *ptep;
pte_t *ptep, pte;
/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pte = ptep_get(ptep);
needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
flush_cache_page(vma, vmaddr, pfn);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
struct mm_struct *mm = vma->vm_mm;
unsigned long physaddr = get_upa(mm, vmaddr);
if (physaddr)
__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;
unsigned long addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
bool needs_flush = false;
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pfn = pte_pfn(*ptep);
pte_unmap(ptep);
}
if (needs_flush) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
for (addr = start; addr < end; addr += PAGE_SIZE)
flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
return;
}
flush_cache_pages(vma, start, end);
flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
if (!PageAnon(page))
return;
if (parisc_requires_coherency()) {
if (vma->vm_flags & VM_SHARED)
flush_data_cache();
else
flush_user_cache_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
return 1;
}
/*
* After a PTE is cleared, we have no way to flush the cache for
* the physical page. On PA8800 and PA8900 processors, these lines
* can cause random cache corruption. Thus, we must flush the cache
* as well as the TLB when clearing a PTE that's valid.
*/
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
struct mm_struct *mm = (vma)->vm_mm;
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
else if (pte_accessible(mm, pte))
flush_tlb_page(vma, addr);
return pte;
}
/*
* The physical address for pages in the ioremap case can be obtained
* from the vm_struct struct. I wasn't able to successfully handle the
* vmalloc and vmap cases. We have an array of struct page pointers in
* the uninitialized vmalloc case but the flush failed using page_to_pfn.
*/
void flush_cache_vmap(unsigned long start, unsigned long end)
{
unsigned long addr, physaddr;
struct vm_struct *vm;
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
if (end - start >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
flush_cache_all();
return;
}
vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
flush_cache_all();
return;
}
/* The physical addresses of IOREMAP regions are contiguous */
if (vm->flags & VM_IOREMAP) {
physaddr = vm->phys_addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
preempt_disable();
flush_dcache_page_asm(physaddr, start);
flush_icache_page_asm(physaddr, start);
preempt_enable();
physaddr += PAGE_SIZE;
}
return;
}
flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);
/*
* The vm_struct has been retired and the page table is set up. The
* last page in the range is a guard page. Its physical address can't
* be determined using lpa, so there is no way to flush the range
* using flush_dcache_page_asm.
*/
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);
/*
* On systems with PA8800/PA8900 processors, there is no way to flush
* a vmap range other than using the architected loop to flush the
* entire cache. The page directory is not set up, so we can't use
* fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
* L2 is physically indexed but FDCE/FICE instructions in virtual
* mode output their virtual address on the core bus, not their
* real address. As a result, the L2 cache index formed from the
* virtual address will most likely not be the same as the L2 index
* formed from the real address.
*/
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);


@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
const char __user * pathname)
{
return sys_fanotify_mark(fanotify_fd, flags,
((__u64)mask1 << 32) | mask0,
dfd, pathname);
}


@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
98 common recv sys_recv
98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
123 common recvfrom sys_recvfrom
123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at


@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
aesp8-ppc.S
ghashp10-ppc.S
ghashp8-ppc.S


@ -230,8 +230,10 @@
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
179 64 pread64 sys_pread64
179 spu pread64 sys_pread64
180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
180 64 pwrite64 sys_pwrite64
180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@ -246,6 +248,7 @@
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
191 64 readahead sys_readahead
191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
@ -293,6 +296,7 @@
232 nospu set_tid_address sys_set_tid_address
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
@ -502,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive


@ -130,14 +130,16 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
}
rcu_read_unlock();
fdput(f);
if (!found)
if (!found) {
fdput(f);
return -EINVAL;
}
table_group = iommu_group_get_iommudata(grp);
if (WARN_ON(!table_group))
if (WARN_ON(!table_group)) {
fdput(f);
return -EFAULT;
}
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbltmp = table_group->tables[i];
@ -158,8 +160,10 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
break;
}
}
if (!tbl)
if (!tbl) {
fdput(f);
return -EINVAL;
}
rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@ -170,6 +174,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
/* stit is being destroyed */
iommu_tce_table_put(tbl);
rcu_read_unlock();
fdput(f);
return -ENOTTY;
}
/*
@ -177,6 +182,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
* its KVM reference counter and can return.
*/
rcu_read_unlock();
fdput(f);
return 0;
}
rcu_read_unlock();
@ -184,6 +190,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
iommu_tce_table_put(tbl);
fdput(f);
return -ENOMEM;
}
@ -192,6 +199,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
list_add_rcu(&stit->next, &stt->iommu_tables);
fdput(f);
return 0;
}
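
The hunk above plugs descriptor leaks by adding an fdput() before every early return. A common alternative shape, sketched here as a plain user-space analogue (attach_table() and its checks are invented, not the KVM code), funnels all exits through a single label so the release cannot be missed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int attach_table(const char *path)
{
	int ret = -1;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;

	if (lseek(fd, 0, SEEK_END) < 0)	/* stand-in for the real lookups */
		goto out;

	ret = 0;			/* success path */
out:
	close(fd);			/* always released, like fdput(f) */
	return ret;
}

int main(void)
{
	printf("attach_table: %d\n", attach_table("/dev/null"));
	return 0;
}

The fix above keeps the explicit fdput() on each path instead, which avoids restructuring the existing control flow.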


@ -45,6 +45,7 @@ &sdhci0 {
no-1-8-v;
no-mmc;
no-sdio;
disable-wp;
};
&uart0 {


@ -145,7 +145,7 @@
/* parts of opcode for RVF, RVD and RVQ */
#define RVFDQ_FL_FS_WIDTH_OFF 12
#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0)
#define RVFDQ_FL_FS_WIDTH_W 2
#define RVFDQ_FL_FS_WIDTH_D 3
#define RVFDQ_LS_FS_WIDTH_Q 4


@ -120,9 +120,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
mutex_unlock(&text_mutex);
if (!mod)
local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);
return out;
}
@ -156,9 +153,9 @@ static int __ftrace_modify_code(void *data)
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
}
local_flush_icache_all();
local_flush_icache_all();
}
return 0;
}


@ -89,6 +89,14 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
memset(waddr, c, len);
/*
* We could have just patched a function that is about to be
* called so make sure we don't execute partially patched
* instructions by flushing the icache as soon as possible.
*/
local_flush_icache_range((unsigned long)waddr,
(unsigned long)waddr + len);
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@ -135,6 +143,14 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
ret = copy_to_kernel_nofault(waddr, insn, len);
/*
* We could have just patched a function that is about to be
* called so make sure we don't execute partially patched
* instructions by flushing the icache as soon as possible.
*/
local_flush_icache_range((unsigned long)waddr,
(unsigned long)waddr + len);
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
@ -189,9 +205,6 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
ret = patch_insn_set(tp, c, len);
if (!ret)
flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
@ -224,9 +237,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len)
ret = patch_insn_write(tp, insns, len);
if (!ret)
flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
@ -253,9 +263,9 @@ static int patch_text_cb(void *data)
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
}
local_flush_icache_all();
local_flush_icache_all();
}
return ret;
}


@ -156,7 +156,7 @@ unsigned long __get_wchan(struct task_struct *task)
return pc;
}
noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);


@ -23,7 +23,7 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, offset)
unsigned long, fd, unsigned long, offset)
{
return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
@ -32,7 +32,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, offset)
unsigned long, fd, unsigned long, offset)
{
/*
* Note that the shift for mmap2 is constant (12),


@ -170,11 +170,14 @@ static void kaslr_adjust_got(unsigned long offset)
u64 *entry;
/*
* Even without -fPIE, Clang still uses a global offset table for some
* reason. Adjust the GOT entries.
* Adjust GOT entries, except for ones for undefined weak symbols
* that resolved to zero. This also skips the first three reserved
* entries on s390x that are zero.
*/
for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
*entry += offset - __START_KERNEL;
for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
if (*entry)
*entry += offset - __START_KERNEL;
}
}
/*
@ -384,7 +387,7 @@ static void fixup_vmlinux_info(void)
void startup_kernel(void)
{
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
unsigned long nokaslr_offset_phys = mem_safe_offset();
unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
unsigned long amode31_lma = 0;
unsigned long max_physmem_end;
unsigned long asce_limit;
@ -393,6 +396,12 @@ void startup_kernel(void)
fixup_vmlinux_info();
setup_lpp();
/*
* Non-randomized kernel physical start address must be _SEGMENT_SIZE
* aligned (see below).
*/
nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
/*
@ -425,10 +434,25 @@ void startup_kernel(void)
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
if (kaslr_enabled())
__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
/*
* __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
* 20 bits (the offset within a large page) are zero. Copy the last
* 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
* __kaslr_offset_phys.
*
* With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
* are identical, which is required to allow for large mappings of the
* kernel image.
*/
kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
if (kaslr_enabled()) {
unsigned long end = ident_map_size - kaslr_large_page_offset;
__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
}
if (!__kaslr_offset_phys)
__kaslr_offset_phys = nokaslr_offset_phys;
__kaslr_offset_phys |= kaslr_large_page_offset;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
deploy_kernel((void *)__kaslr_offset_phys);
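
A minimal sketch of the bit manipulation described in the comment above, with invented offsets and a 1 MiB segment size assumed from the surrounding hunks:

#include <stdio.h>
#include <stdint.h>

#define SEGMENT_SIZE	(1ULL << 20)		/* stand-in for _SEGMENT_SIZE */
#define SEGMENT_MASK	(~(SEGMENT_SIZE - 1))

int main(void)
{
	/* Invented values: a THREAD_SIZE-aligned virtual offset and a
	 * SEGMENT_SIZE-aligned physical base from the randomizer. */
	uint64_t kaslr_offset      = 0x12345670000ULL;
	uint64_t kaslr_offset_phys = 0x20000000ULL;

	uint64_t large_page_offset = kaslr_offset & ~SEGMENT_MASK;

	kaslr_offset_phys |= large_page_offset;

	/* The low 20 bits now match, so the kernel image can be mapped
	 * with 1 MiB pages at both addresses. */
	printf("virt low20=0x%05llx phys low20=0x%05llx\n",
	       (unsigned long long)(kaslr_offset & (SEGMENT_SIZE - 1)),
	       (unsigned long long)(kaslr_offset_phys & (SEGMENT_SIZE - 1)));
	return 0;
}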


@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
static bool large_allowed(enum populate_mode mode)
{
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;
return machine.has_edat2 && large_allowed(mode) &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;
return machine.has_edat1 && large_allowed(mode) &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,


@ -109,6 +109,7 @@ SECTIONS
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = ALIGN(PAGE_SIZE);
. += AMODE31_SIZE; /* .amode31 section */
. = ALIGN(1 << 20); /* _SEGMENT_SIZE */
#else
. = ALIGN(8);
#endif


@ -43,7 +43,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@ -51,6 +50,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XTABLES_COMPAT=y
CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@ -574,7 +601,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
@ -645,7 +671,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@ -663,6 +688,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@ -879,6 +905,5 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m


@ -41,7 +41,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
@ -49,6 +48,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XTABLES_COMPAT=y
CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m
@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@ -630,7 +658,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@ -649,6 +676,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y


@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
# CONFIG_AP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SWAP is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
# CONFIG_PCPU_DEV_REFCNT is not set


@ -54,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
choose_random_kstack_offset(get_tod_clock_fast());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare


@ -38,33 +38,6 @@
#include "entry.h"
/*
* Perform the mmap() system call. Linux for S/390 isn't able to handle more
* than 5 system call parameters, so this system call uses a memory block
* for parameter passing.
*/
struct s390_mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg)
{
struct s390_mmap_arg_struct a;
int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
#ifdef CONFIG_SYSVIPC
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.


@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
416 32 io_pgetevents_time64 - sys_io_pgetevents
416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive


@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
union zpci_sic_iib iib = {{0}};
union zpci_sic_iib ziib = {{0}};
iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);


@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
/*
* swap the arguments the way that libc wants them instead of
* moving flags ahead of the 64-bit nbytes argument
*/
SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
SC_ARG64(nbytes), unsigned int, flags)
{
return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
SC_VAL64(loff_t, nbytes), flags);
}
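
A hedged illustration of the SC_ARG64/SC_VAL64 idea used above: each 64-bit argument arrives as two 32-bit register halves and is recombined in the kernel. Which half carries the high bits depends on the ABI and endianness; the helper and values below are invented:

#include <stdio.h>
#include <stdint.h>

/* Recombine a 64-bit value from two 32-bit halves (order assumed high-first). */
static int64_t recombine64(uint32_t hi, uint32_t lo)
{
	return (int64_t)(((uint64_t)hi << 32) | lo);
}

int main(void)
{
	uint32_t offset_hi = 0x00000001u, offset_lo = 0x80000000u;
	uint32_t nbytes_hi = 0x00000000u, nbytes_lo = 0x00001000u;

	printf("offset=0x%llx nbytes=0x%llx\n",
	       (unsigned long long)recombine64(offset_hi, offset_lo),
	       (unsigned long long)recombine64(nbytes_hi, nbytes_lo));
	return 0;
}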


@ -321,7 +321,7 @@
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
313 common splice sys_splice
314 common sync_file_range sys_sync_file_range
314 common sync_file_range sys_sh_sync_file_range6
315 common tee sys_tee
316 common vmsplice sys_vmsplice
317 common move_pages sys_move_pages
@ -395,6 +395,7 @@
385 common pkey_alloc sys_pkey_alloc
386 common pkey_free sys_pkey_free
387 common rseq sys_rseq
388 common sync_file_range2 sys_sync_file_range2
# room for arch specific syscalls
393 common semget sys_semget
394 common semctl sys_semctl


@ -18,224 +18,3 @@ sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
.align 32
.globl sys32_socketcall
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
sethi %hi(__socketcall_table_begin), %g2
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
do_einval:
retl
mov -EINVAL, %o0
.align 32
__socketcall_table_begin:
/* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
22: ldswa [%o1 + 0x8] %asi, %o2
23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
26: lduwa [%o1 + 0x8] %asi, %o2
27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
30: lduwa [%o1 + 0x8] %asi, %o2
31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
34: lduwa [%o1 + 0x8] %asi, %o2
35: lduwa [%o1 + 0xc] %asi, %o3
36: lduwa [%o1 + 0x10] %asi, %o4
37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
40: lduwa [%o1 + 0x8] %asi, %o2
41: lduwa [%o1 + 0xc] %asi, %o3
42: lduwa [%o1 + 0x10] %asi, %o4
43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_setsockopt), %g1
48: ldswa [%o1 + 0x8] %asi, %o2
49: lduwa [%o1 + 0xc] %asi, %o3
50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(sys_setsockopt), %g0
51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockopt), %g1
53: ldswa [%o1 + 0x8] %asi, %o2
54: lduwa [%o1 + 0xc] %asi, %o3
55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(sys_getsockopt), %g0
56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept4), %g1
64: lduwa [%o1 + 0x8] %asi, %o2
65: ldswa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_accept4), %g0
66: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
.section __ex_table,"a"
.align 4
.word 1b, __retl_efault, 2b, __retl_efault
.word 3b, __retl_efault, 4b, __retl_efault
.word 5b, __retl_efault, 6b, __retl_efault
.word 7b, __retl_efault, 8b, __retl_efault
.word 9b, __retl_efault, 10b, __retl_efault
.word 11b, __retl_efault, 12b, __retl_efault
.word 13b, __retl_efault, 14b, __retl_efault
.word 15b, __retl_efault, 16b, __retl_efault
.word 17b, __retl_efault, 18b, __retl_efault
.word 19b, __retl_efault, 20b, __retl_efault
.word 21b, __retl_efault, 22b, __retl_efault
.word 23b, __retl_efault, 24b, __retl_efault
.word 25b, __retl_efault, 26b, __retl_efault
.word 27b, __retl_efault, 28b, __retl_efault
.word 29b, __retl_efault, 30b, __retl_efault
.word 31b, __retl_efault, 32b, __retl_efault
.word 33b, __retl_efault, 34b, __retl_efault
.word 35b, __retl_efault, 36b, __retl_efault
.word 37b, __retl_efault, 38b, __retl_efault
.word 39b, __retl_efault, 40b, __retl_efault
.word 41b, __retl_efault, 42b, __retl_efault
.word 43b, __retl_efault, 44b, __retl_efault
.word 45b, __retl_efault, 46b, __retl_efault
.word 47b, __retl_efault, 48b, __retl_efault
.word 49b, __retl_efault, 50b, __retl_efault
.word 51b, __retl_efault, 52b, __retl_efault
.word 53b, __retl_efault, 54b, __retl_efault
.word 55b, __retl_efault, 56b, __retl_efault
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
.word 63b, __retl_efault, 64b, __retl_efault
.word 65b, __retl_efault, 66b, __retl_efault
.previous
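
The hand-written dispatcher removed above is superseded by the generic compat_sys_socketcall(). As a rough C sketch (the handler stubs and the two-entry switch are purely illustrative), the demultiplexing it performed amounts to:

#include <errno.h>
#include <stdint.h>

/* Subset of the classic socketcall numbers. */
enum { SOCKOP_socket = 1, SOCKOP_bind = 2 };

/* Illustrative stand-ins for the real syscall handlers. */
static long do_socket(int domain, int type, int protocol)
{
	(void)domain; (void)type; (void)protocol;
	return 0;
}

static long do_bind(int fd, const void *addr, int addrlen)
{
	(void)fd; (void)addr; (void)addrlen;
	return 0;
}

/* One call number picks the handler; args[] carries the packed 32-bit
 * arguments the assembly table used to load with ldswa/lduwa. */
long socketcall_sketch(int call, const uint32_t *args)
{
	switch (call) {
	case SOCKOP_socket:
		return do_socket((int)args[0], (int)args[1], (int)args[2]);
	case SOCKOP_bind:
		return do_bind((int)args[0],
			       (const void *)(uintptr_t)args[1], (int)args[2]);
	default:
		return -EINVAL;
	}
}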


@ -117,7 +117,7 @@
90 common dup2 sys_dup2
91 32 setfsuid32 sys_setfsuid
92 common fcntl sys_fcntl compat_sys_fcntl
93 common select sys_select
93 common select sys_select compat_sys_select
94 32 setfsgid32 sys_setfsgid
95 common fsync sys_fsync
96 common setpriority sys_setpriority
@ -155,7 +155,7 @@
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
124 common fchmod sys_fchmod
125 common recvfrom sys_recvfrom
125 common recvfrom sys_recvfrom compat_sys_recvfrom
126 32 setreuid sys_setreuid16
126 64 setreuid sys_setreuid
127 32 setregid sys_setregid16
@ -247,7 +247,7 @@
204 32 readdir sys_old_readdir compat_sys_old_readdir
204 64 readdir sys_nis_syscall
205 common readahead sys_readahead compat_sys_readahead
206 common socketcall sys_socketcall sys32_socketcall
206 common socketcall sys_socketcall compat_sys_socketcall
207 common syslog sys_syslog
208 common lookup_dcookie sys_ni_syscall
209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
@ -461,7 +461,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive


@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S


@ -420,7 +420,7 @@
412 i386 utimensat_time64 sys_utimensat
413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 i386 io_pgetevents_time64 sys_io_pgetevents
416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 i386 mq_timedsend_time64 sys_mq_timedsend
419 i386 mq_timedreceive_time64 sys_mq_timedreceive


@ -93,10 +93,9 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
: [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high), "S" (_ptr) \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
: "+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
o.full; \
@ -122,12 +121,11 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
\
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
: CC_OUT(e) (ret), \
[ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high), "S" (_ptr) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
if (unlikely(!ret)) \
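
For readers who do not want to parse the inline assembly, the operation being open-coded here is a 64-bit compare-and-swap; a portable approximation using the GCC/Clang __atomic builtins (a sketch of the semantics only, not of the kernel's locked/unlocked variants) is:

#include <stdbool.h>
#include <stdint.h>

/* If *ptr equals *expected, store desired and return true; otherwise
 * copy the current value back into *expected and return false. */
bool cmpxchg64_sketch(uint64_t *ptr, uint64_t *expected, uint64_t desired)
{
	return __atomic_compare_exchange_n(ptr, expected, desired,
					   false /* strong */,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}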


@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data);
extern void __efi_memmap_free(u64 phys, unsigned long size,
unsigned long flags);
#define __efi_memmap_free __efi_memmap_free
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,


@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#endif
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
* but not enough for x86 stack utilization comfort. To keep
* reasonable stack head room, reduce the maximum offset to 8 bits.
*
* The actual entropy will be further reduced by the compiler when
* applying stack alignment constraints (see cc_stack_align4/8 in
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
* bits. The actual entropy will be further reduced by the compiler
* when applying stack alignment constraints (see cc_stack_align4/8 in
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
* low bits from any entropy chosen here.
*
* Therefore, final stack offset entropy will be 5 (x86_64) or
* 6 (ia32) bits.
* Therefore, final stack offset entropy will be 7 (x86_64) or
* 8 (ia32) bits.
*/
choose_random_kstack_offset(rdtsc() & 0xFF);
choose_random_kstack_offset(rdtsc());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
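
A small worked example of the entropy arithmetic described in the rewritten comment (assuming, as the comment states, that KSTACK_OFFSET_MAX() keeps 10 bits and stack alignment then drops the low 3 bits on x86_64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sample   = 0xdeadbeef;        /* stand-in for an rdtsc() reading */
	uint32_t ten_bits = sample & 0x3ff;    /* 10-bit KSTACK_OFFSET_MAX() mask */
	uint32_t aligned  = ten_bits & ~0x7u;  /* 8-byte stack alignment on x86_64 */

	printf("raw %#x -> 10-bit %#x -> aligned %#x (7 effective bits)\n",
	       sample, ten_bits, aligned);
	return 0;
}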


@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
asm volatile("call __" #fn "_%c4" \
asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
: "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
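
The hunk above moves from a positional %c4 operand to a named [size] operand. A minimal, unrelated example of GCC/Clang named asm operands on x86 (purely illustrative, not the get_user code):

#include <stdio.h>

int main(void)
{
	int src = 41, dst;

	/* [out] and [in] name the constraints below, so the template
	 * does not depend on operand position. */
	asm("mov %[in], %[out]\n\t"
	    "add $1, %[out]"
	    : [out] "=&r" (dst)
	    : [in] "r" (src));

	printf("%d\n", dst);	/* prints 42 */
	return 0;
}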


@ -519,7 +519,8 @@ void free_rmid(u32 closid, u32 rmid)
* allows architectures that ignore the closid parameter to avoid an
* unnecessary check.
*/
if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
if (!resctrl_arch_mon_capable() ||
idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
RESCTRL_RESERVED_RMID))
return;


@ -27,25 +27,7 @@
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
unsigned long *sp = (unsigned long *)regs->sp;
/*
* Return address is either directly at stack pointer
* or above a saved flags. Eflags has bits 22-31 zero,
* kernel addresses don't.
*/
if (sp[0] >> 22)
return sp[0];
if (sp[1] >> 22)
return sp[1];
#endif
}
return pc;
return instruction_pointer(regs);
}
EXPORT_SYMBOL(profile_pc);
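
The removed heuristic relied on saved EFLAGS never having bits 22-31 set, whereas 32-bit kernel text addresses do. A tiny standalone illustration of that discrimination test (example values, not taken from a real stack):

#include <stdint.h>
#include <stdio.h>

/* Nonzero if the word looks like a kernel address rather than a saved
 * EFLAGS value, using the old bits-22..31 test. */
static int looks_like_kernel_address(uint32_t word)
{
	return (word >> 22) != 0;
}

int main(void)
{
	uint32_t eflags_like = 0x00000246;	/* typical EFLAGS: high bits clear */
	uint32_t ktext_like  = 0xc1000000;	/* example 32-bit kernel text address */

	printf("%#x -> %d, %#x -> %d\n",
	       eflags_like, looks_like_kernel_address(eflags_like),
	       ktext_like, looks_like_kernel_address(ktext_like));
	return 0;
}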


@ -2843,7 +2843,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (sev_es_prevent_msr_access(vcpu, msr_info)) {
msr_info->data = 0;
return -EINVAL;
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
}
switch (msr_info->index) {
@ -2998,7 +2998,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u64 data = msr->data;
if (sev_es_prevent_msr_access(vcpu, msr))
return -EINVAL;
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
switch (ecx) {
case MSR_AMD64_TSC_RATIO:


@ -10718,13 +10718,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
else if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
if (is_guest_mode(vcpu))
vcpu->arch.load_eoi_exitmap_pending = true;


@ -44,7 +44,11 @@
or %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
.if \size != 8
jae .Lbad_get_user
.else
jae .Lbad_get_user_8
.endif
sbb %edx, %edx /* array_index_mask_nospec() */
and %edx, %eax
.endif
@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
bad_get_user_8:
.Lbad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX

Some files were not shown because too many files have changed in this diff Show More