Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-01 18:52:02 +00:00)
Linux 6.10-rc4

-----BEGIN PGP SIGNATURE-----
iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmZvTbAeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGVksIAJEn4a9IVM8FNCJy
Dxo0BItD1/qJ5mLDptqUFRKlxInjbojofz5CyoeIeXb0DwRfB16ALXqNXAkd3APi
saoOpfjFsg2H2OqL9CHdkzWcJEAq2lDnL0zaOjumeDVu/EyeT+tC4e4hq1e6Bm0E
fPC5ms2b+07DF9Rg6/DW8yPbdM5n6Mz1bRd3fQOIgvpM3yGOyGztEBgTRub/ZUgH
5pNJauknFAZgdiWhgNpc+lPWYZbgHKULQPhUBPdVhDIXPtQNUlKgNTQc6+L0Nmbb
K1sG1q7FLeMJOTFGQfD4r26X5DNQUi894q/9SX8X7rcrECdJKcw2WjVyB4myADpf
ae2gP+A=
=XjWP
-----END PGP SIGNATURE-----

Merge tag 'v6.10-rc4' into char-misc-next

We need the char-misc and iio fixes in here as well to build on top of.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in commit 2046047295.
@@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2
3 .mailmap
@@ -72,6 +72,8 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
Andy Shevchenko <andy@kernel.org> <andy@smile.org.ua>
Andy Shevchenko <andy@kernel.org> <ext-andriy.shevchenko@nokia.com>
Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
@@ -217,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.

LiveCD-based tutorials are available at

http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html
http://tomoyo.sourceforge.jp/1.8/centos6-live.html
https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html
https://tomoyo.sourceforge.net/1.8/centos6-live.html

Though these tutorials use non-LSM version of TOMOYO, they are useful for you
to know what TOMOYO is.
@@ -21,45 +21,32 @@ How to enable TOMOYO?
Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
kernel's command line.

Please see http://tomoyo.osdn.jp/2.5/ for details.
Please see https://tomoyo.sourceforge.net/2.6/ for details.

Where is documentation?
=======================

User <-> Kernel interface documentation is available at
https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
https://tomoyo.sourceforge.net/2.6/policy-specification/index.html .

Materials we prepared for seminars and symposiums are available at
https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
https://sourceforge.net/projects/tomoyo/files/docs/ .
Below lists are chosen from three aspects.

What is TOMOYO?
TOMOYO Linux Overview
https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf
TOMOYO Linux: pragmatic and manageable security for Linux
https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf
TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf

What can TOMOYO do?
Deep inside TOMOYO Linux
https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf
The role of "pathname based access control" in security.
https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf

History of TOMOYO?
Realities of Mainlining
https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf

What is future plan?
====================

We believe that inode based security and name based security are complementary
and both should be used together. But unfortunately, so far, we cannot enable
multiple LSM modules at the same time. We feel sorry that you have to give up
SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.

We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM
version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ .
LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning
to port non-LSM version's functionalities to LSM versions.
https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf
@@ -467,11 +467,11 @@ anon_fault_fallback_charge
instead falls back to using huge pages with lower orders or
small pages even though the allocation was successful.

anon_swpout
swpout
is incremented every time a huge page is swapped out in one
piece without splitting.

anon_swpout_fallback
swpout_fallback
is incremented if a huge page has to be split before swapout.
Usually because failed to allocate some continuous swap space
for the huge page.
@@ -217,7 +217,7 @@ current *struct* is::
int (*media_changed)(struct cdrom_device_info *, int);
int (*tray_move)(struct cdrom_device_info *, int);
int (*lock_door)(struct cdrom_device_info *, int);
int (*select_speed)(struct cdrom_device_info *, int);
int (*select_speed)(struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.

::

int select_speed(struct cdrom_device_info *cdi, int speed)
int select_speed(struct cdrom_device_info *cdi, unsigned long speed)

Some CD-ROM drives are capable of changing their head-speed. There
are several reasons for changing the speed of a CD-ROM drive. Badly
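For a driver author the change above is only a prototype change. A minimal sketch of how a driver might fill in the updated operation is shown below; the mydrv_* names are hypothetical, only the select_speed prototype and the CDC_SELECT_SPEED capability flag come from the interface documented here.

    #include <linux/cdrom.h>

    /* Hypothetical driver callback; a speed of 0 is conventionally auto-selection. */
    static int mydrv_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
    {
            if (speed == 0)
                    return 0;       /* leave the drive at its default data rate */
            /* ... program the mechanism to `speed` (in standard CD-ROM speed units) ... */
            return 0;
    }

    static const struct cdrom_device_ops mydrv_dops = {
            .select_speed   = mydrv_select_speed,
            .capability     = CDC_SELECT_SPEED,
    };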
@@ -54,11 +54,10 @@ unevaluatedProperties: false

examples:
- |
mlahb: ahb@38000000 {
ahb {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x10000000 0x40000>;
ranges;
dma-ranges = <0x00000000 0x38000000 0x10000>,
<0x10000000 0x10000000 0x60000>,
@@ -57,17 +57,17 @@ properties:
- const: allwinner,sun8i-v3s

- description: Anbernic RG35XX (2024)
- items:
items:
- const: anbernic,rg35xx-2024
- const: allwinner,sun50i-h700

- description: Anbernic RG35XX Plus
- items:
items:
- const: anbernic,rg35xx-plus
- const: allwinner,sun50i-h700

- description: Anbernic RG35XX H
- items:
items:
- const: anbernic,rg35xx-h
- const: allwinner,sun50i-h700
@@ -145,7 +145,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0
@@ -18,9 +18,12 @@ allOf:

properties:
compatible:
enum:
- elan,ekth6915
- ilitek,ili2901
oneOf:
- items:
- enum:
- elan,ekth5015m
- const: elan,ekth6915
- const: elan,ekth6915

reg:
const: 0x10
@@ -33,6 +36,12 @@ properties:
reset-gpios:
description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.

no-reset-on-power-off:
type: boolean
description:
Reset line is wired so that it can (and should) be left deasserted when
the power supply is off.

vcc33-supply:
description: The 3.3V supply to the touchscreen.

@@ -58,8 +67,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;

ap_ts: touchscreen@10 {
compatible = "elan,ekth6915";
touchscreen@10 {
compatible = "elan,ekth5015m", "elan,ekth6915";
reg = <0x10>;

interrupt-parent = <&tlmm>;
66 Documentation/devicetree/bindings/input/ilitek,ili2901.yaml (Normal file)
@@ -0,0 +1,66 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/input/ilitek,ili2901.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Ilitek ILI2901 touchscreen controller

maintainers:
- Jiri Kosina <jkosina@suse.com>

description:
Supports the Ilitek ILI2901 touchscreen controller.
This touchscreen controller uses the i2c-hid protocol with a reset GPIO.

allOf:
- $ref: /schemas/input/touchscreen/touchscreen.yaml#

properties:
compatible:
enum:
- ilitek,ili2901

reg:
maxItems: 1

interrupts:
maxItems: 1

panel: true

reset-gpios:
maxItems: 1

vcc33-supply: true

vccio-supply: true

required:
- compatible
- reg
- interrupts
- vcc33-supply

additionalProperties: false

examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>

i2c {
#address-cells = <1>;
#size-cells = <0>;

touchscreen@41 {
compatible = "ilitek,ili2901";
reg = <0x41>;

interrupt-parent = <&tlmm>;
interrupts = <9 IRQ_TYPE_LEVEL_LOW>;

reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
vcc33-supply = <&pp3300_ts>;
};
};
@@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true

required:
- peer-hub
@@ -150,6 +150,12 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.

If "select" <symbol> is followed by "if" <expr>, <symbol> will be
selected by the logical AND of the value of the current menu symbol
and <expr>. This means, the lower limit can be downgraded due to the
presence of "if" <expr>. This behavior may seem weird, but we rely on
it. (The future of this behavior is undecided.)

- weak reverse dependencies: "imply" <symbol> ["if" <expr>]

This is similar to "select" as it enforces a lower limit on another
@@ -184,7 +190,7 @@ applicable everywhere (see syntax).
ability to hook into a secondary subsystem while allowing the user to
configure that subsystem out without also having to unset these drivers.

Note: If the combination of FOO=y and BAR=m causes a link error,
Note: If the combination of FOO=y and BAZ=m causes a link error,
you can guard the function call with IS_REACHABLE()::

foo_init()
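The example that the hunk above only shows the first line of is the usual IS_REACHABLE() guard. A minimal self-contained sketch of that pattern follows; the baz_register() declaration is added here for completeness and is hypothetical.

    #include <linux/kconfig.h>

    int baz_register(void); /* hypothetical symbol provided by the BAZ=m module */

    static int foo_init(void)
    {
            /*
             * IS_REACHABLE(CONFIG_BAZ) is true only when the call can actually
             * be linked: BAZ is built-in, or the calling code is itself a
             * module alongside BAZ=m.  With FOO=y and BAZ=m it evaluates to 0,
             * the branch is eliminated as dead code, and no unresolved
             * reference to baz_register() is emitted.
             */
            if (IS_REACHABLE(CONFIG_BAZ))
                    baz_register();
            return 0;
    }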
@@ -202,6 +208,10 @@ applicable everywhere (see syntax).
imply BAR
imply BAZ

Note: If "imply" <symbol> is followed by "if" <expr>, the default of <symbol>
will be the logical AND of the value of the current menu symbol and <expr>.
(The future of this behavior is undecided.)

- limiting menu display: "visible if" <expr>

This attribute is only applicable to menu blocks, if the condition is
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.

In this case, it is possible to use the NIC's packet steering
capabilities to steer the packets to the right queue. This is not
possible in the previous example as there is only one queue shared
among sockets, so the NIC cannot do this steering as it can only steer
between queues.
There is no need to supply an XDP program like the one in the previous
case where sockets were bound to the same queue id and
device. Instead, use the NIC's packet steering capabilities to steer
the packets to the right queue. In the previous example, there is only
one queue shared among sockets, so the NIC cannot do this steering. It
can only steer between queues.

In libxdp (or libbpf prior to version 1.0), you need to use the
xsk_socket__create_shared() API as it takes a reference to a FILL ring
and a COMPLETION ring that will be created for you and bound to the
shared UMEM. You can use this function for all the sockets you create,
or you can use it for the second and following ones and use
xsk_socket__create() for the first one. Both methods yield the same
result.
In libbpf, you need to use the xsk_socket__create_shared() API as it
takes a reference to a FILL ring and a COMPLETION ring that will be
created for you and bound to the shared UMEM. You can use this
function for all the sockets you create, or you can use it for the
second and following ones and use xsk_socket__create() for the first
one. Both methods yield the same result.
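As a reader aid (not part of the commit), a minimal sketch of the setup the updated paragraph describes, using the libxdp calls it names; the interface name, queue ids and error handling are illustrative:

    #include <xdp/xsk.h>  /* libxdp; the same helpers lived in <bpf/xsk.h> before libbpf 1.0 */

    /* Rings must live at least as long as their socket, hence static here. */
    static struct xsk_ring_cons rx0, rx1, comp1;
    static struct xsk_ring_prod tx0, tx1, fill1;

    static int bind_two_sockets_to_shared_umem(struct xsk_umem *umem,
                                               struct xsk_socket **xsk0,
                                               struct xsk_socket **xsk1)
    {
            int err;

            /* First socket (queue 0) reuses the FILL/COMPLETION rings that were
             * registered when the UMEM was created with xsk_umem__create(). */
            err = xsk_socket__create(xsk0, "eth0", 0, umem, &rx0, &tx0, NULL);
            if (err)
                    return err;

            /* Second socket (queue 1) shares the UMEM; this call takes references
             * to a FILL and a COMPLETION ring that are created for it and bound
             * to the shared UMEM, as the text above describes. */
            return xsk_socket__create_shared(xsk1, "eth0", 1, umem,
                                             &rx1, &tx1, &fill1, &comp1, NULL);
    }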

Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
devices at the same time. It is also possible to redirect to any
socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
devices at the same time.

XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.

Note that if you are using the XDP_SHARED_UMEM option, it is
possible to switch traffic between any socket bound to the same
umem.

Q: My packets are sometimes corrupted. What is wrong?

A: Care has to be taken not to feed the same buffer in the UMEM into
|
@ -582,7 +582,7 @@ depending on the hardware. In all cases, however, only routes that have the
|
||||
Devices generating the streams may allow enabling and disabling some of the
|
||||
routes or have a fixed routing configuration. If the routes can be disabled, not
|
||||
declaring the routes (or declaring them without
|
||||
``VIDIOC_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
|
||||
``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
|
||||
disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes
|
||||
back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE``
|
||||
flag unset.
|
||||
|
@ -1117,7 +1117,6 @@ L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/admin-guide/pm/amd-pstate.rst
|
||||
F: drivers/cpufreq/amd-pstate*
|
||||
F: include/linux/amd-pstate.h
|
||||
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
|
||||
|
||||
AMD PTDMA DRIVER
|
||||
@ -11045,8 +11044,8 @@ F: include/uapi/drm/i915_drm.h
|
||||
|
||||
INTEL DRM XE DRIVER (Lunar Lake and newer)
|
||||
M: Lucas De Marchi <lucas.demarchi@intel.com>
|
||||
M: Oded Gabbay <ogabbay@kernel.org>
|
||||
M: Thomas Hellström <thomas.hellstrom@linux.intel.com>
|
||||
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
|
||||
L: intel-xe@lists.freedesktop.org
|
||||
S: Supported
|
||||
W: https://drm.pages.freedesktop.org/intel-docs/
|
||||
@ -15249,7 +15248,6 @@ F: drivers/staging/most/
|
||||
F: include/linux/most.h
|
||||
|
||||
MOTORCOMM PHY DRIVER
|
||||
M: Peter Geis <pgwipeout@gmail.com>
|
||||
M: Frank <Frank.Sae@motor-comm.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
@ -15838,7 +15836,7 @@ F: drivers/nfc/virtual_ncidev.c
|
||||
F: tools/testing/selftests/nci/
|
||||
|
||||
NFS, SUNRPC, AND LOCKD CLIENTS
|
||||
M: Trond Myklebust <trond.myklebust@hammerspace.com>
|
||||
M: Trond Myklebust <trondmy@kernel.org>
|
||||
M: Anna Schumaker <anna@kernel.org>
|
||||
L: linux-nfs@vger.kernel.org
|
||||
S: Maintained
|
||||
@ -22698,7 +22696,7 @@ L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
|
||||
L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
|
||||
L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
|
||||
S: Maintained
|
||||
W: https://tomoyo.osdn.jp/
|
||||
W: https://tomoyo.sourceforge.net/
|
||||
F: security/tomoyo/
|
||||
|
||||
TOPSTAR LAPTOP EXTRAS DRIVER
|
||||
|
2
Makefile
2
Makefile
@ -2,7 +2,7 @@
|
||||
VERSION = 6
|
||||
PATCHLEVEL = 10
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc2
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Baby Opossum Posse
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
||||
unsigned long old;
|
||||
|
||||
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
|
||||
err_out:
|
||||
return;
|
||||
|
||||
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
|
||||
/* FP points one word below parent's top of stack */
|
||||
frame_pointer += 4;
|
||||
/*
|
||||
* Usually, the stack frames are contiguous in memory but cases
|
||||
* have been observed where the next stack frame does not live
|
||||
* at 'frame_pointer + 4' as this code used to assume.
|
||||
*
|
||||
* Instead, dereference the field in the stack frame that
|
||||
* stores the SP of the calling frame: to avoid unbounded
|
||||
* recursion, this cannot involve any ftrace instrumented
|
||||
* functions, so use the __get_kernel_nofault() primitive
|
||||
* directly.
|
||||
*/
|
||||
__get_kernel_nofault(&frame_pointer,
|
||||
(unsigned long *)(frame_pointer - 8),
|
||||
unsigned long, err_out);
|
||||
} else {
|
||||
struct stackframe frame = {
|
||||
.fp = frame_pointer,
|
||||
|
@ -146,7 +146,7 @@
|
||||
/* Coprocessor traps */
|
||||
.macro __init_el2_cptr
|
||||
__check_hvhe .LnVHE_\@, x1
|
||||
mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
|
||||
mov x0, #CPACR_ELx_FPEN
|
||||
msr cpacr_el1, x0
|
||||
b .Lskip_set_cptr_\@
|
||||
.LnVHE_\@:
|
||||
@ -277,7 +277,7 @@
|
||||
|
||||
// (h)VHE case
|
||||
mrs x0, cpacr_el1 // Disable SVE traps
|
||||
orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
|
||||
orr x0, x0, #CPACR_ELx_ZEN
|
||||
msr cpacr_el1, x0
|
||||
b .Lskip_set_cptr_\@
|
||||
|
||||
@ -298,7 +298,7 @@
|
||||
|
||||
// (h)VHE case
|
||||
mrs x0, cpacr_el1 // Disable SME traps
|
||||
orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
|
||||
orr x0, x0, #CPACR_ELx_SMEN
|
||||
msr cpacr_el1, x0
|
||||
b .Lskip_set_cptr_sme_\@
|
||||
|
||||
|
@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
|
||||
* emit the large TLP from the CPU.
|
||||
*/
|
||||
|
||||
static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
|
||||
const u32 *from, size_t count)
|
||||
static __always_inline void
|
||||
__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
|
||||
size_t count)
|
||||
{
|
||||
switch (count) {
|
||||
case 8:
|
||||
@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
|
||||
|
||||
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
|
||||
|
||||
static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
|
||||
size_t count)
|
||||
static __always_inline void
|
||||
__iowrite32_copy(void __iomem *to, const void *from, size_t count)
|
||||
{
|
||||
if (count == 8 || count == 4 || count == 2 || count == 1) {
|
||||
if (__builtin_constant_p(count) &&
|
||||
(count == 8 || count == 4 || count == 2 || count == 1)) {
|
||||
__const_memcpy_toio_aligned32(to, from, count);
|
||||
dgh();
|
||||
} else {
|
||||
__iowrite32_copy_full(to, from, count);
|
||||
}
|
||||
}
|
||||
#define __iowrite32_copy __iowrite32_copy
|
||||
|
||||
#define __iowrite32_copy(to, from, count) \
|
||||
(__builtin_constant_p(count) ? \
|
||||
__const_iowrite32_copy(to, from, count) : \
|
||||
__iowrite32_copy_full(to, from, count))
|
||||
|
||||
static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
|
||||
const u64 *from, size_t count)
|
||||
static __always_inline void
|
||||
__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
|
||||
size_t count)
|
||||
{
|
||||
switch (count) {
|
||||
case 8:
|
||||
@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
|
||||
|
||||
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
|
||||
|
||||
static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
|
||||
size_t count)
|
||||
static __always_inline void
|
||||
__iowrite64_copy(void __iomem *to, const void *from, size_t count)
|
||||
{
|
||||
if (count == 8 || count == 4 || count == 2 || count == 1) {
|
||||
if (__builtin_constant_p(count) &&
|
||||
(count == 8 || count == 4 || count == 2 || count == 1)) {
|
||||
__const_memcpy_toio_aligned64(to, from, count);
|
||||
dgh();
|
||||
} else {
|
||||
__iowrite64_copy_full(to, from, count);
|
||||
}
|
||||
}
|
||||
|
||||
#define __iowrite64_copy(to, from, count) \
|
||||
(__builtin_constant_p(count) ? \
|
||||
__const_iowrite64_copy(to, from, count) : \
|
||||
__iowrite64_copy_full(to, from, count))
|
||||
#define __iowrite64_copy __iowrite64_copy
|
||||
|
||||
/*
|
||||
* I/O memory mapping functions.
|
||||
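The arm64 io.h change above folds the old __builtin_constant_p() macro wrappers into __always_inline helpers. A minimal generic-C sketch of that dispatch pattern (hypothetical copy32()/copy32_full() names, not the kernel functions):

    #include <stddef.h>
    #include <stdint.h>

    /* Out-of-line fallback for sizes that are not known at compile time. */
    static void copy32_full(volatile uint32_t *to, const uint32_t *from, size_t count)
    {
            for (size_t i = 0; i < count; i++)
                    to[i] = from[i];
    }

    static inline __attribute__((always_inline))
    void copy32(volatile uint32_t *to, const uint32_t *from, size_t count)
    {
            /*
             * Because this helper is always inlined, __builtin_constant_p(count)
             * is evaluated at the call site: literal counts of 1, 2, 4 or 8
             * collapse to a short unrolled store sequence, everything else goes
             * through the full routine, which is the same shape as the
             * __iowrite32_copy() change above without a macro doing the dispatch.
             */
            if (__builtin_constant_p(count) &&
                (count == 8 || count == 4 || count == 2 || count == 1)) {
                    for (size_t i = 0; i < count; i++)  /* fully unrolled */
                            to[i] = from[i];
            } else {
                    copy32_full(to, from, count);
            }
    }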
|
@ -305,6 +305,12 @@
|
||||
GENMASK(19, 14) | \
|
||||
BIT(11))
|
||||
|
||||
#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \
|
||||
GENMASK(27, 26) | \
|
||||
GENMASK(23, 22) | \
|
||||
GENMASK(19, 18) | \
|
||||
GENMASK(15, 0))
|
||||
|
||||
/* Hyp Debug Configuration Register bits */
|
||||
#define MDCR_EL2_E2TB_MASK (UL(0x3))
|
||||
#define MDCR_EL2_E2TB_SHIFT (UL(24))
|
||||
|
@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
|
||||
vcpu_set_flag((v), e); \
|
||||
} while (0)
|
||||
|
||||
#define __build_check_all_or_none(r, bits) \
|
||||
BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
|
||||
|
||||
#define __cpacr_to_cptr_clr(clr, set) \
|
||||
({ \
|
||||
u64 cptr = 0; \
|
||||
\
|
||||
if ((set) & CPACR_ELx_FPEN) \
|
||||
cptr |= CPTR_EL2_TFP; \
|
||||
if ((set) & CPACR_ELx_ZEN) \
|
||||
cptr |= CPTR_EL2_TZ; \
|
||||
if ((set) & CPACR_ELx_SMEN) \
|
||||
cptr |= CPTR_EL2_TSM; \
|
||||
if ((clr) & CPACR_ELx_TTA) \
|
||||
cptr |= CPTR_EL2_TTA; \
|
||||
if ((clr) & CPTR_EL2_TAM) \
|
||||
cptr |= CPTR_EL2_TAM; \
|
||||
if ((clr) & CPTR_EL2_TCPAC) \
|
||||
cptr |= CPTR_EL2_TCPAC; \
|
||||
\
|
||||
cptr; \
|
||||
})
|
||||
|
||||
#define __cpacr_to_cptr_set(clr, set) \
|
||||
({ \
|
||||
u64 cptr = 0; \
|
||||
\
|
||||
if ((clr) & CPACR_ELx_FPEN) \
|
||||
cptr |= CPTR_EL2_TFP; \
|
||||
if ((clr) & CPACR_ELx_ZEN) \
|
||||
cptr |= CPTR_EL2_TZ; \
|
||||
if ((clr) & CPACR_ELx_SMEN) \
|
||||
cptr |= CPTR_EL2_TSM; \
|
||||
if ((set) & CPACR_ELx_TTA) \
|
||||
cptr |= CPTR_EL2_TTA; \
|
||||
if ((set) & CPTR_EL2_TAM) \
|
||||
cptr |= CPTR_EL2_TAM; \
|
||||
if ((set) & CPTR_EL2_TCPAC) \
|
||||
cptr |= CPTR_EL2_TCPAC; \
|
||||
\
|
||||
cptr; \
|
||||
})
|
||||
|
||||
#define cpacr_clear_set(clr, set) \
|
||||
do { \
|
||||
BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
|
||||
BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
|
||||
__build_check_all_or_none((clr), CPACR_ELx_FPEN); \
|
||||
__build_check_all_or_none((set), CPACR_ELx_FPEN); \
|
||||
__build_check_all_or_none((clr), CPACR_ELx_ZEN); \
|
||||
__build_check_all_or_none((set), CPACR_ELx_ZEN); \
|
||||
__build_check_all_or_none((clr), CPACR_ELx_SMEN); \
|
||||
__build_check_all_or_none((set), CPACR_ELx_SMEN); \
|
||||
\
|
||||
if (has_vhe() || has_hvhe()) \
|
||||
sysreg_clear_set(cpacr_el1, clr, set); \
|
||||
else \
|
||||
sysreg_clear_set(cptr_el2, \
|
||||
__cpacr_to_cptr_clr(clr, set), \
|
||||
__cpacr_to_cptr_set(clr, set));\
|
||||
} while (0)
|
||||
|
||||
static __always_inline void kvm_write_cptr_el2(u64 val)
|
||||
{
|
||||
if (has_vhe() || has_hvhe())
|
||||
@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
|
||||
u64 val;
|
||||
|
||||
if (has_vhe()) {
|
||||
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
|
||||
CPACR_EL1_ZEN_EL1EN);
|
||||
val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
|
||||
if (cpus_have_final_cap(ARM64_SME))
|
||||
val |= CPACR_EL1_SMEN_EL1EN;
|
||||
} else if (has_hvhe()) {
|
||||
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
|
||||
val = CPACR_ELx_FPEN;
|
||||
|
||||
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
|
||||
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
|
||||
val |= CPACR_ELx_ZEN;
|
||||
if (cpus_have_final_cap(ARM64_SME))
|
||||
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
|
||||
val |= CPACR_ELx_SMEN;
|
||||
} else {
|
||||
val = CPTR_NVHE_EL2_RES1;
|
||||
|
||||
|
@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
extern unsigned int __ro_after_init kvm_sve_max_vl;
|
||||
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
|
||||
int __init kvm_arm_init_sve(void);
|
||||
|
||||
u32 __attribute_const__ kvm_target_cpu(void);
|
||||
@ -521,6 +522,20 @@ struct kvm_cpu_context {
|
||||
u64 *vncr_array;
|
||||
};
|
||||
|
||||
struct cpu_sve_state {
|
||||
__u64 zcr_el1;
|
||||
|
||||
/*
|
||||
* Ordering is important since __sve_save_state/__sve_restore_state
|
||||
* relies on it.
|
||||
*/
|
||||
__u32 fpsr;
|
||||
__u32 fpcr;
|
||||
|
||||
/* Must be SVE_VQ_BYTES (128 bit) aligned. */
|
||||
__u8 sve_regs[];
|
||||
};
|
||||
|
||||
/*
|
||||
* This structure is instantiated on a per-CPU basis, and contains
|
||||
* data that is:
|
||||
@ -534,7 +549,15 @@ struct kvm_cpu_context {
|
||||
*/
|
||||
struct kvm_host_data {
|
||||
struct kvm_cpu_context host_ctxt;
|
||||
struct user_fpsimd_state *fpsimd_state; /* hyp VA */
|
||||
|
||||
/*
|
||||
* All pointers in this union are hyp VA.
|
||||
* sve_state is only used in pKVM and if system_supports_sve().
|
||||
*/
|
||||
union {
|
||||
struct user_fpsimd_state *fpsimd_state;
|
||||
struct cpu_sve_state *sve_state;
|
||||
};
|
||||
|
||||
/* Ownership of the FP regs */
|
||||
enum {
|
||||
|
@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
|
||||
|
||||
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
|
||||
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
|
||||
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
|
||||
void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
|
||||
void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
|
||||
|
||||
u64 __guest_enter(struct kvm_vcpu *vcpu);
|
||||
|
||||
@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
|
||||
|
||||
extern unsigned long kvm_nvhe_sym(__icache_flags);
|
||||
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
|
||||
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
|
||||
|
||||
#endif /* __ARM64_KVM_HYP_H__ */
|
||||
|
@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
|
||||
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
|
||||
}
|
||||
|
||||
static inline size_t pkvm_host_sve_state_size(void)
|
||||
{
|
||||
if (!system_supports_sve())
|
||||
return 0;
|
||||
|
||||
return size_add(sizeof(struct cpu_sve_state),
|
||||
SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_PKVM_H__ */
|
||||
|
@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
|
||||
for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
|
||||
struct insn_emulation *insn = insn_emulations[i];
|
||||
bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
|
||||
if (insn->status == INSN_UNAVAILABLE)
|
||||
continue;
|
||||
|
||||
if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
|
||||
pr_warn("CPU[%u] cannot support the emulation of %s",
|
||||
cpu, insn->name);
|
||||
|
@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
|
||||
return size ? get_order(size) : 0;
|
||||
}
|
||||
|
||||
static size_t pkvm_host_sve_state_order(void)
|
||||
{
|
||||
return get_order(pkvm_host_sve_state_size());
|
||||
}
|
||||
|
||||
/* A lookup table holding the hypervisor VA for each vector slot */
|
||||
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
|
||||
|
||||
@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
|
||||
|
||||
static void __init teardown_hyp_mode(void)
|
||||
{
|
||||
bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
|
||||
int cpu;
|
||||
|
||||
free_hyp_pgds();
|
||||
for_each_possible_cpu(cpu) {
|
||||
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
|
||||
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
|
||||
|
||||
if (free_sve) {
|
||||
struct cpu_sve_state *sve_state;
|
||||
|
||||
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
|
||||
free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2398,6 +2411,58 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_pkvm_host_sve_state(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (!system_supports_sve())
|
||||
return 0;
|
||||
|
||||
/* Allocate pages for host sve state in protected mode. */
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
|
||||
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
||||
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* Don't map the pages in hyp since these are only used in protected
|
||||
* mode, which will (re)create its own mapping when initialized.
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Finalizes the initialization of hyp mode, once everything else is initialized
|
||||
* and the initialziation process cannot fail.
|
||||
*/
|
||||
static void finalize_init_hyp_mode(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (system_supports_sve() && is_protected_kvm_enabled()) {
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct cpu_sve_state *sve_state;
|
||||
|
||||
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
|
||||
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
|
||||
kern_hyp_va(sve_state);
|
||||
}
|
||||
} else {
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct user_fpsimd_state *fpsimd_state;
|
||||
|
||||
fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
|
||||
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
|
||||
kern_hyp_va(fpsimd_state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void pkvm_hyp_init_ptrauth(void)
|
||||
{
|
||||
struct kvm_cpu_context *hyp_ctxt;
|
||||
@ -2566,6 +2631,10 @@ static int __init init_hyp_mode(void)
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
err = init_pkvm_host_sve_state();
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = kvm_hyp_init_protection(hyp_va_bits);
|
||||
if (err) {
|
||||
kvm_err("Failed to init hyp memory protection\n");
|
||||
@ -2730,6 +2799,13 @@ static __init int kvm_arm_init(void)
|
||||
if (err)
|
||||
goto out_subs;
|
||||
|
||||
/*
|
||||
* This should be called after initialization is done and failure isn't
|
||||
* possible anymore.
|
||||
*/
|
||||
if (!in_hyp_mode)
|
||||
finalize_init_hyp_mode();
|
||||
|
||||
kvm_arm_initialised = true;
|
||||
|
||||
return 0;
|
||||
|
@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
|
||||
if (forward_traps(vcpu, HCR_NV))
|
||||
return;
|
||||
|
||||
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
|
||||
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
|
||||
|
||||
/* Check for an ERETAx */
|
||||
esr = kvm_vcpu_get_esr(vcpu);
|
||||
if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
|
||||
/*
|
||||
* Oh no, ERETAx failed to authenticate. If we have
|
||||
* FPACCOMBINE, deliver an exception right away. If we
|
||||
* don't, then let the mangled ELR value trickle down the
|
||||
* Oh no, ERETAx failed to authenticate.
|
||||
*
|
||||
* If we have FPACCOMBINE and we don't have a pending
|
||||
* Illegal Execution State exception (which has priority
|
||||
* over FPAC), deliver an exception right away.
|
||||
*
|
||||
* Otherwise, let the mangled ELR value trickle down the
|
||||
* ERET handling, and the guest will have a little surprise.
|
||||
*/
|
||||
if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
|
||||
if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
|
||||
esr &= ESR_ELx_ERET_ISS_ERETA;
|
||||
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
|
||||
kvm_inject_nested_sync(vcpu, esr);
|
||||
@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
|
||||
preempt_disable();
|
||||
kvm_arch_vcpu_put(vcpu);
|
||||
|
||||
spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
|
||||
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
|
||||
if (!esr_iss_is_eretax(esr))
|
||||
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
|
||||
|
||||
trace_kvm_nested_eret(vcpu, elr, spsr);
|
||||
|
||||
/*
|
||||
* Note that the current exception level is always the virtual EL2,
|
||||
* since we set HCR_EL2.NV bit only when entering the virtual EL2.
|
||||
*/
|
||||
*vcpu_pc(vcpu) = elr;
|
||||
*vcpu_cpsr(vcpu) = spsr;
|
||||
|
||||
|
@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||
fpsimd_save_and_flush_cpu_state();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If normal guests gain SME support, maintain this behavior for pKVM
|
||||
* guests, which don't support SME.
|
||||
*/
|
||||
WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
|
||||
read_sysreg_s(SYS_SVCR));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -161,9 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
|
||||
if (has_vhe() && system_supports_sme()) {
|
||||
/* Also restore EL0 state seen on entry */
|
||||
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
|
||||
sysreg_clear_set(CPACR_EL1, 0,
|
||||
CPACR_EL1_SMEN_EL0EN |
|
||||
CPACR_EL1_SMEN_EL1EN);
|
||||
sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
|
||||
else
|
||||
sysreg_clear_set(CPACR_EL1,
|
||||
CPACR_EL1_SMEN_EL0EN,
|
||||
|
@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
case PSR_AA32_MODE_SVC:
|
||||
case PSR_AA32_MODE_ABT:
|
||||
case PSR_AA32_MODE_UND:
|
||||
case PSR_AA32_MODE_SYS:
|
||||
if (!vcpu_el1_is_32bit(vcpu))
|
||||
return -EINVAL;
|
||||
break;
|
||||
@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
|
||||
int i, nr_reg;
|
||||
|
||||
switch (*vcpu_cpsr(vcpu)) {
|
||||
switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
|
||||
/*
|
||||
* Either we are dealing with user mode, and only the
|
||||
* first 15 registers (+ PC) must be narrowed to 32bit.
|
||||
|
@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
|
||||
u32 cpsr_cond;
|
||||
int cond;
|
||||
|
||||
/* Top two bits non-zero? Unconditional. */
|
||||
if (kvm_vcpu_get_esr(vcpu) >> 30)
|
||||
/*
|
||||
* These are the exception classes that could fire with a
|
||||
* conditional instruction.
|
||||
*/
|
||||
switch (kvm_vcpu_trap_get_class(vcpu)) {
|
||||
case ESR_ELx_EC_CP15_32:
|
||||
case ESR_ELx_EC_CP15_64:
|
||||
case ESR_ELx_EC_CP14_MR:
|
||||
case ESR_ELx_EC_CP14_LS:
|
||||
case ESR_ELx_EC_FP_ASIMD:
|
||||
case ESR_ELx_EC_CP10_ID:
|
||||
case ESR_ELx_EC_CP14_64:
|
||||
case ESR_ELx_EC_SVC32:
|
||||
break;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Is condition field valid? */
|
||||
cond = kvm_vcpu_get_condition(vcpu);
|
||||
|
@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state)
|
||||
sve_load 0, x1, x2, 3
|
||||
ret
|
||||
SYM_FUNC_END(__sve_restore_state)
|
||||
|
||||
SYM_FUNC_START(__sve_save_state)
|
||||
mov x2, #1
|
||||
sve_save 0, x1, x2, 3
|
||||
ret
|
||||
SYM_FUNC_END(__sve_save_state)
|
||||
|
@ -316,10 +316,24 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
|
||||
__sve_restore_state(vcpu_sve_pffr(vcpu),
|
||||
&vcpu->arch.ctxt.fp_regs.fpsr);
|
||||
&vcpu->arch.ctxt.fp_regs.fpsr,
|
||||
true);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
|
||||
}
|
||||
|
||||
static inline void __hyp_sve_save_host(void)
|
||||
{
|
||||
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
|
||||
|
||||
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
|
||||
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
|
||||
__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
|
||||
&sve_state->fpsr,
|
||||
true);
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
|
||||
|
||||
/*
|
||||
* We trap the first access to the FP/SIMD to save the host context and
|
||||
* restore the guest context lazily.
|
||||
@ -330,7 +344,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
bool sve_guest;
|
||||
u8 esr_ec;
|
||||
u64 reg;
|
||||
|
||||
if (!system_supports_fpsimd())
|
||||
return false;
|
||||
@ -353,24 +366,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
/* Valid trap. Switch the context: */
|
||||
|
||||
/* First disable enough traps to allow us to update the registers */
|
||||
if (has_vhe() || has_hvhe()) {
|
||||
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
|
||||
if (sve_guest)
|
||||
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
|
||||
|
||||
sysreg_clear_set(cpacr_el1, 0, reg);
|
||||
} else {
|
||||
reg = CPTR_EL2_TFP;
|
||||
if (sve_guest)
|
||||
reg |= CPTR_EL2_TZ;
|
||||
|
||||
sysreg_clear_set(cptr_el2, reg, 0);
|
||||
}
|
||||
if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
|
||||
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
|
||||
else
|
||||
cpacr_clear_set(0, CPACR_ELx_FPEN);
|
||||
isb();
|
||||
|
||||
/* Write out the host state if it's in the registers */
|
||||
if (host_owns_fp_regs())
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
kvm_hyp_save_fpsimd_host(vcpu);
|
||||
|
||||
/* Restore the guest state */
|
||||
if (sve_guest)
|
||||
|
@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
}
|
||||
|
||||
void pkvm_hyp_vm_table_init(void *tbl);
|
||||
void pkvm_host_fpsimd_state_init(void);
|
||||
|
||||
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
|
||||
unsigned long pgd_hva);
|
||||
|
@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
|
||||
|
||||
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
|
||||
|
||||
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
|
||||
/*
|
||||
* On saving/restoring guest sve state, always use the maximum VL for
|
||||
* the guest. The layout of the data when saving the sve state depends
|
||||
* on the VL, so use a consistent (i.e., the maximum) guest VL.
|
||||
*/
|
||||
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
|
||||
__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
|
||||
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
|
||||
}
|
||||
|
||||
static void __hyp_sve_restore_host(void)
|
||||
{
|
||||
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
|
||||
|
||||
/*
|
||||
* On saving/restoring host sve state, always use the maximum VL for
|
||||
* the host. The layout of the data when saving the sve state depends
|
||||
* on the VL, so use a consistent (i.e., the maximum) host VL.
|
||||
*
|
||||
* Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
|
||||
* supported by the system (or limited at EL3).
|
||||
*/
|
||||
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
|
||||
__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
|
||||
&sve_state->fpsr,
|
||||
true);
|
||||
write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
|
||||
}
|
||||
|
||||
static void fpsimd_sve_flush(void)
|
||||
{
|
||||
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||
}
|
||||
|
||||
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!guest_owns_fp_regs())
|
||||
return;
|
||||
|
||||
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
|
||||
isb();
|
||||
|
||||
if (vcpu_has_sve(vcpu))
|
||||
__hyp_sve_save_guest(vcpu);
|
||||
else
|
||||
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
|
||||
|
||||
if (system_supports_sve())
|
||||
__hyp_sve_restore_host();
|
||||
else
|
||||
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
|
||||
|
||||
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
|
||||
}
|
||||
|
||||
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
|
||||
|
||||
fpsimd_sve_flush();
|
||||
|
||||
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
|
||||
|
||||
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
|
||||
hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
|
||||
/* Limit guest vector length to the maximum supported by the host. */
|
||||
hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
|
||||
|
||||
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
|
||||
|
||||
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
|
||||
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
|
||||
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
|
||||
|
||||
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
|
||||
|
||||
@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
|
||||
unsigned int i;
|
||||
|
||||
fpsimd_sve_sync(&hyp_vcpu->vcpu);
|
||||
|
||||
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
|
||||
|
||||
host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
|
||||
host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
|
||||
|
||||
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
|
||||
|
||||
@ -79,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
|
||||
struct pkvm_hyp_vcpu *hyp_vcpu;
|
||||
struct kvm *host_kvm;
|
||||
|
||||
/*
|
||||
* KVM (and pKVM) doesn't support SME guests for now, and
|
||||
* ensures that SME features aren't enabled in pstate when
|
||||
* loading a vcpu. Therefore, if SME features enabled the host
|
||||
* is misbehaving.
|
||||
*/
|
||||
if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
host_kvm = kern_hyp_va(host_vcpu->kvm);
|
||||
hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
|
||||
host_vcpu->vcpu_idx);
|
||||
@ -405,11 +477,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
|
||||
handle_host_smc(host_ctxt);
|
||||
break;
|
||||
case ESR_ELx_EC_SVE:
|
||||
if (has_hvhe())
|
||||
sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
|
||||
CPACR_EL1_ZEN_EL0EN));
|
||||
else
|
||||
sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
|
||||
cpacr_clear_set(0, CPACR_ELx_ZEN);
|
||||
isb();
|
||||
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
|
||||
break;
|
||||
|
@ -18,6 +18,8 @@ unsigned long __icache_flags;
|
||||
/* Used by kvm_get_vttbr(). */
|
||||
unsigned int kvm_arm_vmid_bits;
|
||||
|
||||
unsigned int kvm_host_sve_max_vl;
|
||||
|
||||
/*
|
||||
* Set trap register values based on features in ID_AA64PFR0.
|
||||
*/
|
||||
@ -63,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
|
||||
/* Trap SVE */
|
||||
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
|
||||
if (has_hvhe())
|
||||
cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
|
||||
cptr_clear |= CPACR_ELx_ZEN;
|
||||
else
|
||||
cptr_set |= CPTR_EL2_TZ;
|
||||
}
|
||||
@ -247,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
|
||||
vm_table = tbl;
|
||||
}
|
||||
|
||||
void pkvm_host_fpsimd_state_init(void)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < hyp_nr_cpus; i++) {
|
||||
struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
|
||||
|
||||
host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the hyp vm structure corresponding to the handle.
|
||||
*/
|
||||
@ -586,6 +577,8 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
|
||||
if (ret)
|
||||
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
|
||||
|
||||
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pkvm_create_host_sve_mappings(void)
|
||||
{
|
||||
void *start, *end;
|
||||
int ret, i;
|
||||
|
||||
if (!system_supports_sve())
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < hyp_nr_cpus; i++) {
|
||||
struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
|
||||
struct cpu_sve_state *sve_state = host_data->sve_state;
|
||||
|
||||
start = kern_hyp_va(sve_state);
|
||||
end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
|
||||
ret = pkvm_create_mappings(start, end, PAGE_HYP);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
|
||||
unsigned long *per_cpu_base,
|
||||
u32 hyp_va_bits)
|
||||
@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
|
||||
return ret;
|
||||
}
|
||||
|
||||
pkvm_create_host_sve_mappings();
|
||||
|
||||
/*
|
||||
* Map the host sections RO in the hypervisor, but transfer the
|
||||
* ownership from the host to the hypervisor itself to make sure they
|
||||
@ -300,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void)
|
||||
goto out;
|
||||
|
||||
pkvm_hyp_vm_table_init(vm_table_base);
|
||||
pkvm_host_fpsimd_state_init();
|
||||
out:
|
||||
/*
|
||||
* We tail-called to here from handle___pkvm_init() and will not return,
|
||||
|
@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
|
||||
if (cpus_have_final_cap(ARM64_SME)) {
|
||||
if (has_hvhe())
|
||||
val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
|
||||
val &= ~CPACR_ELx_SMEN;
|
||||
else
|
||||
val |= CPTR_EL2_TSM;
|
||||
}
|
||||
|
||||
if (!guest_owns_fp_regs()) {
|
||||
if (has_hvhe())
|
||||
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
|
||||
CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
|
||||
val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
|
||||
else
|
||||
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
|
||||
|
||||
@ -182,6 +181,25 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
kvm_handle_pvm_sysreg(vcpu, exit_code));
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/*
|
||||
* Non-protected kvm relies on the host restoring its sve state.
|
||||
* Protected kvm restores the host's sve state as not to reveal that
|
||||
* fpsimd was used by a guest nor leak upper sve bits.
|
||||
*/
|
||||
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
|
||||
__hyp_sve_save_host();
|
||||
|
||||
/* Re-enable SVE traps if not supported for the guest vcpu. */
|
||||
if (!vcpu_has_sve(vcpu))
|
||||
cpacr_clear_set(CPACR_ELx_ZEN, 0);
|
||||
|
||||
} else {
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
}
|
||||
}
|
||||
|
||||
static const exit_handler_fn hyp_exit_handlers[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = NULL,
|
||||
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
|
||||
|
@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
|
||||
val = read_sysreg(cpacr_el1);
|
||||
val |= CPACR_ELx_TTA;
|
||||
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
|
||||
CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
|
||||
val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
|
||||
|
||||
/*
|
||||
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
|
||||
@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
|
||||
|
||||
if (guest_owns_fp_regs()) {
|
||||
if (vcpu_has_sve(vcpu))
|
||||
val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
|
||||
val |= CPACR_ELx_ZEN;
|
||||
} else {
|
||||
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
|
||||
val &= ~CPACR_ELx_FPEN;
|
||||
__activate_traps_fpsimd32(vcpu);
|
||||
}
|
||||
|
||||
@ -262,6 +261,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
|
||||
}
|
||||
|
||||
static const exit_handler_fn hyp_exit_handlers[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = NULL,
|
||||
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
|
||||
|
@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
|
||||
break;
|
||||
|
||||
case SYS_ID_AA64PFR1_EL1:
|
||||
/* Only support SSBS */
|
||||
val &= NV_FTR(PFR1, SSBS);
|
||||
/* Only support BTI, SSBS, CSV2_frac */
|
||||
val &= (NV_FTR(PFR1, BT) |
|
||||
NV_FTR(PFR1, SSBS) |
|
||||
NV_FTR(PFR1, CSV2_frac));
|
||||
break;
|
||||
|
||||
case SYS_ID_AA64MMFR0_EL1:
|
||||
|
@ -32,6 +32,7 @@
|
||||
|
||||
/* Maximum phys_shift supported for any VM on this host */
|
||||
static u32 __ro_after_init kvm_ipa_limit;
|
||||
unsigned int __ro_after_init kvm_host_sve_max_vl;
|
||||
|
||||
/*
|
||||
* ARMv8 Reset Values
|
||||
@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
|
||||
{
|
||||
if (system_supports_sve()) {
|
||||
kvm_sve_max_vl = sve_max_virtualisable_vl();
|
||||
kvm_host_sve_max_vl = sve_max_vl();
|
||||
kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
|
||||
|
||||
/*
|
||||
* The get_sve_reg()/set_sve_reg() ioctl interface will need
|
||||
|
@@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
* clearing access/dirty for the whole block.
*/
unsigned long start = addr;
unsigned long end = start + nr;
unsigned long end = start + nr * PAGE_SIZE;

if (pte_cont(__ptep_get(ptep + nr - 1)))
end = ALIGN(end, CONT_PTE_SIZE);
@@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
}

__clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
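The hunk above fixes a unit mix-up: nr counts PTEs while start/end are byte addresses, so the range length is nr * PAGE_SIZE and the value handed back to __clear_young_dirty_ptes() must be converted back into a PTE count. A small worked example with assumed 4 KiB pages and 16-entry contiguous blocks:

    #include <assert.h>

    #define PAGE_SIZE       4096UL                  /* assumed for the example */
    #define CONT_PTE_SIZE   (16 * PAGE_SIZE)        /* 16 contiguous PTEs per block (assumed) */

    int main(void)
    {
            unsigned long addr = 0x40002000UL;      /* arbitrary page-aligned address */
            unsigned int nr = 3;                    /* PTEs covered by the call */

            unsigned long start = addr;
            unsigned long end = start + nr * PAGE_SIZE;     /* bytes, not a PTE count */

            /* Widening to the enclosing contiguous block works on byte addresses... */
            end = (end + CONT_PTE_SIZE - 1) & ~(CONT_PTE_SIZE - 1);
            start &= ~(CONT_PTE_SIZE - 1);

            /* ...but the callee takes a number of PTEs again. */
            assert((end - start) / PAGE_SIZE == 16);
            return 0;
    }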
|
||||
|
@ -44,14 +44,14 @@ linux,cma {
|
||||
&gmac0 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
bus_id = <0x0>;
};

&gmac1 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
bus_id = <0x1>;
};

@@ -43,7 +43,7 @@ linux,cma {
&gmac0 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy0>;
mdio {
compatible = "snps,dwmac-mdio";

@@ -58,7 +58,7 @@ phy0: ethernet-phy@0 {
&gmac1 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy1>;
mdio {
compatible = "snps,dwmac-mdio";

@@ -92,7 +92,7 @@ phy1: ethernet-phy@1 {
&gmac2 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy2>;
mdio {
compatible = "snps,dwmac-mdio";

@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void set_cpuid_to_node(int cpuid, s16 node) { }

static inline int early_cpu_to_node(int cpu)
{

@@ -42,7 +42,7 @@
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
or \temp1, \temp1, \temp2
bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm

@@ -22,7 +22,7 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
.dword kernel_entry /* Kernel entry point */
.dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */

@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;

/* Prefer to use built-in dtb, checking its legality first. */
if (!fdt_check_header(__dtb_start))
if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */

@@ -351,10 +351,8 @@ void __init platform_init(void)
arch_reserve_vmcore();
arch_reserve_crashkernel();

#ifdef CONFIG_ACPI_TABLE_UPGRADE
acpi_table_upgrade();
#endif
#ifdef CONFIG_ACPI
acpi_table_upgrade();
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif

@@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void)

if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}

@@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void)
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;

early_numa_add_cpu(cpu, 0);
set_cpuid_to_node(cpuid, 0);
}

loongson_sysconf.nr_cpus = num_processors;

@@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
numa_add_cpu(0);

rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {

@@ -6,6 +6,7 @@

#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */

/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will

@@ -142,10 +143,11 @@ SECTIONS

#ifdef CONFIG_EFI_STUB
/* header symbols */
_kernel_asize = _end - _text;
_kernel_fsize = _edata - _text;
_kernel_vsize = _end - __initdata_begin;
_kernel_rsize = _edata - __initdata_begin;
_kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
_kernel_asize = ABSOLUTE(_end - _text);
_kernel_fsize = ABSOLUTE(_edata - _text);
_kernel_vsize = ABSOLUTE(_end - __initdata_begin);
_kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif

.gptab.sdata : {
@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

void flush_kernel_dcache_page_addr(const void *addr);

#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));

/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end) flush_cache_all()
void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
void flush_cache_vunmap(unsigned long start, unsigned long end);

void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);

/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
void kunmap_flush_on_unmap(const void *addr);

#endif /* _PARISC_CACHEFLUSH_H */

@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}

static inline pte_t ptep_get(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#define ptep_get ptep_get

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;

if (!pte_young(*ptep))
return 0;

pte = *ptep;
pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}

@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;

old_pte = *ptep;
set_pte(ptep, __pte(0));

return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));

@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)

/*
* When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
* of page flushes done flush_cache_page_if_present. There are some
* pros and cons in using this option. It may increase the risk of
* random segmentation faults.
*/
#define CONFIG_FLUSH_PAGE_ACCESSED 0

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

static void flush_kernel_dcache_page_addr(const void *addr);

/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure

@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;

/*
* The TLB is the engine of coherence on parisc. The CPU is
* entitled to speculate any page with a TLB mapping, so here
* we kill the mapping then flush the page along a special flush
* only alias mapping. This guarantees that the page is no-longer
* in the cache for any process and nor may it be speculatively
* read in (until the user or kernel specifically accesses it,
* of course).
*/
flush_tlb_page(vma, vmaddr);

preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)

@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
static void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;

vmaddr &= PAGE_MASK;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);

/* Use tmpalias flush to prevent data cache move-in */
preempt_disable();

/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);

flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);

/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);

flush_dcache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}

static void flush_kernel_icache_page_addr(const void *addr)
{
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;

/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);

/* Use tmpalias flush to prevent instruction cache move-in */
preempt_disable();
flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}

void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,

for (;;) {
flush_kernel_dcache_page_addr(kaddr);
flush_kernel_icache_page(kaddr);
flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}

/*
* Walk page directory for MM to find PTEP pointer for address ADDR.
*/
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;

@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

/*
* Return user physical address. Returns 0 if page is not present.
*/
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif

/* Save context */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif

/* Set context for lpa_user */
switch_mm_irqs_off(NULL, mm, NULL);
pa = lpa_user(addr);

/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);

return pa;
}

void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);

@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;

if (parisc_requires_coherency()) {
for (i = 0; i < nr; i++) {
pte_t *ptep = get_ptep(vma->vm_mm,
addr + i * PAGE_SIZE);
if (!ptep)
continue;
if (pte_needs_flush(*ptep))
flush_user_cache_page(vma,
addr + i * PAGE_SIZE);
/* Optimise accesses to the same table? */
pte_unmap(ptep);
}
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
for (i = 0; i < nr; i++)
flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;

flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
pte_t *ptep;
pte_t *ptep, pte;

/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pte = ptep_get(ptep);
needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
flush_cache_page(vma, vmaddr, pfn);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
struct mm_struct *mm = vma->vm_mm;
unsigned long physaddr = get_upa(mm, vmaddr);

if (physaddr)
__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}

void copy_user_highpage(struct page *to, struct page *from,

@@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,

kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);

@@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}

/* __flush_tlb_range()

@@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;
unsigned long addr;

for (addr = start; addr < end; addr += PAGE_SIZE) {
bool needs_flush = false;
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pfn = pte_pfn(*ptep);
pte_unmap(ptep);
}
if (needs_flush) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
for (addr = start; addr < end; addr += PAGE_SIZE)
flush_cache_page_if_present(vma, addr);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
return;
}

flush_cache_pages(vma, start, end);
flush_cache_pages(vma, start & PAGE_MASK, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)

@@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;

if (parisc_requires_coherency()) {
if (vma->vm_flags & VM_SHARED)
flush_data_cache();
else
flush_user_cache_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);

if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
return 1;
}

/*
* After a PTE is cleared, we have no way to flush the cache for
* the physical page. On PA8800 and PA8900 processors, these lines
* can cause random cache corruption. Thus, we must flush the cache
* as well as the TLB when clearing a PTE that's valid.
*/
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
struct mm_struct *mm = (vma)->vm_mm;
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
unsigned long pfn = pte_pfn(pte);

if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
else if (pte_accessible(mm, pte))
flush_tlb_page(vma, addr);

return pte;
}

/*
* The physical address for pages in the ioremap case can be obtained
* from the vm_struct struct. I wasn't able to successfully handle the
* vmalloc and vmap cases. We have an array of struct page pointers in
* the uninitialized vmalloc case but the flush failed using page_to_pfn.
*/
void flush_cache_vmap(unsigned long start, unsigned long end)
{
unsigned long addr, physaddr;
struct vm_struct *vm;

/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);

if (end - start >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}

flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
flush_cache_all();
return;
}

vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
flush_cache_all();
return;
}

/* The physical addresses of IOREMAP regions are contiguous */
if (vm->flags & VM_IOREMAP) {
physaddr = vm->phys_addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
preempt_disable();
flush_dcache_page_asm(physaddr, start);
flush_icache_page_asm(physaddr, start);
preempt_enable();
physaddr += PAGE_SIZE;
}
return;
}

flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
* The vm_struct has been retired and the page table is set up. The
* last page in the range is a guard page. Its physical address can't
* be determined using lpa, so there is no way to flush the range
* using flush_dcache_page_asm.
*/
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);
/*
* On systems with PA8800/PA8900 processors, there is no way to flush
* a vmap range other than using the architected loop to flush the
* entire cache. The page directory is not set up, so we can't use
* fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
* L2 is physically indexed but FDCE/FICE instructions in virtual
* mode output their virtual address on the core bus, not their
* real address. As a result, the L2 cache index formed from the
* virtual address will most likely not be the same as the L2 index
* formed from the real address.
*/
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;

if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);

if (!static_branch_likely(&parisc_has_dcache))
return;

/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}

flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();

if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);

if (!static_branch_likely(&parisc_has_dcache))
return;

/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}

purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)

static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
u32 hart, group = 0;
u32 hart = 0, group = 0;

hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_hart_bits)
hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);

@@ -724,9 +724,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
case KVM_REG_RISCV_SBI_MULTI_EN:
case KVM_REG_RISCV_ISA_MULTI_EN:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
case KVM_REG_RISCV_SBI_MULTI_DIS:
case KVM_REG_RISCV_ISA_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;

@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs)
if (unlikely(access_error(cause, vma))) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
tsk->thread.bad_cause = SEGV_ACCERR;
bad_area_nosemaphore(regs, code, addr);
tsk->thread.bad_cause = cause;
bad_area_nosemaphore(regs, SEGV_ACCERR, addr);
return;
}

@@ -250,18 +250,19 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;

/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
* if end of dram is equal to maximum addressable memory. For 64-bit
* kernel, this problem can't happen here as the end of the virtual
* address space is occupied by the kernel mapping then this check must
* be done as soon as the kernel mapping base address is determined.
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
* - This memory would overlap with ERR_PTR
* - This memory belongs to high memory, which is not supported
*
* This is not applicable to 64-bit kernel, because virtual addresses
* after (void *)(-PAGE_SIZE) are not linearly mapped: they are
* occupied by kernel mapping. Also it is unrealistic for high memory
* to exist on 64-bit platforms.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}

min_low_pfn = PFN_UP(phys_ram_base);
@@ -384,7 +384,7 @@ static void fixup_vmlinux_info(void)
void startup_kernel(void)
{
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
unsigned long nokaslr_offset_phys = mem_safe_offset();
unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
unsigned long amode31_lma = 0;
unsigned long max_physmem_end;
unsigned long asce_limit;

@@ -393,6 +393,12 @@ void startup_kernel(void)

fixup_vmlinux_info();
setup_lpp();

/*
* Non-randomized kernel physical start address must be _SEGMENT_SIZE
* aligned (see blow).
*/
nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);

/*

@@ -425,10 +431,25 @@ void startup_kernel(void)
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);

if (kaslr_enabled())
__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
/*
* __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
* 20 bits (the offset within a large page) are zero. Copy the last
* 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
* __kaslr_offset_phys.
*
* With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
* are identical, which is required to allow for large mappings of the
* kernel image.
*/
kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
if (kaslr_enabled()) {
unsigned long end = ident_map_size - kaslr_large_page_offset;

__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
}
if (!__kaslr_offset_phys)
__kaslr_offset_phys = nokaslr_offset_phys;
__kaslr_offset_phys |= kaslr_large_page_offset;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
deploy_kernel((void *)__kaslr_offset_phys);
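The bit arithmetic in the hunk above is easier to see in isolation. The following stand-alone C sketch is not the s390 boot code itself; the values are hypothetical and it only assumes what the hunk's own comment states, namely that the large-page (segment) size is 1 MiB, so the mask below clears the low 20 bits. It shows how the physical KASLR offset ends up sharing its within-large-page offset with the virtual one:

	/* minimal illustration, not the actual boot code */
	#include <assert.h>

	#define SEG_SIZE (1UL << 20)       /* assumed large-page (segment) size */
	#define SEG_MASK (~(SEG_SIZE - 1)) /* keeps only the bits above the low 20 */

	int main(void)
	{
		unsigned long virt_off = 0x3d2345000UL; /* hypothetical, smaller alignment */
		unsigned long phys_off = 0x40000000UL;  /* hypothetical, segment aligned */
		unsigned long in_seg = virt_off & ~SEG_MASK; /* offset within the large page */

		phys_off |= in_seg; /* copy the low 20 bits across */
		assert((phys_off & ~SEG_MASK) == (virt_off & ~SEG_MASK));
		return 0;
	}

With the low bits matching, a large-page mapping of the kernel image can translate the virtual offset to the physical one without splitting the page.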
@@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m

static bool large_allowed(enum populate_mode mode)
{
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;

return machine.has_edat2 && large_allowed(mode) &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;

return machine.has_edat1 && large_allowed(mode) &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}

static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
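Read outside the diff, the added IS_ALIGNED(_pa(...)) checks encode one rule: a block (large-page) mapping is only legal when the virtual range and the physical range it maps are both aligned to the block size and the range is at least one block long. A minimal C sketch of that predicate, with hypothetical names and a 1 MiB block size assumed purely for illustration:

	#include <stdbool.h>

	#define BLOCK_SIZE (1UL << 20) /* stand-in for PMD_SIZE/PUD_SIZE */
	#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

	/* true when addr..addr+size can start with one block mapping of paddr */
	static bool can_use_block_mapping(unsigned long addr, unsigned long size,
					  unsigned long paddr)
	{
		return IS_ALIGNED(addr, BLOCK_SIZE) &&
		       size >= BLOCK_SIZE &&
		       IS_ALIGNED(paddr, BLOCK_SIZE);
	}

Checking only the virtual side is not enough: if the physical target is not block aligned, the hardware cannot form a single large translation for the range.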
@@ -109,6 +109,7 @@
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = ALIGN(PAGE_SIZE);
. += AMODE31_SIZE; /* .amode31 section */
. = ALIGN(1 << 20); /* _SEGMENT_SIZE */
#else
. = ALIGN(8);
#endif
@@ -43,7 +43,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512

@@ -51,6 +50,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m

@@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y

@@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7

@@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y

@@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m

@@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y

@@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XTABLES_COMPAT=y
CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m

@@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m

@@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m

@@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m

@@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m

@@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m

@@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m

@@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m

@@ -574,7 +601,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y

@@ -645,7 +671,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y

@@ -663,6 +688,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y

@@ -879,6 +905,5 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
@@ -41,7 +41,6 @@ CONFIG_PROFILING=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512

@@ -49,6 +48,7 @@ CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
# CONFIG_EXPOLINE_EXTERN is not set
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m

@@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y

@@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC_DIAG=m
CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y

@@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m

@@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y

@@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_FLOW_OFFLOAD=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NETFILTER_XTABLES_COMPAT=y
CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SYNPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NF_FLOW_TABLE_PROCFS=y
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m

@@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m

@@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m

@@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m

@@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m

@@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_FIB_IPV6=m

@@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m

@@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
CONFIG_DM_SWITCH=m
CONFIG_DM_INTEGRITY=m
CONFIG_DM_VDO=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m

@@ -630,7 +658,6 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y

@@ -649,6 +676,7 @@ CONFIG_SQUASHFS_XZ=y
CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V2=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
@@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
# CONFIG_AP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SWAP is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
# CONFIG_PCPU_DEV_REFCNT is not set
@@ -451,7 +451,7 @@ static void *nt_final(void *ptr)
/*
* Initialize ELF header (new kernel)
*/
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count)
{
memset(ehdr, 0, sizeof(*ehdr));
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);

@@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
/*
* Number of memory chunk PT_LOAD program headers plus one kernel
* image PT_LOAD program header plus one PT_NOTE program header.
*/
ehdr->e_phnum = mem_chunk_cnt + 1 + 1;
/* Number of PT_LOAD program headers plus PT_NOTE program header */
ehdr->e_phnum = phdr_count + 1;
return ehdr + 1;
}

@@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void)
/*
* Initialize ELF loads (new kernel)
*/
static void loads_init(Elf64_Phdr *phdr)
static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm)
{
unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
unsigned long old_identity_base = 0;
phys_addr_t start, end;
u64 idx;

if (os_info_has_vm)
old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE);
for_each_physmem_range(idx, &oldmem_type, &start, &end) {
phdr->p_type = PT_LOAD;
phdr->p_vaddr = old_identity_base + start;

@@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr)
}
}

static bool os_info_has_vm(void)
{
return os_info_old_value(OS_INFO_KASLR_OFFSET);
}

/*
* Prepare PT_LOAD type program header for kernel image region
*/

@@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
return ptr;
}

static size_t get_elfcorehdr_size(int mem_chunk_cnt)
static size_t get_elfcorehdr_size(int phdr_count)
{
size_t size;

@@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
size += nt_vmcoreinfo_size();
/* nt_final */
size += sizeof(Elf64_Nhdr);
/* PT_LOAD type program header for kernel text region */
size += sizeof(Elf64_Phdr);
/* PT_LOADS */
size += mem_chunk_cnt * sizeof(Elf64_Phdr);
size += phdr_count * sizeof(Elf64_Phdr);

return size;
}

@@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text;
int mem_chunk_cnt, phdr_text_cnt;
size_t alloc_size;
int mem_chunk_cnt;
void *ptr, *hdr;
u64 hdr_off;

@@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
}

mem_chunk_cnt = get_mem_chunk_cnt();
phdr_text_cnt = os_info_has_vm() ? 1 : 0;

alloc_size = get_elfcorehdr_size(mem_chunk_cnt);
alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt);

hdr = kzalloc(alloc_size, GFP_KERNEL);

/* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
/*
* Without elfcorehdr /proc/vmcore cannot be created. Thus creating
* a dump with this crash kernel will fail. Panic now to allow other
* dump mechanisms to take over.
*/

@@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
panic("s390 kdump allocating elfcorehdr failed");

/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt);
/* Init program headers */
phdr_notes = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
phdr_text = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
phdr_loads = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
if (phdr_text_cnt) {
phdr_text = phdr_notes + 1;
phdr_loads = phdr_text + 1;
} else {
phdr_loads = phdr_notes + 1;
}
ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init kernel text program header */
text_init(phdr_text);
if (phdr_text_cnt)
text_init(phdr_text);
/* Init loads */
loads_init(phdr_loads);
loads_init(phdr_loads, phdr_text_cnt);
/* Finalize program headers */
hdr_off = PTR_DIFF(ptr, hdr);
*addr = (unsigned long long) hdr;
@@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o

vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)

OBJCOPYFLAGS_vmlinux.bin := -R .comment -S

@@ -2154,6 +2154,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, unsigned long roots);

@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
asm volatile("call __" #fn "_%c4" \
asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
: "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
@@ -77,7 +77,7 @@
#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */
#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */
#define VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */
#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "" Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */
#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */

@@ -215,7 +215,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)

int amd_smn_read(u16 node, u32 address, u32 *value)
{
return __amd_smn_rw(node, address, value, false);
int err = __amd_smn_rw(node, address, value, false);

if (PCI_POSSIBLE_ERROR(*value)) {
err = -ENODEV;
*value = 0;
}

return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

@@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
void *control_page;
unsigned int host_mem_enc_active;
int save_ftrace_enabled;
void *control_page;

/*
* This must be done before load_segments() since if call depth tracking
* is used then GS must be valid to make any function calls.
*/
host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);

#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)

@@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
host_mem_enc_active);

#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -44,6 +44,7 @@ config KVM
select KVM_VFIO
select HAVE_KVM_PM_NOTIFIER if PM
select KVM_GENERIC_HARDWARE_ENABLING
select KVM_WERROR if WERROR
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent

@@ -66,7 +67,7 @@ config KVM_WERROR
# FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning.
# Building KVM with -Werror and KASAN is still doable via enabling
# the kernel-wide WERROR=y.
depends on KVM && EXPERT && !KASAN
depends on KVM && ((EXPERT && !KASAN) || WERROR)
help
Add -Werror to the build flags for KVM.

@@ -97,15 +98,17 @@ config KVM_INTEL

config KVM_INTEL_PROVE_VE
bool "Check that guests do not receive #VE exceptions"
default KVM_PROVE_MMU || DEBUG_KERNEL
depends on KVM_INTEL
depends on KVM_INTEL && EXPERT
help

Checks that KVM's page table management code will not incorrectly
let guests receive a virtualization exception. Virtualization
exceptions will be trapped by the hypervisor rather than injected
in the guest.

Note: some CPUs appear to generate spurious EPT Violations #VEs
that trigger KVM's WARN, in particular with eptad=0 and/or nested
virtualization.

If unsure, say N.

config X86_SGX_KVM
@ -59,7 +59,17 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32

static bool lapic_timer_advance_dynamic __read_mostly;
/*
* Enable local APIC timer advancement (tscdeadline mode only) with adaptive
* tuning. When enabled, KVM programs the host timer event to fire early, i.e.
* before the deadline expires, to account for the delay between taking the
* VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
* the guest, i.e. so that the interrupt arrives in the guest with minimal
* latency relative to the deadline programmed by the guest.
*/
static bool lapic_timer_advance __read_mostly = true;
module_param(lapic_timer_advance, bool, 0444);

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
@ -1854,16 +1864,14 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

if (lapic_timer_advance_dynamic) {
adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
/*
* If the timer fired early, reread the TSC to account for the
* overhead of the above adjustment to avoid waiting longer
* than is necessary.
*/
if (guest_tsc < tsc_deadline)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
}
adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);

/*
* If the timer fired early, reread the TSC to account for the overhead
* of the above adjustment to avoid waiting longer than is necessary.
*/
if (guest_tsc < tsc_deadline)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
@ -2812,7 +2820,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;

@ -2845,13 +2853,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
if (timer_advance_ns == -1) {
if (lapic_timer_advance)
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
lapic_timer_advance_dynamic = true;
} else {
apic->lapic_timer.timer_advance_ns = timer_advance_ns;
lapic_timer_advance_dynamic = false;
}

/*
* Stuff the APIC ENABLE bit in lieu of temporarily incrementing
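As a rough illustration of the adaptive "timer advance" idea described in the comment above, here is an editor's sketch in plain, standalone C. All names and bounds are hypothetical; it is not KVM's code, only the general scheme of arming the timer early and nudging the advance toward the observed error.

/*
 * Illustrative sketch only (hypothetical names, arbitrary cap).
 * Arm the timer "advance_ns" early; after each expiration, move the
 * advance a fraction of the way toward the measured early/late error.
 */
#include <stdint.h>

static uint64_t advance_ns = 1000;	/* start near LAPIC_TIMER_ADVANCE_NS_INIT */

/* Arm the host timer early by the current advance. */
static uint64_t arm_time(uint64_t guest_deadline_ns)
{
	return guest_deadline_ns > advance_ns ? guest_deadline_ns - advance_ns : 0;
}

/* delta_ns > 0: interrupt arrived late; delta_ns < 0: it arrived early. */
static void adjust_advance(int64_t delta_ns)
{
	int64_t next = (int64_t)advance_ns + delta_ns / 8;	/* move 1/8th of the error */

	if (next < 0)
		next = 0;
	if (next > 10000)	/* arbitrary upper bound for the sketch */
		next = 10000;
	advance_ns = (uint64_t)next;
}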
@ -85,7 +85,7 @@ struct kvm_lapic {

struct dest_map;

int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void)
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
return xchg(sptep, spte);
}

@ -4101,6 +4104,22 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
return leaf;
}

static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
int leaf;

walk_shadow_page_lockless_begin(vcpu);

if (is_tdp_mmu_active(vcpu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level);
else
leaf = get_walk(vcpu, addr, sptes, root_level);

walk_shadow_page_lockless_end(vcpu);
return leaf;
}

/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
@ -4109,15 +4128,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf, level;
bool reserved = false;

walk_shadow_page_lockless_begin(vcpu);

if (is_tdp_mmu_active(vcpu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes, &root);

walk_shadow_page_lockless_end(vcpu);

leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
if (unlikely(leaf < 0)) {
*sptep = 0ull;
return reserved;
@ -4400,9 +4411,6 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
return RET_PF_EMULATE;
}

fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();

/*
* Check for a relevant mmu_notifier invalidation event before getting
* the pfn from the primary MMU, and before acquiring mmu_lock.
@ -5921,6 +5929,22 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
{
u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
int root_level, leaf, level;

leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level);
if (unlikely(leaf < 0))
return;

pr_err("%s %llx", msg, gpa);
for (level = root_level; level >= leaf; level--)
pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
pr_cont("\n");
}
EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);

static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, hpa_t root_hpa)
{
@ -3,6 +3,8 @@
#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include <asm/vmx.h>

#include "mmu.h"
#include "mmu_internal.h"

@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte)
return !!(pte & SPTE_MMU_PRESENT_MASK);
}

static inline bool is_ept_ve_possible(u64 spte)
{
return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) &&
!(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
(spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
}

/*
* Returns true if A/D bits are supported in hardware and are enabled by KVM.
* When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
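To make the new predicate concrete, here is an editor's standalone C sketch of what is_ept_ve_possible() guards against: an EPT entry that is neither a misconfiguration nor marked "suppress #VE" can cause the CPU to deliver a #VE straight into the guest. The bit values below are assumptions for the sketch, not taken from the kernel headers.

/*
 * Editor's illustration only; constants are assumed, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SUPPRESS_VE_BIT	(1ULL << 63)	/* assumed suppress-#VE bit position */
#define RWX_MASK	0x7ULL		/* read/write/execute permission bits */
#define MISCONFIG_WX	0x6ULL		/* write+execute without read: misconfig pattern */

static bool ve_possible(uint64_t spte)
{
	return !(spte & SUPPRESS_VE_BIT) && (spte & RWX_MASK) != MISCONFIG_WX;
}

int main(void)
{
	printf("zeroed SPTE:            %d\n", ve_possible(0));			/* 1: would #VE */
	printf("suppress-#VE set:       %d\n", ve_possible(SUPPRESS_VE_BIT));	/* 0 */
	printf("MMIO misconfig pattern: %d\n", ve_possible(MISCONFIG_WX));	/* 0 */
	return 0;
}

This is why the series above stops zapping SPTEs to 0 and uses SHADOW_NONPRESENT_VALUE instead: a plain zero entry loses the suppress-#VE bit.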
@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)

static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
return xchg(rcu_dereference(sptep), new_spte);
}

static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
@ -626,7 +626,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* SPTEs.
*/
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
0, iter->level, true);
SHADOW_NONPRESENT_VALUE, iter->level, true);

return 0;
}
@ -779,6 +779,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
*/
fpstate_set_confidential(&vcpu->arch.guest_fpu);
vcpu->arch.guest_state_protected = true;

/*
* SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
* only after setting guest_state_protected because KVM_SET_MSRS allows
* dynamic toggling of LBRV (for performance reason) on write access to
* MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
*/
svm_enable_lbrv(vcpu);
return 0;
}

@ -2406,6 +2414,12 @@ void __init sev_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;

if (!lbrv) {
WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
"LBRV must be present for SEV-ES support");
goto out;
}

/* Has the system been allocated ASIDs for SEV-ES? */
if (min_sev_asid == 1)
goto out;
@ -3216,7 +3230,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;

svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

/*
* An SEV-ES guest requires a VMSA area that is a separate from the
@ -3268,10 +3281,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Clear intercepts on selected MSRs */
set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

void sev_init_vmcb(struct vcpu_svm *svm)
@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
{ .index = MSR_IA32_DEBUGCTLMSR, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@ -215,7 +216,7 @@ int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);

if (sev_es_guest(vcpu->kvm))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);

/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);

svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@ -2822,10 +2828,24 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
return 0;
}

static bool
sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
return sev_es_guest(vcpu->kvm) &&
vcpu->arch.guest_state_protected &&
svm_msrpm_offset(msr_info->index) != MSR_INVALID &&
!msr_write_intercepted(vcpu, msr_info->index);
}

static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (sev_es_prevent_msr_access(vcpu, msr_info)) {
msr_info->data = 0;
return -EINVAL;
}

switch (msr_info->index) {
case MSR_AMD64_TSC_RATIO:
if (!msr_info->host_initiated &&
@ -2976,6 +2996,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)

u32 ecx = msr->index;
u64 data = msr->data;

if (sev_es_prevent_msr_access(vcpu, msr))
return -EINVAL;

switch (ecx) {
case MSR_AMD64_TSC_RATIO:

@ -3846,16 +3870,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);

/*
* KVM should never request an NMI window when vNMI is enabled, as KVM
* allows at most one to-be-injected NMI and one pending NMI, i.e. if
* two NMIs arrive simultaneously, KVM will inject one and set
* V_NMI_PENDING for the other. WARN, but continue with the standard
* single-step approach to try and salvage the pending NMI.
* If NMIs are outright masked, i.e. the vCPU is already handling an
* NMI, and KVM has not yet intercepted an IRET, then there is nothing
* more to do at this time as KVM has already enabled IRET intercepts.
* If KVM has already intercepted IRET, then single-step over the IRET,
* as NMIs aren't architecturally unmasked until the IRET completes.
*
* If vNMI is enabled, KVM should never request an NMI window if NMIs
* are masked, as KVM allows at most one to-be-injected NMI and one
* pending NMI. If two NMIs arrive simultaneously, KVM will inject one
* NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
* unmasked. KVM _will_ request an NMI window in some situations, e.g.
* if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
* inject the NMI. In those situations, KVM needs to single-step over
* the STI shadow or intercept STGI.
*/
WARN_ON_ONCE(is_vnmi_enabled(svm));
if (svm_get_nmi_mask(vcpu)) {
WARN_ON_ONCE(is_vnmi_enabled(svm));

if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
return; /* IRET will cause a vm exit */
if (!svm->awaiting_iret_completion)
return; /* IRET will cause a vm exit */
}

/*
* SEV-ES guests are responsible for signaling when a vCPU is ready to
@ -5265,6 +5300,12 @@ static __init int svm_hardware_setup(void)

nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);

if (lbrv) {
if (!boot_cpu_has(X86_FEATURE_LBRV))
lbrv = false;
else
pr_info("LBR virtualization supported\n");
}
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@ -5318,14 +5359,6 @@ static __init int svm_hardware_setup(void)
svm_x86_ops.set_vnmi_pending = NULL;
}


if (lbrv) {
if (!boot_cpu_has(X86_FEATURE_LBRV))
lbrv = false;
else
pr_info("LBR virtualization supported\n");
}

if (!enable_pmu)
pr_info("PMU virtualization is disabled\n");
@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS 47
#define MAX_DIRECT_ACCESS_MSRS 48
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
@ -39,6 +39,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;

/*
* Clean bits in VMCB.
@ -552,6 +553,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
@ -2242,6 +2242,9 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
vmcs_write64(EPT_POINTER,
construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));

if (vmx->ve_info)
vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info));

/* All VMFUNCs are currently emulated through L0 vmexits. */
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
@ -6230,6 +6233,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_alignment_check(intr_info) &&
!vmx_guest_inject_ac(vcpu))
return true;
else if (is_ve_fault(intr_info))
return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
@ -5218,8 +5218,15 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (is_invalid_opcode(intr_info))
return handle_ud(vcpu);

if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
return -EIO;
if (WARN_ON_ONCE(is_ve_fault(intr_info))) {
struct vmx_ve_information *ve_info = vmx->ve_info;

WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION,
"Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason);
dump_vmcs(vcpu);
kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE");
return 1;
}

error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
@ -164,15 +164,6 @@ module_param(kvmclock_periodic_sync, bool, 0444);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);

/*
* lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
* adaptive tuning starting from default advancement of 1000ns. '0' disables
* advancement entirely. Any other value is used as-is and disables adaptive
* tuning, i.e. allows privileged userspace to set an exact advancement time.
*/
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, 0644);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, 0444);

@ -12169,7 +12160,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (r < 0)
return r;

r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
@ -44,7 +44,11 @@
or %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
.if \size != 8
jae .Lbad_get_user
.else
jae .Lbad_get_user_8
.endif
sbb %edx, %edx /* array_index_mask_nospec() */
and %edx, %eax
.endif
@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
bad_get_user_8:
.Lbad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
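For readers less familiar with the cmp/sbb/and sequence above, here is an editor's sketch of the same branchless masking idiom in standalone C. The limit value and names are illustrative, not the kernel's; the point is that the mask is all-ones when the address is below the limit and all-zeroes otherwise, so a mispredicted path cannot carry a kernel address forward.

/*
 * Editor's sketch of the "array_index_mask_nospec()" style masking.
 */
#include <stdint.h>

static inline uint32_t mask_user_address(uint32_t addr, uint32_t limit)
{
	/* mask = 0xffffffff when addr < limit, else 0 (mirrors cmp; sbb; and) */
	uint32_t mask = 0u - (uint32_t)(addr < limit);

	return addr & mask;
}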
@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
for_each_reserved_mem_region(mb_region) {
int nid = memblock_get_region_node(mb_region);

if (nid != MAX_NUMNODES)
if (nid != NUMA_NO_NODE)
node_set(nid, reserved_nodemask);
}

@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
MAX_NUMNODES));
NUMA_NO_NODE));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
MAX_NUMNODES));
NUMA_NO_NODE));
/* In case that parsing SRAT failed. */
WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
numa_reset_distance();
@ -144,16 +144,38 @@ void bio_integrity_free(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;

if (bip->bip_flags & BIP_INTEGRITY_USER)
return;
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(bvec_virt(bip->bip_vec));
else if (bip->bip_flags & BIP_INTEGRITY_USER)
bio_integrity_unmap_user(bip);

__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}

/**
* bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
* @bio: bio containing bip to be unmapped and freed
*
* Description: Used to unmap and free the user mapped integrity portion of a
* bio. Submitter attaching the user integrity buffer is responsible for
* unmapping and freeing it during completion.
*/
void bio_integrity_unmap_free_user(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;

if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
return;
bio_integrity_unmap_user(bip);
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}
EXPORT_SYMBOL(bio_integrity_unmap_free_user);

/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
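Based on the kernel-doc above, the calling convention for the new export is that the code that mapped a user integrity buffer unmaps and frees it itself at completion time. The following is only an editor's sketch of that pattern; the completion callback and surrounding driver are hypothetical, not taken from this commit.

/*
 * Hypothetical completion path illustrating bio_integrity_unmap_free_user().
 */
static void sketch_endio(struct bio *bio)
{
	/* the submitter that attached the user integrity buffer tears it down */
	if (bio_integrity(bio))
		bio_integrity_unmap_free_user(bio);

	bio_put(bio);
}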
@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
list_move_tail(&rq->queuelist, pending);
list_add_tail(&rq->queuelist, pending);
break;

case REQ_FSEQ_DATA:
@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);

BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
@ -1552,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)

void disk_free_zone_resources(struct gendisk *disk)
{
if (!disk->zone_wplugs_pool)
return;

cancel_work_sync(&disk->zone_wplugs_work);

if (disk->zone_wplugs_wq) {
|
||||
&key_type_user, key_name, true);
|
||||
|
||||
if (IS_ERR(kref))
|
||||
ret = PTR_ERR(kref);
|
||||
return PTR_ERR(kref);
|
||||
|
||||
key = key_ref_to_ptr(kref);
|
||||
down_read(&key->sem);
|
||||
|
@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
dev_name(&adev->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(adev, event, (u32) ac->state);
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
power_supply_changed(ac->charger);
}
}

@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
power_supply_changed(ac->charger);

return 0;
}
@ -191,6 +191,10 @@ void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);

void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);

acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];

/* Local prototypes */

static void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id);

static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/

static void
void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}

ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)

/*******************************************************************************
*
* FUNCTION: acpi_execute_orphan_reg_method
*
* PARAMETERS: device - Handle for the device
* space_id - The address space ID
*
* RETURN: Status
*
* DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
* device. This is a _REG method that has no corresponding region
* within the device's scope.
*
******************************************************************************/
acpi_status
acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
{
struct acpi_namespace_node *node;
acpi_status status;

ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);

/* Parameter validation */

if (!device) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}

status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}

/* Convert and validate the device handle */

node = acpi_ns_validate_handle(device);
if (node) {

/*
* If an "orphan" _REG method is present in the device's scope
* for the given address space ID, run it.
*/

acpi_ev_execute_orphan_reg_method(node, space_id);
} else {
status = AE_BAD_PARAMETER;
}

(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
@ -909,7 +909,7 @@ static void __exit einj_exit(void)
if (einj_initialized)
platform_driver_unregister(&einj_driver);

platform_device_del(einj_dev);
platform_device_unregister(einj_dev);
}

module_init(einj_init);
@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);

for (i = 0; i < bytes; ++i, ++address, ++value)
for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
if (result < 0)
break;
}

if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
default:
case 0:
return AE_OK;
default:
return AE_ERROR;
}
}

@ -1502,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,

if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
if (scope_handle != ec->handle)
acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);

set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context)
if (sbs->charger_exists) {
acpi_ac_get_present(sbs);
if (sbs->charger_present != saved_charger_state)
kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
power_supply_changed(sbs->charger);
}

if (sbs->manager_present) {
@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context)
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
power_supply_changed(bat->bat);
}
}
}
@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)

static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
int temp;

if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;

return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
if (temp <= 0)
return THERMAL_TEMP_INVALID;

return temp;
}

static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
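A short worked example of the conversion involved, as an editor's standalone C sketch: to the best of my understanding the units helper computes (deci_kelvin - offset) * 100, so firmware readings at or below the kelvin offset come out non-positive, which the new "temp <= 0" check above maps to THERMAL_TEMP_INVALID. Treat the formula as an assumption for the sketch, not a quotation of the kernel header.

/*
 * Editor's sketch of deci-kelvin to millidegree-Celsius conversion.
 */
#include <stdio.h>

static long dk_to_mc(long deci_kelvin, long offset)
{
	return (deci_kelvin - offset) * 100;	/* assumed behaviour of the helper */
}

int main(void)
{
	/* 3031 dK with a 2731 dK offset -> 30000 m°C (30.0 °C) */
	printf("%ld\n", dk_to_mc(3031, 2731));
	/* a bogus 0 dK reading goes negative and would now be rejected */
	printf("%ld\n", dk_to_mc(0, 2731));
	return 0;
}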
@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}

/*
* AMD systems from Renoir and Lucienne *require* that the NVME controller
* AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
* but this property was introduced after many of these systems launched
* and most OEM systems don't have it in their BIOS.
* but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
* a hardcoded allowlist for D3 support, which was used for these platforms.
* a hardcoded allowlist for D3 support as well as a registry key to override
* the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
disk in the system.
* disk in the system.
* 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
* NVME device.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
{}
};

bool force_storage_d3(void)
{
return x86_match_cpu(storage_d3_cpu_ids);
if (!cpu_feature_enabled(X86_FEATURE_ZEN))
return false;
return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}

/*
@ -1831,11 +1831,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};

/* set scsi removable (RMB) bit per ata bit, or if the
* AHCI port says it's external (Hotplug-capable, eSATA).
/*
* Set the SCSI Removable Media Bit (RMB) if the ATA removable media
* device bit (obsolete since ATA-8 ACS) is set.
*/
if (ata_id_removable(args->id) ||
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);

if (args->dev->class == ATA_DEV_ZAC) {
@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
/* Not sure what the real max is but we know it's less than 64K, let's
* use 64K minus 256
/*
* The SCSI core requires the segment size to cover at least a page, so
* for 64K page size kernels this must be at least 64K. However the
* hardware can't handle 64K, so pata_macio_qc_prep() will split large
* requests.
*/
.max_segment_size = MAX_DBDMA_SEG,
.max_segment_size = SZ_64K,
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
Some files were not shown because too many files have changed in this diff.