Merge branch 'sched/urgent' into sched/core, to pick up fixes and refresh the branch

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2024-07-11 10:42:33 +02:00
commit 011b1134b8
1104 changed files with 12765 additions and 6682 deletions

View File

@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2

View File

@ -72,6 +72,8 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
Andy Shevchenko <andy@kernel.org> <andy@smile.org.ua>
Andy Shevchenko <andy@kernel.org> <ext-andriy.shevchenko@nokia.com>
Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
@ -217,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@ -337,10 +340,11 @@ Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Kees Cook <kees@kernel.org> <kees.cook@canonical.com>
Kees Cook <kees@kernel.org> <keescook@chromium.org>
Kees Cook <kees@kernel.org> <keescook@google.com>
Kees Cook <kees@kernel.org> <kees@outflux.net>
Kees Cook <kees@kernel.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
@ -604,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>

View File

@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
LiveCD-based tutorials are available at
http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html
http://tomoyo.sourceforge.jp/1.8/centos6-live.html
https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html
https://tomoyo.sourceforge.net/1.8/centos6-live.html
Though these tutorials use the non-LSM version of TOMOYO, they are useful
for understanding what TOMOYO is.
@ -21,45 +21,32 @@ How to enable TOMOYO?
Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
kernel's command line.
Please see http://tomoyo.osdn.jp/2.5/ for details.
Please see https://tomoyo.sourceforge.net/2.6/ for details.
Where is documentation?
=======================
User <-> Kernel interface documentation is available at
https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
https://tomoyo.sourceforge.net/2.6/policy-specification/index.html .
Materials we prepared for seminars and symposiums are available at
https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
https://sourceforge.net/projects/tomoyo/files/docs/ .
The lists below are grouped by three aspects.
What is TOMOYO?
TOMOYO Linux Overview
https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf
TOMOYO Linux: pragmatic and manageable security for Linux
https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf
TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf
What can TOMOYO do?
Deep inside TOMOYO Linux
https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf
The role of "pathname based access control" in security.
https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf
History of TOMOYO?
Realities of Mainlining
https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
What is future plan?
====================
We believe that inode based security and name based security are complementary
and both should be used together. But unfortunately, so far, we cannot enable
multiple LSM modules at the same time. We feel sorry that you have to give up
SELinux/SMACK/AppArmor etc. when you want to use TOMOYO.
We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM
version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ .
LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning
to port non-LSM version's functionalities to LSM versions.
https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf

View File

@ -1921,6 +1921,28 @@
Format:
<bus_id>,<clkrate>
i2c_touchscreen_props= [HW,ACPI,X86]
Set device-properties for ACPI-enumerated I2C-attached
touchscreen, to e.g. fix coordinates of upside-down
mounted touchscreens. If you need this option please
submit a drivers/platform/x86/touchscreen_dmi.c patch
adding a DMI quirk for this.
Format:
<ACPI_HW_ID>:<prop_name>=<val>[:prop_name=val][:...]
Where <val> is one of:
Omit "=<val>" entirely Set a boolean device-property
Unsigned number Set a u32 device-property
Anything else Set a string device-property
Examples (split over multiple lines):
i2c_touchscreen_props=GDIX1001:touchscreen-inverted-x:
touchscreen-inverted-y
i2c_touchscreen_props=MSSL1680:touchscreen-size-x=1920:
touchscreen-size-y=1080:touchscreen-inverted-y:
firmware-name=gsl1680-vendor-model.fw:silead,home-button
i8042.debug [HW] Toggle i8042 debug mode
i8042.unmask_kbd_data
[HW] Enable printing of interrupt data from the KBD port
@ -2170,12 +2192,6 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
it was mlock'ed and not explicitly munlock'ed
afterwards.
Format: 0 | 1
Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can

View File

@ -467,11 +467,11 @@ anon_fault_fallback_charge
instead falls back to using huge pages with lower orders or
small pages even though the allocation was successful.
anon_swpout
swpout
is incremented every time a huge page is swapped out in one
piece without splitting.
anon_swpout_fallback
swpout_fallback
is incremented if a huge page has to be split before swapout.
This is usually because the kernel failed to allocate some contiguous
swap space for the huge page.

View File

@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
Misaligned accesses
-------------------
Misaligned accesses are supported in userspace, but they may perform poorly.
Misaligned scalar accesses are supported in userspace, but they may perform
poorly. Misaligned vector accesses are only supported if the Zicclsm extension
is supported.
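
Userspace can query how misaligned scalar accesses perform through the
riscv_hwprobe() syscall documented in this file. A minimal sketch, assuming
a kernel that reports RISCV_HWPROBE_KEY_CPUPERF_0::

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* */

  int main(void)
  {
  	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

  	/* cpusetsize == 0 and cpus == NULL query all online CPUs. */
  	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
  		return 1;

  	switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
  	case RISCV_HWPROBE_MISALIGNED_FAST:
  		puts("misaligned scalar accesses are fast");
  		break;
  	case RISCV_HWPROBE_MISALIGNED_SLOW:
  	case RISCV_HWPROBE_MISALIGNED_EMULATED:
  		puts("supported, but slow or emulated");
  		break;
  	default:
  		puts("unknown or unsupported");
  	}
  	return 0;
  }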

View File

@ -217,7 +217,7 @@ current *struct* is::
int (*media_changed)(struct cdrom_device_info *, int);
int (*tray_move)(struct cdrom_device_info *, int);
int (*lock_door)(struct cdrom_device_info *, int);
int (*select_speed)(struct cdrom_device_info *, int);
int (*select_speed)(struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
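
For a driver implementing the corrected callback, the change is mechanical;
a minimal sketch, where the mydrv_* names are hypothetical::

  #include <linux/cdrom.h>

  /* Matches the corrected prototype: the speed argument is unsigned long. */
  static int mydrv_select_speed(struct cdrom_device_info *cdi,
  				unsigned long speed)
  {
  	/* speed == 0 conventionally restores the drive's maximum speed. */
  	return mydrv_set_speed(cdi, speed);	/* hypothetical helper */
  }

  static struct cdrom_device_ops mydrv_dops = {
  	.select_speed	= mydrv_select_speed,
  	/* ... other operations ... */
  };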
@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
::
int select_speed(struct cdrom_device_info *cdi, int speed)
int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
Some CD-ROM drives are capable of changing their head-speed. There
are several reasons for changing the speed of a CD-ROM drive. Badly

View File

@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
Dynamic swiotlb
---------------
When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
the amount of memory available for allocation as bounce buffers. If a bounce
buffer request fails due to lack of available space, an asynchronous background
task is kicked off to allocate memory from general system memory and turn it

View File

@ -54,11 +54,10 @@ unevaluatedProperties: false
examples:
- |
mlahb: ahb@38000000 {
ahb {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x10000000 0x40000>;
ranges;
dma-ranges = <0x00000000 0x38000000 0x10000>,
<0x10000000 0x10000000 0x60000>,

View File

@ -57,17 +57,17 @@ properties:
- const: allwinner,sun8i-v3s
- description: Anbernic RG35XX (2024)
- items:
items:
- const: anbernic,rg35xx-2024
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX Plus
- items:
items:
- const: anbernic,rg35xx-plus
- const: allwinner,sun50i-h700
- description: Anbernic RG35XX H
- items:
items:
- const: anbernic,rg35xx-h
- const: allwinner,sun50i-h700

View File

@ -59,8 +59,8 @@ properties:
- 3
dma-channels:
minItems: 1
maxItems: 64
minimum: 1
maximum: 64
clocks:
minItems: 1

View File

@ -77,7 +77,7 @@ required:
- clocks
allOf:
- $ref: i2c-controller.yaml
- $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:

View File

@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.
allOf:
- $ref: i2c-controller.yaml#
- $ref: /schemas/i2c/i2c-controller.yaml#
properties:
compatible:

View File

@ -139,7 +139,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0

View File

@ -18,9 +18,12 @@ allOf:
properties:
compatible:
enum:
- elan,ekth6915
- ilitek,ili2901
oneOf:
- items:
- enum:
- elan,ekth5015m
- const: elan,ekth6915
- const: elan,ekth6915
reg:
const: 0x10
@ -33,6 +36,12 @@ properties:
reset-gpios:
description: Reset GPIO; not all touchscreens using eKTH6915 hook this up.
no-reset-on-power-off:
type: boolean
description:
Reset line is wired so that it can (and should) be left deasserted when
the power supply is off.
vcc33-supply:
description: The 3.3V supply to the touchscreen.
@ -58,8 +67,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
ap_ts: touchscreen@10 {
compatible = "elan,ekth6915";
touchscreen@10 {
compatible = "elan,ekth5015m", "elan,ekth6915";
reg = <0x10>;
interrupt-parent = <&tlmm>;

View File

@ -0,0 +1,66 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/input/ilitek,ili2901.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ilitek ILI2901 touchscreen controller
maintainers:
- Jiri Kosina <jkosina@suse.com>
description:
Supports the Ilitek ILI2901 touchscreen controller.
This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
allOf:
- $ref: /schemas/input/touchscreen/touchscreen.yaml#
properties:
compatible:
enum:
- ilitek,ili2901
reg:
maxItems: 1
interrupts:
maxItems: 1
panel: true
reset-gpios:
maxItems: 1
vcc33-supply: true
vccio-supply: true
required:
- compatible
- reg
- interrupts
- vcc33-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
touchscreen@41 {
compatible = "ilitek,ili2901";
reg = <0x41>;
interrupt-parent = <&tlmm>;
interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
vcc33-supply = <&pp3300_ts>;
};
};

View File

@ -24,6 +24,7 @@ properties:
managers:
type: object
additionalProperties: false
description:
List of the PD69208T4/PD69204T4/PD69208M PSE managers. Each manager
have 4 or 8 physical ports according to the chip version. No need to
@ -47,8 +48,9 @@ properties:
- "#size-cells"
patternProperties:
"^manager@0[0-9a-b]$":
"^manager@[0-9a-b]$":
type: object
additionalProperties: false
description:
PD69208T4/PD69204T4/PD69208M PSE manager exposing 4 or 8 physical
ports.
@ -69,9 +71,14 @@ properties:
patternProperties:
'^port@[0-7]$':
type: object
additionalProperties: false
properties:
reg:
maxItems: 1
required:
- reg
additionalProperties: false
required:
- reg

View File

@ -29,13 +29,31 @@ properties:
of the ports conversion matrix that establishes relationship between
the logical ports and the physical channels.
type: object
additionalProperties: false
properties:
"#address-cells":
const: 1
"#size-cells":
const: 0
patternProperties:
'^channel@[0-7]$':
type: object
additionalProperties: false
properties:
reg:
maxItems: 1
required:
- reg
required:
- "#address-cells"
- "#size-cells"
unevaluatedProperties: false
required:

View File

@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true
required:
- peer-hub

View File

@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<!-- Updated to inclusive terminology by Wolfram Sang -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
@ -1120,7 +1121,7 @@
<rect
style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
id="rect4424-3-2-9-7"
width="112.5"
width="134.5"
height="113.75008"
x="112.5"
y="471.11221"
@ -1133,15 +1134,15 @@
y="521.46259"
id="text4349"><tspan
sodipodi:role="line"
x="167.5354"
x="178.5354"
y="521.46259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1273">I2C</tspan><tspan
sodipodi:role="line"
x="167.5354"
x="178.5354"
y="552.71259"
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
id="tspan1285">Master</tspan></text>
id="tspan1285">Controller</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3"
@ -1171,7 +1172,7 @@
x="318.59131"
y="552.08752"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287">Slave</tspan></text>
id="tspan1287">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
@ -1233,7 +1234,7 @@
x="468.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287-6">Slave</tspan></text>
id="tspan1287-6">Target</tspan></text>
<rect
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
id="rect4424-3-2-9-7-3-3-5-3-1"
@ -1258,7 +1259,7 @@
x="618.59131"
y="552.08746"
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
id="tspan1287-9">Slave</tspan></text>
id="tspan1287-9">Target</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
d="m 150,583.61221 v 93.75"


View File

@ -3,29 +3,27 @@ Introduction to I2C and SMBus
=============================
I²C (pronounce: I squared C and written I2C in the kernel documentation) is
a protocol developed by Philips. It is a slow two-wire protocol (variable
speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides
a protocol developed by Philips. It is a two-wire protocol with variable
speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides
an inexpensive bus for connecting many types of devices with infrequent or
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
The latest official I2C specification is the `"I2C-bus specification and user
manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
published by NXP Semiconductors. However, you need to log-in to the site to
access the PDF. An older version of the specification (revision 6) is archived
`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
The latest official I2C specification is the `"I²C-bus specification and user
manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
published by NXP Semiconductors, version 7 as of this writing.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
a subset of I2C protocols and signaling. Many I2C devices will work on an
SMBus, but some SMBus protocols add semantics beyond what is required to
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
devices connected through SMBus are RAM modules configured using I2C EEPROMs,
and hardware monitoring chips.
Because the SMBus is mostly a subset of the generalized I2C bus, we can
use its protocols on many I2C systems. However, there are systems that don't
use its protocols on many I2C systems. However, there are systems that don't
meet both SMBus and I2C electrical constraints; and others which can't
implement all the common SMBus protocol semantics or messages.
@ -33,29 +31,52 @@ implement all the common SMBus protocol semantics or messages.
Terminology
===========
Using the terminology from the official documentation, the I2C bus connects
one or more *master* chips and one or more *slave* chips.
The I2C bus connects one or more controller chips and one or more target chips.
.. kernel-figure:: i2c_bus.svg
:alt: Simple I2C bus with one master and 3 slaves
:alt: Simple I2C bus with one controller and 3 targets
Simple I2C bus
A **master** chip is a node that starts communications with slaves. In the
Linux kernel implementation it is called an **adapter** or bus. Adapter
drivers are in the ``drivers/i2c/busses/`` subdirectory.
A **controller** chip is a node that starts communications with targets. In the
Linux kernel implementation it is also called an "adapter" or "bus". Controller
drivers are usually in the ``drivers/i2c/busses/`` subdirectory.
An **algorithm** contains general code that can be used to implement a
whole class of I2C adapters. Each specific adapter driver either depends on
an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
its own implementation.
An **algorithm** contains general code that can be used to implement a whole
class of I2C controllers. Each specific controller driver either depends on an
algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its
own implementation.
A **slave** chip is a node that responds to communications when addressed
by the master. In Linux it is called a **client**. Client drivers are kept
in a directory specific to the feature they provide, for example
``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
A **target** chip is a node that responds to communications when addressed by a
controller. In the Linux kernel implementation it is also called a "client".
While targets are usually separate external chips, Linux can also act as a
target (needs hardware support) and respond to another controller on the bus.
This is then called a **local target**. In contrast, an external chip is called
a **remote target**.
Target drivers are kept in a directory specific to the feature they provide,
for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
video-related chips.
For the example configuration in figure, you will need a driver for your
I2C adapter, and drivers for your I2C devices (usually one driver for each
device).
For the example configuration in the figure above, you will need one driver for
the I2C controller, and drivers for your I2C targets. Usually one driver for
each target.
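
To make the controller/target split concrete, here is a minimal sketch of a
target ("client") driver against the in-kernel I2C API; the "foo" device
name is hypothetical::

  #include <linux/i2c.h>
  #include <linux/module.h>

  static int foo_probe(struct i2c_client *client)
  {
  	/* The core has matched this driver to a target at client->addr. */
  	dev_info(&client->dev, "foo target at 0x%02x\n", client->addr);
  	return 0;
  }

  static const struct i2c_device_id foo_ids[] = {
  	{ "foo" },
  	{ }
  };
  MODULE_DEVICE_TABLE(i2c, foo_ids);

  static struct i2c_driver foo_driver = {
  	.driver		= { .name = "foo" },
  	.probe		= foo_probe,
  	.id_table	= foo_ids,
  };
  module_i2c_driver(foo_driver);

  MODULE_DESCRIPTION("Minimal example I2C target driver");
  MODULE_LICENSE("GPL");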
Synonyms
--------
As mentioned above, the Linux I2C implementation historically uses the terms
"adapter" for controller and "client" for target. A number of data structures
have these synonyms in their name. So, when discussing implementation details,
you should be aware of these terms as well. The official wording is preferred,
though.
Outdated terminology
--------------------
In earlier I2C specifications, controller was named "master" and target was
named "slave". These terms have been obsoleted with v7 of the specification and
their use is also discouraged by the Linux Kernel Code of Conduct. You may
still find them in references to documentation which has not been updated. The
general attitude, however, is to use the inclusive terms: controller and
target. Work to replace the old terminology in the Linux Kernel is on-going.

View File

@ -150,6 +150,12 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.
If "select" <symbol> is followed by "if" <expr>, <symbol> will be
selected by the logical AND of the value of the current menu symbol
and <expr>. This means the lower limit can be downgraded due to the
presence of "if" <expr>. This behavior may seem weird, but we rely on
it. (The future of this behavior is undecided.)
- weak reverse dependencies: "imply" <symbol> ["if" <expr>]
This is similar to "select" as it enforces a lower limit on another
@ -184,7 +190,7 @@ applicable everywhere (see syntax).
ability to hook into a secondary subsystem while allowing the user to
configure that subsystem out without also having to unset these drivers.
Note: If the combination of FOO=y and BAR=m causes a link error,
Note: If the combination of FOO=y and BAZ=m causes a link error,
you can guard the function call with IS_REACHABLE()::
foo_init()
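  {
  	/*
  	 * Sketch of the guard the note describes; the original literal
  	 * block is cut off at this hunk boundary, and baz_register()
  	 * is illustrative rather than the file's actual body.
  	 */
  	if (IS_REACHABLE(CONFIG_BAZ))
  		baz_register(&foo);
  	...
  }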
@ -202,6 +208,10 @@ applicable everywhere (see syntax).
imply BAR
imply BAZ
Note: If "imply" <symbol> is followed by "if" <expr>, the default of <symbol>
will be the logical AND of the value of the current menu symbol and <expr>.
(The future of this behavior is undecided.)
- limiting menu display: "visible if" <expr>
This attribute is only applicable to menu blocks, if the condition is

View File

@ -349,6 +349,10 @@ attribute-sets:
Number of packets dropped due to transient lack of resources, such as
buffer space, host descriptors etc.
type: uint
-
name: rx-csum-complete
doc: Number of packets that were marked as CHECKSUM_COMPLETE.
type: uint
-
name: rx-csum-unnecessary
doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.

View File

@ -123,8 +123,6 @@ operations:
doc: dump pending nfsd rpc
attribute-set: rpc-status
dump:
pre: nfsd-nl-rpc-status-get-start
post: nfsd-nl-rpc-status-get-done
reply:
attributes:
- xid

View File

@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
In this case, it is possible to use the NIC's packet steering
capabilities to steer the packets to the right queue. This is not
possible in the previous example as there is only one queue shared
among sockets, so the NIC cannot do this steering as it can only steer
between queues.
There is no need to supply an XDP program like the one in the previous
case where sockets were bound to the same queue id and
device. Instead, use the NIC's packet steering capabilities to steer
the packets to the right queue. In the previous example, there is only
one queue shared among sockets, so the NIC cannot do this steering. It
can only steer between queues.
In libxdp (or libbpf prior to version 1.0), you need to use the
xsk_socket__create_shared() API as it takes a reference to a FILL ring
and a COMPLETION ring that will be created for you and bound to the
shared UMEM. You can use this function for all the sockets you create,
or you can use it for the second and following ones and use
xsk_socket__create() for the first one. Both methods yield the same
result.
In libbpf, you need to use the xsk_socket__create_shared() API as it
takes a reference to a FILL ring and a COMPLETION ring that will be
created for you and bound to the shared UMEM. You can use this
function for all the sockets you create, or you can use it for the
second and following ones and use xsk_socket__create() for the first
one. Both methods yield the same result.
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
devices at the same time. It is also possible to redirect to any
socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
devices at the same time.
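
A minimal sketch of the shared-UMEM setup described above, using the libxdp
API; the interface name "eth0", the queue ids, and a previously configured
umem are assumptions::

  #include <xdp/xsk.h>	/* <bpf/xsk.h> in libbpf before 1.0 */

  static struct xsk_ring_cons rx0, rx1, comp1;
  static struct xsk_ring_prod tx0, tx1, fill1;

  static int make_shared_pair(struct xsk_umem *umem,
  			      struct xsk_socket **xsk0,
  			      struct xsk_socket **xsk1)
  {
  	int err;

  	/* The first socket uses the FILL/COMPLETION rings registered
  	 * with the UMEM itself. */
  	err = xsk_socket__create(xsk0, "eth0", 0, umem, &rx0, &tx0, NULL);
  	if (err)
  		return err;

  	/* Each further socket gets its own FILL and COMPLETION rings,
  	 * bound to the shared UMEM by xsk_socket__create_shared(). */
  	return xsk_socket__create_shared(xsk1, "eth0", 1, umem, &rx1, &tx1,
  					 &fill1, &comp1, NULL);
  }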
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
Note that if you are using the XDP_SHARED_UMEM option, it is
possible to switch traffic between any socket bound to the same
umem.
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into

View File

@ -227,7 +227,7 @@ preferably including links to previous postings, for example::
The amount of mooing will depend on packet rate so should match
the diurnal cycle quite well.
Signed-of-by: Joe Defarmer <joe@barn.org>
Signed-off-by: Joe Defarmer <joe@barn.org>
---
v3:
- add a note about time-of-day mooing fluctuation to the commit message

View File

@ -32,6 +32,7 @@ Security-related interfaces
seccomp_filter
landlock
lsm
mfd_noexec
spec_ctrl
tee

View File

@ -582,7 +582,7 @@ depending on the hardware. In all cases, however, only routes that have the
Devices generating the streams may allow enabling and disabling some of the
routes or have a fixed routing configuration. If the routes can be disabled, not
declaring the routes (or declaring them without
``VIDIOC_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will
disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes
back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE``
flag unset.

View File

@ -0,0 +1,86 @@
.. SPDX-License-Identifier: GPL-2.0
==================================
Introduction of non-executable mfd
==================================
:Author:
Daniel Verkamp <dverkamp@chromium.org>
Jeff Xu <jeffxu@chromium.org>
:Contributor:
Aleksa Sarai <cyphar@cyphar.com>
Since Linux introduced the memfd feature, memfds have always had their
execute bit set, and the memfd_create() syscall doesn't allow setting
it differently.
However, in a secure-by-default system such as ChromeOS (where all
executables should come from the rootfs, which is protected by verified
boot), the executable nature of memfd opens a door for NoExec bypass
and enables “confused deputy” attacks. E.g., in VRP bug [1] the cros_vm
process created a memfd to share content with an external process, but
the memfd was overwritten and used to execute arbitrary code and
escalate to root. [2] lists more VRP reports of this kind.
On the other hand, executable memfds have a legitimate use: runc uses the
memfd sealing and executable features to copy the contents of a binary and
then execute it. For such a system, we need a solution that differentiates
runc's use of executable memfds from an attacker's [3].
To address those above:
- Let memfd_create() set X bit at creation time.
- Let memfd be sealed for modifying X bit when NX is set.
- Add a new pid namespace sysctl: vm.memfd_noexec to help applications in
migrating and enforcing non-executable MFD.
User API
========
``int memfd_create(const char *name, unsigned int flags)``
``MFD_NOEXEC_SEAL``
When MFD_NOEXEC_SEAL bit is set in the ``flags``, memfd is created
with NX. F_SEAL_EXEC is set and the memfd can't be modified to
add X later. MFD_ALLOW_SEALING is also implied.
This is the most common case for the application to use memfd.
``MFD_EXEC``
When MFD_EXEC bit is set in the ``flags``, memfd is created with X.
Note:
``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. If an app doesn't
want sealing, it can add F_SEAL_SEAL after creation.
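
A minimal sketch of the common case; it assumes kernel and libc headers
recent enough to define MFD_NOEXEC_SEAL::

  #define _GNU_SOURCE
  #include <sys/mman.h>
  #include <stdio.h>

  int main(void)
  {
  	/* Non-executable, and sealed against re-adding X later;
  	 * MFD_ALLOW_SEALING is implied by MFD_NOEXEC_SEAL. */
  	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_NOEXEC_SEAL);

  	if (fd < 0) {
  		perror("memfd_create");
  		return 1;
  	}
  	/* execveat()/fexecve() on fd will now fail: F_SEAL_EXEC is set. */
  	return 0;
  }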
Sysctl:
========
``pid namespaced sysctl vm.memfd_noexec``
The new pid namespaced sysctl vm.memfd_noexec has 3 values:
- 0: MEMFD_NOEXEC_SCOPE_EXEC
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_EXEC was set.
- 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_NOEXEC_SEAL was set.
- 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
memfd_create() without MFD_NOEXEC_SEAL will be rejected.
The sysctl allows finer control of memfd_create for old software that
doesn't set the executable bit; for example, a container with
vm.memfd_noexec=1 means the old software will create non-executable memfd
by default while new software can create executable memfd by setting
MFD_EXEC.
The value of vm.memfd_noexec is passed to the child namespace at creation
time. In addition, the setting is hierarchical: during memfd_create(), we
search from the current namespace up to the root namespace and use the
most restrictive setting.
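
For example, a container manager could pin its pid namespace to mode 1
before launching legacy software; a sketch::

  #include <stdio.h>

  int main(void)
  {
  	/* vm.memfd_noexec is exposed per pid namespace via procfs. */
  	FILE *f = fopen("/proc/sys/vm/memfd_noexec", "w");

  	if (!f) {
  		perror("fopen");
  		return 1;
  	}
  	/* 1 == MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL: legacy callers get
  	 * sealed, non-executable memfds by default. */
  	fputs("1", f);
  	fclose(f);
  	return 0;
  }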
[1] https://crbug.com/1305267
[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1
[3] https://lwn.net/Articles/781013/

View File

@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
space code performs the same algorithm of reading the TSC and
applying the scale and offset to get the constant 10 MHz clock.
Linux clockevents are based on Hyper-V synthetic timer 0. While
Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
/proc/interrupts. Clockevents based on the virtualized PIT and
local APIC timer also work, but the Hyper-V synthetic timer is
preferred.
Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. In older versions of Hyper-V, an interrupt from stimer0
results in a VMBus control message that is demultiplexed by
vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
documentation. In newer versions of Hyper-V, stimer0 interrupts can
be mapped to an architectural interrupt, which is referred to as
"Direct Mode". Linux prefers to use Direct Mode when available. Since
x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
and explicitly codes it to call the stimer0 interrupt handler. Hence
interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
rather than being associated with a Linux IRQ. Clockevents based on the
virtualized PIT and local APIC timer also work, but Hyper-V stimer0
is preferred.
The driver for the Hyper-V synthetic system clock and timers is
drivers/clocksource/hyperv_timer.c.

View File

@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
arm64, these synthetic registers must be accessed using explicit
hypercalls.
* VMbus: VMbus is a higher-level software construct that is built on
* VMBus: VMBus is a higher-level software construct that is built on
the other 3 mechanisms. It is a message passing interface between
the Hyper-V host and the Linux guest. It uses memory that is shared
between Hyper-V and the guest, along with various signaling
@ -54,8 +54,8 @@ x86/x64 architecture only.
.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs
VMbus is not documented. This documentation provides a high-level
overview of VMbus and how it works, but the details can be discerned
VMBus is not documented. This documentation provides a high-level
overview of VMBus and how it works, but the details can be discerned
only from the code.
Sharing Memory
@ -74,7 +74,7 @@ follows:
physical address space. How Hyper-V is told about the GPA or list
of GPAs varies. In some cases, a single GPA is written to a
synthetic register. In other cases, a GPA or list of GPAs is sent
in a VMbus message.
in a VMBus message.
* Hyper-V translates the GPAs into "real" physical memory addresses,
and creates a virtual mapping that it can use to access the memory.
@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
any hot-add CPUs.
A Linux guest CPU may be taken offline using the normal Linux
mechanisms, provided no VMbus channel interrupts are assigned to
the CPU. See the section on VMbus Interrupts for more details
on how VMbus channel interrupts can be re-assigned to permit
mechanisms, provided no VMBus channel interrupts are assigned to
the CPU. See the section on VMBus Interrupts for more details
on how VMBus channel interrupts can be re-assigned to permit
taking a CPU offline.
32-bit and 64-bit
@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
via flags in synthetic MSRs that Hyper-V provides to the guest,
and the guest code tests these flags.
VMbus has its own protocol version that is negotiated during the
initial VMbus connection from the guest to Hyper-V. This version
VMBus has its own protocol version that is negotiated during the
initial VMBus connection from the guest to Hyper-V. This version
number is also output to dmesg during boot. This version number
is checked in a few places in the code to determine if specific
functionality is present.
Furthermore, each synthetic device on VMbus also has a protocol
version that is separate from the VMbus protocol version. Device
Furthermore, each synthetic device on VMBus also has a protocol
version that is separate from the VMBus protocol version. Device
drivers for these synthetic devices typically negotiate the device
protocol version, and may test that protocol version to determine
if specific device functionality is present.

View File

@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
VMbus
VMBus
=====
VMbus is a software construct provided by Hyper-V to guest VMs. It
VMBus is a software construct provided by Hyper-V to guest VMs. It
consists of a control path and common facilities used by synthetic
devices that Hyper-V presents to guest VMs. The control path is
used to offer synthetic devices to the guest VM and, in some cases,
@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
signaling primitives to allow Hyper-V and the guest to interrupt
each other.
VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
establishes the VMbus control path with the Hyper-V host, then
VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
establishes the VMBus control path with the Hyper-V host, then
registers itself as a Linux bus driver. It implements the standard
bus functions for adding and removing devices to/from the bus.
@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
the synthetic SCSI controller is "storvsc". These drivers contain
functions with names like "storvsc_connect_to_vsp".
VMbus channels
VMBus channels
--------------
An instance of a synthetic device uses VMbus channels to communicate
An instance of a synthetic device uses VMBus channels to communicate
between the VSP and the VSC. Channels are bi-directional and used
for passing messages. Most synthetic devices use a single channel,
but the synthetic SCSI controller and synthetic NIC may use multiple
@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
actual ring. The size of the ring is determined by the VSC in the
guest and is specific to each synthetic device. The list of GPAs
making up the ring is communicated to the Hyper-V host over the
VMbus control path as a GPA Descriptor List (GPADL). See function
VMBus control path as a GPA Descriptor List (GPADL). See function
vmbus_establish_gpadl().
Each ring buffer is mapped into contiguous Linux kernel virtual
@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
approximately 1280 Mbytes. For versions prior to Windows Server
2019, the limit is approximately 384 Mbytes.
VMbus messages
--------------
All VMbus messages have a standard header that includes the message
length, the offset of the message payload, some flags, and a
VMBus channel messages
----------------------
All messages sent in a VMBus channel have a standard header that includes
the message length, the offset of the message payload, some flags, and a
transactionID. The portion of the message after the header is
unique to each VSP/VSC pair.
@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
buffer. For example, the storvsc driver uses this approach to
specify the data buffers to/from which disk I/O is done.
Three functions exist to send VMbus messages:
Three functions exist to send VMBus channel messages:
1. vmbus_sendpacket(): Control-only messages and messages with
embedded data -- no GPAs
@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
and valid messages, and Linux drivers for synthetic devices did not
fully validate messages. With the introduction of processor
technologies that fully encrypt guest memory and that allow the
guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
the Hyper-V host is no longer a valid assumption. The drivers for
VMbus synthetic devices are being updated to fully validate any
VMBus synthetic devices are being updated to fully validate any
values read from memory that is shared with Hyper-V, which includes
messages from VMbus devices. To facilitate such validation,
messages from VMBus devices. To facilitate such validation,
messages read by the guest from the "in" ring buffer are copied to a
temporary buffer that is not shared with Hyper-V. Validation is
performed in this temporary buffer without the risk of Hyper-V
maliciously modifying the message after it is validated but before
it is used.
VMbus interrupts
Synthetic Interrupt Controller (synic)
--------------------------------------
Hyper-V provides each guest CPU with a synthetic interrupt controller
that is used by VMBus for host-guest communication. While each synic
defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
(VMBUS_MESSAGE_SINT). All interrupts related to communication between
the Hyper-V host and a guest CPU use that SINT.
The SINT is mapped to a single per-CPU architectural interrupt (i.e.,
an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
each CPU in the guest has a synic and may receive VMBus interrupts,
they are best modeled in Linux as per-CPU interrupts. This model works
well on arm64 where a single per-CPU Linux IRQ is allocated for
VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
across all CPUs and explicitly coded to call vmbus_isr(). In this case,
there's no Linux IRQ, and the interrupts are visible in aggregate in
/proc/interrupts on the "HYP" line.
The synic provides the means to demultiplex the architectural interrupt into
one or more logical interrupts and route the logical interrupt to the proper
VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
related functions that access synic data structures.
The synic is not modeled in Linux as an irq chip or irq domain,
and the demultiplexed logical interrupts are not Linux IRQs. As such,
they don't appear in /proc/interrupts or /proc/irq. The CPU
affinity for one of these logical interrupts is controlled via an
entry under /sys/bus/vmbus as described below.
VMBus interrupts
----------------
VMbus provides a mechanism for the guest to interrupt the host when
VMBus provides a mechanism for the guest to interrupt the host when
the guest has queued new messages in a ring buffer. The host
expects that the guest will send an interrupt only when an "out"
ring buffer transitions from empty to non-empty. If the guest sends
@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
interrupts, the host may throttle that guest by suspending its
execution for a few seconds to prevent a denial-of-service attack.
Similarly, the host will interrupt the guest when it sends a new
message on the VMbus control path, or when a VMbus channel "in" ring
buffer transitions from empty to non-empty. Each CPU in the guest
may receive VMbus interrupts, so they are best modeled as per-CPU
interrupts in Linux. This model works well on arm64 where a single
per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
per-CPU IRQs, an x86 interrupt vector is statically allocated (see
HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
call the VMbus interrupt service routine. These interrupts are
visible in /proc/interrupts on the "HYP" line.
Similarly, the host will interrupt the guest via the synic when
it sends a new message on the VMBus control path, or when a VMBus
channel "in" ring buffer transitions from empty to non-empty due to
the host inserting a new VMBus channel message. The control message stream
and each VMBus channel "in" ring buffer are separate logical interrupts
that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
bitmap to determine which channels have pending interrupts on this CPU.
If multiple channels have pending interrupts for this CPU, they are
processed sequentially. When all channel interrupts have been processed,
vmbus_isr() checks for and processes any messages received on the VMBus
control path.
The guest CPU that a VMbus channel will interrupt is selected by the
The guest CPU that a VMBus channel will interrupt is selected by the
guest when the channel is created, and the host is informed of that
selection. VMbus devices are broadly grouped into two categories:
selection. VMBus devices are broadly grouped into two categories:
1. "Slow" devices that need only one VMbus channel. The devices
1. "Slow" devices that need only one VMBus channel. The devices
(such as keyboard, mouse, heartbeat, and timesync) generate
relatively few interrupts. Their VMbus channels are all
relatively few interrupts. Their VMBus channels are all
assigned to interrupt the VMBUS_CONNECT_CPU, which is always
CPU 0.
2. "High speed" devices that may use multiple VMbus channels for
2. "High speed" devices that may use multiple VMBus channels for
higher parallelism and performance. These devices include the
synthetic SCSI controller and synthetic NIC. Their VMbus
synthetic SCSI controller and synthetic NIC. Their VMBus
channels interrupts are assigned to CPUs that are spread out
among the available CPUs in the VM so that interrupts on
multiple channels can be processed in parallel.
The assignment of VMbus channel interrupts to CPUs is done in the
The assignment of VMBus channel interrupts to CPUs is done in the
function init_vp_index(). This assignment is done outside of the
normal Linux interrupt affinity mechanism, so the interrupts are
neither "unmanaged" nor "managed" interrupts.
The CPU that a VMbus channel will interrupt can be seen in
The CPU that a VMBus channel will interrupt can be seen in
/sys/bus/vmbus/devices/<deviceGUID>/channels/<channelRelID>/cpu.
When running on later versions of Hyper-V, the CPU can be changed
by writing a new value to this sysfs entry. Because the interrupt
assignment is done outside of the normal Linux affinity mechanism,
there are no entries in /proc/irq corresponding to individual
VMbus channel interrupts.
by writing a new value to this sysfs entry. Because VMBus channel
interrupts are not Linux IRQs, there are no entries in /proc/interrupts
or /proc/irq corresponding to individual VMBus channel interrupts.
An online CPU in a Linux guest may not be taken offline if it has
VMbus channel interrupts assigned to it. Any such channel
VMBus channel interrupts assigned to it. Any such channel
interrupts must first be manually reassigned to another CPU as
described above. When no channel interrupts are assigned to the
CPU, it can be taken offline.
When a guest CPU receives a VMbus interrupt from the host, the
function vmbus_isr() handles the interrupt. It first checks for
channel interrupts by calling vmbus_chan_sched(), which looks at a
bitmap setup by the host to determine which channels have pending
interrupts on this CPU. If multiple channels have pending
interrupts for this CPU, they are processed sequentially. When all
channel interrupts have been processed, vmbus_isr() checks for and
processes any message received on the VMbus control path.
The VMbus channel interrupt handling code is designed to work
The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
CPU assigned to the channel. Specifically, the code does not use
CPU-based exclusion for correctness. In normal operation, Hyper-V
@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
even if there is a time lag before Hyper-V starts interrupting the
new CPU. See comments in target_cpu_store().
VMbus device creation/deletion
VMBus device creation/deletion
------------------------------
Hyper-V and the Linux guest have a separate message-passing path
that is used for synthetic device creation and deletion. This
path does not use a VMbus channel. See vmbus_post_msg() and
path does not use a VMBus channel. See vmbus_post_msg() and
vmbus_on_msg_dpc().
The first step is for the guest to connect to the generic
Hyper-V VMbus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMbus protocol version they will
Hyper-V VMBus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMBus protocol version they will
use. This negotiation allows newer Linux kernels to run on older
Hyper-V versions, and vice versa.
The guest then tells Hyper-V to "send offers". Hyper-V sends an
offer message to the guest for each synthetic device that the VM
is configured to have. Each VMbus device type has a fixed GUID
known as the "class ID", and each VMbus device instance is also
is configured to have. Each VMBus device type has a fixed GUID
known as the "class ID", and each VMBus device instance is also
identified by a GUID. The offer message from Hyper-V contains
both GUIDs to uniquely (within the VM) identify the device.
There is one offer message for each device instance, so a VM with
@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
the device. Driver/device matching is performed using the standard
Linux mechanism.
The device driver probe function opens the primary VMbus channel to
The device driver probe function opens the primary VMBus channel to
the corresponding VSP. It allocates guest memory for the channel
ring buffers and shares the ring buffer with the Hyper-V host by
giving the host a list of GPAs for the ring buffer memory. See
@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
setup messages via the primary channel. These messages may include
negotiating the device protocol version to be used between the Linux
VSC and the VSP on the Hyper-V host. The setup messages may also
include creating additional VMbus channels, which are somewhat
include creating additional VMBus channels, which are somewhat
mis-named as "sub-channels" since they are functionally
equivalent to the primary channel once they are created.

View File

@ -1107,7 +1107,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/admin-guide/pm/amd-pstate.rst
F: drivers/cpufreq/amd-pstate*
F: include/linux/amd-pstate.h
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
@ -3854,6 +3853,7 @@ BPF JIT for ARM64
M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Puranjay Mohan <puranjay@kernel.org>
R: Xu Kuohai <xukuohai@huaweicloud.com>
L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
@ -3980,7 +3980,7 @@ R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@kernel.org>
R: Stanislav Fomichev <sdf@google.com>
R: Stanislav Fomichev <sdf@fomichev.me>
R: Hao Luo <haoluo@google.com>
R: Jiri Olsa <jolsa@kernel.org>
L: bpf@vger.kernel.org
@ -5187,7 +5187,6 @@ F: Documentation/devicetree/bindings/media/i2c/chrontel,ch7322.yaml
F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: James Schulman <james.schulman@cirrus.com>
M: David Rhodes <david.rhodes@cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@ -5296,7 +5295,7 @@ F: drivers/infiniband/hw/usnic/
CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
L: llvm@lists.linux.dev
S: Supported
@ -8212,7 +8211,7 @@ F: rust/kernel/net/phy.rs
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
R: Kees Cook <keescook@chromium.org>
R: Kees Cook <kees@kernel.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
@ -8613,7 +8612,7 @@ S: Maintained
F: drivers/net/ethernet/nvidia/*
FORTIFY_SOURCE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -9103,7 +9102,7 @@ F: include/linux/mfd/gsc.h
F: include/linux/platform_data/gsc_hwmon.h
GCC PLUGINS
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -9237,7 +9236,7 @@ S: Maintained
F: drivers/input/touchscreen/resistive-adc-touch.c
GENERIC STRING LIBRARY
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@ -11035,8 +11034,8 @@ F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
M: Lucas De Marchi <lucas.demarchi@intel.com>
M: Oded Gabbay <ogabbay@kernel.org>
M: Thomas Hellström <thomas.hellstrom@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-xe@lists.freedesktop.org
S: Supported
W: https://drm.pages.freedesktop.org/intel-docs/
@ -11951,7 +11950,7 @@ F: scripts/package/
F: usr/
KERNEL HARDENING (not covered by other areas)
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Gustavo A. R. Silva <gustavoars@kernel.org>
L: linux-hardening@vger.kernel.org
S: Supported
@ -12383,7 +12382,6 @@ F: drivers/video/backlight/ktz8866.c
KVM PARAVIRT (KVM/paravirt)
M: Paolo Bonzini <pbonzini@redhat.com>
R: Wanpeng Li <wanpengli@tencent.com>
R: Vitaly Kuznetsov <vkuznets@redhat.com>
L: kvm@vger.kernel.org
S: Supported
@ -12479,7 +12477,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tycho Andersen <tycho@tycho.pizza>
R: Kees Cook <keescook@chromium.org>
R: Kees Cook <kees@kernel.org>
L: linux-hardening@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@ -12775,7 +12773,7 @@ F: arch/powerpc/platforms/8xx/
F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Maintained
F: drivers/misc/lkdtm/*
F: tools/testing/selftests/lkdtm/*
@ -12905,7 +12903,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/usb/dvb-usb-v2/lmedm04*
LOADPIN SECURITY MODULE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/LoadPin.rst
@ -15238,7 +15236,6 @@ F: drivers/staging/most/
F: include/linux/most.h
MOTORCOMM PHY DRIVER
M: Peter Geis <pgwipeout@gmail.com>
M: Frank <Frank.Sae@motor-comm.com>
L: netdev@vger.kernel.org
S: Maintained
@ -15827,7 +15824,7 @@ F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
M: Trond Myklebust <trondmy@kernel.org>
M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
@ -17998,7 +17995,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
M: Joel Granados <j.granados@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@ -18054,7 +18051,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
PSTORE FILESYSTEM
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Tony Luck <tony.luck@intel.com>
R: Guilherme G. Piccoli <gpiccoli@igalia.com>
L: linux-hardening@vger.kernel.org
@ -20060,7 +20057,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
F: drivers/media/cec/platform/seco/seco-cec.h
SECURE COMPUTING
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Andy Lutomirski <luto@amacapital.net>
R: Will Drewry <wad@chromium.org>
S: Supported
@ -21316,7 +21313,7 @@ F: arch/riscv/boot/dts/starfive/
STARFIVE DWMAC GLUE LAYER
M: Emil Renner Berthing <kernel@esmil.dk>
M: Samin Guo <samin.guo@starfivetech.com>
M: Minda Chen <minda.chen@starfivetech.com>
S: Maintained
F: Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@ -22679,7 +22676,7 @@ L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English)
L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese)
S: Maintained
W: https://tomoyo.osdn.jp/
W: https://tomoyo.sourceforge.net/
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
@ -22974,7 +22971,7 @@ F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
UBSAN
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
R: Marco Elver <elver@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Andrey Ryabinin <ryabinin.a.a@gmail.com>
@ -23976,7 +23973,6 @@ VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
R: Christoph Hellwig <hch@infradead.org>
R: Lorenzo Stoakes <lstoakes@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@ -24812,7 +24808,7 @@ F: drivers/net/hamradio/yam*
F: include/linux/yam.h
YAMA SECURITY MODULE
M: Kees Cook <keescook@chromium.org>
M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/Yama.rst

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*

View File

@ -39,7 +39,7 @@
/************** Functions that the back-end must provide **************/
/* Extension for 32-bit operations. */
inline u8 zext(u8 *buf, u8 rd);
u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);

View File

@ -62,7 +62,7 @@ enum {
* If/when we decide to add ARCv2 instructions that do use register pairs,
* the mapping, hopefully, doesn't need to be revisited.
*/
const u8 bpf2arc[][2] = {
static const u8 bpf2arc[][2] = {
/* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = {ARC_R_8, ARC_R_9},
/* Arguments from eBPF program to in-kernel function */
@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
/************* Packers (Deal with BPF_REGs) **************/
inline u8 zext(u8 *buf, u8 rd)
u8 zext(u8 *buf, u8 rd)
{
if (rd != BPF_REG_FP)
return arc_movi_r(buf, REG_HI(rd), 0);
@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
break;
}
} else {
/*
@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
break;
default:
/* The caller must have handled this. */
break;
}
}
@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
#define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
#define JCC64_INSNS_TO_END 3 /* Number of insn. inclusive the 2nd jmp to end. */
#define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
const struct {
static const struct {
/*
* "jit_off" is common between all "jmp[]" and is coupled with
* "cond" of each "jmp[]" instance. e.g.:
@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
* The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
* instruction that precedes the conditional branch.
*/
const u8 arcv2_32_jmps[ARC_CC_LAST] = {
static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
[ARC_CC_UGT] = CC_great_u,
[ARC_CC_UGE] = CC_great_eq_u,
[ARC_CC_ULT] = CC_less_u,

View File

@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
memset(ctx, 0, sizeof(ctx));
memset(ctx, 0, sizeof(*ctx));
ctx->orig_prog = prog;
@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
ctx->prog = bpf_jit_blind_constants(prog);
if (IS_ERR(ctx->prog))
return PTR_ERR(ctx->prog);
ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
ctx->blinded = (ctx->prog != ctx->orig_prog);
/* If the verifier doesn't zero-extend, then we have to do it. */
ctx->do_zext = !ctx->prog->aux->verifier_zext;
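The memset fix above corrects a classic C pitfall: sizeof(ctx) measures the pointer, not the structure behind it, so only the first pointer-sized chunk of the context was being zeroed. A minimal standalone sketch (hypothetical struct, not the real jit_context) showing the difference on a 64-bit target:

#include <stdio.h>
#include <string.h>

struct demo_ctx {                       /* hypothetical stand-in for jit_context */
	int len;
	int index;
	char scratch[64];
};

int main(void)
{
	struct demo_ctx c;
	struct demo_ctx *ctx = &c;

	memset(&c, 0xff, sizeof(c));    /* dirty the whole struct */
	memset(ctx, 0, sizeof(ctx));    /* bug: zeroes only sizeof(void *) bytes */
	printf("buggy: scratch[0] = %#x\n", (unsigned char)ctx->scratch[0]);

	memset(ctx, 0, sizeof(*ctx));   /* fix: zeroes the whole struct */
	printf("fixed: scratch[0] = %#x\n", (unsigned char)ctx->scratch[0]);
	return 0;
}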
@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
}
/*
* All the "handle_*()" functions have been called before by the
* "jit_prepare()". If there was an error, we would know by now.
* Therefore, no extra error checking at this point, other than
* a sanity check at the end that expects the calculated length
* (jit.len) to be equal to the length of generated instructions
* (jit.index).
* jit_compile() is the real compilation phase. jit_prepare() is
* invoked before jit_compile() as a dry run to make sure everything
* will go OK and to allocate the necessary memory.
*
* In the end, jit_compile() checks if it has produced the same number
* of instructions as jit_prepare() would.
*/
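The comment above describes a common two-pass JIT shape: a measuring dry run, then the real emit, with a cross-check that both passes agree on the length. A hedged sketch of that pattern (toy emitter, not the ARC back-end):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Each helper writes only when a buffer is provided and always returns
 * the advanced offset, so one code path both measures and emits. */
static size_t emit_byte(uint8_t *buf, size_t off, uint8_t op)
{
	if (buf)
		buf[off] = op;
	return off + 1;
}

static size_t emit_prog(uint8_t *buf)
{
	size_t off = 0;

	off = emit_byte(buf, off, 0x90);   /* placeholder opcodes */
	off = emit_byte(buf, off, 0xc3);
	return off;
}

int main(void)
{
	size_t len = emit_prog(NULL);      /* "jit_prepare": dry run, measure */
	uint8_t *buf = malloc(len);

	if (!buf)
		return 1;
	assert(emit_prog(buf) == len);     /* "jit_compile": emit, cross-check */
	free(buf);
	return 0;
}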
static int jit_compile(struct jit_context *ctx)
{
@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
/*
* This function may be invoked twice for the same stream of BPF
* instructions. The "extra pass" happens, when there are "call"s
* involved that their addresses are not known during the first
* invocation.
* instructions. The "extra pass" happens when there are
* (re)locations involved whose addresses are not known
* during the first run.
*/
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{

View File

@ -85,7 +85,7 @@ led-user {
};
};
panel {
panel_dpi: panel {
compatible = "sii,43wvf1g";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display_power>;

View File

@ -10,8 +10,6 @@
/plugin/;
&{/} {
/delete-node/ panel;
hdmi: connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
@ -82,6 +80,10 @@ sii9022_out: endpoint {
};
};
&panel_dpi {
status = "disabled";
};
&tve {
status = "disabled";
};

View File

@ -14,6 +14,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#ifdef CONFIG_EFI
void efi_init(void);
@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
#define arch_efi_call_virt_setup() efi_virtmap_load()
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
#ifdef CONFIG_CPU_TTBR0_PAN
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({ \
unsigned int flags = uaccess_save_and_enable(); \
efi_status_t res = _Generic((p)->f(args), \
efi_status_t: (p)->f(args), \
default: ((p)->f(args), EFI_ABORTED)); \
uaccess_restore(flags); \
res; \
})
#endif
#define ARCH_EFI_IRQ_FLAGS_MASK \
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
PSR_T_BIT | MODE_MASK)
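The arch_efi_call_virt() override above leans on C11 _Generic to cope with EFI services that return void: only the selected branch is evaluated, so the call happens exactly once, and void-returning services substitute a dummy status. A hedged userspace sketch of that dispatch (service functions and status value are hypothetical):

#include <stdio.h>

typedef unsigned long efi_status_t;
#define DEMO_EFI_ABORTED 21UL              /* hypothetical status value */

static efi_status_t get_time(void)  { return 0; }
static void reset_system(void)      { }

/* If "call" yields an efi_status_t, return it; otherwise evaluate it
 * for its side effect and substitute the dummy status. The controlling
 * expression of _Generic is never evaluated, so no double call. */
#define CALL_STATUS(call)                          \
	_Generic((call),                           \
		 efi_status_t: (call),             \
		 default: ((call), DEMO_EFI_ABORTED))

int main(void)
{
	printf("get_time     -> %lu\n", CALL_STATUS(get_time()));
	printf("reset_system -> %lu\n", CALL_STATUS(reset_system()));
	return 0;
}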

View File

@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
err_out:
return;
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
/* FP points one word below parent's top of stack */
frame_pointer += 4;
/*
* Usually, the stack frames are contiguous in memory but cases
* have been observed where the next stack frame does not live
* at 'frame_pointer + 4' as this code used to assume.
*
* Instead, dereference the field in the stack frame that
* stores the SP of the calling frame: to avoid unbounded
* recursion, this cannot involve any ftrace instrumented
* functions, so use the __get_kernel_nofault() primitive
* directly.
*/
__get_kernel_nofault(&frame_pointer,
(unsigned long *)(frame_pointer - 8),
unsigned long, err_out);
} else {
struct stackframe frame = {
.fp = frame_pointer,

View File

@ -6,6 +6,7 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mm.dtsi"
#include "imx8mm-overdrive.dtsi"
/ {
chosen {
@ -935,7 +936,7 @@ pinctrl_gpio8: gpio8grp {
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
pinctrl_gpio_9_dsi: gpio9dsigrp {
fsl,pins =
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
};
/* Verdin GPIO_10_DSI (pulled-up as active-low) */

View File

@ -254,7 +254,7 @@ tc_bridge: bridge@f {
<&clk IMX8MP_CLK_CLKOUT2>,
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-rates = <13000000>, <13000000>, <156000000>;
assigned-clock-rates = <13000000>, <13000000>, <208000000>;
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";

View File

@ -219,7 +219,7 @@ &uart3 {
bluetooth {
compatible = "brcm,bcm4330-bt";
shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
};
};

View File

@ -36,7 +36,7 @@ reg_usdhc2_vmmc: usdhc2-vmmc {
regulator-name = "SD1_SPWR";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};

View File

@ -296,7 +296,6 @@ &usdhc2 {
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
status = "okay";
no-sdio;
no-mmc;
};

View File

@ -146,7 +146,7 @@
/* Coprocessor traps */
.macro __init_el2_cptr
__check_hvhe .LnVHE_\@, x1
mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
mov x0, #CPACR_ELx_FPEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
@ -277,7 +277,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
orr x0, x0, #CPACR_ELx_ZEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
@ -298,7 +298,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
orr x0, x0, #CPACR_ELx_SMEN
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@

View File

@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* emit the large TLP from the CPU.
*/
static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
const u32 *from, size_t count)
static __always_inline void
__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
size_t count)
{
switch (count) {
case 8:
@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
size_t count)
static __always_inline void
__iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
if (__builtin_constant_p(count) &&
(count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned32(to, from, count);
dgh();
} else {
__iowrite32_copy_full(to, from, count);
}
}
#define __iowrite32_copy __iowrite32_copy
#define __iowrite32_copy(to, from, count) \
(__builtin_constant_p(count) ? \
__const_iowrite32_copy(to, from, count) : \
__iowrite32_copy_full(to, from, count))
static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
const u64 *from, size_t count)
static __always_inline void
__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
size_t count)
{
switch (count) {
case 8:
@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
size_t count)
static __always_inline void
__iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
if (__builtin_constant_p(count) &&
(count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned64(to, from, count);
dgh();
} else {
__iowrite64_copy_full(to, from, count);
}
}
#define __iowrite64_copy(to, from, count) \
(__builtin_constant_p(count) ? \
__const_iowrite64_copy(to, from, count) : \
__iowrite64_copy_full(to, from, count))
#define __iowrite64_copy __iowrite64_copy
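The rewrite above replaces the ternary macros with __always_inline functions: with inlining forced, __builtin_constant_p(count) still folds at compile time, so constant sizes take the unrolled store path and everything else falls through to the out-of-line copy. A hedged sketch of the pattern (hypothetical helpers, not the arm64 implementation):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void copy_full(unsigned int *to, const unsigned int *from, size_t count)
{
	memcpy(to, from, count * sizeof(*to));   /* generic fallback path */
}

static inline __attribute__((always_inline))
void copy32(unsigned int *to, const unsigned int *from, size_t count)
{
	/* Takes the fast branch only when count is a compile-time
	 * constant with a supported value; otherwise falls back. */
	if (__builtin_constant_p(count) && (count == 1 || count == 2))
		memcpy(to, from, count * sizeof(*to));
	else
		copy_full(to, from, count);
}

int main(void)
{
	unsigned int src[2] = { 1, 2 }, dst[2] = { 0, 0 };

	copy32(dst, src, 2);                     /* constant count: fast path */
	printf("%u %u\n", dst[0], dst[1]);
	return 0;
}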
/*
* I/O memory mapping functions.

View File

@ -305,6 +305,12 @@
GENMASK(19, 14) | \
BIT(11))
#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \
GENMASK(27, 26) | \
GENMASK(23, 22) | \
GENMASK(19, 18) | \
GENMASK(15, 0))
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))

View File

@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
#define __build_check_all_or_none(r, bits) \
BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
#define __cpacr_to_cptr_clr(clr, set) \
({ \
u64 cptr = 0; \
\
if ((set) & CPACR_ELx_FPEN) \
cptr |= CPTR_EL2_TFP; \
if ((set) & CPACR_ELx_ZEN) \
cptr |= CPTR_EL2_TZ; \
if ((set) & CPACR_ELx_SMEN) \
cptr |= CPTR_EL2_TSM; \
if ((clr) & CPACR_ELx_TTA) \
cptr |= CPTR_EL2_TTA; \
if ((clr) & CPTR_EL2_TAM) \
cptr |= CPTR_EL2_TAM; \
if ((clr) & CPTR_EL2_TCPAC) \
cptr |= CPTR_EL2_TCPAC; \
\
cptr; \
})
#define __cpacr_to_cptr_set(clr, set) \
({ \
u64 cptr = 0; \
\
if ((clr) & CPACR_ELx_FPEN) \
cptr |= CPTR_EL2_TFP; \
if ((clr) & CPACR_ELx_ZEN) \
cptr |= CPTR_EL2_TZ; \
if ((clr) & CPACR_ELx_SMEN) \
cptr |= CPTR_EL2_TSM; \
if ((set) & CPACR_ELx_TTA) \
cptr |= CPTR_EL2_TTA; \
if ((set) & CPTR_EL2_TAM) \
cptr |= CPTR_EL2_TAM; \
if ((set) & CPTR_EL2_TCPAC) \
cptr |= CPTR_EL2_TCPAC; \
\
cptr; \
})
#define cpacr_clear_set(clr, set) \
do { \
BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
__build_check_all_or_none((clr), CPACR_ELx_FPEN); \
__build_check_all_or_none((set), CPACR_ELx_FPEN); \
__build_check_all_or_none((clr), CPACR_ELx_ZEN); \
__build_check_all_or_none((set), CPACR_ELx_ZEN); \
__build_check_all_or_none((clr), CPACR_ELx_SMEN); \
__build_check_all_or_none((set), CPACR_ELx_SMEN); \
\
if (has_vhe() || has_hvhe()) \
sysreg_clear_set(cpacr_el1, clr, set); \
else \
sysreg_clear_set(cptr_el2, \
__cpacr_to_cptr_clr(clr, set), \
__cpacr_to_cptr_set(clr, set));\
} while (0)
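cpacr_clear_set() above papers over the inverted polarity of the two trap registers: CPACR_ELx bits are enables (set means "don't trap") while CPTR_EL2 bits are traps (set means "trap"), so a CPACR "set" becomes a CPTR "clear" and vice versa. A toy sketch of that inversion, with made-up bit values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CPACR_FPEN (UINT64_C(3) << 20)  /* hypothetical enable bits */
#define DEMO_CPTR_TFP   (UINT64_C(1) << 10)  /* hypothetical trap bit */

/* Enabling FP in CPACR terms means clearing the trap in CPTR terms. */
static uint64_t cptr_clr_from(uint64_t cpacr_set)
{
	return (cpacr_set & DEMO_CPACR_FPEN) ? DEMO_CPTR_TFP : 0;
}

/* Disabling FP in CPACR terms means setting the trap in CPTR terms. */
static uint64_t cptr_set_from(uint64_t cpacr_clr)
{
	return (cpacr_clr & DEMO_CPACR_FPEN) ? DEMO_CPTR_TFP : 0;
}

int main(void)
{
	printf("enable FP:  cptr clear %#" PRIx64 "\n", cptr_clr_from(DEMO_CPACR_FPEN));
	printf("disable FP: cptr set   %#" PRIx64 "\n", cptr_set_from(DEMO_CPACR_FPEN));
	return 0;
}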
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
u64 val;
if (has_vhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
CPACR_EL1_ZEN_EL1EN);
val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
val = CPACR_ELx_FPEN;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
val |= CPACR_ELx_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;

View File

@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
@ -521,6 +522,20 @@ struct kvm_cpu_context {
u64 *vncr_array;
};
struct cpu_sve_state {
__u64 zcr_el1;
/*
* Ordering is important since __sve_save_state/__sve_restore_state
* rely on it.
*/
__u32 fpsr;
__u32 fpcr;
/* Must be SVE_VQ_BYTES (128 bit) aligned. */
__u8 sve_regs[];
};
/*
* This structure is instantiated on a per-CPU basis, and contains
* data that is:
@ -534,7 +549,15 @@ struct kvm_cpu_context {
*/
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
struct user_fpsimd_state *fpsimd_state; /* hyp VA */
/*
* All pointers in this union are hyp VA.
* sve_state is only used in pKVM and if system_supports_sve().
*/
union {
struct user_fpsimd_state *fpsimd_state;
struct cpu_sve_state *sve_state;
};
/* Ownership of the FP regs */
enum {

View File

@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
u64 __guest_enter(struct kvm_vcpu *vcpu);
@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
#endif /* __ARM64_KVM_HYP_H__ */

View File

@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
static inline size_t pkvm_host_sve_state_size(void)
{
if (!system_supports_sve())
return 0;
return size_add(sizeof(struct cpu_sve_state),
SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
#endif /* __ARM64_KVM_PKVM_H__ */
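pkvm_host_sve_state_size() sizes an allocation that ends in a flexible array whose length depends on the maximum vector length; the arithmetic is the usual header-plus-payload shape, as in this standalone sketch (demo struct and payload size are hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sve_state_demo {
	uint64_t zcr_el1;
	uint32_t fpsr;
	uint32_t fpcr;
	uint8_t  regs[];      /* flexible array member, VL-dependent */
};

int main(void)
{
	size_t payload = 64;  /* stand-in for SVE_SIG_REGS_SIZE(...) */
	size_t total = sizeof(struct sve_state_demo) + payload;
	struct sve_state_demo *s = calloc(1, total);

	if (!s)
		return 1;
	printf("header %zu + payload %zu = %zu bytes\n",
	       sizeof(*s), payload, total);
	free(s);
	return 0;
}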

View File

@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
struct insn_emulation *insn = insn_emulations[i];
bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
if (insn->status == INSN_UNAVAILABLE)
continue;
if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->name);

View File

@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>
@ -213,6 +214,7 @@ l: if (!p) {
return -ENOMEM;
}
kmemleak_not_leak(p);
efi_rt_stack_top = p + THREAD_SIZE;
return 0;
}

View File

@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
static size_t pkvm_host_sve_state_order(void)
{
return get_order(pkvm_host_sve_state_size());
}
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
static void __init teardown_hyp_mode(void)
{
bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
int cpu;
free_hyp_pgds();
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
if (free_sve) {
struct cpu_sve_state *sve_state;
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
}
}
}
@ -2398,6 +2411,58 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
static int init_pkvm_host_sve_state(void)
{
int cpu;
if (!system_supports_sve())
return 0;
/* Allocate pages for host sve state in protected mode. */
for_each_possible_cpu(cpu) {
struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
if (!page)
return -ENOMEM;
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
}
/*
* Don't map the pages in hyp since these are only used in protected
* mode, which will (re)create its own mapping when initialized.
*/
return 0;
}
/*
* Finalizes the initialization of hyp mode, once everything else is initialized
* and the initialization process cannot fail.
*/
static void finalize_init_hyp_mode(void)
{
int cpu;
if (system_supports_sve() && is_protected_kvm_enabled()) {
for_each_possible_cpu(cpu) {
struct cpu_sve_state *sve_state;
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
kern_hyp_va(sve_state);
}
} else {
for_each_possible_cpu(cpu) {
struct user_fpsimd_state *fpsimd_state;
fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
kern_hyp_va(fpsimd_state);
}
}
}
static void pkvm_hyp_init_ptrauth(void)
{
struct kvm_cpu_context *hyp_ctxt;
@ -2566,6 +2631,10 @@ static int __init init_hyp_mode(void)
goto out_err;
}
err = init_pkvm_host_sve_state();
if (err)
goto out_err;
err = kvm_hyp_init_protection(hyp_va_bits);
if (err) {
kvm_err("Failed to init hyp memory protection\n");
@ -2730,6 +2799,13 @@ static __init int kvm_arm_init(void)
if (err)
goto out_subs;
/*
* This should be called after initialization is done and failure isn't
* possible anymore.
*/
if (!in_hyp_mode)
finalize_init_hyp_mode();
kvm_arm_initialised = true;
return 0;

View File

@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
if (forward_traps(vcpu, HCR_NV))
return;
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
/* Check for an ERETAx */
esr = kvm_vcpu_get_esr(vcpu);
if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
/*
* Oh no, ERETAx failed to authenticate. If we have
* FPACCOMBINE, deliver an exception right away. If we
* don't, then let the mangled ELR value trickle down the
* Oh no, ERETAx failed to authenticate.
*
* If we have FPACCOMBINE and we don't have a pending
* Illegal Execution State exception (which has priority
* over FPAC), deliver an exception right away.
*
* Otherwise, let the mangled ELR value trickle down the
* ERET handling, and the guest will have a little surprise.
*/
if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) {
esr &= ESR_ELx_ERET_ISS_ERETA;
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
kvm_inject_nested_sync(vcpu, esr);
@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_arch_vcpu_put(vcpu);
spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
if (!esr_iss_is_eretax(esr))
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
/*
* Note that the current exception level is always the virtual EL2,
* since we set HCR_EL2.NV bit only when entering the virtual EL2.
*/
*vcpu_pc(vcpu) = elr;
*vcpu_cpsr(vcpu) = spsr;

View File

@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
fpsimd_save_and_flush_cpu_state();
}
}
/*
* If normal guests gain SME support, maintain this behavior for pKVM
* guests, which don't support SME.
*/
WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
read_sysreg_s(SYS_SVCR));
}
/*
@ -161,9 +168,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0,
CPACR_EL1_SMEN_EL0EN |
CPACR_EL1_SMEN_EL1EN);
sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,

View File

@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
case PSR_AA32_MODE_SYS:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
int i, nr_reg;
switch (*vcpu_cpsr(vcpu)) {
switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
/*
* Either we are dealing with user mode, and only the
* first 15 registers (+ PC) must be narrowed to 32bit.

View File

@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
u32 cpsr_cond;
int cond;
/* Top two bits non-zero? Unconditional. */
if (kvm_vcpu_get_esr(vcpu) >> 30)
/*
* These are the exception classes that could fire with a
* conditional instruction.
*/
switch (kvm_vcpu_trap_get_class(vcpu)) {
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:
case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_FP_ASIMD:
case ESR_ELx_EC_CP10_ID:
case ESR_ELx_EC_CP14_64:
case ESR_ELx_EC_SVC32:
break;
default:
return true;
}
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);

View File

@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_restore_state)
SYM_FUNC_START(__sve_save_state)
mov x2, #1
sve_save 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_save_state)

View File

@ -316,10 +316,24 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_restore_state(vcpu_sve_pffr(vcpu),
&vcpu->arch.ctxt.fp_regs.fpsr);
&vcpu->arch.ctxt.fp_regs.fpsr,
true);
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}
static inline void __hyp_sve_save_host(void)
{
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr,
true);
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
/*
* We trap the first access to the FP/SIMD to save the host context and
* restore the guest context lazily.
@ -330,7 +344,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
u64 reg;
if (!system_supports_fpsimd())
return false;
@ -353,24 +366,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Valid trap. Switch the context: */
/* First disable enough traps to allow us to update the registers */
if (has_vhe() || has_hvhe()) {
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (sve_guest)
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
sysreg_clear_set(cpacr_el1, 0, reg);
} else {
reg = CPTR_EL2_TFP;
if (sve_guest)
reg |= CPTR_EL2_TZ;
sysreg_clear_set(cptr_el2, reg, 0);
}
if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
else
cpacr_clear_set(0, CPACR_ELx_FPEN);
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
if (sve_guest)

View File

@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
}
void pkvm_hyp_vm_table_init(void *tbl);
void pkvm_host_fpsimd_state_init(void);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva);

View File

@ -177,6 +177,14 @@ static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
res);
}
static void ffa_rx_release(struct arm_smccc_res *res)
{
arm_smccc_1_1_smc(FFA_RX_RELEASE,
0, 0,
0, 0, 0, 0, 0,
res);
}
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
@ -543,16 +551,19 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
ffa_rx_release(res);
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
ffa_rx_release(res);
goto out_unlock;
}
buf = ffa_desc_buf.buf;
memcpy(buf, hyp_buffers.rx, fraglen);
ffa_rx_release(res);
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
@ -563,6 +574,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
fraglen = res->a3;
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
ffa_rx_release(res);
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
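The fix threads ffa_rx_release() through every path that exits while the RX buffer is held, including the early error returns. The shape of that discipline, reduced to a runnable toy (all names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool rx_busy;

static void rx_acquire(void) { rx_busy = true; }
static void rx_release(void) { rx_busy = false; }

static int reclaim(size_t len, size_t limit)
{
	rx_acquire();                  /* buffer owned from here on */
	if (len > limit) {
		rx_release();          /* error path must release too */
		return -1;
	}
	/* ... copy fragment out of the RX buffer ... */
	rx_release();                  /* release as soon as the copy is done */
	return 0;
}

int main(void)
{
	reclaim(16, 8);
	printf("rx_busy after error path: %d\n", rx_busy);  /* 0 */
	return 0;
}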

View File

@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
{
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
/*
* On saving/restoring guest sve state, always use the maximum VL for
* the guest. The layout of the data when saving the sve state depends
* on the VL, so use a consistent (i.e., the maximum) guest VL.
*/
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
}
static void __hyp_sve_restore_host(void)
{
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
/*
* On saving/restoring host sve state, always use the maximum VL for
* the host. The layout of the data when saving the sve state depends
* on the VL, so use a consistent (i.e., the maximum) host VL.
*
* Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
* supported by the system (or limited at EL3).
*/
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr,
true);
write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
}
static void fpsimd_sve_flush(void)
{
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
{
if (!guest_owns_fp_regs())
return;
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
isb();
if (vcpu_has_sve(vcpu))
__hyp_sve_save_guest(vcpu);
else
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
fpsimd_sve_flush();
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
/* Limit guest vector length to the maximum supported by the host. */
hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
unsigned int i;
fpsimd_sve_sync(&hyp_vcpu->vcpu);
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
@ -79,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
struct pkvm_hyp_vcpu *hyp_vcpu;
struct kvm *host_kvm;
/*
* KVM (and pKVM) doesn't support SME guests for now, and
* ensures that SME features aren't enabled in pstate when
* loading a vcpu. Therefore, if SME features are enabled, the
* host is misbehaving.
*/
if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
ret = -EINVAL;
goto out;
}
host_kvm = kern_hyp_va(host_vcpu->kvm);
hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
host_vcpu->vcpu_idx);
@ -405,11 +477,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
if (has_hvhe())
sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
CPACR_EL1_ZEN_EL0EN));
else
sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
cpacr_clear_set(0, CPACR_ELx_ZEN);
isb();
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
break;

View File

@ -18,6 +18,8 @@ unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
unsigned int kvm_host_sve_max_vl;
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@ -63,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
/* Trap SVE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
if (has_hvhe())
cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
cptr_clear |= CPACR_ELx_ZEN;
else
cptr_set |= CPTR_EL2_TZ;
}
@ -247,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
void pkvm_host_fpsimd_state_init(void)
{
unsigned long i;
for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
}
}
/*
* Return the hyp vm structure corresponding to the handle.
*/
@ -586,6 +577,8 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
if (ret)
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
return ret;
}

View File

@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
return 0;
}
static int pkvm_create_host_sve_mappings(void)
{
void *start, *end;
int ret, i;
if (!system_supports_sve())
return 0;
for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
struct cpu_sve_state *sve_state = host_data->sve_state;
start = kern_hyp_va(sve_state);
end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
ret = pkvm_create_mappings(start, end, PAGE_HYP);
if (ret)
return ret;
}
return 0;
}
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
unsigned long *per_cpu_base,
u32 hyp_va_bits)
@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
return ret;
}
pkvm_create_host_sve_mappings();
/*
* Map the host sections RO in the hypervisor, but transfer the
* ownership from the host to the hypervisor itself to make sure they
@ -300,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
pkvm_host_fpsimd_state_init();
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,

View File

@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
if (cpus_have_final_cap(ARM64_SME)) {
if (has_hvhe())
val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
val &= ~CPACR_ELx_SMEN;
else
val |= CPTR_EL2_TSM;
}
if (!guest_owns_fp_regs()) {
if (has_hvhe())
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
else
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
@ -182,6 +181,25 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state so as not to reveal
* that fpsimd was used by a guest or leak the upper sve bits.
*/
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
if (!vcpu_has_sve(vcpu))
cpacr_clear_set(CPACR_ELx_ZEN, 0);
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,

View File

@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA;
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
val |= CPACR_ELx_ZEN;
} else {
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
val &= ~CPACR_ELx_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@ -262,6 +261,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,

View File

@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
break;
case SYS_ID_AA64PFR1_EL1:
/* Only support SSBS */
val &= NV_FTR(PFR1, SSBS);
/* Only support BTI, SSBS, CSV2_frac */
val &= (NV_FTR(PFR1, BT) |
NV_FTR(PFR1, SSBS) |
NV_FTR(PFR1, CSV2_frac));
break;
case SYS_ID_AA64MMFR0_EL1:

View File

@ -32,6 +32,7 @@
/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
unsigned int __ro_after_init kvm_host_sve_max_vl;
/*
* ARMv8 Reset Values
@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
{
if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl();
kvm_host_sve_max_vl = sve_max_vl();
kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need

View File

@ -391,7 +391,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
vgic_v3_free_redist_region(rdreg);
vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;

View File

@ -919,8 +919,19 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
return ret;
}
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
struct kvm_vcpu *vcpu;
unsigned long c;
lockdep_assert_held(&kvm->arch.config_lock);
/* Garbage collect the region */
kvm_for_each_vcpu(c, vcpu, kvm) {
if (vcpu->arch.vgic_cpu.rdreg == rdreg)
vcpu->arch.vgic_cpu.rdreg = NULL;
}
list_del(&rdreg->list);
kfree(rdreg);
}
@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
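The new kvm parameter exists so the free routine can sweep every vcpu and null out back-pointers into the region before kfree(), closing a use-after-free window. The pattern in miniature (hypothetical types):

#include <stdlib.h>

struct region_demo { int index; };

struct vcpu_demo {
	struct region_demo *rdreg;   /* cached pointer into the region list */
};

/* Null every back-pointer that references the region, then free it. */
static void free_region(struct vcpu_demo *vcpus, int nr, struct region_demo *r)
{
	for (int i = 0; i < nr; i++)
		if (vcpus[i].rdreg == r)
			vcpus[i].rdreg = NULL;
	free(r);
}

int main(void)
{
	struct region_demo *r = malloc(sizeof(*r));
	struct vcpu_demo vcpus[2] = { { r }, { r } };

	if (!r)
		return 1;
	free_region(vcpus, 2, r);
	return vcpus[0].rdreg || vcpus[1].rdreg;  /* 0: nothing dangles */
}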

View File

@ -316,7 +316,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

View File

@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
* clearing access/dirty for the whole block.
*/
unsigned long start = addr;
unsigned long end = start + nr;
unsigned long end = start + nr * PAGE_SIZE;
if (pte_cont(__ptep_get(ptep + nr - 1)))
end = ALIGN(end, CONT_PTE_SIZE);
@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
}
__clear_young_dirty_ptes(vma, start, ptep, end - start, flags);
__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
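Both hunks above fix the same unit bug: nr counts pages, while start and end are byte addresses, so the count must be scaled by PAGE_SIZE on the way in and divided back out when passed on. In short:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long start = 0x10000, nr = 3;

	unsigned long bad_end  = start + nr;                  /* mixes pages into bytes */
	unsigned long good_end = start + nr * DEMO_PAGE_SIZE; /* scale pages to bytes */

	printf("bad end %#lx, good end %#lx\n", bad_end, good_end);
	printf("pages back out: %lu\n", (good_end - start) / DEMO_PAGE_SIZE);
	return 0;
}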

View File

@ -143,7 +143,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@ -261,6 +261,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)

View File

@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for

View File

@ -44,14 +44,14 @@ linux,cma {
&gmac0 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
bus_id = <0x0>;
};
&gmac1 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
bus_id = <0x1>;
};

View File

@ -43,7 +43,7 @@ linux,cma {
&gmac0 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy0>;
mdio {
compatible = "snps,dwmac-mdio";
@ -58,7 +58,7 @@ phy0: ethernet-phy@0 {
&gmac1 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy1>;
mdio {
compatible = "snps,dwmac-mdio";

View File

@ -92,7 +92,7 @@ phy1: ethernet-phy@1 {
&gmac2 {
status = "okay";
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&phy2>;
mdio {
compatible = "snps,dwmac-mdio";

View File

@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
#define CTRL_PLV0_ENABLE 0x02
#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,

View File

@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void set_cpuid_to_node(int cpuid, s16 node) { }
static inline int early_cpu_to_node(int cpu)
{

View File

@ -42,7 +42,7 @@
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
or \temp1, \temp1, \temp2
bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm

View File

@ -22,7 +22,7 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
.dword kernel_entry /* Kernel entry point */
.dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */

View File

@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
u32 ctrl;
u32 ctrl, privilege;
int i, max_slots, enable;
struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (arch_check_bp_in_kernelspace(info))
privilege = CTRL_PLV0_ENABLE;
else
privilege = CTRL_PLV3_ENABLE;
/* Whether bp belongs to a task. */
if (bp->hw.target)
regs = task_pt_regs(bp->hw.target);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
if (bp->hw.target)
regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
} else {
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
}
if (bp->hw.target)
regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset)
int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
if (!ctrl.len)
return -EINVAL;
*offset = __ffs(ctrl.len);
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
u64 alignment_mask, offset;
u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
alignment_mask = 0x7;
else
if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
offset = hw->address & alignment_mask;
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
hw->address &= ~alignment_mask;
}
return 0;
}
@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
bp = slots[i];
if (bp == NULL)
continue;
perf_bp_event(bp, regs);
if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
bp = slots[i];
if (bp == NULL)
continue;
perf_bp_event(bp, regs);
csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
update_bp_registers(regs, 0, 0);
}
}
update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
wp = slots[i];
if (wp == NULL)
continue;
perf_bp_event(wp, regs);
if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
wp = slots[i];
if (wp == NULL)
continue;
perf_bp_event(wp, regs);
csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
update_bp_registers(regs, 0, 1);
}
}
update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
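After the change, the handlers consult the FWPS/MWPS status register so only slots that actually fired are dispatched, and each one is acknowledged by writing its bit back. A userspace caricature of that loop (the register value is made up):

#include <stdio.h>

#define NR_SLOTS 4

int main(void)
{
	unsigned int fwps = 0x5;   /* hypothetical: slots 0 and 2 fired */

	for (int i = 0; i < NR_SLOTS; i++) {
		if (!(fwps & (1u << i)))
			continue;                 /* this slot did not fire */
		printf("dispatch slot %d\n", i);
		fwps &= ~(1u << i);               /* ack: clear the status bit */
	}
	return 0;
}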

View File

@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
int err, len, type, offset;
int err, len, type;
err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
break;
case NT_LOONGARCH_HW_WATCH:
if ((type & HW_BREAKPOINT_RW) != type)
return -EINVAL;
break;
default:
return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
attr->bp_addr += offset;
return 0;
}
@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
return PTR_ERR(bp);
attr = bp->attr;
decode_ctrl_reg(uctrl, &ctrl);
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
if (err)
return err;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
break;
case NT_LOONGARCH_HW_WATCH:
decode_ctrl_reg(uctrl, &ctrl);
break;
default:
return -EINVAL;
}
if (uctrl & CTRL_PLV_ENABLE) {
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
if (err)
return err;
attr.disabled = 0;
} else {
attr.disabled = 1;
}
return modify_user_hw_breakpoint(bp, &attr);
}
@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);

View File

@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;
/* Prefer the built-in dtb, checking its validity first. */
if (!fdt_check_header(__dtb_start))
if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
@ -351,10 +351,8 @@ void __init platform_init(void)
arch_reserve_vmcore();
arch_reserve_crashkernel();
#ifdef CONFIG_ACPI_TABLE_UPGRADE
acpi_table_upgrade();
#endif
#ifdef CONFIG_ACPI
acpi_table_upgrade();
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif

View File

@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void)
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void)
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
early_numa_add_cpu(cpu, 0);
set_cpuid_to_node(cpuid, 0);
}
loongson_sysconf.nr_cpus = num_processors;
@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
numa_add_cpu(0);
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {

View File

@ -6,6 +6,7 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
@ -142,10 +143,11 @@ SECTIONS
#ifdef CONFIG_EFI_STUB
/* header symbols */
_kernel_asize = _end - _text;
_kernel_fsize = _edata - _text;
_kernel_vsize = _end - __initdata_begin;
_kernel_rsize = _edata - __initdata_begin;
_kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
_kernel_asize = ABSOLUTE(_end - _text);
_kernel_fsize = ABSOLUTE(_edata - _text);
_kernel_vsize = ABSOLUTE(_end - __initdata_begin);
_kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif
.gptab.sdata : {

View File

@ -761,7 +761,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
default:
ret = KVM_HCALL_INVALID_CODE;
break;
};
}
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

View File

@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
!!BMIPS_GET_CBR();
}
static void bcm6368_quirks(void)

View File

@ -322,7 +322,7 @@ static inline void ehb(void)
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
" mftc0 $1, " #rt ", " #sel " \n" \
" mftc0 %0, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \

View File

@ -27,7 +27,7 @@
17 o32 break sys_ni_syscall
# 18 was sys_stat
18 o32 unused18 sys_ni_syscall
19 o32 lseek sys_lseek
19 o32 lseek sys_lseek compat_sys_lseek
20 o32 getpid sys_getpid
21 o32 mount sys_mount
22 o32 umount sys_oldumount

View File

@ -112,8 +112,8 @@ static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
* gives them time to settle
*/
if (where == PCI_VENDOR_ID) {
if (ret == 0xffffffff || ret == 0x00000000 ||
ret == 0x0000ffff || ret == 0xffff0000) {
if (*val == 0xffffffff || *val == 0x00000000 ||
*val == 0x0000ffff || *val == 0xffff0000) {
if (delay > 4)
return 0;
delay *= 2;

View File

@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_kernel_dcache_page_addr(const void *addr);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* The only way to flush a vmap range is to flush the whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */

View File

@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
static inline pte_t ptep_get(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#define ptep_get ptep_get
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
if (!pte_young(*ptep))
return 0;
pte = *ptep;
pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
old_pte = *ptep;
set_pte(ptep, __pte(0));
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

View File

@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
/*
* When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
* of page flushes done flush_cache_page_if_present. There are some
* pros and cons in using this option. It may increase the risk of
* random segmentation faults.
*/
#define CONFIG_FLUSH_PAGE_ACCESSED 0
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
static void flush_kernel_dcache_page_addr(const void *addr);
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
/*
* The TLB is the engine of coherence on parisc. The CPU is
* entitled to speculate any page with a TLB mapping, so here
* we kill the mapping then flush the page along a special flush-only
* alias mapping. This guarantees that the page is no longer in the
* cache for any process, nor may it be speculatively read in (until
* the user or kernel specifically accesses it,
* of course).
*/
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
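
__flush_cache_page() now performs the whole sequence the comment above describes. A stub-based sketch of that order of operations, with printf stubs standing in for the parisc flush primitives:

#include <stdbool.h>
#include <stdio.h>

static void flush_tlb_page_stub(unsigned long va)  { printf("purge TLB    va=%#lx\n", va); }
static void flush_dcache_alias(unsigned long pa, unsigned long va)
	{ printf("flush dcache pa=%#lx via alias for va=%#lx\n", pa, va); }
static void flush_icache_alias(unsigned long pa, unsigned long va)
	{ printf("flush icache pa=%#lx via alias for va=%#lx\n", pa, va); }

static void flush_cache_page_sketch(unsigned long va, unsigned long pa, bool vm_exec)
{
	flush_tlb_page_stub(va);            /* step 1: kill the mapping first   */
	flush_dcache_alias(pa, va);         /* step 2: flush the data cache     */
	if (vm_exec)
		flush_icache_alias(pa, va); /* executable mappings only         */
}

int main(void)
{
	flush_cache_page_sketch(0x10000, 0x4000, true);
	return 0;
}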
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
static void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
vmaddr &= PAGE_MASK;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);
flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);
/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
flush_dcache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
static void flush_kernel_icache_page_addr(const void *addr)
{
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent instruction cache move-in */
preempt_disable();
flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
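
Both helpers above share one skeleton: purge the kernel TLB entry on all CPUs, then flush through the tmpalias mapping with preemption disabled, plausibly so that the flush and its temporary alias entry stay on a single CPU. A stub sketch of that skeleton; none of these functions are the real parisc routines:

#include <stdio.h>

static void purge_kernel_tlb(const void *addr) { printf("pdtlb SR_KERNEL %p\n", addr); }
static void tmpalias_flush(const void *addr)   { printf("tmpalias flush  %p\n", addr); }
static void preempt_disable_stub(void)         { /* pin this CPU */ }
static void preempt_enable_stub(void)          { }

static void flush_kernel_page_sketch(const void *addr)
{
	purge_kernel_tlb(addr);      /* remove the translation on all CPUs first */
	preempt_disable_stub();      /* keep the flush on one CPU ...            */
	tmpalias_flush(addr);        /* ... while flushing through the alias     */
	preempt_enable_stub();
}

int main(void)
{
	char page[4096];

	flush_kernel_page_sketch(page);
	return 0;
}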
void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
flush_kernel_icache_page(kaddr);
flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
/*
* Walk page directory for MM to find PTEP pointer for address ADDR.
*/
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
/*
 * Return the user physical address, or 0 if the page is not present.
 */
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
/* Save context */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
/* Set context for lpa_user */
switch_mm_irqs_off(NULL, mm, NULL);
pa = lpa_user(addr);
/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
return pa;
}
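
get_upa() borrows the target mm's context just long enough to run lpa_user, then restores the saved space and control registers. A user-space sketch of that save/switch/probe/restore bracket, with a plain variable standing in for the parisc space register and an invented identity translation in place of lpa_user:

#include <stdio.h>

static unsigned long cur_space = 1;   /* stand-in for the SR_USER space register */

/* Stand-in for lpa_user: pretend translation succeeds in any nonzero space. */
static unsigned long probe_pa(unsigned long va)
{
	return cur_space ? (va & ~0xfffUL) : 0;
}

static unsigned long get_upa_sketch(unsigned long target_space, unsigned long va)
{
	unsigned long saved = cur_space;   /* save the current context    */
	unsigned long pa;

	cur_space = target_space;          /* switch to the target mm     */
	pa = probe_pa(va);                 /* probe while mappings are live */
	cur_space = saved;                 /* restore the previous context */

	return pa;
}

int main(void)
{
	printf("pa=%#lx\n", get_upa_sketch(2, 0x12345678));
	return 0;
}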
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
if (parisc_requires_coherency()) {
for (i = 0; i < nr; i++) {
pte_t *ptep = get_ptep(vma->vm_mm,
addr + i * PAGE_SIZE);
if (!ptep)
continue;
if (pte_needs_flush(*ptep))
flush_user_cache_page(vma,
addr + i * PAGE_SIZE);
/* Optimise accesses to the same table? */
pte_unmap(ptep);
}
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
for (i = 0; i < nr; i++)
flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
WARN_ON(++count == 4096);
}
@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;
flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
pte_t *ptep;
pte_t *ptep, pte;
/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pte = ptep_get(ptep);
needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
flush_cache_page(vma, vmaddr, pfn);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
struct mm_struct *mm = vma->vm_mm;
unsigned long physaddr = get_upa(mm, vmaddr);
if (physaddr)
__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
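
Both copy helpers now flush the whole kernel page containing the buffer via PTR_PAGE_ALIGN_DOWN() rather than just the byte range. A small sketch of that align-down arithmetic, assuming a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round a pointer down to the start of its page, as PTR_PAGE_ALIGN_DOWN does. */
static void *page_align_down(const void *p)
{
	return (void *)((uintptr_t)p & ~(PAGE_SIZE - 1));
}

int main(void)
{
	char buf[8192];
	void *p = buf + 5000;

	printf("%p -> %p\n", p, page_align_down(p));
	return 0;
}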
/* __flush_tlb_range()
@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;
unsigned long addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
bool needs_flush = false;
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pfn = pte_pfn(*ptep);
pte_unmap(ptep);
}
if (needs_flush) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
for (addr = start; addr < end; addr += PAGE_SIZE)
flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
return;
}
flush_cache_pages(vma, start, end);
flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
if (!PageAnon(page))
return;
if (parisc_requires_coherency()) {
if (vma->vm_flags & VM_SHARED)
flush_data_cache();
else
flush_user_cache_page(vma, vmaddr);
	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}
	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
return 1;
}
/*
 * After a PTE is cleared, we have no way to flush the cache for
 * the physical page. On PA8800 and PA8900 processors, the stale
 * cache lines can cause random cache corruption. Thus, we must
 * flush the cache as well as the TLB when clearing a valid PTE.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
struct mm_struct *mm = (vma)->vm_mm;
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
else if (pte_accessible(mm, pte))
flush_tlb_page(vma, addr);
return pte;
}
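
A stub sketch of the ordering ptep_clear_flush() establishes: snapshot and clear the PTE first, then flush the cache when the page was valid, or only the TLB when just the translation was live. The bit layout here is invented for illustration:

#include <stdio.h>

static void flush_cache_page_stub(unsigned long pfn) { printf("flush cache, pfn=%lu\n", pfn); }
static void flush_tlb_page_stub(void)                { printf("flush TLB only\n"); }

static void clear_and_flush_sketch(unsigned long *pte)
{
	unsigned long old = *pte;   /* get_and_clear: snapshot ... */
	*pte = 0;                   /* ... then clear the entry    */

	if (old & 1)                             /* illustrative "pfn valid" bit  */
		flush_cache_page_stub(old >> 1); /* stale lines could corrupt     */
	else if (old)
		flush_tlb_page_stub();           /* translation existed, page not */
}

int main(void)
{
	unsigned long pte = (42UL << 1) | 1;

	clear_and_flush_sketch(&pte);
	return 0;
}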
/*
* The physical address for pages in the ioremap case can be obtained
* from the vm_struct. I wasn't able to successfully handle the
* vmalloc and vmap cases. We have an array of struct page pointers in
* the uninitialized vmalloc case, but the flush failed when using page_to_pfn.
*/
void flush_cache_vmap(unsigned long start, unsigned long end)
{
unsigned long addr, physaddr;
struct vm_struct *vm;
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
if (end - start >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
flush_cache_all();
return;
}
vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
flush_cache_all();
return;
}
/* The physical addresses of IOREMAP regions are contiguous */
if (vm->flags & VM_IOREMAP) {
physaddr = vm->phys_addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
preempt_disable();
flush_dcache_page_asm(physaddr, start);
flush_icache_page_asm(physaddr, start);
preempt_enable();
physaddr += PAGE_SIZE;
}
return;
}
flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);
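
In the VM_IOREMAP branch above, the physical cursor advances in lock-step with the virtual one because the region is physically contiguous. A self-contained sketch of that walk (stubbed flush, 4 KiB pages; the sketch flushes each page through its own virtual address):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void flush_page(unsigned long pa, unsigned long va)
{
	printf("flush pa=%#lx va=%#lx\n", pa, va);
}

/* Walk a physically contiguous region one page at a time. */
static void flush_ioremap_range_sketch(unsigned long start, unsigned long end,
				       unsigned long phys_addr)
{
	unsigned long addr, physaddr = phys_addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		flush_page(physaddr, addr);
		physaddr += PAGE_SIZE;
	}
}

int main(void)
{
	flush_ioremap_range_sketch(0xc0000000UL, 0xc0003000UL, 0x10000000UL);
	return 0;
}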
/*
* The vm_struct has been retired and the page table is set up. The
* last page in the range is a guard page. Its physical address can't
* be determined using lpa, so there is no way to flush the range
* using flush_dcache_page_asm.
*/
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);
/*
* On systems with PA8800/PA8900 processors, there is no way to flush
* a vmap range other than using the architected loop to flush the
* entire cache. The page directory is not set up, so we can't use
* fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
* L2 is physically indexed but FDCE/FICE instructions in virtual
* mode output their virtual address on the core bus, not their
* real address. As a result, the L2 cache index formed from the
* virtual address will most likely not be the same as the L2 index
* formed from the real address.
*/
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
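
The new flush_kernel_vmap_range() replaces the old size heuristic with a fixed ladder: always purge the TLB range, bail out if there is no data cache, fall back to a local-only flush when IRQs are disabled, and otherwise flush the data cache everywhere. A sketch of that ladder with printf stubs:

#include <stdbool.h>
#include <stdio.h>

/* Decision ladder mirrored from flush_kernel_vmap_range() above;
   the flush primitives are printf stubs, not the parisc routines. */
static void flush_vmap_range_sketch(bool has_dcache, bool smp_irqs_disabled)
{
	printf("purge kernel TLB range\n");       /* always: prevent move-in */

	if (!has_dcache)
		return;                           /* nothing cached to flush */

	if (smp_irqs_disabled) {                  /* cannot IPI other CPUs   */
		printf("flush local data cache\n");
		return;
	}

	printf("flush data cache on all CPUs\n");
}

int main(void)
{
	flush_vmap_range_sketch(true, false);
	return 0;
}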
@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

View File

@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC_FPU
select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU

View File

@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
aesp8-ppc.S
ghashp10-ppc.S
ghashp8-ppc.S

View File

@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
#ifdef CONFIG_CC_IS_CLANG
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label) \
asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
: "r" (x), DS_FORM_CONSTRAINT (*addr) \
: \
: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
@ -165,8 +181,19 @@ do { \
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
__get_user_asm_goto(x, addr, label, "ld")
#else
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
"1: ld%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
: DS_FORM_CONSTRAINT (*addr) \
: \
: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
asm_goto_output( \
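
The DS_FORM_CONSTRAINT split above exists because 64-bit ld/std are DS-form instructions whose displacement must be a multiple of four; the "Y" constraint expresses a DS-form-valid memory operand to GCC, and, as the #ifdef suggests, clang does not support it, so clang builds fall back to "Z<>". A host-generic sketch of the asm-goto shape these macros share; the asm body is empty, so no real store or exception-table entry is emitted, and the plain assignment stands in for the store:

#include <stdio.h>

#define put_user_sketch(x, ptr, label)                       \
do {                                                         \
	asm goto("/* store + EX_TABLE would go here */"      \
		 : /* no outputs */                          \
		 : "r" (x), "m" (*(ptr))                     \
		 : /* no clobbers */                         \
		 : label);                                   \
	*(ptr) = (x);      /* stand-in for the real store */ \
} while (0)

int main(void)
{
	long val = 42, slot = 0;

	put_user_sketch(val, &slot, efault);
	printf("stored %ld\n", slot);
	return 0;

efault:
	printf("-EFAULT path taken\n");
	return 1;
}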

Some files were not shown because too many files have changed in this diff