Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-10 07:10:27 +00:00)
Merge branches 'edac-drivers', 'edac-amd64' and 'edac-misc' into edac-updates
Combine all queued EDAC changes for submission into v6.4:

* ras/edac-drivers:
  EDAC/i10nm: Add Intel Sierra Forest server support
  EDAC/skx: Fix overflows on the DRAM row address mapping arrays

* ras/edac-amd64: (27 commits)
  EDAC/amd64: Fix indentation in umc_determine_edac_cap()
  EDAC/amd64: Add get_err_info() to pvt->ops
  EDAC/amd64: Split dump_misc_regs() into dct/umc functions
  EDAC/amd64: Split init_csrows() into dct/umc functions
  EDAC/amd64: Split determine_edac_cap() into dct/umc functions
  EDAC/amd64: Rename f17h_determine_edac_ctl_cap()
  EDAC/amd64: Split setup_mci_misc_attrs() into dct/umc functions
  EDAC/amd64: Split ecc_enabled() into dct/umc functions
  EDAC/amd64: Split read_mc_regs() into dct/umc functions
  EDAC/amd64: Split determine_memory_type() into dct/umc functions
  EDAC/amd64: Split read_base_mask() into dct/umc functions
  EDAC/amd64: Split prep_chip_selects() into dct/umc functions
  EDAC/amd64: Rework hw_info_{get,put}
  EDAC/amd64: Merge struct amd64_family_type into struct amd64_pvt
  EDAC/amd64: Do not discover ECC symbol size for Family 17h and later
  EDAC/amd64: Drop dbam_to_cs() for Family 17h and later
  EDAC/amd64: Split get_csrow_nr_pages() into dct/umc functions
  EDAC/amd64: Rename debug_display_dimm_sizes()

* ras/edac-misc:
  EDAC/altera: Remove MODULE_LICENSE in non-module
  EDAC: Sanitize MODULE_AUTHOR strings
  EDAC/amd81[13]1: Remove trailing newline from MODULE_AUTHOR
  EDAC/i5100: Fix typo in comment
  EDAC/altera: Remove redundant error logging

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
This commit is contained in: commit ce8ac91130

.gitignore (vendored): 1 line changed
@@ -78,6 +78,7 @@ modules.order
# RPM spec file (make rpm-pkg)
#
/*.spec
/rpmbuild/

#
# Debian directory (make deb-pkg)
.mailmap: 7 lines changed
@@ -28,6 +28,7 @@ Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>

@@ -121,7 +122,7 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org>
Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org>
Dmitry Baryshkov <dbaryshkov@gmail.com>
Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>

@@ -194,6 +195,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>

@@ -213,6 +215,9 @@ Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -242,7 +242,7 @@ group and can access them as follows::
VFIO User API
-------------------------------------------------------------------------------

Please see include/linux/vfio.h for complete API documentation.
Please see include/uapi/linux/vfio.h for complete API documentation.

VFIO bus driver API
-------------------------------------------------------------------------------

@@ -1222,7 +1222,7 @@ defined:
	return
	-ECHILD and it will be called again in ref-walk mode.

``_weak_revalidate``
``d_weak_revalidate``
	called when the VFS needs to revalidate a "jumped" dentry. This
	is called when a path-walk ends at dentry that was not acquired
	by doing a lookup in the parent directory. This includes "/",
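
For context on the d_weak_revalidate hook named in the hunk above, here is a minimal, hypothetical sketch of how a filesystem might wire it up; the examplefs_* names are assumptions, not code from this commit.

/* Hypothetical sketch only, not part of this commit. */
#include <linux/dcache.h>

static int examplefs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
{
	/* Revalidate a "jumped" dentry (e.g. one reached via "/" or ".."):
	 * return 1 if it is still usable, 0 to force a fresh lookup. */
	return d_really_is_positive(dentry) ? 1 : 0;
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_weak_revalidate = examplefs_d_weak_revalidate,
};
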
@@ -19,7 +19,7 @@ possible we decided to do following:
  platform devices.

- Devices behind real busses where there is a connector resource
  are represented as struct spi_device or struct i2c_device. Note
  are represented as struct spi_device or struct i2c_client. Note
  that standard UARTs are not busses so there is no struct uart_device,
  although some of them may be represented by struct serdev_device.
@@ -213,11 +213,7 @@ point rather than some random spot. If your upstream-bound branch has
emptied entirely into the mainline during the merge window, you can pull it
forward with a command like::

  git merge v5.2-rc1^0

The "^0" will cause Git to do a fast-forward merge (which should be
possible in this situation), thus avoiding the addition of a spurious merge
commit.
  git merge --ff-only v5.2-rc1

The guidelines laid out above are just that: guidelines. There will always
be situations that call out for a different solution, and these guidelines
@@ -5,10 +5,10 @@ Hugetlbfs Reservation
Overview
========

Huge pages as described at Documentation/mm/hugetlbpage.rst are typically
preallocated for application use. These huge pages are instantiated in a
task's address space at page fault time if the VMA indicates huge pages are
to be used. If no huge page exists at page fault time, the task is sent
Huge pages as described at Documentation/admin-guide/mm/hugetlbpage.rst are
typically preallocated for application use. These huge pages are instantiated
in a task's address space at page fault time if the VMA indicates huge pages
are to be used. If no huge page exists at page fault time, the task is sent
a SIGBUS and often dies an unhappy death. Shortly after huge page support
was added, it was determined that it would be better to detect a shortage
of huge pages at mmap() time. The idea is that if there were not enough
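
As a userspace illustration of the reservation behaviour described above (a sketch, not from this commit; it assumes 2 MiB huge pages are available via /proc/sys/vm/nr_hugepages on the running system), a shortfall surfaces as an mmap() failure rather than a later SIGBUS at fault time:

/* Illustrative sketch only. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;	/* one 2 MiB huge page */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");		/* reservation failed up front */
		return 1;
	}
	p[0] = 1;			/* backed by the reserved huge page */
	munmap(p, len);
	return 0;
}
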
@@ -66,7 +66,7 @@ one of the types described below.
also populated on boot using one of ``kernelcore``, ``movablecore`` and
``movable_node`` kernel command line parameters. See
Documentation/mm/page_migration.rst and
Documentation/admin-guide/mm/memory_hotplug.rst for additional details.
Documentation/admin-guide/mm/memory-hotplug.rst for additional details.

* ``ZONE_DEVICE`` represents memory residing on devices such as PMEM and GPU.
It has different characteristics than RAM zone types and it exists to provide
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-c.yaml#

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
%YAML 1.2
---
$id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)

name: ethtool

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)

name: fou

@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)

name: netdev

@@ -9,6 +9,7 @@ definitions:
-
type: flags
name: xdp-act
render-max: true
entries:
-
name: basic
@@ -12,10 +12,6 @@ under ``-std=gnu11`` [gcc-c-dialect-options]_: the GNU dialect of ISO C11.
This dialect contains many extensions to the language [gnu-extensions]_,
and many of them are used within the kernel as a matter of course.

There is some support for compiling the kernel with ``icc`` [icc]_ for several
of the architectures, although at the time of writing it is not completed,
requiring third-party patches.

Attributes
----------

@@ -35,12 +31,28 @@ in order to feature detect which ones can be used and/or to shorten the code.

Please refer to ``include/linux/compiler_attributes.h`` for more information.

Rust
----

The kernel has experimental support for the Rust programming language
[rust-language]_ under ``CONFIG_RUST``. It is compiled with ``rustc`` [rustc]_
under ``--edition=2021`` [rust-editions]_. Editions are a way to introduce
small changes to the language that are not backwards compatible.

On top of that, some unstable features [rust-unstable-features]_ are used in
the kernel. Unstable features may change in the future, thus it is an important
goal to reach a point where only stable features are used.

Please refer to Documentation/rust/index.rst for more information.

.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
.. [gcc] https://gcc.gnu.org
.. [clang] https://clang.llvm.org
.. [icc] https://software.intel.com/en-us/c-compilers
.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf

.. [rust-language] https://www.rust-lang.org
.. [rustc] https://doc.rust-lang.org/rustc/
.. [rust-editions] https://doc.rust-lang.org/edition-guide/editions/
.. [rust-unstable-features] https://github.com/Rust-for-Linux/linux/issues/2
@@ -320,7 +320,7 @@ for their time. Code review is a tiring and time-consuming process, and
reviewers sometimes get grumpy. Even in that case, though, respond
politely and address the problems they have pointed out. When sending a next
version, add a ``patch changelog`` to the cover letter or to individual patches
explaining difference aganst previous submission (see
explaining difference against previous submission (see
:ref:`the_canonical_patch_format`).

See Documentation/process/email-clients.rst for recommendations on email
@@ -258,7 +258,7 @@ Linux cannot currently figure out CPU capacity on its own, this information thus
needs to be handed to it. Architectures must define arch_scale_cpu_capacity()
for that purpose.

The arm and arm64 architectures directly map this to the arch_topology driver
The arm, arm64, and RISC-V architectures directly map this to the arch_topology driver
CPU scaling data, which is derived from the capacity-dmips-mhz CPU binding; see
Documentation/devicetree/bindings/cpu/cpu-capacity.txt.
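
For readers unfamiliar with the hook mentioned above, this is a hedged sketch of how an architecture typically maps arch_scale_cpu_capacity() onto the generic arch_topology driver; the exact per-architecture wiring may differ and is not part of this commit.

/* Sketch (assumption: the architecture uses the generic arch_topology
 * driver, as arm, arm64 and RISC-V do). The capacity values themselves
 * are derived from the capacity-dmips-mhz devicetree property. */
#include <linux/arch_topology.h>

#define arch_scale_cpu_capacity topology_get_cpu_scale
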
@@ -15,7 +15,8 @@ Hugetlbfs 预留
概述
====

Documentation/mm/hugetlbpage.rst 中描述的巨页通常是预先分配给应用程序使用的。如果VMA指
Documentation/admin-guide/mm/hugetlbpage.rst
中描述的巨页通常是预先分配给应用程序使用的 。如果VMA指
示要使用巨页,这些巨页会在缺页异常时被实例化到任务的地址空间。如果在缺页异常
时没有巨页存在,任务就会被发送一个SIGBUS,并经常不高兴地死去。在加入巨页支
持后不久,人们决定,在mmap()时检测巨页的短缺情况会更好。这个想法是,如果
@@ -231,7 +231,7 @@ CFS调度类基于实体负载跟踪机制(Per-Entity Load Tracking, PELT)
当前,Linux无法凭自身算出CPU算力,因此必须要有把这个信息传递给Linux的方式。每个架构必须为此
定义arch_scale_cpu_capacity()函数。

arm和arm64架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
arm、arm64和RISC-V架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
arch_topology.h的percpu变量cpu_scale),它是从capacity-dmips-mhz CPU binding中衍生计算
出来的。参见Documentation/devicetree/bindings/cpu/cpu-capacity.txt。
@@ -24,7 +24,8 @@ YAML specifications can be found under ``Documentation/netlink/specs/``
This document describes details of the schema.
See :doc:`intro-specs` for a practical starting guide.

All specs must be licensed under ``GPL-2.0-only OR BSD-3-Clause``
All specs must be licensed under
``((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)``
to allow for easy adoption in user space code.

Compatibility levels
MAINTAINERS: 20 lines changed

@@ -5971,7 +5971,7 @@ F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h

DEVLINK
M: Jiri Pirko <jiri@nvidia.com>
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/devlink

@@ -14872,12 +14872,12 @@ M: Sagi Grimberg <sagi@grimberg.me>
L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
T: git://git.infradead.org/nvme.git
T: git git://git.infradead.org/nvme.git
F: Documentation/nvme/
F: drivers/nvme/host/
F: drivers/nvme/common/
F: include/linux/nvme.h
F: drivers/nvme/host/
F: include/linux/nvme-*.h
F: include/linux/nvme.h
F: include/uapi/linux/nvme_ioctl.h

NVM EXPRESS FABRICS AUTHENTICATION

@@ -14912,7 +14912,7 @@ M: Chaitanya Kulkarni <kch@nvidia.com>
L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
T: git://git.infradead.org/nvme.git
T: git git://git.infradead.org/nvme.git
F: drivers/nvme/target/

NVMEM FRAMEWORK

@@ -15079,7 +15079,7 @@ F: Documentation/hwmon/nzxt-smart2.rst
F: drivers/hwmon/nzxt-smart2.c

OBJAGG
M: Jiri Pirko <jiri@nvidia.com>
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/objagg.h

@@ -15853,7 +15853,7 @@ F: drivers/video/logo/logo_parisc*
F: include/linux/hp_sdc.h

PARMAN
M: Jiri Pirko <jiri@nvidia.com>
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/parman.h

@@ -17990,7 +17990,7 @@ F: Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
F: arch/riscv/boot/dts/microchip/
F: drivers/char/hw_random/mpfs-rng.c
F: drivers/clk/microchip/clk-mpfs.c
F: drivers/clk/microchip/clk-mpfs*.c
F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c

@@ -19150,9 +19150,7 @@ W: http://www.brownhat.org/sis900.html
F: drivers/net/ethernet/sis/sis900.*

SIS FRAMEBUFFER DRIVER
M: Thomas Winischhofer <thomas@winischhofer.net>
S: Maintained
W: http://www.winischhofer.net/linuxsisvga.shtml
S: Orphan
F: Documentation/fb/sisfb.rst
F: drivers/video/fbdev/sis/
F: include/video/sisfb.h
Makefile: 13 lines changed

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*

@@ -274,8 +274,7 @@ no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile rustavailable rustfmt rustfmtcheck \
scripts_package
outputmakefile rustavailable rustfmt rustfmtcheck
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \

@@ -1605,7 +1604,7 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
*.spec \
*.spec rpmbuild \
rust/libmacros.so

# clean - Delete most, but leave enough to build external modules

@@ -1656,10 +1655,6 @@ distclean: mrproper
%pkg: include/config/kernel.release FORCE
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@

PHONY += scripts_package
scripts_package: scripts_basic
$(Q)$(MAKE) $(build)=scripts scripts/list-gitignored

# Brief documentation of the typical targets used
# ---------------------------------------------------------------------------

@@ -1886,6 +1881,8 @@ endif

else # KBUILD_EXTMOD

filechk_kernel.release = echo $(KERNELRELEASE)

###
# External module support.
# When building external modules the kernel used as basis is considered
@@ -193,6 +193,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;

/* Timers */
struct arch_timer_vm_data timer_data;

/* Mandated version of PSCI */
u32 psci_version;
@@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
if (ctxt->offset.vm_offset)
return *ctxt->offset.vm_offset;

switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
default:
return 0;
}
return 0;
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)

@@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;

switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
break;
default:
if (!ctxt->offset.vm_offset) {
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
return;
}

WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)

@@ -765,25 +758,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
unsigned long i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;

mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, tmp, kvm)
timer_set_offset(vcpu_vtimer(tmp), cntvoff);

/*
 * When called from the vcpu create path, the CPU being created is not
 * included in the loop above, so we just set it here as well.
 */
timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);

@@ -791,10 +765,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

vtimer->vcpu = vcpu;
vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
ptimer->vcpu = vcpu;

/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(vtimer, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);

hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);

@@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
break;
case KVM_REG_ARM_TIMER_CNT:
timer = vcpu_vtimer(vcpu);
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
timer_set_offset(timer, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
timer = vcpu_vtimer(vcpu);
@@ -44,7 +44,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
feature = smccc_get_arg1(vcpu);
switch (feature) {
case KVM_PTP_VIRT_COUNTER:
cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
break;
case KVM_PTP_PHYS_COUNTER:
cycles = systime_snapshot.cycles;
@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
}

/*
 * Check for a read fault. This could be caused by a read on an
 * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
 * defined in protection_map[]. Read faults can only be caused by
 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
 */
if (unlikely(!(vma->vm_flags & VM_READ)))
if (unlikely(!vma_is_accessible(vma)))
return true;

if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
return true;

/*
 * We should ideally do the vma pkey access check here. But in the
 * fault path, handle_mm_fault() also does the same check. To avoid
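
For reference on the helper adopted in the hunk above, vma_is_accessible() is roughly equivalent to the following paraphrased sketch (not code from this commit; see include/linux/mm.h for the in-tree definition), which is why it can replace the plain VM_READ test when detecting PROT_NONE mappings:

#include <linux/mm.h>

/* Paraphrased sketch of the generic helper. */
static inline bool example_vma_is_accessible(struct vm_area_struct *vma)
{
	/* VM_ACCESS_FLAGS expands to VM_READ | VM_WRITE | VM_EXEC */
	return vma->vm_flags & VM_ACCESS_FLAGS;
}
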
@@ -7,6 +7,7 @@ config PPC_PSERIES
select OF_DYNAMIC
select FORCE_PCI
select PCI_MSI
select GENERIC_ALLOCATOR
select PPC_XICS
select PPC_XIVE_SPAPR
select PPC_ICP_NATIVE
@@ -19,8 +19,6 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
/* A local tlb flush is needed before user execution can resume. */
cpumask_t tlb_stale_mask;
#endif
} mm_context_t;

@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}

static inline void local_flush_tlb_all_asid(unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
: "memory");
}

static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
: "memory");
}

#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#define local_flush_tlb_page(addr) do { } while (0)
@@ -196,16 +196,6 @@ switch_mm_fast:

if (need_flush_tlb)
local_flush_tlb_all();
#ifdef CONFIG_SMP
else {
cpumask_t *mask = &mm->context.tlb_stale_mask;

if (cpumask_test_cpu(cpu, mask)) {
cpumask_clear_cpu(cpu, mask);
local_flush_tlb_all_asid(cntx & asid_mask);
}
}
#endif
}

static void set_mm_noasid(struct mm_struct *mm)

@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
local_flush_tlb_all();
}

static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
{
if (static_branch_unlikely(&use_asid_allocator))
set_mm_asid(mm, cpu);
else
set_mm_noasid(mm);
/*
 * The mm_cpumask indicates which harts' TLBs contain the virtual
 * address mapping of the mm. Compared to noasid, using asid
 * can't guarantee that stale TLB entries are invalidated because
 * the asid mechanism wouldn't flush TLB for every switch_mm for
 * performance. So when using asid, keep all CPUs footmarks in
 * cpumask() until mm reset.
 */
cpumask_set_cpu(cpu, mm_cpumask(next));
if (static_branch_unlikely(&use_asid_allocator)) {
set_mm_asid(next, cpu);
} else {
cpumask_clear_cpu(cpu, mm_cpumask(prev));
set_mm_noasid(next);
}
}

static int __init asids_init(void)

@@ -274,7 +276,8 @@ static int __init asids_init(void)
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
{
/* Nothing to do here when there is no MMU */
}

@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 */
cpu = smp_processor_id();

cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));

set_mm(next, cpu);
set_mm(prev, next, cpu);

flush_icache_deferred(next, cpu);
}
@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
no_context(regs, addr);
return;
}
if (pud_leaf(*pud_k))
goto flush_tlb;

/*
 * Since the vmalloc area is global, it is unnecessary

@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
no_context(regs, addr);
return;
}
if (pmd_leaf(*pmd_k))
goto flush_tlb;

/*
 * Make sure the actual PTE exists as well to

@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 * ordering constraint, not a cache flush; it is
 * necessary even after writing invalid entries.
 */
flush_tlb:
local_flush_tlb_page(addr);
}
@@ -5,7 +5,23 @@
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static inline void local_flush_tlb_all_asid(unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
: "memory");
}

static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
: "memory");
}

void flush_tlb_all(void)
{

@@ -15,7 +31,6 @@ void flush_tlb_all(void)
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct cpumask *pmask = &mm->context.tlb_stale_mask;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;

@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
if (static_branch_unlikely(&use_asid_allocator)) {
unsigned long asid = atomic_long_read(&mm->context.id);

/*
 * TLB will be immediately flushed on harts concurrently
 * executing this MM context. TLB flush on other harts
 * is deferred until this MM context migrates there.
 */
cpumask_setall(pmask);
cpumask_clear_cpu(cpuid, pmask);
cpumask_andnot(pmask, pmask, cmask);

if (broadcast) {
sbi_remote_sfence_vma_asid(cmask, start, size, asid);
} else if (size <= stride) {
@@ -57,11 +57,19 @@ repeat:
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
intersects(initrd_data.start, initrd_data.size, safe_addr, size))
safe_addr = initrd_data.start + initrd_data.size;
if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
safe_addr = (unsigned long)comps + comps->len;
goto repeat;
}
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
safe_addr = (unsigned long)certs + certs->len;
goto repeat;
}
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;
@@ -23,7 +23,6 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y

@@ -90,7 +89,6 @@ CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y

@@ -298,7 +296,6 @@ CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m

@@ -340,7 +337,6 @@ CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m

@@ -351,7 +347,6 @@ CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
CONFIG_NET_SCH_MQPRIO=m

@@ -363,14 +358,11 @@ CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m

@@ -584,7 +576,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m

@@ -828,6 +820,7 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_PREEMPT=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_ATOMIC_SLEEP=y

@@ -843,6 +836,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_IRQSOFF_TRACER=y

@@ -857,6 +851,7 @@ CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_SAMPLE_FTRACE_OPS=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
@@ -21,7 +21,6 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y

@@ -85,7 +84,6 @@ CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC_STAT=y

@@ -289,7 +287,6 @@ CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m

@@ -330,7 +327,6 @@ CONFIG_BRIDGE_MRP=y
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m

@@ -341,7 +337,6 @@ CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
CONFIG_NET_SCH_MQPRIO=m

@@ -353,14 +348,11 @@ CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m

@@ -573,7 +565,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m

@@ -795,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y

@@ -805,6 +798,7 @@ CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_SAMPLE_FTRACE_OPS=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
@@ -58,7 +58,7 @@ CONFIG_ZFCP=y
# CONFIG_VMCP is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
# CONFIG_HID is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev,
struct list_head *resources)
int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
unsigned long addr, size, flags;
struct resource *res;

@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
return -ENOMEM;
}
zdev->bars[i].res = res;
pci_add_resource(resources, res);
}
zdev->has_resources = 1;

@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
struct resource *res;
int i;

pci_lock_rescan_remove();
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size || !zdev->bars[i].res)
res = zdev->bars[i].res;
if (!res)
continue;

release_resource(res);
pci_bus_remove_resource(zdev->zbus->bus, res);
zpci_free_iomap(zdev, zdev->bars[i].map_idx);
release_resource(zdev->bars[i].res);
kfree(zdev->bars[i].res);
zdev->bars[i].res = NULL;
kfree(res);
}
zdev->has_resources = 0;
pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
@@ -41,9 +41,7 @@ static int zpci_nb_devices;
 */
static int zpci_bus_prepare_device(struct zpci_dev *zdev)
{
struct resource_entry *window, *n;
struct resource *res;
int rc;
int rc, i;

if (!zdev_enabled(zdev)) {
rc = zpci_enable_device(zdev);

@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
}

if (!zdev->has_resources) {
zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
res = window->res;
pci_bus_add_resource(zdev->zbus->bus, res, 0);
zpci_setup_bus_resources(zdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (zdev->bars[i].res)
pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
}
}
@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)

int zpci_alloc_domain(int domain);
void zpci_free_domain(int domain);
int zpci_setup_bus_resources(struct zpci_dev *zdev,
struct list_head *resources);
int zpci_setup_bus_resources(struct zpci_dev *zdev);

static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
unsigned int devfn)
@@ -128,8 +128,9 @@ struct snp_psc_desc {
struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
} __packed;

/* Guest message request error code */
/* Guest message request error codes */
#define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32)
#define SNP_GUEST_REQ_ERR_BUSY BIT_ULL(33)

#define GHCB_MSR_TERM_REQ 0x100
#define GHCB_MSR_TERM_REASON_SET_POS 12
@@ -261,20 +261,22 @@ enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0)
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)

/*
 * For AVIC, the max index allowed for physical APIC ID
 * table is 0xff (255).
 * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
 * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
 */
#define AVIC_MAX_PHYSICAL_ID 0XFEULL

/*
 * For x2AVIC, the max index allowed for physical APIC ID
 * table is 0x1ff (511).
 * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
 */
#define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL

static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);

#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
@@ -89,11 +89,21 @@
 * Sub-leaf 2: EAX: host tsc frequency in kHz
 */

#define XEN_CPUID_TSC_EMULATED (1u << 0)
#define XEN_CPUID_HOST_TSC_RELIABLE (1u << 1)
#define XEN_CPUID_RDTSCP_INSTR_AVAIL (1u << 2)

#define XEN_CPUID_TSC_MODE_DEFAULT (0)
#define XEN_CPUID_TSC_MODE_ALWAYS_EMULATE (1u)
#define XEN_CPUID_TSC_MODE_NEVER_EMULATE (2u)
#define XEN_CPUID_TSC_MODE_PVRDTSCP (3u)

/*
 * Leaf 5 (0x40000x04)
 * HVM-specific features
 * Sub-leaf 0: EAX: Features
 * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
 * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
 */
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */

@@ -102,12 +112,16 @@
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
/*
 * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
 * used to store high bits for the Destination ID. This expands the Destination
 * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768.
 * With interrupt format set to 0 (non-remappable) bits 55:49 from the
 * IO-APIC RTE and bits 11:5 from the MSI address can be used to store
 * high bits for the Destination ID. This expands the Destination ID
 * field from 8 to 15 bits, allowing to target APIC IDs up 32768.
 */
#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
/* Per-vCPU event channel upcalls */
/*
 * Per-vCPU event channel upcalls work correctly with physical IRQs
 * bound to event channels.
 */
#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)

/*
@@ -2355,6 +2355,7 @@ static void mce_restart(void)
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
mce_schedule_work();
}

/* Toggle features for corrected errors */
@@ -368,7 +368,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
{
struct resctrl_schema *s;
struct rdtgroup *rdtgrp;
struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
int ret = 0;

@@ -397,10 +396,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
goto out;
}

list_for_each_entry(s, &resctrl_schema_all, list) {
list_for_each_entry(dom, &s->res->domains, list)
memset(dom->staged_config, 0, sizeof(dom->staged_config));
}
rdt_staged_configs_clear();

while ((tok = strsep(&buf, "\n")) != NULL) {
resname = strim(strsep(&tok, ":"));

@@ -445,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}

out:
rdt_staged_configs_clear();
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return ret ?: nbytes;
@@ -555,5 +555,6 @@ void __check_limbo(struct rdt_domain *d, bool force_free);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
void __init thread_throttle_mode_init(void);
void __init mbm_config_rftype_init(const char *config);
void rdt_staged_configs_clear(void);

#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
va_end(ap);
}

void rdt_staged_configs_clear(void)
{
struct rdt_resource *r;
struct rdt_domain *dom;

lockdep_assert_held(&rdtgroup_mutex);

for_each_alloc_capable_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
memset(dom->staged_config, 0, sizeof(dom->staged_config));
}
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.

@@ -3107,7 +3120,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct resctrl_schema *s;
struct rdt_resource *r;
int ret;
int ret = 0;

rdt_staged_configs_clear();

list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;

@@ -3119,20 +3134,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
} else {
ret = rdtgroup_init_cat(s, rdtgrp->closid);
if (ret < 0)
return ret;
goto out;
}

ret = resctrl_arch_update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
goto out;
}

}

rdtgrp->mode = RDT_MODE_SHAREABLE;

return 0;
out:
rdt_staged_configs_clear();
return ret;
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
@@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub)
RET
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(ftrace_stub_graph)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
struct ghcb *ghcb;
int ret;

if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;

if (!fw_err)
return -EINVAL;

@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
if (ret)
goto e_put;

if (ghcb->save.sw_exit_info_2) {
*fw_err = ghcb->save.sw_exit_info_2;
switch (*fw_err) {
case 0:
break;

case SNP_GUEST_REQ_ERR_BUSY:
ret = -EAGAIN;
break;

case SNP_GUEST_REQ_INVALID_LEN:
/* Number of expected pages are returned in RBX */
if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
input->data_npages = ghcb_get_rbx(ghcb);

*fw_err = ghcb->save.sw_exit_info_2;

ret = -ENOSPC;
break;
}
fallthrough;
default:
ret = -EIO;
break;
}

e_put:
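
A hedged caller-side sketch of how the new error mapping above might be consumed (the wrapper name and retry policy are assumptions, not part of this commit): -EAGAIN maps the firmware-busy case and can simply be retried, while -ENOSPC signals that more certificate pages are needed.

/* Hypothetical caller sketch only. */
#include <asm/sev.h>

static int example_issue_snp_request(u64 exit_code, struct snp_req_data *input)
{
	unsigned long fw_err = 0;
	int ret;

	do {
		ret = snp_issue_guest_request(exit_code, input, &fw_err);
	} while (ret == -EAGAIN);	/* SNP_GUEST_REQ_ERR_BUSY: retry */

	if (ret == -ENOSPC) {
		/* SNP_GUEST_REQ_INVALID_LEN: input->data_npages now holds
		 * the number of pages the firmware expects. */
	}
	return ret;
}
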
@@ -27,19 +27,38 @@
#include "irq.h"
#include "svm.h"

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
/*
 * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
 * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
 * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
 *
 * For the vCPU ID, use however many bits are currently allowed for the max
 * guest physical APIC ID (limited by the size of the physical ID table), and
 * use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
 * size of the GATag is defined by hardware (32 bits), but is an opaque value
 * as far as hardware is concerned.
 */
#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK

#define AVIC_VM_ID_BITS 24
#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)

#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)

#define __AVIC_GATAG(vm_id, vcpu_id) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
((vcpu_id) & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG(vm_id, vcpu_id) \
({ \
u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id); \
\
WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id)); \
WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id)); \
ga_tag; \
})

static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);

static bool force_avic;
module_param_unsafe(force_avic, bool, 0444);
@@ -2903,7 +2903,7 @@ static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
bool ia32e;
bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);

if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||

@@ -2923,12 +2923,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
vmcs12->host_ia32_perf_global_ctrl)))
return -EINVAL;

#ifdef CONFIG_X86_64
ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
#else
ia32e = false;
#endif

if (ia32e) {
if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
return -EINVAL;

@@ -3022,7 +3016,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
enum vm_entry_failure_code *entry_failure_code)
{
bool ia32e;
bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);

*entry_failure_code = ENTRY_FAIL_DEFAULT;

@@ -3048,6 +3042,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
vmcs12->guest_ia32_perf_global_ctrl)))
return -EINVAL;

if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
return -EINVAL;

if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
return -EINVAL;

/*
 * If the load IA32_EFER VM-entry control is 1, the following checks
 * are performed on the field for the IA32_EFER MSR:

@@ -3059,7 +3060,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 */
if (to_vmx(vcpu)->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
@@ -262,7 +262,7 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
 * eIBRS has its own protection against poisoned RSB, so it doesn't
 * need the RSB filling sequence. But it does need to be enabled, and a
 * single call to retire, before the first unbalanced RET.
	 */
 */

FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
X86_FEATURE_RSB_VMEXIT_LITE

@@ -311,7 +311,7 @@ SYM_FUNC_END(vmx_do_nmi_irqoff)
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field: VMCS field encoding that failed
 * @fault: %true if the VMREAD faulted, %false if it failed

 *
 * Save and restore volatile registers across a call to vmread_error(). Note,
 * all parameters are passed on the stack.
 */
@@ -874,7 +874,7 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
 */
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
	else {
else {
int mask = 0, match = 0;

if (enable_ept && (eb & (1u << PF_VECTOR))) {

@@ -1282,7 +1282,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
}
}

	if (vmx->nested.need_vmcs12_to_shadow_sync)
if (vmx->nested.need_vmcs12_to_shadow_sync)
nested_sync_vmcs12_to_shadow(vcpu);

if (vmx->guest_state_loaded)

@@ -5049,10 +5049,10 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (to_vmx(vcpu)->nested.nested_run_pending)
return -EBUSY;

       /*
        * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
        * e.g. if the IRQ arrived asynchronously after checking nested events.
        */
/*
 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
 * e.g. if the IRQ arrived asynchronously after checking nested events.
 */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
return -EBUSY;
@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
((u64)bp->ext_cmd_line_ptr << 32));

cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
return;

if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
sme_me_mask = me_mask;
@@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o

obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o

obj-$(CONFIG_XEN_PV_DOM0) += vga.o
obj-$(CONFIG_XEN_DOM0) += vga.o

obj-$(CONFIG_XEN_EFI) += efi.o
@@ -1390,7 +1390,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)

x86_platform.set_legacy_features =
xen_dom0_set_legacy_features;
xen_init_vga(info, xen_start_info->console.dom0.info_size);
xen_init_vga(info, xen_start_info->console.dom0.info_size,
&boot_params.screen_info);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;
@@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
x86_init.oem.banner = xen_banner;

xen_efi_init(boot_params);

if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
long ret = HYPERVISOR_platform_op(&op);

if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}

void __init mem_map_via_hcall(struct boot_params *boot_params_p)
@@ -20,6 +20,7 @@
#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/cpuid.h>

#include <xen/events.h>
#include <xen/features.h>

@@ -503,11 +504,7 @@ static int __init xen_tsc_safe_clocksource(void)
/* Leaf 4, sub-leaf 0 (0x40000x03) */
cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx);

/* tsc_mode = no_emulate (2) */
if (ebx != 2)
return 0;

return 1;
return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE;
}

static void __init xen_time_init(void)
@@ -9,10 +9,9 @@

#include "xen-ops.h"

void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
struct screen_info *screen_info)
{
struct screen_info *screen_info = &boot_params.screen_info;

/* This is drawn from a dump from vgacon:startup in
 * standard Linux. */
screen_info->orig_video_mode = 3;
@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
|
||||
|
||||
struct dom0_vga_console_info;
|
||||
|
||||
#ifdef CONFIG_XEN_PV_DOM0
|
||||
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
|
||||
#ifdef CONFIG_XEN_DOM0
|
||||
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
|
||||
struct screen_info *);
|
||||
#else
|
||||
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
|
||||
size_t size)
|
||||
size_t size, struct screen_info *si)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
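The xen_init_vga() changes above (vga.c and xen-ops.h) turn the destination struct screen_info into an explicit parameter instead of always writing to the global boot_params.screen_info, so the PV and PVH dom0 entry points can each pass their own structure. A hedged user-space sketch of that shape of refactor follows; the types and the mode value are simplified stand-ins, not the real Xen or screen_info definitions.

#include <stddef.h>
#include <stdio.h>

struct screen_info { int orig_video_mode; int orig_video_isVGA; };
struct console_info { int video_type; };

/* The destination is passed in rather than being a hard-coded global. */
static void init_vga(const struct console_info *info, size_t size,
		     struct screen_info *si)
{
	(void)size;			/* the real code validates this */
	si->orig_video_mode = 3;	/* as in the vgacon:startup dump */
	si->orig_video_isVGA = info->video_type;
}

int main(void)
{
	struct screen_info pv_si = { 0 }, pvh_si = { 0 };
	struct console_info info = { .video_type = 1 };

	init_vga(&info, sizeof(info), &pv_si);	/* PV dom0 path */
	init_vga(&info, sizeof(info), &pvh_si);	/* PVH dom0 path */

	printf("mode=%d isVGA=%d\n", pv_si.orig_video_mode, pv_si.orig_video_isVGA);
	return 0;
}
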
@@ -204,9 +204,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK

source "block/partitions/Kconfig"

config BLOCK_COMPAT
def_bool COMPAT

config BLK_MQ_PCI
def_bool PCI

@@ -959,16 +959,11 @@ again:
}
}

unsigned long bdev_start_io_acct(struct block_device *bdev,
unsigned int sectors, enum req_op op,
unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);

part_stat_lock();
update_io_ticks(bdev, start_time, false);
part_stat_inc(bdev, ios[sgrp]);
part_stat_add(bdev, sectors[sgrp], sectors);
part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
part_stat_unlock();

@@ -984,13 +979,12 @@ EXPORT_SYMBOL(bdev_start_io_acct);
*/
unsigned long bio_start_io_acct(struct bio *bio)
{
return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
bio_op(bio), jiffies);
return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time)
unsigned int sectors, unsigned long start_time)
{
const int sgrp = op_stat_group(op);
unsigned long now = READ_ONCE(jiffies);

@@ -998,6 +992,8 @@ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,

part_stat_lock();
update_io_ticks(bdev, now, true);
part_stat_inc(bdev, ios[sgrp]);
part_stat_add(bdev, sectors[sgrp], sectors);
part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
part_stat_unlock();

@@ -1007,7 +1003,7 @@ EXPORT_SYMBOL(bdev_end_io_acct);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
struct block_device *orig_bdev)
{
bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

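The blk-core.c hunks above move the sector count from bdev_start_io_acct() to bdev_end_io_acct(), so the sectors statistic is charged when the I/O completes rather than when it is submitted. The user-space model below illustrates that split with a toy stats structure; the names are illustrative and none of this is the block layer's API.

#include <stdio.h>
#include <time.h>

struct disk_stats {
	unsigned long ios, sectors, in_flight;
	double nsecs;
};

/* Submission side: record only the start time and the in-flight count. */
static struct timespec io_start(struct disk_stats *st)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	st->in_flight++;
	return t;
}

/* Completion side: the sector count is charged here. */
static void io_end(struct disk_stats *st, unsigned long sectors,
		   struct timespec start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	st->ios++;
	st->sectors += sectors;
	st->nsecs += (now.tv_sec - start.tv_sec) * 1e9 +
		     (now.tv_nsec - start.tv_nsec);
	st->in_flight--;
}

int main(void)
{
	struct disk_stats st = { 0 };
	struct timespec t = io_start(&st);

	io_end(&st, 8, t);	/* a 4 KiB I/O: 8 x 512-byte sectors */
	printf("ios=%lu sectors=%lu in_flight=%lu\n",
	       st.ios, st.sectors, st.in_flight);
	return 0;
}
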
@@ -2725,6 +2725,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
struct blk_mq_hw_ctx *this_hctx = NULL;
struct blk_mq_ctx *this_ctx = NULL;
struct request *requeue_list = NULL;
struct request **requeue_lastp = &requeue_list;
unsigned int depth = 0;
LIST_HEAD(list);

@@ -2735,10 +2736,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
this_hctx = rq->mq_hctx;
this_ctx = rq->mq_ctx;
} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
rq_list_add(&requeue_list, rq);
rq_list_add_tail(&requeue_lastp, rq);
continue;
}
list_add_tail(&rq->queuelist, &list);
list_add(&rq->queuelist, &list);
depth++;
} while (!rq_list_empty(plug->mq_list));

@@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do { \
if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
int srcu_idx; \
\
might_sleep_if(check_sleep); \
srcu_idx = srcu_read_lock((q)->tag_set->srcu); \
srcu_idx = srcu_read_lock(__tag_set->srcu); \
(dispatch_ops); \
srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
srcu_read_unlock(__tag_set->srcu, srcu_idx); \
} else { \
rcu_read_lock(); \
(dispatch_ops); \

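The __blk_mq_run_dispatch_ops() change above reads (q)->tag_set into a local __tag_set once, so the srcu_read_unlock() is guaranteed to act on the same structure that srcu_read_lock() used even if the queue is being torn down while dispatch_ops runs. Below is a user-space sketch of that "snapshot the handle before running the callback" idea; a pthread rwlock stands in for SRCU and none of this is the blk-mq API.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct tag_set { pthread_rwlock_t lock; };
struct queue   { struct tag_set *tag_set; };

static void run_dispatch(struct queue *q, void (*dispatch)(struct queue *))
{
	/* Cache the handle: dispatch() may clear q->tag_set under us. */
	struct tag_set *set = q->tag_set;

	pthread_rwlock_rdlock(&set->lock);
	dispatch(q);
	pthread_rwlock_unlock(&set->lock);	/* same object we locked */
}

static void dispatch_cb(struct queue *q)
{
	q->tag_set = NULL;			/* simulate teardown racing in */
	puts("dispatched");
}

int main(void)
{
	struct tag_set set;
	struct queue q;

	pthread_rwlock_init(&set.lock, NULL);
	q.tag_set = &set;
	run_dispatch(&q, dispatch_cb);
	pthread_rwlock_destroy(&set.lock);
	return 0;
}
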
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only

obj-y += habanalabs/
obj-y += ivpu/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/

@@ -536,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
static struct acpi_table_header *acpi_get_pptt(void)
{
static struct acpi_table_header *pptt;
static bool is_pptt_checked;
acpi_status status;

/*
* PPTT will be used at runtime on every CPU hotplug in path, so we
* don't need to call acpi_put_table() to release the table mapping.
*/
if (!pptt) {
if (!pptt && !is_pptt_checked) {
status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
if (ACPI_FAILURE(status))
acpi_pptt_warn_missing();

is_pptt_checked = true;
}

return pptt;

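The acpi_get_pptt() hunk above adds is_pptt_checked so that a missing PPTT table is probed (and warned about) only once instead of on every CPU hotplug. A small user-space sketch of that "cache even the negative result" pattern; get_table() is a hypothetical lookup, not the ACPI interface.

#include <stdbool.h>
#include <stdio.h>

struct table { int id; };

static struct table *get_table(void)
{
	return NULL;			/* pretend the table is absent */
}

static struct table *get_table_cached(void)
{
	static struct table *cached;
	static bool checked;

	if (!cached && !checked) {
		cached = get_table();
		if (!cached)
			fprintf(stderr, "table missing (reported once)\n");
		checked = true;		/* never retry, never re-warn */
	}
	return cached;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("lookup %d -> %p\n", i, (void *)get_table_cached());
	return 0;
}
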
@@ -716,6 +716,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
{
.callback = video_detect_force_native,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
},
},

/*
* Desktops which falsely report a backlight and which our heuristics

@@ -251,6 +251,7 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4)

static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*

@@ -279,6 +280,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
* need the x86-android-tablets module to properly work.
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
/* Acer Iconia One 7 B1-750 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),

@@ -286,7 +297,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_UART1_TTY_UART2_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Lenovo Yoga Book X90F/L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
.matches = {

@@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Lenovo Yoga Tablet 2 1050F/L */

@@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Whitelabel (sold as various brands) TM800A550L */

@@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
return 0;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);

bool acpi_quirk_skip_gpio_event_handlers(void)
{
const struct dmi_system_id *dmi_id;
long quirks;

dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
if (!dmi_id)
return false;

quirks = (unsigned long)dmi_id->driver_data;
return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
#endif

/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */

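acpi_quirk_skip_gpio_event_handlers(), added above, matches the running machine against the DMI quirk table and then tests the ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS bit packed into the matched entry's driver_data. The sketch below models that flags-through-a-void-pointer lookup in plain user-space C; the table and the string matching are simplified stand-ins for the DMI machinery.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_SKIP_I2C_CLIENTS		(1UL << 0)
#define QUIRK_SKIP_GPIO_EVENT_HANDLERS	(1UL << 4)

struct quirk_entry {
	const char *product;
	void *driver_data;		/* flags smuggled through a void pointer */
};

static const struct quirk_entry quirk_table[] = {
	{ "VESPA2", (void *)(QUIRK_SKIP_I2C_CLIENTS |
			     QUIRK_SKIP_GPIO_EVENT_HANDLERS) },
	{ NULL, NULL },
};

static bool skip_gpio_event_handlers(const char *product)
{
	const struct quirk_entry *e;

	for (e = quirk_table; e->product; e++) {
		if (!strcmp(e->product, product)) {
			unsigned long quirks = (unsigned long)e->driver_data;

			return quirks & QUIRK_SKIP_GPIO_EVENT_HANDLERS;
		}
	}
	return false;
}

int main(void)
{
	printf("VESPA2: %d\n", skip_gpio_event_handlers("VESPA2"));
	printf("OTHER:  %d\n", skip_gpio_event_handlers("OTHER"));
	return 0;
}
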
@@ -381,6 +381,7 @@ static void pata_parport_dev_release(struct device *dev)
{
struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);

ida_free(&pata_parport_bus_dev_ids, dev->id);
kfree(pi);
}

@@ -433,23 +434,27 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
return NULL;

pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
if (!pi)
id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
if (id < 0)
return NULL;

pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
if (!pi) {
ida_free(&pata_parport_bus_dev_ids, id);
return NULL;
}

/* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
pi->dev.parent = &pata_parport_bus;
pi->dev.bus = &pata_parport_bus_type;
pi->dev.driver = &pr->driver;
pi->dev.release = pata_parport_dev_release;
id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
if (id < 0)
return NULL; /* pata_parport_dev_release will do kfree(pi) */
pi->dev.id = id;
dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
if (device_register(&pi->dev)) {
put_device(&pi->dev);
goto out_ida_free;
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}

pi->proto = pr;

@@ -464,8 +469,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
pi->port = parport->base;

par_cb.private = pi;
pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb,
pi->dev.id);
pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
if (!pi->pardev)
goto out_module_put;

@@ -487,12 +491,13 @@ static struct pi_adapter *pi_init_one(struct parport *parport,

pi_connect(pi);
if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
goto out_unreg_parport;
goto out_disconnect;

return pi;

out_unreg_parport:
out_disconnect:
pi_disconnect(pi);
out_unreg_parport:
parport_unregister_device(pi->pardev);
if (pi->proto->release_proto)
pi->proto->release_proto(pi);

@@ -500,8 +505,7 @@ out_module_put:
module_put(pi->proto->owner);
out_unreg_dev:
device_unregister(&pi->dev);
out_ida_free:
ida_free(&pata_parport_bus_dev_ids, pi->dev.id);
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}

@@ -626,8 +630,7 @@ static void pi_remove_one(struct device *dev)
pi_disconnect(pi);
pi_release(pi);
device_unregister(dev);
ida_free(&pata_parport_bus_dev_ids, dev->id);
/* pata_parport_dev_release will do kfree(pi) */
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
}

static ssize_t delete_device_store(struct bus_type *bus, const char *buf,

@@ -643,6 +646,7 @@ static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
}

pi_remove_one(dev);
put_device(dev);
mutex_unlock(&pi_mutex);

return count;

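The pata_parport hunks above move ida_alloc() ahead of the kzalloc() and rework the error paths so that, once the device has been initialized, pata_parport_dev_release() (reached via put_device() or device_unregister()) is the single place that frees both the bus ID and the pi_adapter. Below is a hedged user-space sketch of that ownership scheme, with a toy ID allocator and refcount standing in for the IDA and the driver core.

#include <stdio.h>
#include <stdlib.h>

static int next_id;				/* toy ID allocator */

static int id_alloc(void)   { return next_id++; }
static void id_free(int id) { printf("id %d freed\n", id); }

struct adapter {
	int id;
	int refcount;
};

static void adapter_release(struct adapter *pi)
{
	id_free(pi->id);			/* release owns the ID ... */
	free(pi);				/* ... and the memory */
}

static void adapter_put(struct adapter *pi)
{
	if (--pi->refcount == 0)
		adapter_release(pi);
}

static struct adapter *adapter_create(void)
{
	struct adapter *pi;
	int id = id_alloc();			/* 1. reserve the ID first */

	pi = calloc(1, sizeof(*pi));
	if (!pi) {
		id_free(id);			/* object never took ownership */
		return NULL;
	}

	pi->id = id;				/* 2. hand ownership to the object */
	pi->refcount = 1;
	return pi;				/* later errors: just adapter_put() */
}

int main(void)
{
	struct adapter *pi = adapter_create();

	if (pi)
		adapter_put(pi);		/* frees the ID and the memory together */
	return 0;
}
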
@@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,

static void loop_handle_cmd(struct loop_cmd *cmd)
{
struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
struct request *rq = blk_mq_rq_from_pdu(cmd);
const bool write = op_is_write(req_op(rq));
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
const bool use_aio = cmd->use_aio;

if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
goto failed;
}

if (cmd->blkcg_css)
kthread_associate_blkcg(cmd->blkcg_css);
if (cmd->memcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
old_memcg = set_active_memcg(
mem_cgroup_from_css(cmd->memcg_css));
mem_cgroup_from_css(cmd_memcg_css));

/*
* do_req_filebacked() may call blk_mq_complete_request() synchronously
* or asynchronously if using aio. Hence, do not touch 'cmd' after
* do_req_filebacked() has returned unless we are sure that 'cmd' has
* not yet been completed.
*/
ret = do_req_filebacked(lo, rq);

if (cmd->blkcg_css)
if (cmd_blkcg_css)
kthread_associate_blkcg(NULL);

if (cmd->memcg_css) {
if (cmd_memcg_css) {
set_active_memcg(old_memcg);
css_put(cmd->memcg_css);
css_put(cmd_memcg_css);
}
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
if (!use_aio || ret) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else

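The loop_handle_cmd() hunk above snapshots cmd->blkcg_css, cmd->memcg_css and cmd->use_aio into locals before calling do_req_filebacked(), because an aio-backed request may complete, and free cmd, before that call returns. A user-space sketch of the same "copy what you need before the object can go away" rule; submit() frees its argument to model asynchronous completion, and the names are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	bool use_aio;
	int tag;
};

/* May free @c before returning, like an async completion racing ahead. */
static int submit(struct cmd *c)
{
	int tag = c->tag;

	free(c);
	printf("request %d completed asynchronously\n", tag);
	return 0;
}

static void handle(struct cmd *c)
{
	/* Snapshot everything needed after submit(); 'c' is off-limits then. */
	const bool use_aio = c->use_aio;
	const int tag = c->tag;
	int ret;

	ret = submit(c);

	if (!use_aio || ret)			/* uses the snapshots, not c */
		printf("completing request %d inline\n", tag);
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	c->use_aio = false;
	c->tag = 7;
	handle(c);
	return 0;
}
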
@@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
if (likely(!blk_should_fake_timeout(cmd->rq->q)))
blk_mq_complete_request(cmd->rq);
blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*

@@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
const struct blk_mq_queue_data *bd)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
struct request *rq = bd->rq;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
sector_t nr_sectors = blk_rq_sectors(rq);
sector_t sector = blk_rq_pos(rq);
const bool is_poll = hctx->type == HCTX_TYPE_POLL;

might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

@@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
cmd->fake_timeout = should_timeout_request(bd->rq);
cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q);

blk_mq_start_request(bd->rq);
blk_mq_start_request(rq);

if (should_requeue_request(bd->rq)) {
if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
* driver driven requeue path

@@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
nq->requeue_selection++;
if (nq->requeue_selection & 1)
return BLK_STS_RESOURCE;
else {
blk_mq_requeue_request(bd->rq, true);
return BLK_STS_OK;
}
blk_mq_requeue_request(rq, true);
return BLK_STS_OK;
}

if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&bd->rq->queuelist, &nq->poll_list);
list_add_tail(&rq->queuelist, &nq->poll_list);
spin_unlock(&nq->poll_lock);
return BLK_STS_OK;
}
if (cmd->fake_timeout)
return BLK_STS_OK;

return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
}

static void cleanup_queue(struct nullb_queue *nq)

@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
print_version();

hp = mdesc_grab();
if (!hp)
return -ENODEV;

err = -ENODEV;
if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {

@@ -91,7 +91,7 @@ config COMMON_CLK_RK808
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
depends on REGMAP
select REGMAP
default MFD_HI655X_PMIC
help
This driver supports the hi655x PMIC clock. This

@@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver);

MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
MODULE_LICENSE("GPL");

@@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver);

MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 clock driver");
MODULE_LICENSE("GPL");

@@ -99,4 +99,3 @@ module_platform_driver(of_fixed_mmio_clk_driver);

MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
MODULE_LICENSE("GPL v2");

@@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver);

MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:fsl-sai-clk");

@@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw,
f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;

return (u64)parent_rate * f / (r * od);
return div_u64((u64)parent_rate * f, r * od);
}

static const struct clk_ops k210_pll_ops = {

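The k210 hunk above switches the rate calculation to div_u64(), presumably because an open-coded division of a 64-bit value is lowered to a libgcc helper that the kernel does not provide on 32-bit builds, while div_u64() is always available. The arithmetic itself is just "widen, multiply, then divide", as in this user-space illustration (the f, r and od values are made up for the example; plain '/' is fine outside the kernel).

#include <inttypes.h>
#include <stdio.h>

static uint64_t pll_rate(uint32_t parent_rate, uint32_t f, uint32_t r, uint32_t od)
{
	/* Widen before multiplying so the intermediate cannot overflow 32 bits. */
	return (uint64_t)parent_rate * f / ((uint64_t)r * od);
}

int main(void)
{
	/* 26 MHz reference, f=60, r=1, od=2 -> 780 MHz */
	printf("%" PRIu64 " Hz\n", pll_rate(26000000u, 60, 1, 2));
	return 0;
}
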
@@ -841,5 +841,4 @@ static void __exit hi3559av100_crg_exit(void)
module_exit(hi3559av100_crg_exit);


MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver");

@@ -291,4 +291,3 @@ module_exit(clk_ccc_exit);

MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
MODULE_LICENSE("GPL");

@@ -106,7 +106,8 @@ static void psci_pd_remove(void)
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;

list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
list_for_each_entry_safe_reverse(pd_provider, it,
&psci_pd_providers, link) {
of_genpd_del_provider(pd_provider->node);

genpd = of_genpd_remove_last(pd_provider->node);

@@ -2149,10 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}

edac->sb_irq = platform_get_irq(pdev, 0);
if (edac->sb_irq < 0) {
dev_err(&pdev->dev, "No SBERR IRQ resource\n");
if (edac->sb_irq < 0)
return edac->sb_irq;
}

irq_set_chained_handler_and_data(edac->sb_irq,
altr_edac_a10_irq_handler,

@@ -2184,10 +2182,9 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}
#else
edac->db_irq = platform_get_irq(pdev, 1);
if (edac->db_irq < 0) {
dev_err(&pdev->dev, "No DBERR IRQ resource\n");
if (edac->db_irq < 0)
return edac->db_irq;
}

irq_set_chained_handler_and_data(edac->db_irq,
altr_edac_a10_irq_handler, edac);
#endif

@@ -2226,6 +2223,5 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");

File diff suppressed because it is too large

@@ -273,25 +273,6 @@

#define UMC_SDP_INIT BIT(31)

enum amd_families {
K8_CPUS = 0,
F10_CPUS,
F15_CPUS,
F15_M30H_CPUS,
F15_M60H_CPUS,
F16_CPUS,
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M60H_CPUS,
F17_M70H_CPUS,
F19_CPUS,
F19_M10H_CPUS,
F19_M50H_CPUS,
NUM_FAMILIES,
};

/* Error injection control structure */
struct error_injection {
u32 section;

@@ -334,6 +315,16 @@ struct amd64_umc {
enum mem_type dram_type;
};

struct amd64_family_flags {
/*
* Indicates that the system supports the new register offsets, etc.
* first introduced with Family 19h Model 10h.
*/
__u64 zn_regs_v2 : 1,

__reserved : 63;
};

struct amd64_pvt {
struct low_ops *ops;

@@ -375,6 +366,12 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;

const char *ctl_name;
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;

struct amd64_family_flags flags;
/* place to store error injection parameters prior to issue */
struct error_injection injection;

@@ -465,29 +462,15 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *);
int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr);
};

struct amd64_family_flags {
/*
* Indicates that the system supports the new register offsets, etc.
* first introduced with Family 19h Model 10h.
*/
__u64 zn_regs_v2 : 1,

__reserved : 63;
};

struct amd64_family_type {
const char *ctl_name;
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
struct low_ops ops;
void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *err);
int (*dbam_to_cs)(struct amd64_pvt *pvt, u8 dct,
unsigned int cs_mode, int cs_mask_nr);
int (*hw_info_get)(struct amd64_pvt *pvt);
bool (*ecc_enabled)(struct amd64_pvt *pvt);
void (*setup_mci_misc_attrs)(struct mem_ctl_info *mci);
void (*dump_misc_regs)(struct amd64_pvt *pvt);
void (*get_err_info)(struct mce *m, struct err_info *err);
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,

@@ -593,5 +593,5 @@ module_init(amd8111_edac_init);
module_exit(amd8111_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");

@@ -354,5 +354,5 @@ module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");

@@ -1462,7 +1462,7 @@ module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

module_param(force_function_unhide, int, 0444);

@@ -596,8 +596,7 @@ module_init(e7xxx_init);
module_exit(e7xxx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
"Based on.work by Dan Hollis et al");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

@@ -1573,13 +1573,10 @@ module_init(i5000_init);
module_exit(i5000_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR
("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
I5000_REVISION);
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " I5000_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(misc_messages, int, 0444);
MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");

@@ -909,7 +909,7 @@ static void i5100_do_inject(struct mem_ctl_info *mci)
*
* The injection code don't work without setting this register.
* The register needs to be flipped off then on else the hardware
* will only preform the first injection.
* will only perform the first injection.
*
* Stop condition bits 7:4
* 1010 - Stop after one injection

@@ -1220,6 +1220,5 @@ module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR
("Arthur Jones <ajones@riverbed.com>");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");

@@ -355,8 +355,7 @@ module_init(i82860_init);
module_exit(i82860_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
"Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");

module_param(edac_op_state, int, 0444);

@@ -72,5 +72,4 @@ module_exit(fsl_ddr_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC Error Reporting state: 0=Poll, 2=Interrupt");
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");

@@ -711,5 +711,4 @@ module_exit(mpc85xx_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC Error Reporting state: 0=Poll, 2=Interrupt");
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");

@@ -415,8 +415,7 @@ module_init(r82600_init);
module_exit(r82600_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
"on behalf of EADS Astrium");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");

module_param(disable_hardware_scrub, bool, 0644);

@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
}

/* Add new entry if not present */
feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;

@@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;

if (acpi_quirk_skip_gpio_event_handlers())
return;

acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);

@@ -4145,8 +4145,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");

drm_kms_helper_poll_disable(dev);

if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);

@@ -4243,8 +4241,6 @@ exit:
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);

drm_kms_helper_poll_enable(dev);

amdgpu_ras_resume(adev);

if (adev->mode_info.num_crtc) {

@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int r;

drm_kms_helper_poll_disable(dev);

/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);

@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)

drm_modeset_unlock_all(dev);

drm_kms_helper_poll_enable(dev);

return 0;
}

@@ -26,6 +26,7 @@

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;

/*
* Some Steam Deck's BIOS versions are incompatible with the
* indirect SRAM mode, leading to amdgpu being unable to get
* properly probed (and even potentially crashing the kernel).
* Hence, check for these versions here - notice this is
* restricted to Vangogh (Deck's APU).
*/
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);

if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
!strncmp("F7A0114", bios_ver, 7))) {
adev->vcn.indirect_sram = false;
dev_info(adev->dev,
"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}

hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
/* Indirect Reg Access enabled */
AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
/* AV1 Support MODE*/
AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
};

enum AMDGIM_REG_ACCESS_FLAG {

@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
#define amdgpu_sriov_is_av1_support(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,

@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
uint32_t reserved : 26;
uint32_t av1_support : 1;
uint32_t reserved : 25;
} flags;
uint32_t all;
};

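The final hunk adds an av1_support bit to amd_sriov_msg_feature_flags and shrinks reserved from 26 to 25 bits, so the union stays exactly 32 bits wide. The sketch below checks that invariant at compile time; the leading bits that are not visible in the hunk are collapsed into a single placeholder field, so only the overall layout rule is being demonstrated.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

union feature_flags {
	struct {
		uint32_t earlier_bits      : 3;	/* bits not shown in the hunk */
		uint32_t mm_bw_management  : 1;
		uint32_t pp_one_vf_mode    : 1;
		uint32_t reg_indirect_acc  : 1;
		uint32_t av1_support       : 1;	/* new bit ... */
		uint32_t reserved          : 25; /* ... taken from reserved (26 -> 25) */
	} flags;
	uint32_t all;
};

_Static_assert(sizeof(union feature_flags) == sizeof(uint32_t),
	       "flag layout must stay 32 bits wide");

int main(void)
{
	union feature_flags f = { .all = 0 };

	f.flags.av1_support = 1;
	printf("raw word: 0x%08" PRIx32 "\n", f.all);
	return 0;
}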