mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

commit d3df65c198
Merge branch 'perf/urgent' into perf/core, before pulling new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -43,7 +43,7 @@ o udev 081 # udevd --version
o grub 0.93 # grub --version || grub-install --version
o mcelog 0.6 # mcelog --version
o iptables 1.4.2 # iptables -V
o openssl & libcrypto 1.0.1k # openssl version
o openssl & libcrypto 1.0.0 # openssl version

Kernel compilation
@@ -25,7 +25,7 @@ Example:
	/* Cypress Gen3 touchpad */
	touchpad@67 {
		compatible = "cypress,cyapa";
		reg = <0x24>;
		reg = <0x67>;
		interrupt-parent = <&gpio>;
		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
		wakeup-source;
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
   ABS_MT_POSITION_X := T_X
   ABS_MT_POSITION_Y := T_Y
   ABS_MT_TOOL_X := C_X
   ABS_MT_TOOL_X := C_Y
   ABS_MT_TOOL_Y := C_Y

Unfortunately, there is not enough information to specify both the touching
ellipse and the tool ellipse, so one has to resort to approximations. One
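Not part of the patch: as a rough sketch of the T/C mapping quoted above, a hypothetical driver holding per-contact touch centres (t_x, t_y) and tool centres (c_x, c_y) could report each slot as below. The struct my_contact type and function name are illustrative only.

	#include <linux/input/mt.h>

	struct my_contact { int t_x, t_y, c_x, c_y; };	/* illustrative only */

	static void my_report_contact(struct input_dev *input, int slot,
				      const struct my_contact *c)
	{
		input_mt_slot(input, slot);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, c->t_x);	/* T centre */
		input_report_abs(input, ABS_MT_POSITION_Y, c->t_y);
		input_report_abs(input, ABS_MT_TOOL_X, c->c_x);		/* C centre */
		input_report_abs(input, ABS_MT_TOOL_Y, c->c_y);
	}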
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
(alternatively, the runtime_suspend() callback will have to check if the
device should really be suspended and return -EAGAIN if that is not the case).

The runtime PM of PCI devices is disabled by default. It is also blocked by
pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI
driver implements the runtime PM callbacks and intends to use the runtime PM
framework provided by the PM core and the PCI subsystem, it should enable this
feature by executing the pm_runtime_enable() helper function. However, the
driver should not call the pm_runtime_allow() helper function unblocking
the runtime PM of the device. Instead, it should allow user space or some
platform-specific code to do that (user space can do it via sysfs), although
once it has called pm_runtime_enable(), it must be prepared to handle the
The runtime PM of PCI devices is enabled by default by the PCI core. PCI
device drivers do not need to enable it and should not attempt to do so.
However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
helper function. In addition to that, the runtime PM usage counter of
each PCI device is incremented by local_pci_probe() before executing the
probe callback provided by the device's driver.

If a PCI driver implements the runtime PM callbacks and intends to use the
runtime PM framework provided by the PM core and the PCI subsystem, it needs
to decrement the device's runtime PM usage counter in its probe callback
function. If it doesn't do that, the counter will always be different from
zero for the device and it will never be runtime-suspended. The simplest
way to do that is by calling pm_runtime_put_noidle(), but if the driver
wants to schedule an autosuspend right away, for example, it may call
pm_runtime_put_autosuspend() instead for this purpose. Generally, it
just needs to call a function that decrements the device's usage counter
from its probe routine to make runtime PM work for the device.

It is important to remember that the driver's runtime_suspend() callback
may be executed right after the usage counter has been decremented, because
user space may already have caused the pm_runtime_allow() helper function
unblocking the runtime PM of the device to run via sysfs, so the driver must
be prepared to cope with that.

The driver itself should not call pm_runtime_allow(), though. Instead, it
should let user space or some platform-specific code do that (user space can
do it via sysfs as stated above), but it must be prepared to handle the
runtime PM of the device correctly as soon as pm_runtime_allow() is called
(which may happen at any time). [It also is possible that user space causes
pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
fact the driver has to be prepared to handle the runtime PM of the device as
soon as it calls pm_runtime_enable().]
(which may happen at any time, even before the driver is loaded).

When the driver's remove callback runs, it has to balance the decrementation
of the device's runtime PM usage counter at probe time. For this reason,
if it has decremented the counter in its probe callback, it must run
pm_runtime_get_noresume() in its remove callback. [Since the core carries
out a runtime resume of the device and bumps up the device's usage counter
before running the driver's remove callback, the runtime PM of the device
is effectively disabled for the duration of the remove execution and all
runtime PM helper functions incrementing the device's usage counter are
then effectively equivalent to pm_runtime_get_noresume().]
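Not part of the patch: a minimal sketch of the probe/remove balancing described above, for a hypothetical PCI driver. The driver and callback names are illustrative, and pcim_enable_device() is shown only as a typical enable step.

	#include <linux/pci.h>
	#include <linux/pm_runtime.h>

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret = pcim_enable_device(pdev);	/* typical enable step */

		if (ret)
			return ret;

		/*
		 * local_pci_probe() incremented the usage counter before
		 * calling us; drop that reference so the device can be
		 * runtime-suspended once pm_runtime_allow() is run (for
		 * example by user space via sysfs).
		 */
		pm_runtime_put_noidle(&pdev->dev);
		return 0;
	}

	static void my_remove(struct pci_dev *pdev)
	{
		/* Balance the pm_runtime_put_noidle() done in my_probe(). */
		pm_runtime_get_noresume(&pdev->dev);
	}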

The runtime PM framework works by processing requests to suspend or resume
devices, or to check if they are idle (in which cases it is reasonable to
@@ -18,6 +18,7 @@
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__	/* For PPC64, to get LL64 types */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
@@ -5957,7 +5957,7 @@ F: virt/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
M: Joerg Roedel <joro@8bytes.org>
L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
W: http://www.linux-kvm.org/
S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
@@ -5965,7 +5965,7 @@ F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
M: Alexander Graf <agraf@suse.com>
L: kvm-ppc@vger.kernel.org
W: http://kvm.qumranet.com
W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
S: Supported
F: arch/powerpc/include/asm/kvm*
Makefile

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep

# *DOCUMENTATION*
@@ -48,4 +48,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
	 */
	if (!is_normal_ram(md))
		prot = __pgprot(PROT_DEVICE_nGnRE);
	else if (md->type == EFI_RUNTIME_SERVICES_CODE)
	else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		 !PAGE_ALIGNED(md->phys_addr))
		prot = PAGE_KERNEL_EXEC;
	else
		prot = PAGE_KERNEL;
@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
|
||||
ENDPROC(ftrace_stub)
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
/* save return value regs*/
|
||||
.macro save_return_regs
|
||||
sub sp, sp, #64
|
||||
stp x0, x1, [sp]
|
||||
stp x2, x3, [sp, #16]
|
||||
stp x4, x5, [sp, #32]
|
||||
stp x6, x7, [sp, #48]
|
||||
.endm
|
||||
|
||||
/* restore return value regs*/
|
||||
.macro restore_return_regs
|
||||
ldp x0, x1, [sp]
|
||||
ldp x2, x3, [sp, #16]
|
||||
ldp x4, x5, [sp, #32]
|
||||
ldp x6, x7, [sp, #48]
|
||||
add sp, sp, #64
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void ftrace_graph_caller(void)
|
||||
*
|
||||
@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
|
||||
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
|
||||
*/
|
||||
ENTRY(return_to_handler)
|
||||
str x0, [sp, #-16]!
|
||||
save_return_regs
|
||||
mov x0, x29 // parent's fp
|
||||
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
|
||||
mov x30, x0 // restore the original return address
|
||||
ldr x0, [sp], #16
|
||||
restore_return_regs
|
||||
ret
|
||||
END(return_to_handler)
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
@ -20,4 +20,5 @@ generic-y += sections.h
|
||||
generic-y += topology.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -46,4 +46,5 @@ generic-y += types.h
|
||||
generic-y += ucontext.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += user.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -59,4 +59,5 @@ generic-y += types.h
|
||||
generic-y += ucontext.h
|
||||
generic-y += user.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -43,4 +43,5 @@ generic-y += topology.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += types.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += preempt.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -58,4 +58,5 @@ generic-y += types.h
|
||||
generic-y += ucontext.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
|
||||
generic-y += preempt.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += vtime.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -9,3 +9,4 @@ generic-y += module.h
|
||||
generic-y += preempt.h
|
||||
generic-y += sections.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -344,6 +349,7 @@ CONFIG_VETH=m
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -355,6 +360,7 @@ CONFIG_NE2000=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
CONFIG_SMC91X=y
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -364,6 +369,7 @@ CONFIG_MAC8390=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
CONFIG_SMC91X=y
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PLIP=m
|
||||
@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -354,6 +359,7 @@ CONFIG_NE2000=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_SMSC is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PLIP=m
|
||||
@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SUN is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
CONFIG_CRYPTO_MANAGER=y
|
||||
|
@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
|
||||
# CONFIG_PID_NS is not set
|
||||
# CONFIG_NET_NS is not set
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_USERFAULTFD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
|
||||
CONFIG_NET_IPGRE=m
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_NET_FOU_IP_TUNNELS=y
|
||||
CONFIG_GENEVE_CORE=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
CONFIG_INET_IPCOMP=m
|
||||
@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
|
||||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_INET_DIAG=m
|
||||
CONFIG_INET_UDP_DIAG=m
|
||||
CONFIG_IPV6=m
|
||||
CONFIG_IPV6_ROUTER_PREF=y
|
||||
CONFIG_INET6_AH=m
|
||||
CONFIG_INET6_ESP=m
|
||||
CONFIG_INET6_IPCOMP=m
|
||||
CONFIG_IPV6_ILA=m
|
||||
CONFIG_IPV6_VTI=m
|
||||
CONFIG_IPV6_GRE=m
|
||||
CONFIG_NETFILTER=y
|
||||
@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
|
||||
CONFIG_IP_SET_LIST_SET=m
|
||||
CONFIG_NF_CONNTRACK_IPV4=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
|
||||
CONFIG_NFT_DUP_IPV4=m
|
||||
CONFIG_NF_TABLES_ARP=m
|
||||
CONFIG_NF_LOG_ARP=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV4=m
|
||||
@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NF_CONNTRACK_IPV6=m
|
||||
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
|
||||
CONFIG_NFT_DUP_IPV6=m
|
||||
CONFIG_NFT_CHAIN_NAT_IPV6=m
|
||||
CONFIG_NFT_MASQ_IPV6=m
|
||||
CONFIG_NFT_REDIR_IPV6=m
|
||||
@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
|
||||
CONFIG_MPLS=y
|
||||
CONFIG_NET_MPLS_GSO=m
|
||||
CONFIG_MPLS_ROUTING=m
|
||||
CONFIG_MPLS_IPTUNNEL=m
|
||||
# CONFIG_WIRELESS is not set
|
||||
# CONFIG_UEVENT_HELPER is not set
|
||||
CONFIG_DEVTMPFS=y
|
||||
@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
|
||||
# CONFIG_NET_VENDOR_SAMSUNG is not set
|
||||
# CONFIG_NET_VENDOR_SEEQ is not set
|
||||
# CONFIG_NET_VENDOR_STMICRO is not set
|
||||
# CONFIG_NET_VENDOR_SYNOPSYS is not set
|
||||
# CONFIG_NET_VENDOR_VIA is not set
|
||||
# CONFIG_NET_VENDOR_WIZNET is not set
|
||||
CONFIG_PPP=m
|
||||
@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_FIRMWARE=m
|
||||
CONFIG_TEST_UDELAY=m
|
||||
CONFIG_TEST_STATIC_KEYS=m
|
||||
CONFIG_EARLY_PRINTK=y
|
||||
CONFIG_ENCRYPTED_KEYS=m
|
||||
CONFIG_CRYPTO_RSA=m
|
||||
|
@ -4,4 +4,34 @@
|
||||
#define __ALIGN .align 4
|
||||
#define __ALIGN_STR ".align 4"
|
||||
|
||||
/*
|
||||
* Make sure the compiler doesn't do anything stupid with the
|
||||
* arguments on the stack - they are owned by the *caller*, not
|
||||
* the callee. This just fools gcc into not spilling into them,
|
||||
* and keeps it from doing tailcall recursion and/or using the
|
||||
* stack slots for temporaries, since they are live and "used"
|
||||
* all the way to the end of the function.
|
||||
*/
|
||||
#define asmlinkage_protect(n, ret, args...) \
|
||||
__asmlinkage_protect##n(ret, ##args)
|
||||
#define __asmlinkage_protect_n(ret, args...) \
|
||||
__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
|
||||
#define __asmlinkage_protect0(ret) \
|
||||
__asmlinkage_protect_n(ret)
|
||||
#define __asmlinkage_protect1(ret, arg1) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1))
|
||||
#define __asmlinkage_protect2(ret, arg1, arg2) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
|
||||
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
|
||||
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
|
||||
"m" (arg4))
|
||||
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
|
||||
"m" (arg4), "m" (arg5))
|
||||
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
|
||||
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
|
||||
"m" (arg4), "m" (arg5), "m" (arg6))
|
||||
|
||||
#endif
|
||||
|
@ -4,7 +4,7 @@
|
||||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define NR_syscalls 356
|
||||
#define NR_syscalls 375
|
||||
|
||||
#define __ARCH_WANT_OLD_READDIR
|
||||
#define __ARCH_WANT_OLD_STAT
|
||||
|
@ -361,5 +361,24 @@
|
||||
#define __NR_memfd_create 353
|
||||
#define __NR_bpf 354
|
||||
#define __NR_execveat 355
|
||||
#define __NR_socket 356
|
||||
#define __NR_socketpair 357
|
||||
#define __NR_bind 358
|
||||
#define __NR_connect 359
|
||||
#define __NR_listen 360
|
||||
#define __NR_accept4 361
|
||||
#define __NR_getsockopt 362
|
||||
#define __NR_setsockopt 363
|
||||
#define __NR_getsockname 364
|
||||
#define __NR_getpeername 365
|
||||
#define __NR_sendto 366
|
||||
#define __NR_sendmsg 367
|
||||
#define __NR_recvfrom 368
|
||||
#define __NR_recvmsg 369
|
||||
#define __NR_shutdown 370
|
||||
#define __NR_recvmmsg 371
|
||||
#define __NR_sendmmsg 372
|
||||
#define __NR_userfaultfd 373
|
||||
#define __NR_membarrier 374
|
||||
|
||||
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
|
||||
|
@ -376,4 +376,22 @@ ENTRY(sys_call_table)
|
||||
.long sys_memfd_create
|
||||
.long sys_bpf
|
||||
.long sys_execveat /* 355 */
|
||||
|
||||
.long sys_socket
|
||||
.long sys_socketpair
|
||||
.long sys_bind
|
||||
.long sys_connect
|
||||
.long sys_listen /* 360 */
|
||||
.long sys_accept4
|
||||
.long sys_getsockopt
|
||||
.long sys_setsockopt
|
||||
.long sys_getsockname
|
||||
.long sys_getpeername /* 365 */
|
||||
.long sys_sendto
|
||||
.long sys_sendmsg
|
||||
.long sys_recvfrom
|
||||
.long sys_recvmsg
|
||||
.long sys_shutdown /* 370 */
|
||||
.long sys_recvmmsg
|
||||
.long sys_sendmmsg
|
||||
.long sys_userfaultfd
|
||||
.long sys_membarrier
|
||||
|
@ -54,4 +54,5 @@ generic-y += ucontext.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += user.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
|
||||
generic-y += preempt.h
|
||||
generic-y += syscalls.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
|
||||
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
|
||||
&& (total < MAX_MEMORY)) {
|
||||
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
|
||||
__pa_symbol(&__init_end), -1,
|
||||
__pa_symbol(&_end), -1,
|
||||
0x100000,
|
||||
CVMX_BOOTMEM_FLAG_NO_LOCKING);
|
||||
if (memory >= 0) {
|
||||
|
@ -17,4 +17,5 @@ generic-y += segment.h
|
||||
generic-y += serial.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += user.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -377,16 +377,18 @@
|
||||
#define __NR_memfd_create (__NR_Linux + 354)
|
||||
#define __NR_bpf (__NR_Linux + 355)
|
||||
#define __NR_execveat (__NR_Linux + 356)
|
||||
#define __NR_userfaultfd (__NR_Linux + 357)
|
||||
#define __NR_membarrier (__NR_Linux + 358)
|
||||
|
||||
/*
|
||||
* Offset of the last Linux o32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 356
|
||||
#define __NR_Linux_syscalls 358
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
|
||||
|
||||
#define __NR_O32_Linux 4000
|
||||
#define __NR_O32_Linux_syscalls 356
|
||||
#define __NR_O32_Linux_syscalls 358
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI64
|
||||
|
||||
@ -711,16 +713,18 @@
|
||||
#define __NR_memfd_create (__NR_Linux + 314)
|
||||
#define __NR_bpf (__NR_Linux + 315)
|
||||
#define __NR_execveat (__NR_Linux + 316)
|
||||
#define __NR_userfaultfd (__NR_Linux + 317)
|
||||
#define __NR_membarrier (__NR_Linux + 318)
|
||||
|
||||
/*
|
||||
* Offset of the last Linux 64-bit flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 316
|
||||
#define __NR_Linux_syscalls 318
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
|
||||
|
||||
#define __NR_64_Linux 5000
|
||||
#define __NR_64_Linux_syscalls 316
|
||||
#define __NR_64_Linux_syscalls 318
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_NABI32
|
||||
|
||||
@ -1049,15 +1053,17 @@
|
||||
#define __NR_memfd_create (__NR_Linux + 318)
|
||||
#define __NR_bpf (__NR_Linux + 319)
|
||||
#define __NR_execveat (__NR_Linux + 320)
|
||||
#define __NR_userfaultfd (__NR_Linux + 321)
|
||||
#define __NR_membarrier (__NR_Linux + 322)
|
||||
|
||||
/*
|
||||
* Offset of the last N32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 320
|
||||
#define __NR_Linux_syscalls 322
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
|
||||
|
||||
#define __NR_N32_Linux 6000
|
||||
#define __NR_N32_Linux_syscalls 320
|
||||
#define __NR_N32_Linux_syscalls 322
|
||||
|
||||
#endif /* _UAPI_ASM_UNISTD_H */
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <linux/power/jz4740-battery.h>
|
||||
#include <linux/power/gpio-charger.h>
|
||||
|
||||
#include <asm/mach-jz4740/gpio.h>
|
||||
#include <asm/mach-jz4740/jz4740_fb.h>
|
||||
#include <asm/mach-jz4740/jz4740_mmc.h>
|
||||
#include <asm/mach-jz4740/jz4740_nand.h>
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/mach-jz4740/base.h>
|
||||
#include <asm/mach-jz4740/gpio.h>
|
||||
|
||||
#define JZ4740_GPIO_BASE_A (32*0)
|
||||
#define JZ4740_GPIO_BASE_B (32*1)
|
||||
|
@ -39,6 +39,7 @@
|
||||
mfc0 \dest, CP0_CONFIG, 3
|
||||
andi \dest, \dest, MIPS_CONF3_MT
|
||||
beqz \dest, \nomt
|
||||
nop
|
||||
.endm
|
||||
|
||||
.section .text.cps-vec
|
||||
@ -223,10 +224,9 @@ LEAF(excep_ejtag)
|
||||
END(excep_ejtag)
|
||||
|
||||
LEAF(mips_cps_core_init)
|
||||
#ifdef CONFIG_MIPS_MT
|
||||
#ifdef CONFIG_MIPS_MT_SMP
|
||||
/* Check that the core implements the MT ASE */
|
||||
has_mt t0, 3f
|
||||
nop
|
||||
|
||||
.set push
|
||||
.set mips64r2
|
||||
@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
|
||||
PTR_ADDU t0, t0, t1
|
||||
|
||||
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
|
||||
li t9, 0
|
||||
#ifdef CONFIG_MIPS_MT_SMP
|
||||
has_mt ta2, 1f
|
||||
li t9, 0
|
||||
|
||||
/* Find the number of VPEs present in the core */
|
||||
mfc0 t1, CP0_MVPCONF0
|
||||
@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
|
||||
/* Retrieve the VPE ID from EBase.CPUNum */
|
||||
mfc0 t9, $15, 1
|
||||
and t9, t9, t1
|
||||
#endif
|
||||
|
||||
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
|
||||
li t1, VPEBOOTCFG_SIZE
|
||||
@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
|
||||
PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
|
||||
PTR_ADDU v0, v0, ta3
|
||||
|
||||
#ifdef CONFIG_MIPS_MT
|
||||
#ifdef CONFIG_MIPS_MT_SMP
|
||||
|
||||
/* If the core doesn't support MT then return */
|
||||
bnez ta2, 1f
|
||||
@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
|
||||
|
||||
2: .set pop
|
||||
|
||||
#endif /* CONFIG_MIPS_MT */
|
||||
#endif /* CONFIG_MIPS_MT_SMP */
|
||||
|
||||
/* Return */
|
||||
jr ra
|
||||
|
@ -18,7 +18,7 @@
|
||||
.set pop
|
||||
/*
|
||||
* task_struct *resume(task_struct *prev, task_struct *next,
|
||||
* struct thread_info *next_ti, int usedfpu)
|
||||
* struct thread_info *next_ti)
|
||||
*/
|
||||
.align 7
|
||||
LEAF(resume)
|
||||
@ -28,30 +28,6 @@
|
||||
cpu_save_nonscratch a0
|
||||
LONG_S ra, THREAD_REG31(a0)
|
||||
|
||||
/*
|
||||
* check if we need to save FPU registers
|
||||
*/
|
||||
.set push
|
||||
.set noreorder
|
||||
beqz a3, 1f
|
||||
PTR_L t3, TASK_THREAD_INFO(a0)
|
||||
.set pop
|
||||
|
||||
/*
|
||||
* clear saved user stack CU1 bit
|
||||
*/
|
||||
LONG_L t0, ST_OFF(t3)
|
||||
li t1, ~ST0_CU1
|
||||
and t0, t0, t1
|
||||
LONG_S t0, ST_OFF(t3)
|
||||
|
||||
.set push
|
||||
.set arch=mips64r2
|
||||
fpu_save_double a0 t0 t1 # c0_status passed in t0
|
||||
# clobbers t1
|
||||
.set pop
|
||||
1:
|
||||
|
||||
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
|
||||
/* Check if we need to store CVMSEG state */
|
||||
dmfc0 t0, $11,7 /* CvmMemCtl */
|
||||
|
@ -30,19 +30,9 @@
|
||||
*/
|
||||
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
|
||||
|
||||
/*
|
||||
* FPU context is saved iff the process has used it's FPU in the current
|
||||
* time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
|
||||
* space STATUS register should be 0, so that a process *always* starts its
|
||||
* userland with FPU disabled after each context switch.
|
||||
*
|
||||
* FPU will be enabled as soon as the process accesses FPU again, through
|
||||
* do_cpu() trap.
|
||||
*/
|
||||
|
||||
/*
|
||||
* task_struct *resume(task_struct *prev, task_struct *next,
|
||||
* struct thread_info *next_ti, int usedfpu)
|
||||
* struct thread_info *next_ti)
|
||||
*/
|
||||
LEAF(resume)
|
||||
mfc0 t1, CP0_STATUS
|
||||
@ -50,22 +40,6 @@ LEAF(resume)
|
||||
cpu_save_nonscratch a0
|
||||
sw ra, THREAD_REG31(a0)
|
||||
|
||||
beqz a3, 1f
|
||||
|
||||
PTR_L t3, TASK_THREAD_INFO(a0)
|
||||
|
||||
/*
|
||||
* clear saved user stack CU1 bit
|
||||
*/
|
||||
lw t0, ST_OFF(t3)
|
||||
li t1, ~ST0_CU1
|
||||
and t0, t0, t1
|
||||
sw t0, ST_OFF(t3)
|
||||
|
||||
fpu_save_single a0, t0 # clobbers t0
|
||||
|
||||
1:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
|
@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
|
||||
lw t1, PT_EPC(sp) # skip syscall on return
|
||||
|
||||
subu v0, v0, __NR_O32_Linux # check syscall number
|
||||
sltiu t0, v0, __NR_O32_Linux_syscalls + 1
|
||||
addiu t1, 4 # skip to next instruction
|
||||
sw t1, PT_EPC(sp)
|
||||
beqz t0, illegal_syscall
|
||||
|
||||
sll t0, v0, 2
|
||||
la t1, sys_call_table
|
||||
addu t1, t0
|
||||
lw t2, (t1) # syscall routine
|
||||
beqz t2, illegal_syscall
|
||||
|
||||
sw a3, PT_R26(sp) # save a3 for syscall restarting
|
||||
|
||||
@ -96,6 +88,16 @@ loads_done:
|
||||
li t1, _TIF_WORK_SYSCALL_ENTRY
|
||||
and t0, t1
|
||||
bnez t0, syscall_trace_entry # -> yes
|
||||
syscall_common:
|
||||
sltiu t0, v0, __NR_O32_Linux_syscalls + 1
|
||||
beqz t0, illegal_syscall
|
||||
|
||||
sll t0, v0, 2
|
||||
la t1, sys_call_table
|
||||
addu t1, t0
|
||||
lw t2, (t1) # syscall routine
|
||||
|
||||
beqz t2, illegal_syscall
|
||||
|
||||
jalr t2 # Do The Real Thing (TM)
|
||||
|
||||
@ -116,7 +118,7 @@ o32_syscall_exit:
|
||||
|
||||
syscall_trace_entry:
|
||||
SAVE_STATIC
|
||||
move s0, t2
|
||||
move s0, v0
|
||||
move a0, sp
|
||||
|
||||
/*
|
||||
@ -129,27 +131,18 @@ syscall_trace_entry:
|
||||
|
||||
1: jal syscall_trace_enter
|
||||
|
||||
bltz v0, 2f # seccomp failed? Skip syscall
|
||||
bltz v0, 1f # seccomp failed? Skip syscall
|
||||
|
||||
move v0, s0 # restore syscall
|
||||
|
||||
move t0, s0
|
||||
RESTORE_STATIC
|
||||
lw a0, PT_R4(sp) # Restore argument registers
|
||||
lw a1, PT_R5(sp)
|
||||
lw a2, PT_R6(sp)
|
||||
lw a3, PT_R7(sp)
|
||||
jalr t0
|
||||
j syscall_common
|
||||
|
||||
li t0, -EMAXERRNO - 1 # error?
|
||||
sltu t0, t0, v0
|
||||
sw t0, PT_R7(sp) # set error flag
|
||||
beqz t0, 1f
|
||||
|
||||
lw t1, PT_R2(sp) # syscall number
|
||||
negu v0 # error
|
||||
sw t1, PT_R0(sp) # save it for syscall restarting
|
||||
1: sw v0, PT_R2(sp) # result
|
||||
|
||||
2: j syscall_exit
|
||||
1: j syscall_exit
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
@ -599,3 +592,5 @@ EXPORT(sys_call_table)
|
||||
PTR sys_memfd_create
|
||||
PTR sys_bpf /* 4355 */
|
||||
PTR sys_execveat
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
|
@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
|
||||
.set at
|
||||
#endif
|
||||
|
||||
dsubu t0, v0, __NR_64_Linux # check syscall number
|
||||
sltiu t0, t0, __NR_64_Linux_syscalls + 1
|
||||
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
|
||||
ld t1, PT_EPC(sp) # skip syscall on return
|
||||
daddiu t1, 4 # skip to next instruction
|
||||
sd t1, PT_EPC(sp)
|
||||
#endif
|
||||
beqz t0, illegal_syscall
|
||||
|
||||
dsll t0, v0, 3 # offset into table
|
||||
ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
|
||||
# syscall routine
|
||||
|
||||
sd a3, PT_R26(sp) # save a3 for syscall restarting
|
||||
|
||||
@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
|
||||
and t0, t1, t0
|
||||
bnez t0, syscall_trace_entry
|
||||
|
||||
syscall_common:
|
||||
dsubu t2, v0, __NR_64_Linux
|
||||
sltiu t0, t2, __NR_64_Linux_syscalls + 1
|
||||
beqz t0, illegal_syscall
|
||||
|
||||
dsll t0, t2, 3 # offset into table
|
||||
dla t2, sys_call_table
|
||||
daddu t0, t2, t0
|
||||
ld t2, (t0) # syscall routine
|
||||
beqz t2, illegal_syscall
|
||||
|
||||
jalr t2 # Do The Real Thing (TM)
|
||||
|
||||
li t0, -EMAXERRNO - 1 # error?
|
||||
@ -78,14 +82,14 @@ n64_syscall_exit:
|
||||
|
||||
syscall_trace_entry:
|
||||
SAVE_STATIC
|
||||
move s0, t2
|
||||
move s0, v0
|
||||
move a0, sp
|
||||
move a1, v0
|
||||
jal syscall_trace_enter
|
||||
|
||||
bltz v0, 2f # seccomp failed? Skip syscall
|
||||
bltz v0, 1f # seccomp failed? Skip syscall
|
||||
|
||||
move t0, s0
|
||||
move v0, s0
|
||||
RESTORE_STATIC
|
||||
ld a0, PT_R4(sp) # Restore argument registers
|
||||
ld a1, PT_R5(sp)
|
||||
@ -93,19 +97,9 @@ syscall_trace_entry:
|
||||
ld a3, PT_R7(sp)
|
||||
ld a4, PT_R8(sp)
|
||||
ld a5, PT_R9(sp)
|
||||
jalr t0
|
||||
j syscall_common
|
||||
|
||||
li t0, -EMAXERRNO - 1 # error?
|
||||
sltu t0, t0, v0
|
||||
sd t0, PT_R7(sp) # set error flag
|
||||
beqz t0, 1f
|
||||
|
||||
ld t1, PT_R2(sp) # syscall number
|
||||
dnegu v0 # error
|
||||
sd t1, PT_R0(sp) # save it for syscall restarting
|
||||
1: sd v0, PT_R2(sp) # result
|
||||
|
||||
2: j syscall_exit
|
||||
1: j syscall_exit
|
||||
|
||||
illegal_syscall:
|
||||
/* This also isn't a 64-bit syscall, throw an error. */
|
||||
@ -436,4 +430,6 @@ EXPORT(sys_call_table)
|
||||
PTR sys_memfd_create
|
||||
PTR sys_bpf /* 5315 */
|
||||
PTR sys_execveat
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
.size sys_call_table,.-sys_call_table
|
||||
|
@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
|
||||
and t0, t1, t0
|
||||
bnez t0, n32_syscall_trace_entry
|
||||
|
||||
syscall_common:
|
||||
jalr t2 # Do The Real Thing (TM)
|
||||
|
||||
li t0, -EMAXERRNO - 1 # error?
|
||||
@ -75,9 +76,9 @@ n32_syscall_trace_entry:
|
||||
move a1, v0
|
||||
jal syscall_trace_enter
|
||||
|
||||
bltz v0, 2f # seccomp failed? Skip syscall
|
||||
bltz v0, 1f # seccomp failed? Skip syscall
|
||||
|
||||
move t0, s0
|
||||
move t2, s0
|
||||
RESTORE_STATIC
|
||||
ld a0, PT_R4(sp) # Restore argument registers
|
||||
ld a1, PT_R5(sp)
|
||||
@ -85,19 +86,9 @@ n32_syscall_trace_entry:
|
||||
ld a3, PT_R7(sp)
|
||||
ld a4, PT_R8(sp)
|
||||
ld a5, PT_R9(sp)
|
||||
jalr t0
|
||||
j syscall_common
|
||||
|
||||
li t0, -EMAXERRNO - 1 # error?
|
||||
sltu t0, t0, v0
|
||||
sd t0, PT_R7(sp) # set error flag
|
||||
beqz t0, 1f
|
||||
|
||||
ld t1, PT_R2(sp) # syscall number
|
||||
dnegu v0 # error
|
||||
sd t1, PT_R0(sp) # save it for syscall restarting
|
||||
1: sd v0, PT_R2(sp) # result
|
||||
|
||||
2: j syscall_exit
|
||||
1: j syscall_exit
|
||||
|
||||
not_n32_scall:
|
||||
/* This is not an n32 compatibility syscall, pass it on to
|
||||
@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
|
||||
PTR sys_memfd_create
|
||||
PTR sys_bpf
|
||||
PTR compat_sys_execveat /* 6320 */
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
.size sysn32_call_table,.-sysn32_call_table
|
||||
|
@ -87,6 +87,7 @@ loads_done:
and t0, t1, t0
bnez t0, trace_a_syscall

syscall_common:
jalr t2 # Do The Real Thing (TM)

li t0, -EMAXERRNO - 1 # error?

@ -130,9 +131,9 @@ trace_a_syscall:

1: jal syscall_trace_enter

bltz v0, 2f # seccomp failed? Skip syscall
bltz v0, 1f # seccomp failed? Skip syscall

move t0, s0
move t2, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)

@ -142,19 +143,9 @@ trace_a_syscall:
ld a5, PT_R9(sp)
ld a6, PT_R10(sp)
ld a7, PT_R11(sp) # For indirect syscalls
jalr t0
j syscall_common

li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f

ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result

2: j syscall_exit
1: j syscall_exit

/* ------------------------------------------------------------------------ */

@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
PTR compat_sys_execveat
PTR sys_userfaultfd
PTR sys_membarrier
.size sys32_call_table,.-sys32_call_table

@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
|
||||
else
|
||||
#endif
|
||||
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
|
||||
if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
|
||||
if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
|
||||
dma_flag = __GFP_DMA;
|
||||
else
|
||||
#endif
|
||||
|
@ -57,12 +57,13 @@
|
||||
|
||||
LEAF(sk_load_word)
|
||||
is_offset_negative(word)
|
||||
.globl sk_load_word_positive
|
||||
sk_load_word_positive:
|
||||
FEXPORT(sk_load_word_positive)
|
||||
is_offset_in_header(4, word)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
.set reorder
|
||||
lw $r_A, 0(t1)
|
||||
.set noreorder
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
|
||||
wsbh t0, $r_A
|
||||
@ -85,12 +86,13 @@ sk_load_word_positive:
|
||||
|
||||
LEAF(sk_load_half)
|
||||
is_offset_negative(half)
|
||||
.globl sk_load_half_positive
|
||||
sk_load_half_positive:
|
||||
FEXPORT(sk_load_half_positive)
|
||||
is_offset_in_header(2, half)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
.set reorder
|
||||
lh $r_A, 0(t1)
|
||||
.set noreorder
|
||||
#ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
|
||||
wsbh t0, $r_A
|
||||
@ -109,8 +111,7 @@ sk_load_half_positive:
|
||||
|
||||
LEAF(sk_load_byte)
|
||||
is_offset_negative(byte)
|
||||
.globl sk_load_byte_positive
|
||||
sk_load_byte_positive:
|
||||
FEXPORT(sk_load_byte_positive)
|
||||
is_offset_in_header(1, byte)
|
||||
/* Offset within header boundaries */
|
||||
PTR_ADDU t1, $r_skb_data, offset
|
||||
|
@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
|
||||
generic-y += preempt.h
|
||||
generic-y += sections.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -61,4 +61,5 @@ generic-y += types.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += user.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
|
||||
generic-y += preempt.h
|
||||
generic-y += rwsem.h
|
||||
generic-y += vtime.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
|
||||
|
||||
KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
|
||||
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
|
||||
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
|
||||
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
|
||||
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
|
||||
|
||||
|
@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
|
||||
CONFIG_SCSI_DEBUG=m
|
||||
CONFIG_ZFCP=y
|
||||
CONFIG_SCSI_VIRTIO=m
|
||||
CONFIG_SCSI_DH=m
|
||||
CONFIG_SCSI_DH=y
|
||||
CONFIG_SCSI_DH_RDAC=m
|
||||
CONFIG_SCSI_DH_HP_SW=m
|
||||
CONFIG_SCSI_DH_EMC=m
|
||||
|
@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
|
||||
CONFIG_SCSI_DEBUG=m
|
||||
CONFIG_ZFCP=y
|
||||
CONFIG_SCSI_VIRTIO=m
|
||||
CONFIG_SCSI_DH=m
|
||||
CONFIG_SCSI_DH=y
|
||||
CONFIG_SCSI_DH_RDAC=m
|
||||
CONFIG_SCSI_DH_HP_SW=m
|
||||
CONFIG_SCSI_DH_EMC=m
|
||||
|
@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
|
||||
CONFIG_SCSI_DEBUG=m
|
||||
CONFIG_ZFCP=y
|
||||
CONFIG_SCSI_VIRTIO=m
|
||||
CONFIG_SCSI_DH=m
|
||||
CONFIG_SCSI_DH=y
|
||||
CONFIG_SCSI_DH_RDAC=m
|
||||
CONFIG_SCSI_DH_HP_SW=m
|
||||
CONFIG_SCSI_DH_EMC=m
|
||||
|
@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += preempt.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
|
||||
int __node_distance(int a, int b);
|
||||
void numa_update_cpu_topology(void);
|
||||
|
||||
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
|
||||
extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
|
||||
extern int numa_debug_enabled;
|
||||
|
||||
#else
|
||||
|
@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
|
||||
#define cpumask_of_node cpumask_of_node
|
||||
static inline const struct cpumask *cpumask_of_node(int node)
|
||||
{
|
||||
return node_to_cpumask_map[node];
|
||||
return &node_to_cpumask_map[node];
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -176,6 +176,7 @@ int main(void)
|
||||
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
|
||||
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
|
||||
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
|
||||
DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
|
||||
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
|
||||
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
|
||||
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
|
||||
|
@ -733,6 +733,14 @@ ENTRY(psw_idle)
|
||||
stg %r3,__SF_EMPTY(%r15)
|
||||
larl %r1,.Lpsw_idle_lpsw+4
|
||||
stg %r1,__SF_EMPTY+8(%r15)
|
||||
#ifdef CONFIG_SMP
|
||||
larl %r1,smp_cpu_mtid
|
||||
llgf %r1,0(%r1)
|
||||
ltgr %r1,%r1
|
||||
jz .Lpsw_idle_stcctm
|
||||
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
|
||||
.Lpsw_idle_stcctm:
|
||||
#endif
|
||||
STCK __CLOCK_IDLE_ENTER(%r2)
|
||||
stpt __TIMER_IDLE_ENTER(%r2)
|
||||
.Lpsw_idle_lpsw:
|
||||
@ -1159,7 +1167,27 @@ cleanup_critical:
|
||||
jhe 1f
|
||||
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
|
||||
mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
|
||||
1: # account system time going idle
|
||||
1: # calculate idle cycles
|
||||
#ifdef CONFIG_SMP
|
||||
clg %r9,BASED(.Lcleanup_idle_insn)
|
||||
jl 3f
|
||||
larl %r1,smp_cpu_mtid
|
||||
llgf %r1,0(%r1)
|
||||
ltgr %r1,%r1
|
||||
jz 3f
|
||||
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
|
||||
larl %r3,mt_cycles
|
||||
ag %r3,__LC_PERCPU_OFFSET
|
||||
la %r4,__SF_EMPTY+16(%r15)
|
||||
2: lg %r0,0(%r3)
|
||||
slg %r0,0(%r4)
|
||||
alg %r0,64(%r4)
|
||||
stg %r0,0(%r3)
|
||||
la %r3,8(%r3)
|
||||
la %r4,8(%r4)
|
||||
brct %r1,2b
|
||||
#endif
|
||||
3: # account system time going idle
|
||||
lg %r9,__LC_STEAL_TIMER
|
||||
alg %r9,__CLOCK_IDLE_ENTER(%r2)
|
||||
slg %r9,__LC_LAST_UPDATE_CLOCK
|
||||
|
@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
|
||||
static atomic64_t virt_timer_current;
|
||||
static atomic64_t virt_timer_elapsed;
|
||||
|
||||
static DEFINE_PER_CPU(u64, mt_cycles[32]);
|
||||
DEFINE_PER_CPU(u64, mt_cycles[8]);
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
|
||||
@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
|
||||
return elapsed >= atomic64_read(&virt_timer_current);
|
||||
}
|
||||
|
||||
static void update_mt_scaling(void)
{
u64 cycles_new[8], *cycles_old;
u64 delta, fac, mult, div;
int i;

stcctm5(smp_cpu_mtid + 1, cycles_new);
cycles_old = this_cpu_ptr(mt_cycles);
fac = 1;
mult = div = 0;
for (i = 0; i <= smp_cpu_mtid; i++) {
delta = cycles_new[i] - cycles_old[i];
div += delta;
mult *= i + 1;
mult += delta * fac;
fac *= i + 1;
}
div *= fac;
if (div > 0) {
/* Update scaling factor */
__this_cpu_write(mt_scaling_mult, mult);
__this_cpu_write(mt_scaling_div, div);
memcpy(cycles_old, cycles_new,
sizeof(u64) * (smp_cpu_mtid + 1));
}
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}

/*
|
||||
* Update process times based on virtual cpu times stored by entry.S
|
||||
* to the lowcore fields user_timer, system_timer & steal_clock.
|
||||
@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, clock, user, system, steal;
|
||||
u64 user_scaled, system_scaled;
|
||||
int i;
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
clock = S390_lowcore.last_update_clock;
|
||||
@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
||||
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
|
||||
|
||||
/* Do MT utilization calculation */
|
||||
/* Update MT utilization calculation */
|
||||
if (smp_cpu_mtid &&
|
||||
time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
|
||||
u64 cycles_new[32], *cycles_old;
|
||||
u64 delta, fac, mult, div;
|
||||
|
||||
cycles_old = this_cpu_ptr(mt_cycles);
|
||||
if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
|
||||
fac = 1;
|
||||
mult = div = 0;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
delta = cycles_new[i] - cycles_old[i];
|
||||
div += delta;
|
||||
mult *= i + 1;
|
||||
mult += delta * fac;
|
||||
fac *= i + 1;
|
||||
}
|
||||
div *= fac;
|
||||
if (div > 0) {
|
||||
/* Update scaling factor */
|
||||
__this_cpu_write(mt_scaling_mult, mult);
|
||||
__this_cpu_write(mt_scaling_div, div);
|
||||
memcpy(cycles_old, cycles_new,
|
||||
sizeof(u64) * (smp_cpu_mtid + 1));
|
||||
}
|
||||
}
|
||||
__this_cpu_write(mt_scaling_jiffies, jiffies_64);
|
||||
}
|
||||
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
|
||||
update_mt_scaling();
|
||||
|
||||
user = S390_lowcore.user_timer - ti->user_timer;
|
||||
S390_lowcore.steal_timer -= user;
|
||||
@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
S390_lowcore.last_update_timer = get_vtimer();
|
||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
||||
|
||||
/* Update MT utilization calculation */
|
||||
if (smp_cpu_mtid &&
|
||||
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
|
||||
update_mt_scaling();
|
||||
|
||||
system = S390_lowcore.system_timer - ti->system_timer;
|
||||
S390_lowcore.steal_timer -= system;
|
||||
ti->system_timer = S390_lowcore.system_timer;
|
||||
|
@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
|
||||
cpumask_copy(&top->thread_mask, &core->mask);
|
||||
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
|
||||
cpumask_copy(&top->book_mask, &core_book(core)->mask);
|
||||
cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
|
||||
cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
|
||||
top->node_id = core_node(core)->id;
|
||||
}
|
||||
}
|
||||
@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
|
||||
|
||||
/* Clear all node masks */
|
||||
for (i = 0; i < MAX_NUMNODES; i++)
|
||||
cpumask_clear(node_to_cpumask_map[i]);
|
||||
cpumask_clear(&node_to_cpumask_map[i]);
|
||||
|
||||
/* Rebuild all masks */
|
||||
toptree_for_each(core, numa, CORE)
|
||||
|
@ -23,7 +23,7 @@
|
||||
pg_data_t *node_data[MAX_NUMNODES];
|
||||
EXPORT_SYMBOL(node_data);
|
||||
|
||||
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
|
||||
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
|
||||
EXPORT_SYMBOL(node_to_cpumask_map);
|
||||
|
||||
const struct numa_mode numa_mode_plain = {
|
||||
@ -144,7 +144,7 @@ void __init numa_setup(void)
|
||||
static int __init numa_init_early(void)
|
||||
{
|
||||
/* Attach all possible CPUs to node 0 for now. */
|
||||
cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
|
||||
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
|
||||
return 0;
|
||||
}
|
||||
early_initcall(numa_init_early);
|
||||
|
@ -13,3 +13,4 @@ generic-y += sections.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += xor.h
|
||||
generic-y += serial.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <gxio/iorpc_globals.h>
|
||||
#include <gxio/iorpc_mpipe.h>
|
||||
@ -29,32 +30,6 @@
|
||||
/* HACK: Avoid pointless "shadow" warnings. */
|
||||
#define link link_shadow
|
||||
|
||||
/**
|
||||
* strscpy - Copy a C-string into a sized buffer, but only if it fits
|
||||
* @dest: Where to copy the string to
|
||||
* @src: Where to copy the string from
|
||||
* @size: size of destination buffer
|
||||
*
|
||||
* Use this routine to avoid copying too-long strings.
|
||||
* The routine returns the total number of bytes copied
|
||||
* (including the trailing NUL) or zero if the buffer wasn't
|
||||
* big enough. To ensure that programmers pay attention
|
||||
* to the return code, the destination has a single NUL
|
||||
* written at the front (if size is non-zero) when the
|
||||
* buffer is not big enough.
|
||||
*/
|
||||
static size_t strscpy(char *dest, const char *src, size_t size)
|
||||
{
|
||||
size_t len = strnlen(src, size) + 1;
|
||||
if (len > size) {
|
||||
if (size)
|
||||
dest[0] = '\0';
|
||||
return 0;
|
||||
}
|
||||
memcpy(dest, src, len);
|
||||
return len;
|
||||
}
|
||||
|
||||
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
|
||||
{
|
||||
char file[32];
|
||||
@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
|
||||
if (!context)
|
||||
return GXIO_ERR_NO_DEVICE;
|
||||
|
||||
if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
|
||||
if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
|
||||
return GXIO_ERR_NO_DEVICE;
|
||||
|
||||
return gxio_mpipe_info_instance_aux(context, name);
|
||||
@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
|
||||
|
||||
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
|
||||
if (rv >= 0) {
|
||||
if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
|
||||
if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
|
||||
return GXIO_ERR_INVAL_MEMORY_SIZE;
|
||||
memcpy(link_mac, mac.mac, sizeof(mac.mac));
|
||||
}
|
||||
@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
|
||||
_gxio_mpipe_link_name_t name;
|
||||
int rv;
|
||||
|
||||
if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
|
||||
if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
|
||||
return GXIO_ERR_NO_DEVICE;
|
||||
|
||||
rv = gxio_mpipe_link_open_aux(context, name, flags);
|
||||
|
@ -40,4 +40,5 @@ generic-y += termbits.h
|
||||
generic-y += termios.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += types.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/usb/tilegx.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
static u64 ehci_dmamask = DMA_BIT_MASK(32);
|
||||
|
@ -25,4 +25,5 @@ generic-y += preempt.h
|
||||
generic-y += switch_to.h
|
||||
generic-y += topology.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -62,4 +62,5 @@ generic-y += ucontext.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += user.h
|
||||
generic-y += vga.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -193,7 +193,7 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */

@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...);
|
||||
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
|
||||
u32 type, u64 attribute);
|
||||
|
||||
#ifdef CONFIG_KASAN
|
||||
/*
|
||||
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
|
||||
* only in kernel binary. Since the EFI stub linked into a separate binary it
|
||||
@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
|
||||
#undef memcpy
|
||||
#undef memset
|
||||
#undef memmove
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
|
@ -41,6 +41,7 @@ struct pvclock_wall_clock {
|
||||
|
||||
#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
|
||||
#define PVCLOCK_GUEST_STOPPED (1 << 1)
|
||||
/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
|
||||
#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* _ASM_X86_PVCLOCK_ABI_H */
|
||||
|
@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
|
||||
return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
|
||||
}
|
||||
|
||||
static inline int
|
||||
static inline long
|
||||
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
|
||||
{
|
||||
return _hypercall2(int, memory_op, cmd, arg);
|
||||
return _hypercall2(long, memory_op, cmd, arg);
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
@ -1,7 +1,7 @@
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H

#ifdef __x86_64__
#if defined(__x86_64__) && !defined(__ILP32__)
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32

@ -34,11 +34,10 @@
|
||||
struct ms_hyperv_info ms_hyperv;
|
||||
EXPORT_SYMBOL_GPL(ms_hyperv);
|
||||
|
||||
static void (*hv_kexec_handler)(void);
|
||||
static void (*hv_crash_handler)(struct pt_regs *regs);
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
static void (*vmbus_handler)(void);
|
||||
static void (*hv_kexec_handler)(void);
|
||||
static void (*hv_crash_handler)(struct pt_regs *regs);
|
||||
|
||||
void hyperv_vector_handler(struct pt_regs *regs)
|
||||
{
|
||||
@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
|
||||
hv_crash_handler = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
static void hv_machine_shutdown(void)
|
||||
{
|
||||
if (kexec_in_progress && hv_kexec_handler)
|
||||
@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
|
||||
hv_crash_handler(regs);
|
||||
native_machine_crash_shutdown(regs);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KEXEC_CORE */
|
||||
#endif /* CONFIG_HYPERV */
|
||||
|
||||
static uint32_t __init ms_hyperv_platform(void)
|
||||
{
|
||||
@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
|
||||
no_timer_check = 1;
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
|
||||
machine_ops.shutdown = hv_machine_shutdown;
|
||||
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
|
||||
#endif
|
||||
mark_tsc_unstable("running on Hyper-V");
|
||||
}
|
||||
|
||||
|
@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
{ X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
{ X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
{ X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 },
{ X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
{ X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
{ X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
{ X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },

@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
static int get_nr_ram_ranges_callback(unsigned long start_pfn,
|
||||
unsigned long nr_pfn, void *arg)
|
||||
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
|
||||
{
|
||||
int *nr_ranges = arg;
|
||||
unsigned int *nr_ranges = arg;
|
||||
|
||||
(*nr_ranges)++;
|
||||
return 0;
|
||||
@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
|
||||
|
||||
ced->image = image;
|
||||
|
||||
walk_system_ram_range(0, -1, &nr_ranges,
|
||||
walk_system_ram_res(0, -1, &nr_ranges,
|
||||
get_nr_ram_ranges_callback);
|
||||
|
||||
ced->max_nr_ranges = nr_ranges;
|
||||
|
@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from fs/proc with a reference on @p to find the function
|
||||
* which called into schedule(). This needs to be done carefully
|
||||
* because the task might wake up and we might look at a stack
|
||||
* changing under us.
|
||||
*/
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
unsigned long start, bottom, top, sp, fp, ip;
|
||||
int count = 0;
|
||||
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
|
||||
start = (unsigned long)task_stack_page(p);
|
||||
if (!start)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Layout of the stack page:
|
||||
*
|
||||
* ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
|
||||
* PADDING
|
||||
* ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
|
||||
* stack
|
||||
* ----------- bottom = start + sizeof(thread_info)
|
||||
* thread_info
|
||||
* ----------- start
|
||||
*
|
||||
* The tasks stack pointer points at the location where the
|
||||
* framepointer is stored. The data on the stack is:
|
||||
* ... IP FP ... IP FP
|
||||
*
|
||||
* We need to read FP and IP, so we need to adjust the upper
|
||||
* bound by another unsigned long.
|
||||
*/
|
||||
top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
|
||||
top -= 2 * sizeof(unsigned long);
|
||||
bottom = start + sizeof(struct thread_info);
|
||||
|
||||
sp = READ_ONCE(p->thread.sp);
|
||||
if (sp < bottom || sp > top)
|
||||
return 0;
|
||||
|
||||
fp = READ_ONCE(*(unsigned long *)sp);
|
||||
do {
|
||||
if (fp < bottom || fp > top)
|
||||
return 0;
|
||||
ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
|
||||
if (!in_sched_functions(ip))
|
||||
return ip;
|
||||
fp = READ_ONCE(*(unsigned long *)fp);
|
||||
} while (count++ < 16 && p->state != TASK_RUNNING);
|
||||
return 0;
|
||||
}
|
||||
|
@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
|
||||
return prev_p;
|
||||
}
|
||||
|
||||
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
|
||||
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
unsigned long bp, sp, ip;
|
||||
unsigned long stack_page;
|
||||
int count = 0;
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
stack_page = (unsigned long)task_stack_page(p);
|
||||
sp = p->thread.sp;
|
||||
if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
|
||||
return 0;
|
||||
/* include/asm-i386/system.h:switch_to() pushes bp last. */
|
||||
bp = *(unsigned long *) sp;
|
||||
do {
|
||||
if (bp < stack_page || bp > top_ebp+stack_page)
|
||||
return 0;
|
||||
ip = *(unsigned long *) (bp+4);
|
||||
if (!in_sched_functions(ip))
|
||||
return ip;
|
||||
bp = *(unsigned long *) bp;
|
||||
} while (count++ < 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_personality_ia32);
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
unsigned long stack;
|
||||
u64 fp, ip;
|
||||
int count = 0;
|
||||
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
stack = (unsigned long)task_stack_page(p);
|
||||
if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
|
||||
return 0;
|
||||
fp = *(u64 *)(p->thread.sp);
|
||||
do {
|
||||
if (fp < (unsigned long)stack ||
|
||||
fp >= (unsigned long)stack+THREAD_SIZE)
|
||||
return 0;
|
||||
ip = *(u64 *)(fp+8);
|
||||
if (!in_sched_functions(ip))
|
||||
return ip;
|
||||
fp = *(u64 *)fp;
|
||||
} while (count++ < 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
|
||||
{
|
||||
int ret = 0;
|
||||
|
@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
||||
if (svm->vmcb->control.next_rip != 0) {
|
||||
WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
|
||||
WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
|
||||
svm->next_rip = svm->vmcb->control.next_rip;
|
||||
}
|
||||
|
||||
@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
|
||||
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
|
||||
}
|
||||
|
||||
#define MTRR_TYPE_UC_MINUS 7
|
||||
#define MTRR2PROTVAL_INVALID 0xff
|
||||
|
||||
static u8 mtrr2protval[8];
|
||||
|
||||
static u8 fallback_mtrr_type(int mtrr)
|
||||
{
|
||||
/*
|
||||
* WT and WP aren't always available in the host PAT. Treat
|
||||
* them as UC and UC- respectively. Everything else should be
|
||||
* there.
|
||||
*/
|
||||
switch (mtrr)
|
||||
{
|
||||
case MTRR_TYPE_WRTHROUGH:
|
||||
return MTRR_TYPE_UNCACHABLE;
|
||||
case MTRR_TYPE_WRPROT:
|
||||
return MTRR_TYPE_UC_MINUS;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static void build_mtrr2protval(void)
|
||||
{
|
||||
int i;
|
||||
u64 pat;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
mtrr2protval[i] = MTRR2PROTVAL_INVALID;
|
||||
|
||||
/* Ignore the invalid MTRR types. */
|
||||
mtrr2protval[2] = 0;
|
||||
mtrr2protval[3] = 0;
|
||||
|
||||
/*
|
||||
* Use host PAT value to figure out the mapping from guest MTRR
|
||||
* values to nested page table PAT/PCD/PWT values. We do not
|
||||
* want to change the host PAT value every time we enter the
|
||||
* guest.
|
||||
*/
|
||||
rdmsrl(MSR_IA32_CR_PAT, pat);
|
||||
for (i = 0; i < 8; i++) {
|
||||
u8 mtrr = pat >> (8 * i);
|
||||
|
||||
if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
|
||||
mtrr2protval[mtrr] = __cm_idx2pte(i);
|
||||
}
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
|
||||
u8 fallback = fallback_mtrr_type(i);
|
||||
mtrr2protval[i] = mtrr2protval[fallback];
|
||||
BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static __init int svm_hardware_setup(void)
|
||||
{
|
||||
int cpu;
|
||||
@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void)
|
||||
} else
|
||||
kvm_disable_tdp();
|
||||
|
||||
build_mtrr2protval();
|
||||
return 0;
|
||||
|
||||
err:
|
||||
@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
|
||||
return target_tsc - tsc;
|
||||
}
|
||||
|
||||
static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = &svm->vcpu;
|
||||
|
||||
/* Unlike Intel, AMD takes the guest's CR0.CD into account.
|
||||
*
|
||||
* AMD does not have IPAT. To emulate it for the case of guests
|
||||
* with no assigned devices, just set everything to WB. If guests
|
||||
* have assigned devices, however, we cannot force WB for RAM
|
||||
* pages only, so use the guest PAT directly.
|
||||
*/
|
||||
if (!kvm_arch_has_assigned_device(vcpu->kvm))
|
||||
*g_pat = 0x0606060606060606;
|
||||
else
|
||||
*g_pat = vcpu->arch.pat;
|
||||
}
|
||||
|
||||
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
|
||||
{
|
||||
u8 mtrr;
|
||||
|
||||
/*
|
||||
* 1. MMIO: trust guest MTRR, so same as item 3.
|
||||
* 2. No passthrough: always map as WB, and force guest PAT to WB as well
|
||||
* 3. Passthrough: can't guarantee the result, try to trust guest.
|
||||
*/
|
||||
if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
|
||||
return 0;
|
||||
|
||||
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
|
||||
kvm_read_cr0(vcpu) & X86_CR0_CD)
|
||||
return _PAGE_NOCACHE;
|
||||
|
||||
mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
|
||||
return mtrr2protval[mtrr];
|
||||
}
|
||||
|
||||
static void init_vmcb(struct vcpu_svm *svm, bool init_event)
|
||||
{
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
|
||||
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
|
||||
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
|
||||
save->g_pat = svm->vcpu.arch.pat;
|
||||
svm_set_guest_pat(svm, &save->g_pat);
|
||||
save->cr3 = 0;
|
||||
save->cr4 = 0;
|
||||
}
|
||||
@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||
|
||||
if (!vcpu->fpu_active)
|
||||
cr0 |= X86_CR0_TS;
|
||||
|
||||
/* These are emulated via page tables. */
|
||||
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
|
||||
|
||||
/*
|
||||
* re-enable caching here because the QEMU bios
|
||||
* does not do it - this results in some delay at
|
||||
* reboot
|
||||
*/
|
||||
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
|
||||
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
|
||||
svm->vmcb->save.cr0 = cr0;
|
||||
mark_dirty(svm->vmcb, VMCB_CR);
|
||||
update_cr0_intercept(svm);
|
||||
@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
case MSR_VM_IGNNE:
|
||||
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
||||
break;
|
||||
case MSR_IA32_CR_PAT:
|
||||
if (npt_enabled) {
|
||||
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
|
||||
return 1;
|
||||
vcpu->arch.pat = data;
|
||||
svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
|
||||
mark_dirty(svm->vmcb, VMCB_NPT);
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
return kvm_set_msr_common(vcpu, msr);
|
||||
}
|
||||
@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
}
|
||||
|
@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
|
||||
u64 ipat = 0;
|
||||
|
||||
/* For VT-d and EPT combination
|
||||
* 1. MMIO: guest may want to apply WC, trust it.
|
||||
* 1. MMIO: always map as UC
|
||||
* 2. EPT with VT-d:
|
||||
* a. VT-d without snooping control feature: can't guarantee the
|
||||
* result, try to trust guest. So the same as item 1.
|
||||
* result, try to trust guest.
|
||||
* b. VT-d with snooping control feature: snooping control feature of
|
||||
* VT-d engine can guarantee the cache correctness. Just set it
|
||||
* to WB to keep consistent with host. So the same as item 3.
|
||||
* 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
|
||||
* consistent with host MTRR
|
||||
*/
|
||||
if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
|
||||
if (is_mmio) {
|
||||
cache = MTRR_TYPE_UNCACHABLE;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
|
||||
ipat = VMX_EPT_IPAT_BIT;
|
||||
cache = MTRR_TYPE_WRBACK;
|
||||
goto exit;
|
||||
|
@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
||||
vcpu->pvclock_set_guest_stopped_request = false;
|
||||
}
|
||||
|
||||
pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
|
||||
|
||||
/* If the host uses TSC clocksource, then it is stable */
|
||||
if (use_master_clock)
|
||||
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
|
||||
@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
&vcpu->requests);
|
||||
|
||||
ka->boot_vcpu_runs_old_kvmclock = tmp;
|
||||
|
||||
ka->kvmclock_offset = -get_kernel_ns();
|
||||
}
|
||||
|
||||
vcpu->arch.time = data;
|
||||
|
@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

rodata_test();

@ -704,6 +704,70 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterate the EFI memory map in reverse order because the regions
|
||||
* will be mapped top-down. The end result is the same as if we had
|
||||
* mapped things forward, but doesn't require us to change the
|
||||
* existing implementation of efi_map_region().
|
||||
*/
|
||||
static inline void *efi_map_next_entry_reverse(void *entry)
|
||||
{
|
||||
/* Initial call */
|
||||
if (!entry)
|
||||
return memmap.map_end - memmap.desc_size;
|
||||
|
||||
entry -= memmap.desc_size;
|
||||
if (entry < memmap.map)
|
||||
return NULL;
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* efi_map_next_entry - Return the next EFI memory map descriptor
|
||||
* @entry: Previous EFI memory map descriptor
|
||||
*
|
||||
* This is a helper function to iterate over the EFI memory map, which
|
||||
* we do in different orders depending on the current configuration.
|
||||
*
|
||||
* To begin traversing the memory map @entry must be %NULL.
|
||||
*
|
||||
* Returns %NULL when we reach the end of the memory map.
|
||||
*/
|
||||
static void *efi_map_next_entry(void *entry)
|
||||
{
|
||||
if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
|
||||
/*
|
||||
* Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
|
||||
* config table feature requires us to map all entries
|
||||
* in the same order as they appear in the EFI memory
|
||||
* map. That is to say, entry N must have a lower
|
||||
* virtual address than entry N+1. This is because the
|
||||
* firmware toolchain leaves relative references in
|
||||
* the code/data sections, which are split and become
|
||||
* separate EFI memory regions. Mapping things
|
||||
* out-of-order leads to the firmware accessing
|
||||
* unmapped addresses.
|
||||
*
|
||||
* Since we need to map things this way whether or not
|
||||
* the kernel actually makes use of
|
||||
* EFI_PROPERTIES_TABLE, let's just switch to this
|
||||
* scheme by default for 64-bit.
|
||||
*/
|
||||
return efi_map_next_entry_reverse(entry);
|
||||
}
|
||||
|
||||
/* Initial call */
|
||||
if (!entry)
|
||||
return memmap.map;
|
||||
|
||||
entry += memmap.desc_size;
|
||||
if (entry >= memmap.map_end)
|
||||
return NULL;
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map the efi memory ranges of the runtime services and update new_mmap with
|
||||
* virtual addresses.
|
||||
@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
|
||||
unsigned long left = 0;
|
||||
efi_memory_desc_t *md;
|
||||
|
||||
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
|
||||
p = NULL;
|
||||
while ((p = efi_map_next_entry(p))) {
|
||||
md = p;
|
||||
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
|
||||
#ifdef CONFIG_X86_64
|
||||
|
@ -33,6 +33,10 @@
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/edd.h>
|
||||
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
#include <linux/kexec.h>
|
||||
#endif
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/events.h>
|
||||
#include <xen/interface/xen.h>
|
||||
@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
|
||||
/* Fast syscall setup is all done in hypercalls, so
|
||||
these are all ignored. Stub them out here to stop
|
||||
Xen console noise. */
|
||||
break;
|
||||
|
||||
default:
|
||||
if (!pmu_msr_write(msr, low, high, &ret))
|
||||
@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
|
||||
.notifier_call = xen_hvm_cpu_notify,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
static void xen_hvm_shutdown(void)
|
||||
{
|
||||
native_machine_shutdown();
|
||||
if (kexec_in_progress)
|
||||
xen_reboot(SHUTDOWN_soft_reset);
|
||||
}
|
||||
|
||||
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
|
||||
{
|
||||
native_machine_crash_shutdown(regs);
|
||||
xen_reboot(SHUTDOWN_soft_reset);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __init xen_hvm_guest_init(void)
|
||||
{
|
||||
if (xen_pv_domain())
|
||||
@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
|
||||
x86_init.irqs.intr_init = xen_init_IRQ;
|
||||
xen_hvm_init_time_ops();
|
||||
xen_hvm_init_mmu_ops();
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
machine_ops.shutdown = xen_hvm_shutdown;
|
||||
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
|
||||
static pte_t *p2m_missing_pte;
|
||||
static pte_t *p2m_identity_pte;
|
||||
|
||||
/*
* Hint at last populated PFN.
*
* Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
* can avoid scanning the whole P2M (which may be sized to account for
* hotplugged memory).
*/
static unsigned long xen_p2m_last_pfn;

static inline unsigned p2m_top_index(unsigned long pfn)
|
||||
{
|
||||
BUG_ON(pfn >= MAX_P2M_PFN);
|
||||
@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
|
||||
else
|
||||
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
|
||||
virt_to_mfn(p2m_top_mfn);
|
||||
HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
|
||||
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
|
||||
HYPERVISOR_shared_info->arch.p2m_generation = 0;
|
||||
HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
|
||||
HYPERVISOR_shared_info->arch.p2m_cr3 =
|
||||
@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
|
||||
static struct vm_struct vm;
|
||||
unsigned long p2m_limit;
|
||||
|
||||
xen_p2m_last_pfn = xen_max_p2m_pfn;
|
||||
|
||||
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
|
||||
vm.flags = VM_ALLOC;
|
||||
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
|
||||
@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
|
||||
free_p2m_page(p2m);
|
||||
}
|
||||
|
||||
/* Expanded the p2m? */
|
||||
if (pfn > xen_p2m_last_pfn) {
|
||||
xen_p2m_last_pfn = pfn;
|
||||
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
|
||||
{
|
||||
unsigned long max_pages, limit;
|
||||
domid_t domid = DOMID_SELF;
|
||||
int ret;
|
||||
long ret;
|
||||
|
||||
limit = xen_get_pages_limit();
|
||||
max_pages = limit;
|
||||
@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
|
||||
xen_ignore_unusable();
|
||||
|
||||
/* Make sure the Xen-supplied memory map is well-ordered. */
|
||||
sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
|
||||
sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
|
||||
&xen_e820_map_entries);
|
||||
|
||||
max_pages = xen_get_max_pages();
|
||||
|
@ -28,4 +28,5 @@ generic-y += statfs.h
|
||||
generic-y += termios.h
|
||||
generic-y += topology.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += word-at-a-time.h
|
||||
generic-y += xor.h
|
||||
|
@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
|
||||
return cpu;
|
||||
}
|
||||
|
||||
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
|
||||
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
|
||||
const struct cpumask *online_mask)
|
||||
{
|
||||
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
|
||||
cpumask_var_t cpus;
|
||||
@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
|
||||
|
||||
cpumask_clear(cpus);
|
||||
nr_cpus = nr_uniq_cpus = 0;
|
||||
for_each_online_cpu(i) {
|
||||
for_each_cpu(i, online_mask) {
|
||||
nr_cpus++;
|
||||
first_sibling = get_first_sibling(i);
|
||||
if (!cpumask_test_cpu(first_sibling, cpus))
|
||||
@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
|
||||
|
||||
queue = 0;
|
||||
for_each_possible_cpu(i) {
|
||||
if (!cpu_online(i)) {
|
||||
if (!cpumask_test_cpu(i, online_mask)) {
|
||||
map[i] = 0;
|
||||
continue;
|
||||
}
|
||||
@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
|
||||
if (!map)
|
||||
return NULL;
|
||||
|
||||
if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
|
||||
if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
|
||||
return map;
|
||||
|
||||
kfree(map);
|
||||
|
@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
|
||||
unsigned int i, first = 1;
|
||||
ssize_t ret = 0;
|
||||
|
||||
blk_mq_disable_hotplug();
|
||||
|
||||
for_each_cpu(i, hctx->cpumask) {
|
||||
if (first)
|
||||
ret += sprintf(ret + page, "%u", i);
|
||||
@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
|
||||
first = 0;
|
||||
}
|
||||
|
||||
blk_mq_enable_hotplug();
|
||||
|
||||
ret += sprintf(ret + page, "\n");
|
||||
return ret;
|
||||
}
|
||||
@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
struct blk_mq_ctx *ctx;
|
||||
int i;
|
||||
|
||||
if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
|
||||
if (!hctx->nr_ctx)
|
||||
return;
|
||||
|
||||
hctx_for_each_ctx(hctx, ctx, i)
|
||||
@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
struct blk_mq_ctx *ctx;
|
||||
int i, ret;
|
||||
|
||||
if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
|
||||
if (!hctx->nr_ctx)
|
||||
return 0;
|
||||
|
||||
ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
|
||||
@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
|
||||
struct blk_mq_ctx *ctx;
|
||||
int i, j;
|
||||
|
||||
blk_mq_disable_hotplug();
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
blk_mq_unregister_hctx(hctx);
|
||||
|
||||
@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
|
||||
kobject_put(&q->mq_kobj);
|
||||
|
||||
kobject_put(&disk_to_dev(disk)->kobj);
|
||||
|
||||
q->mq_sysfs_init_done = false;
|
||||
blk_mq_enable_hotplug();
|
||||
}
|
||||
|
||||
static void blk_mq_sysfs_init(struct request_queue *q)
|
||||
@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int ret, i;
|
||||
|
||||
blk_mq_disable_hotplug();
|
||||
|
||||
blk_mq_sysfs_init(q);
|
||||
|
||||
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto out;
|
||||
|
||||
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
hctx->flags |= BLK_MQ_F_SYSFS_UP;
|
||||
ret = blk_mq_register_hctx(hctx);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
if (ret)
|
||||
blk_mq_unregister_disk(disk);
|
||||
return ret;
|
||||
}
|
||||
else
|
||||
q->mq_sysfs_init_done = true;
|
||||
out:
|
||||
blk_mq_enable_hotplug();
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
|
||||
|
||||
@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
if (!q->mq_sysfs_init_done)
|
||||
return;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
blk_mq_unregister_hctx(hctx);
|
||||
}
|
||||
@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i, ret = 0;
|
||||
|
||||
if (!q->mq_sysfs_init_done)
|
||||
return ret;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
ret = blk_mq_register_hctx(hctx);
|
||||
if (ret)
|
||||
|
@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
|
||||
|
||||
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
|
||||
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
|
||||
void *priv)
|
||||
{
|
||||
struct blk_mq_tags *tags = hctx->tags;
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
struct blk_mq_tags *tags = hctx->tags;
|
||||
|
||||
/*
|
||||
* If not software queues are currently mapped to this
|
||||
* hardware queue, there's nothing to check
|
||||
*/
|
||||
if (!blk_mq_hw_queue_mapped(hctx))
|
||||
continue;
|
||||
|
||||
if (tags->nr_reserved_tags)
|
||||
bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
|
||||
bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
|
||||
false);
|
||||
}
|
||||
|
||||
if (tags->nr_reserved_tags)
|
||||
bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
|
||||
bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
|
||||
false);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
|
||||
|
||||
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
|
||||
{
|
||||
|
@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
|
||||
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
|
||||
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
|
||||
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
|
||||
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
|
||||
void *priv);
|
||||
|
||||
enum {
|
||||
BLK_MQ_TAG_CACHE_MIN = 1,
|
||||
|
block/blk-mq.c
@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
|
||||
* Ends all I/O on a request. It does not handle partial completions.
|
||||
* The actual completion happens out-of-order, through a IPI handler.
|
||||
**/
|
||||
void blk_mq_complete_request(struct request *rq)
|
||||
void blk_mq_complete_request(struct request *rq, int error)
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
|
||||
if (unlikely(blk_should_fake_timeout(q)))
|
||||
return;
|
||||
if (!blk_mark_rq_complete(rq))
|
||||
if (!blk_mark_rq_complete(rq)) {
|
||||
rq->errors = error;
|
||||
__blk_mq_complete_request(rq);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_complete_request);
|
||||
|
||||
@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
|
||||
* If a request wasn't started before the queue was
|
||||
* marked dying, kill it here or it'll go unnoticed.
|
||||
*/
|
||||
if (unlikely(blk_queue_dying(rq->q))) {
|
||||
rq->errors = -EIO;
|
||||
blk_mq_complete_request(rq);
|
||||
}
|
||||
if (unlikely(blk_queue_dying(rq->q)))
|
||||
blk_mq_complete_request(rq, -EIO);
|
||||
return;
|
||||
}
|
||||
if (rq->cmd_flags & REQ_NO_TIMEOUT)
|
||||
@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
|
||||
.next = 0,
|
||||
.next_set = 0,
|
||||
};
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
/*
|
||||
* If not software queues are currently mapped to this
|
||||
* hardware queue, there's nothing to check
|
||||
*/
|
||||
if (!blk_mq_hw_queue_mapped(hctx))
|
||||
continue;
|
||||
|
||||
blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
|
||||
}
|
||||
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
|
||||
|
||||
if (data.next_set) {
|
||||
data.next = blk_rq_timeout(round_jiffies_up(data.next));
|
||||
mod_timer(&q->timeout, data.next);
|
||||
} else {
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
/* the hctx may be unmapped, so check it here */
|
||||
if (blk_mq_hw_queue_mapped(hctx))
|
||||
@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_mq_map_swqueue(struct request_queue *q)
|
||||
static void blk_mq_map_swqueue(struct request_queue *q,
|
||||
const struct cpumask *online_mask)
|
||||
{
|
||||
unsigned int i;
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
struct blk_mq_ctx *ctx;
|
||||
struct blk_mq_tag_set *set = q->tag_set;
|
||||
|
||||
/*
|
||||
* Avoid others reading imcomplete hctx->cpumask through sysfs
|
||||
*/
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
cpumask_clear(hctx->cpumask);
|
||||
hctx->nr_ctx = 0;
|
||||
@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
|
||||
*/
|
||||
queue_for_each_ctx(q, ctx, i) {
|
||||
/* If the cpu isn't online, the cpu is mapped to first hctx */
|
||||
if (!cpu_online(i))
|
||||
if (!cpumask_test_cpu(i, online_mask))
|
||||
continue;
|
||||
|
||||
hctx = q->mq_ops->map_queue(q, i);
|
||||
cpumask_set_cpu(i, hctx->cpumask);
|
||||
cpumask_set_cpu(i, hctx->tags->cpumask);
|
||||
ctx->index_hw = hctx->nr_ctx;
|
||||
hctx->ctxs[hctx->nr_ctx++] = ctx;
|
||||
}
|
||||
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
struct blk_mq_ctxmap *map = &hctx->ctx_map;
|
||||
|
||||
@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
|
||||
hctx->next_cpu = cpumask_first(hctx->cpumask);
|
||||
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
|
||||
}
|
||||
|
||||
queue_for_each_ctx(q, ctx, i) {
|
||||
if (!cpumask_test_cpu(i, online_mask))
|
||||
continue;
|
||||
|
||||
hctx = q->mq_ops->map_queue(q, i);
|
||||
cpumask_set_cpu(i, hctx->tags->cpumask);
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
|
||||
@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
|
||||
kfree(hctx);
|
||||
}
|
||||
|
||||
kfree(q->mq_map);
|
||||
q->mq_map = NULL;
|
||||
|
||||
kfree(q->queue_hw_ctx);
|
||||
|
||||
/* ctx kobj stays in queue_ctx */
|
||||
@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
if (blk_mq_init_hw_queues(q, set))
|
||||
goto err_hctxs;
|
||||
|
||||
get_online_cpus();
|
||||
mutex_lock(&all_q_mutex);
|
||||
|
||||
list_add_tail(&q->all_q_node, &all_q_list);
|
||||
mutex_unlock(&all_q_mutex);
|
||||
|
||||
blk_mq_add_queue_tag_set(set, q);
|
||||
blk_mq_map_swqueue(q, cpu_online_mask);
|
||||
|
||||
blk_mq_map_swqueue(q);
|
||||
mutex_unlock(&all_q_mutex);
|
||||
put_online_cpus();
|
||||
|
||||
return q;
|
||||
|
||||
@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_tag_set *set = q->tag_set;
|
||||
|
||||
mutex_lock(&all_q_mutex);
|
||||
list_del_init(&q->all_q_node);
|
||||
mutex_unlock(&all_q_mutex);
|
||||
|
||||
blk_mq_del_queue_tag_set(q);
|
||||
|
||||
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
|
||||
blk_mq_free_hw_queues(q, set);
|
||||
|
||||
percpu_ref_exit(&q->mq_usage_counter);
|
||||
|
||||
kfree(q->mq_map);
|
||||
|
||||
q->mq_map = NULL;
|
||||
|
||||
mutex_lock(&all_q_mutex);
|
||||
list_del_init(&q->all_q_node);
|
||||
mutex_unlock(&all_q_mutex);
|
||||
}
|
||||
|
||||
/* Basically redo blk_mq_init_queue with queue frozen */
|
||||
static void blk_mq_queue_reinit(struct request_queue *q)
|
||||
static void blk_mq_queue_reinit(struct request_queue *q,
|
||||
const struct cpumask *online_mask)
|
||||
{
|
||||
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
|
||||
|
||||
blk_mq_sysfs_unregister(q);
|
||||
|
||||
blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
|
||||
blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
|
||||
|
||||
/*
|
||||
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
|
||||
@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
|
||||
* involves free and re-allocate memory, worthy doing?)
|
||||
*/
|
||||
|
||||
blk_mq_map_swqueue(q);
|
||||
blk_mq_map_swqueue(q, online_mask);
|
||||
|
||||
blk_mq_sysfs_register(q);
|
||||
}
|
||||
@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
struct request_queue *q;
|
||||
int cpu = (unsigned long)hcpu;
|
||||
/*
|
||||
* New online cpumask which is going to be set in this hotplug event.
|
||||
* Declare this cpumasks as global as cpu-hotplug operation is invoked
|
||||
* one-by-one and dynamically allocating this could result in a failure.
|
||||
*/
|
||||
static struct cpumask online_new;
|
||||
|
||||
/*
|
||||
* Before new mappings are established, hotadded cpu might already
|
||||
* start handling requests. This doesn't break anything as we map
|
||||
* offline CPUs to first hardware queue. We will re-init the queue
|
||||
* below to get optimal settings.
|
||||
* Before hotadded cpu starts handling requests, new mappings must
|
||||
* be established. Otherwise, these requests in hw queue might
|
||||
* never be dispatched.
|
||||
*
|
||||
* For example, there is a single hw queue (hctx) and two CPU queues
|
||||
* (ctx0 for CPU0, and ctx1 for CPU1).
|
||||
*
|
||||
* Now CPU1 is just onlined and a request is inserted into
|
||||
* ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
|
||||
* still zero.
|
||||
*
|
||||
* And then while running hw queue, flush_busy_ctxs() finds bit0 is
|
||||
* set in pending bitmap and tries to retrieve requests in
|
||||
* hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0,
|
||||
* so the request in ctx1->rq_list is ignored.
|
||||
*/
|
||||
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
|
||||
action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
|
||||
switch (action & ~CPU_TASKS_FROZEN) {
|
||||
case CPU_DEAD:
|
||||
case CPU_UP_CANCELED:
|
||||
cpumask_copy(&online_new, cpu_online_mask);
|
||||
break;
|
||||
case CPU_UP_PREPARE:
|
||||
cpumask_copy(&online_new, cpu_online_mask);
|
||||
cpumask_set_cpu(cpu, &online_new);
|
||||
break;
|
||||
default:
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
mutex_lock(&all_q_mutex);
|
||||
|
||||
@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
|
||||
}
|
||||
|
||||
list_for_each_entry(q, &all_q_list, all_q_node)
|
||||
blk_mq_queue_reinit(q);
|
||||
blk_mq_queue_reinit(q, &online_new);
|
||||
|
||||
list_for_each_entry(q, &all_q_list, all_q_node)
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
|
||||
* CPU -> queue mappings
|
||||
*/
|
||||
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
|
||||
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
|
||||
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
|
||||
const struct cpumask *online_mask);
|
||||
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
|
||||
|
||||
/*
|
||||
|
@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
srlen = cert->raw_serial_size;
q = cert->raw_serial;
}
if (srlen > 1 && *q == 0) {
srlen--;
q++;
}

ret = -ENOMEM;
desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);

@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
goto err_exit;

mutex_lock(&ec->mutex);
result = -ENODATA;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
result = 0;
q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
q->handler->query_bit);