s390 updates for the 6.7 merge window
Merge tag 's390-6.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Get rid of private VM_FAULT flags

 - Add word-at-a-time implementation

 - Add DCACHE_WORD_ACCESS support

 - Cleanup control register handling

 - Disallow CPU hotplug of CPU 0 to simplify its handling complexity,
   following a similar restriction in x86

 - Optimize pai crypto map allocation

 - Update the list of crypto express EP11 coprocessor operation modes

 - Fixes and improvements for secure guests AP pass-through

 - Several fixes to address incorrect page marking for address
   translation with the "cmma no-dat" feature, preventing potential
   incorrect guest TLB flushes

 - Fix early IPI handling

 - Several virtual vs physical address confusion fixes

 - Various small fixes and improvements all over the code

* tag 's390-6.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (74 commits)
  s390/cio: replace deprecated strncpy with strscpy
  s390/sclp: replace deprecated strncpy with strtomem
  s390/cio: fix virtual vs physical address confusion
  s390/cio: export CMG value as decimal
  s390: delete the unused store_prefix() function
  s390/cmma: fix handling of swapper_pg_dir and invalid_pg_dir
  s390/cmma: fix detection of DAT pages
  s390/sclp: handle default case in sclp memory notifier
  s390/pai_crypto: remove per-cpu variable assignement in event initialization
  s390/pai: initialize event count once at initialization
  s390/pai_crypto: use PERF_ATTACH_TASK define for per task detection
  s390/mm: add missing arch_set_page_dat() call to gmap allocations
  s390/mm: add missing arch_set_page_dat() call to vmem_crst_alloc()
  s390/cmma: fix initial kernel address space page table walk
  s390/diag: add missing virt_to_phys() translation to diag224()
  s390/mm,fault: move VM_FAULT_ERROR handling to do_exception()
  s390/mm,fault: remove VM_FAULT_BADMAP and VM_FAULT_BADACCESS
  s390/mm,fault: remove VM_FAULT_SIGNAL
  s390/mm,fault: remove VM_FAULT_BADCONTEXT
  s390/mm,fault: simplify kfence fault handling
  ...

PGP signature of tag s390-6.7-1:

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEE3QHqV+H2a8xAv27vjYWKoQLXFBgFAmVFLkYACgkQjYWKoQLX
FBgxRwf9FSNFwLcbYbG1x94rUUHnbaiyJWCezp3/ypr+m+qDvQatLYc75SxwrH0y
ocSygqvtVryVkWAKKvOHF1Kg5R2Fedmzf5wuVTXglfPqE1ZgMGdwS/LtknIoz556
twZJIpFzUFt5xaljpTCZJanLMvy/npl0bilezhNGl6v7N5rsWLbfK6vsPMDm+TTZ
yscapOsk8Z16NjXq0FETS5JHG65jjj9rkRfb0qD8SOFhti0fR9MSP2xeRXrDMDZE
IWXog5usx2DS6VX2HnxA8O7z1hhuTccJ1K1+rYqbb0Fwccqi7QaGZXEvocYEvlvy
lVe3/jbyn27hUoypHcfVCAVxdoOrnw==
=SMOp
-----END PGP SIGNATURE-----
commit e392ea4d4d
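Annotation: the control register rework that dominates this diff is easiest to read with the API change in mind. The old __ctl_set_bit()/ctl_set_bit() helpers took bare magic bit numbers and left the local-vs-system-wide distinction implicit, while the new interface wraps registers in struct ctlreg and names both the bit and the scope. A minimal sketch of the before/after calling convention, with the values taken from the EDAT hunks below:

	/* Before: magic bit number; applies to the executing CPU only, and
	 * the "all CPUs" variant hides behind a differently named helper. */
	__ctl_set_bit(0, 23);			/* CR0 bit 23 = enhanced DAT */

	/* After: named bit, explicit scope. */
	local_ctl_set_bit(0, CR0_EDAT_BIT);	/* this CPU only */
	system_ctl_set_bit(0, CR0_EDAT_BIT);	/* all CPUs + lowcore save areas */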
@@ -131,6 +131,7 @@ config S390
 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS2
+	select DCACHE_WORD_ACCESS if !KMSAN
 	select DMA_OPS if PCI
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
@@ -49,7 +49,7 @@ static void detect_facilities(void)
 {
 	if (test_facility(8)) {
 		machine.has_edat1 = 1;
-		__ctl_set_bit(0, 23);
+		local_ctl_set_bit(0, CR0_EDAT_BIT);
 	}
 	if (test_facility(78))
 		machine.has_edat2 = 1;
@@ -5,13 +5,14 @@
 #include <asm/pgalloc.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
+#include <asm/ctlreg.h>
 #include <asm/physmem_info.h>
 #include <asm/maccess.h>
 #include <asm/abs_lowcore.h>
 #include "decompressor.h"
 #include "boot.h"
 
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
@@ -166,8 +167,6 @@ static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
 
 static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
 {
-	pte_t entry;
-
 	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
 		set_pte(pte, pte_z);
 		return true;
@@ -426,7 +425,7 @@ void setup_vmem(unsigned long asce_limit)
 		asce_type = _REGION3_ENTRY_EMPTY;
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 	}
-	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+	s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 
 	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
 	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
@@ -447,12 +446,12 @@ void setup_vmem(unsigned long asce_limit)
 
 	kasan_populate_shadow();
 
-	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
+	S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
 	S390_lowcore.user_asce = s390_invalid_asce;
 
-	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	__ctl_load(S390_lowcore.user_asce, 7, 7);
-	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
+	local_ctl_load(1, &S390_lowcore.kernel_asce);
+	local_ctl_load(7, &S390_lowcore.user_asce);
+	local_ctl_load(13, &S390_lowcore.kernel_asce);
 
-	init_mm.context.asce = S390_lowcore.kernel_asce;
+	init_mm.context.asce = S390_lowcore.kernel_asce.val;
 }
@@ -13,6 +13,7 @@
 #define EX_TYPE_UA_LOAD_MEM	4
 #define EX_TYPE_UA_LOAD_REG	5
 #define EX_TYPE_UA_LOAD_REGPAIR	6
+#define EX_TYPE_ZEROPAD		7
 
 #define EX_DATA_REG_ERR_SHIFT	0
 #define EX_DATA_REG_ERR		GENMASK(3, 0)
@@ -23,16 +24,7 @@
 #define EX_DATA_LEN_SHIFT	8
 #define EX_DATA_LEN		GENMASK(11, 8)
 
-#define __EX_TABLE(_section, _fault, _target, _type)			\
-	stringify_in_c(.section	_section,"a";)				\
-	stringify_in_c(.balign	4;)					\
-	stringify_in_c(.long	(_fault) - .;)				\
-	stringify_in_c(.long	(_target) - .;)				\
-	stringify_in_c(.short	(_type);)				\
-	stringify_in_c(.short	0;)					\
-	stringify_in_c(.previous)
-
-#define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\
+#define __EX_TABLE(_section, _fault, _target, _type, _regerr, _regaddr, _len)	\
 	stringify_in_c(.section _section,"a";)				\
 	stringify_in_c(.balign	4;)					\
 	stringify_in_c(.long	(_fault) - .;)				\
@@ -72,21 +64,24 @@
 	stringify_in_c(.previous)
 
 #define EX_TABLE(_fault, _target)					\
-	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
 
 #define EX_TABLE_AMODE31(_fault, _target)				\
-	__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
+	__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
 
 #define EX_TABLE_UA_STORE(_fault, _target, _regerr)			\
-	__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
 
 #define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len)	\
-	__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
 
 #define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero)	\
-	__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
 
 #define EX_TABLE_UA_LOAD_REGPAIR(_fault, _target, _regerr, _regzero)	\
-	__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0)
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REGPAIR, _regerr, _regzero, 0)
 
+#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr)		\
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+
 #endif /* __ASM_EXTABLE_H */
@@ -1,146 +0,0 @@ (file deleted)
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_CTL_REG_H
-#define __ASM_CTL_REG_H
-
-#include <linux/bits.h>
-
-#define CR0_CLOCK_COMPARATOR_SIGN	BIT(63 - 10)
-#define CR0_LOW_ADDRESS_PROTECTION	BIT(63 - 35)
-#define CR0_FETCH_PROTECTION_OVERRIDE	BIT(63 - 38)
-#define CR0_STORAGE_PROTECTION_OVERRIDE	BIT(63 - 39)
-#define CR0_EMERGENCY_SIGNAL_SUBMASK	BIT(63 - 49)
-#define CR0_EXTERNAL_CALL_SUBMASK	BIT(63 - 50)
-#define CR0_CLOCK_COMPARATOR_SUBMASK	BIT(63 - 52)
-#define CR0_CPU_TIMER_SUBMASK		BIT(63 - 53)
-#define CR0_SERVICE_SIGNAL_SUBMASK	BIT(63 - 54)
-#define CR0_UNUSED_56			BIT(63 - 56)
-#define CR0_INTERRUPT_KEY_SUBMASK	BIT(63 - 57)
-#define CR0_MEASUREMENT_ALERT_SUBMASK	BIT(63 - 58)
-
-#define CR14_UNUSED_32			BIT(63 - 32)
-#define CR14_UNUSED_33			BIT(63 - 33)
-#define CR14_CHANNEL_REPORT_SUBMASK	BIT(63 - 35)
-#define CR14_RECOVERY_SUBMASK		BIT(63 - 36)
-#define CR14_DEGRADATION_SUBMASK	BIT(63 - 37)
-#define CR14_EXTERNAL_DAMAGE_SUBMASK	BIT(63 - 38)
-#define CR14_WARNING_SUBMASK		BIT(63 - 39)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bug.h>
-
-#define __ctl_load(array, low, high) do {				\
-	typedef struct { char _[sizeof(array)]; } addrtype;		\
-									\
-	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
-	asm volatile(							\
-		"	lctlg	%1,%2,%0\n"				\
-		:							\
-		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
-		: "memory");						\
-} while (0)
-
-#define __ctl_store(array, low, high) do {				\
-	typedef struct { char _[sizeof(array)]; } addrtype;		\
-									\
-	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
-	asm volatile(							\
-		"	stctg	%1,%2,%0\n"				\
-		: "=Q" (*(addrtype *)(&array))				\
-		: "i" (low), "i" (high));				\
-} while (0)
-
-static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
-{
-	unsigned long reg;
-
-	__ctl_store(reg, cr, cr);
-	reg |= 1UL << bit;
-	__ctl_load(reg, cr, cr);
-}
-
-static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
-{
-	unsigned long reg;
-
-	__ctl_store(reg, cr, cr);
-	reg &= ~(1UL << bit);
-	__ctl_load(reg, cr, cr);
-}
-
-void smp_ctl_set_clear_bit(int cr, int bit, bool set);
-
-static inline void ctl_set_bit(int cr, int bit)
-{
-	smp_ctl_set_clear_bit(cr, bit, true);
-}
-
-static inline void ctl_clear_bit(int cr, int bit)
-{
-	smp_ctl_set_clear_bit(cr, bit, false);
-}
-
-union ctlreg0 {
-	unsigned long val;
-	struct {
-		unsigned long	   : 8;
-		unsigned long tcx  : 1;	/* Transactional-Execution control */
-		unsigned long pifo : 1;	/* Transactional-Execution Program-
-					   Interruption-Filtering Override */
-		unsigned long	   : 3;
-		unsigned long ccc  : 1; /* Cryptography counter control */
-		unsigned long pec  : 1; /* PAI extension control */
-		unsigned long	   : 17;
-		unsigned long	   : 3;
-		unsigned long lap  : 1; /* Low-address-protection control */
-		unsigned long	   : 4;
-		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
-		unsigned long	   : 2;
-		unsigned long iep  : 1; /* Instruction-Execution-Protection */
-		unsigned long	   : 1;
-		unsigned long afp  : 1; /* AFP-register control */
-		unsigned long vx   : 1; /* Vector enablement control */
-		unsigned long	   : 7;
-		unsigned long sssm : 1; /* Service signal subclass mask */
-		unsigned long	   : 9;
-	};
-};
-
-union ctlreg2 {
-	unsigned long val;
-	struct {
-		unsigned long	    : 33;
-		unsigned long ducto : 25;
-		unsigned long	    : 1;
-		unsigned long gse   : 1;
-		unsigned long	    : 1;
-		unsigned long tds   : 1;
-		unsigned long tdc   : 2;
-	};
-};
-
-union ctlreg5 {
-	unsigned long val;
-	struct {
-		unsigned long	    : 33;
-		unsigned long pasteo: 25;
-		unsigned long	    : 6;
-	};
-};
-
-union ctlreg15 {
-	unsigned long val;
-	struct {
-		unsigned long lsea  : 61;
-		unsigned long	    : 3;
-	};
-};
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_CTL_REG_H */
arch/s390/include/asm/ctlreg.h (new file, 251 lines)
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __ASM_S390_CTLREG_H
+#define __ASM_S390_CTLREG_H
+
+#include <linux/bits.h>
+
+#define CR0_TRANSACTIONAL_EXECUTION_BIT		(63 - 8)
+#define CR0_CLOCK_COMPARATOR_SIGN_BIT		(63 - 10)
+#define CR0_CRYPTOGRAPHY_COUNTER_BIT		(63 - 13)
+#define CR0_PAI_EXTENSION_BIT			(63 - 14)
+#define CR0_CPUMF_EXTRACTION_AUTH_BIT		(63 - 15)
+#define CR0_WARNING_TRACK_BIT			(63 - 30)
+#define CR0_LOW_ADDRESS_PROTECTION_BIT		(63 - 35)
+#define CR0_FETCH_PROTECTION_OVERRIDE_BIT	(63 - 38)
+#define CR0_STORAGE_PROTECTION_OVERRIDE_BIT	(63 - 39)
+#define CR0_EDAT_BIT				(63 - 40)
+#define CR0_INSTRUCTION_EXEC_PROTECTION_BIT	(63 - 43)
+#define CR0_VECTOR_BIT				(63 - 46)
+#define CR0_MALFUNCTION_ALERT_SUBMASK_BIT	(63 - 48)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK_BIT	(63 - 49)
+#define CR0_EXTERNAL_CALL_SUBMASK_BIT		(63 - 50)
+#define CR0_CLOCK_COMPARATOR_SUBMASK_BIT	(63 - 52)
+#define CR0_CPU_TIMER_SUBMASK_BIT		(63 - 53)
+#define CR0_SERVICE_SIGNAL_SUBMASK_BIT		(63 - 54)
+#define CR0_UNUSED_56_BIT			(63 - 56)
+#define CR0_INTERRUPT_KEY_SUBMASK_BIT		(63 - 57)
+#define CR0_MEASUREMENT_ALERT_SUBMASK_BIT	(63 - 58)
+#define CR0_ETR_SUBMASK_BIT			(63 - 59)
+#define CR0_IUCV_BIT				(63 - 62)
+
+#define CR0_TRANSACTIONAL_EXECUTION	BIT(CR0_TRANSACTIONAL_EXECUTION_BIT)
+#define CR0_CLOCK_COMPARATOR_SIGN	BIT(CR0_CLOCK_COMPARATOR_SIGN_BIT)
+#define CR0_CRYPTOGRAPHY_COUNTER	BIT(CR0_CRYPTOGRAPHY_COUNTER_BIT)
+#define CR0_PAI_EXTENSION		BIT(CR0_PAI_EXTENSION_BIT)
+#define CR0_CPUMF_EXTRACTION_AUTH	BIT(CR0_CPUMF_EXTRACTION_AUTH_BIT)
+#define CR0_WARNING_TRACK		BIT(CR0_WARNING_TRACK_BIT)
+#define CR0_LOW_ADDRESS_PROTECTION	BIT(CR0_LOW_ADDRESS_PROTECTION_BIT)
+#define CR0_FETCH_PROTECTION_OVERRIDE	BIT(CR0_FETCH_PROTECTION_OVERRIDE_BIT)
+#define CR0_STORAGE_PROTECTION_OVERRIDE	BIT(CR0_STORAGE_PROTECTION_OVERRIDE_BIT)
+#define CR0_EDAT			BIT(CR0_EDAT_BIT)
+#define CR0_INSTRUCTION_EXEC_PROTECTION	BIT(CR0_INSTRUCTION_EXEC_PROTECTION_BIT)
+#define CR0_VECTOR			BIT(CR0_VECTOR_BIT)
+#define CR0_MALFUNCTION_ALERT_SUBMASK	BIT(CR0_MALFUNCTION_ALERT_SUBMASK_BIT)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK	BIT(CR0_EMERGENCY_SIGNAL_SUBMASK_BIT)
+#define CR0_EXTERNAL_CALL_SUBMASK	BIT(CR0_EXTERNAL_CALL_SUBMASK_BIT)
+#define CR0_CLOCK_COMPARATOR_SUBMASK	BIT(CR0_CLOCK_COMPARATOR_SUBMASK_BIT)
+#define CR0_CPU_TIMER_SUBMASK		BIT(CR0_CPU_TIMER_SUBMASK_BIT)
+#define CR0_SERVICE_SIGNAL_SUBMASK	BIT(CR0_SERVICE_SIGNAL_SUBMASK_BIT)
+#define CR0_UNUSED_56			BIT(CR0_UNUSED_56_BIT)
+#define CR0_INTERRUPT_KEY_SUBMASK	BIT(CR0_INTERRUPT_KEY_SUBMASK_BIT)
+#define CR0_MEASUREMENT_ALERT_SUBMASK	BIT(CR0_MEASUREMENT_ALERT_SUBMASK_BIT)
+#define CR0_ETR_SUBMASK			BIT(CR0_ETR_SUBMASK_BIT)
+#define CR0_IUCV			BIT(CR0_IUCV_BIT)
+
+#define CR2_MIO_ADDRESSING_BIT		(63 - 58)
+#define CR2_GUARDED_STORAGE_BIT		(63 - 59)
+
+#define CR2_MIO_ADDRESSING		BIT(CR2_MIO_ADDRESSING_BIT)
+#define CR2_GUARDED_STORAGE		BIT(CR2_GUARDED_STORAGE_BIT)
+
+#define CR14_UNUSED_32_BIT		(63 - 32)
+#define CR14_UNUSED_33_BIT		(63 - 33)
+#define CR14_CHANNEL_REPORT_SUBMASK_BIT	(63 - 35)
+#define CR14_RECOVERY_SUBMASK_BIT	(63 - 36)
+#define CR14_DEGRADATION_SUBMASK_BIT	(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK_BIT (63 - 38)
+#define CR14_WARNING_SUBMASK_BIT	(63 - 39)
+
+#define CR14_UNUSED_32			BIT(CR14_UNUSED_32_BIT)
+#define CR14_UNUSED_33			BIT(CR14_UNUSED_33_BIT)
+#define CR14_CHANNEL_REPORT_SUBMASK	BIT(CR14_CHANNEL_REPORT_SUBMASK_BIT)
+#define CR14_RECOVERY_SUBMASK		BIT(CR14_RECOVERY_SUBMASK_BIT)
+#define CR14_DEGRADATION_SUBMASK	BIT(CR14_DEGRADATION_SUBMASK_BIT)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK	BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT)
+#define CR14_WARNING_SUBMASK		BIT(CR14_WARNING_SUBMASK_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+
+struct ctlreg {
+	unsigned long val;
+};
+
+#define __local_ctl_load(low, high, array) do {			\
+	struct addrtype {						\
+		char _[sizeof(array)];					\
+	};								\
+	int _high = high;						\
+	int _low = low;							\
+	int _esize;							\
+									\
+	_esize = (_high - _low + 1) * sizeof(struct ctlreg);		\
+	BUILD_BUG_ON(sizeof(struct addrtype) != _esize);		\
+	typecheck(struct ctlreg, array[0]);				\
+	asm volatile(							\
+		"	lctlg	%[_low],%[_high],%[_arr]\n"		\
+		:							\
+		: [_arr] "Q" (*(struct addrtype *)(&array)),		\
+		  [_low] "i" (low), [_high] "i" (high)			\
+		: "memory");						\
+} while (0)
+
+#define __local_ctl_store(low, high, array) do {			\
+	struct addrtype {						\
+		char _[sizeof(array)];					\
+	};								\
+	int _high = high;						\
+	int _low = low;							\
+	int _esize;							\
+									\
+	_esize = (_high - _low + 1) * sizeof(struct ctlreg);		\
+	BUILD_BUG_ON(sizeof(struct addrtype) != _esize);		\
+	typecheck(struct ctlreg, array[0]);				\
+	asm volatile(							\
+		"	stctg	%[_low],%[_high],%[_arr]\n"		\
+		: [_arr] "=Q" (*(struct addrtype *)(&array))		\
+		: [_low] "i" (low), [_high] "i" (high));		\
+} while (0)
+
+static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+	asm volatile(
+		"	lctlg	%[cr],%[cr],%[reg]\n"
+		:
+		: [reg] "Q" (*reg), [cr] "i" (cr)
+		: "memory");
+}
+
+static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
+{
+	asm volatile(
+		"	stctg	%[cr],%[cr],%[reg]\n"
+		: [reg] "=Q" (*reg)
+		: [cr] "i" (cr));
+}
+
+static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+	struct ctlreg reg;
+
+	local_ctl_store(cr, &reg);
+	reg.val |= 1UL << bit;
+	local_ctl_load(cr, &reg);
+}
+
+static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+	struct ctlreg reg;
+
+	local_ctl_store(cr, &reg);
+	reg.val &= ~(1UL << bit);
+	local_ctl_load(cr, &reg);
+}
+
+struct lowcore;
+
+void system_ctlreg_lock(void);
+void system_ctlreg_unlock(void);
+void system_ctlreg_init_save_area(struct lowcore *lc);
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request);
+
+enum {
+	CTLREG_SET_BIT,
+	CTLREG_CLEAR_BIT,
+	CTLREG_LOAD,
+};
+
+static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+	system_ctlreg_modify(cr, bit, CTLREG_SET_BIT);
+}
+
+static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+	system_ctlreg_modify(cr, bit, CTLREG_CLEAR_BIT);
+}
+
+static inline void system_ctl_load(unsigned int cr, struct ctlreg *reg)
+{
+	system_ctlreg_modify(cr, reg->val, CTLREG_LOAD);
+}
+
+union ctlreg0 {
+	unsigned long val;
+	struct ctlreg reg;
+	struct {
+		unsigned long	   : 8;
+		unsigned long tcx  : 1;	/* Transactional-Execution control */
+		unsigned long pifo : 1;	/* Transactional-Execution Program-
+					   Interruption-Filtering Override */
+		unsigned long	   : 3;
+		unsigned long ccc  : 1; /* Cryptography counter control */
+		unsigned long pec  : 1; /* PAI extension control */
+		unsigned long	   : 17;
+		unsigned long	   : 3;
+		unsigned long lap  : 1; /* Low-address-protection control */
+		unsigned long	   : 4;
+		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+		unsigned long	   : 2;
+		unsigned long iep  : 1; /* Instruction-Execution-Protection */
+		unsigned long	   : 1;
+		unsigned long afp  : 1; /* AFP-register control */
+		unsigned long vx   : 1; /* Vector enablement control */
+		unsigned long	   : 7;
+		unsigned long sssm : 1; /* Service signal subclass mask */
+		unsigned long	   : 9;
+	};
+};
+
+union ctlreg2 {
+	unsigned long val;
+	struct ctlreg reg;
+	struct {
+		unsigned long	    : 33;
+		unsigned long ducto : 25;
+		unsigned long	    : 1;
+		unsigned long gse   : 1;
+		unsigned long	    : 1;
+		unsigned long tds   : 1;
+		unsigned long tdc   : 2;
+	};
+};
+
+union ctlreg5 {
+	unsigned long val;
+	struct ctlreg reg;
+	struct {
+		unsigned long	    : 33;
+		unsigned long pasteo: 25;
+		unsigned long	    : 6;
+	};
+};
+
+union ctlreg15 {
+	unsigned long val;
+	struct ctlreg reg;
+	struct {
+		unsigned long lsea  : 61;
+		unsigned long	    : 3;
+	};
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_CTLREG_H */
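Annotation: the union ctlreg0/2/5/15 definitions carried over from the deleted header each gain a struct ctlreg reg member, so callers can switch between the bitfield view and the typed load/store interface without casts. A condensed sketch of the pattern the nmi.c hunks further down rely on (illustrative, not a verbatim excerpt):

	union ctlreg0 cr0;

	local_ctl_store(0, &cr0.reg);	/* read CR0 through the typed interface */
	cr0.lap = 0;			/* modify a field through the bitfield view */
	local_ctl_load(0, &cr0.reg);	/* write it back */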
arch/s390/include/asm/fault.h (new file, 28 lines)
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_FAULT_H
+#define _ASM_S390_FAULT_H
+
+union teid {
+	unsigned long val;
+	struct {
+		unsigned long addr : 52; /* Translation-exception Address */
+		unsigned long fsi  : 2;	 /* Access Exception Fetch/Store Indication */
+		unsigned long	   : 2;
+		unsigned long b56  : 1;
+		unsigned long	   : 3;
+		unsigned long b60  : 1;
+		unsigned long b61  : 1;
+		unsigned long as   : 2;	 /* ASCE Identifier */
+	};
+};
+
+enum {
+	TEID_FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+	TEID_FSI_STORE	 = 1, /* Exception was due to store operation */
+	TEID_FSI_FETCH	 = 2  /* Exception was due to fetch operation */
+};
+
+#endif /* _ASM_S390_FAULT_H */
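Annotation: union teid gives the fault-handling rework a typed view of the translation-exception identification word. A hypothetical helper showing how the fetch/store indication would be decoded (the field layout is from the header above; the function itself is illustrative only, not in-tree code):

	static bool teid_is_write_fault(unsigned long teid_val)
	{
		union teid teid = { .val = teid_val };

		/* fsi distinguishes store from fetch accesses, when known */
		return teid.fsi == TEID_FSI_STORE;
	}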
@@ -10,7 +10,6 @@
 #define _ASM_S390_FPU_INTERNAL_H
 
 #include <linux/string.h>
-#include <asm/ctl_reg.h>
 #include <asm/fpu/types.h>
 
 static inline void save_vx_regs(__vector128 *vxrs)
@@ -31,6 +31,7 @@
 #include <linux/percpu.h>
 #include <linux/cache.h>
 #include <linux/types.h>
+#include <asm/ctlreg.h>
 
 enum interruption_class {
 	IRQEXT_CLK,
@@ -101,17 +102,17 @@ enum irq_subclass {
 };
 
 #define CR0_IRQ_SUBCLASS_MASK \
-	((1UL << (63 - 30))  /* Warning Track */	| \
-	 (1UL << (63 - 48))  /* Malfunction Alert */	| \
-	 (1UL << (63 - 49))  /* Emergency Signal */	| \
-	 (1UL << (63 - 50))  /* External Call */	| \
-	 (1UL << (63 - 52))  /* Clock Comparator */	| \
-	 (1UL << (63 - 53))  /* CPU Timer */		| \
-	 (1UL << (63 - 54))  /* Service Signal */	| \
-	 (1UL << (63 - 57))  /* Interrupt Key */	| \
-	 (1UL << (63 - 58))  /* Measurement Alert */	| \
-	 (1UL << (63 - 59))  /* Timing Alert */		| \
-	 (1UL << (63 - 62))) /* IUCV */
+	(CR0_WARNING_TRACK		| \
+	 CR0_MALFUNCTION_ALERT_SUBMASK	| \
+	 CR0_EMERGENCY_SIGNAL_SUBMASK	| \
+	 CR0_EXTERNAL_CALL_SUBMASK	| \
+	 CR0_CLOCK_COMPARATOR_SUBMASK	| \
+	 CR0_CPU_TIMER_SUBMASK		| \
+	 CR0_SERVICE_SIGNAL_SUBMASK	| \
+	 CR0_INTERRUPT_KEY_SUBMASK	| \
+	 CR0_MEASUREMENT_ALERT_SUBMASK	| \
+	 CR0_ETR_SUBMASK		| \
+	 CR0_IUCV)
 
 void irq_subclass_register(enum irq_subclass subclass);
 void irq_subclass_unregister(enum irq_subclass subclass);
@@ -15,6 +15,7 @@
  * <grundym@us.ibm.com>
  */
 #include <linux/types.h>
+#include <asm/ctlreg.h>
 #include <asm-generic/kprobes.h>
 
 #define BREAKPOINT_INSTRUCTION	0x0002
@@ -65,7 +66,7 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
+	struct ctlreg kprobe_saved_ctl[3];
 	struct prev_kprobe prev_kprobe;
 };
 
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <asm/ptrace.h>
+#include <asm/ctlreg.h>
 #include <asm/cpu.h>
 #include <asm/types.h>
 
@@ -139,8 +140,8 @@ struct lowcore {
 	__u32	restart_flags;			/* 0x0384 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0388 */
-	__u64	user_asce;			/* 0x0390 */
+	struct ctlreg	kernel_asce;		/* 0x0388 */
+	struct ctlreg	user_asce;		/* 0x0390 */
 
 	/*
 	 * The lpp and current_pid fields form a
@@ -199,7 +200,7 @@ struct lowcore {
 	__u32	clock_comp_save_area[2];	/* 0x1330 */
 	__u64	last_break_save_area;		/* 0x1338 */
 	__u32	access_regs_save_area[16];	/* 0x1340 */
-	__u64	cregs_save_area[16];		/* 0x1380 */
+	struct ctlreg	cregs_save_area[16];	/* 0x1380 */
 	__u8	pad_0x1400[0x1500-0x1400];	/* 0x1400 */
 	/* Cryptography-counter designation */
 	__u64	ccd;				/* 0x1500 */
@@ -221,12 +222,4 @@ static inline void set_prefix(__u32 address)
 	asm volatile("spx %0" : : "Q" (address) : "memory");
 }
 
-static inline __u32 store_prefix(void)
-{
-	__u32 address;
-
-	asm volatile("stpx %0" : "=Q" (address));
-	return address;
-}
-
 #endif /* _ASM_S390_LOWCORE_H */
@@ -12,7 +12,7 @@
 #include <linux/uaccess.h>
 #include <linux/mm_types.h>
 #include <asm/tlbflush.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
 #include <asm-generic/mm_hooks.h>
 
 #define init_new_context init_new_context
@@ -78,10 +78,10 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
 	if (next == &init_mm)
 		S390_lowcore.user_asce = s390_invalid_asce;
 	else
-		S390_lowcore.user_asce = next->context.asce;
+		S390_lowcore.user_asce.val = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR7 */
-	__ctl_load(s390_invalid_asce, 7, 7);
+	local_ctl_load(7, &s390_invalid_asce);
 	if (prev != next)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
@@ -111,7 +111,7 @@ static inline void finish_arch_post_lock_switch(void)
 		__tlb_flush_mm_lazy(mm);
 		preempt_enable();
 	}
-	__ctl_load(S390_lowcore.user_asce, 7, 7);
+	local_ctl_load(7, &S390_lowcore.user_asce);
 }
 
 #define activate_mm activate_mm
@@ -120,7 +120,7 @@ static inline void activate_mm(struct mm_struct *prev,
 {
 	switch_mm(prev, next, current);
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	__ctl_load(S390_lowcore.user_asce, 7, 7);
+	local_ctl_load(7, &S390_lowcore.user_asce);
 }
 
 #include <asm-generic/mmu_context.h>
@@ -18,6 +18,7 @@
 #include <linux/radix-tree.h>
 #include <linux/atomic.h>
 #include <asm/sections.h>
+#include <asm/ctlreg.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 #include <asm/uv.h>
@@ -25,7 +26,7 @@
 extern pgd_t swapper_pg_dir[];
 extern pgd_t invalid_pg_dir[];
 extern void paging_init(void);
-extern unsigned long s390_invalid_asce;
+extern struct ctlreg s390_invalid_asce;
 
 enum {
 	PG_DIRECT_MAP_4K = 0,
@@ -59,7 +59,6 @@ static inline void smp_cpus_done(unsigned int max_cpus)
 {
 }
 
-extern int smp_reinit_ipl_cpu(void);
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
 extern void __cpu_die(unsigned int cpu);
@@ -15,7 +15,6 @@
  */
 #include <asm/asm-extable.h>
 #include <asm/processor.h>
-#include <asm/ctl_reg.h>
 #include <asm/extable.h>
 #include <asm/facility.h>
 #include <asm-generic/access_ok.h>
arch/s390/include/asm/word-at-a-time.h (new file, 64 lines)
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/asm-extable.h>
+#include <asm/bitsperlong.h>
+
+struct word_at_a_time {
+	const unsigned long bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x7f) }
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long data, const struct word_at_a_time *c)
+{
+	return data;
+}
+
+static inline unsigned long create_zero_mask(unsigned long data)
+{
+	return __fls(data);
+}
+
+static inline unsigned long find_zero(unsigned long data)
+{
+	return (data ^ (BITS_PER_LONG - 1)) >> 3;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+	unsigned long mask = (val & c->bits) + c->bits;
+
+	*data = ~(mask | val | c->bits);
+	return *data;
+}
+
+static inline unsigned long zero_bytemask(unsigned long data)
+{
+	return ~1UL << data;
+}
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long data;
+
+	asm volatile(
+		"0:	lg	%[data],0(%[addr])\n"
+		"1:	nopr	%%r7\n"
+		EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
+		EX_TABLE_ZEROPAD(1b, 1b, %[data], %[addr])
+		: [data] "=d" (data)
+		: [addr] "a" (addr), "m" (*(unsigned long *)addr));
+	return data;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
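Annotation: these callbacks slot into the generic word-at-a-time string loops, which is what the DCACHE_WORD_ACCESS select in the Kconfig hunk enables for dentry name hashing. A condensed strlen-style sketch of how they compose on a word-aligned string, following the generic usage pattern rather than any specific in-tree caller:

	static size_t wata_strlen(const char *s)	/* assumes s is word-aligned */
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long val, data;
		size_t len = 0;

		for (;; len += sizeof(unsigned long)) {
			val = *p++;				/* one word per iteration */
			if (has_zero(val, &data, &constants)) {	/* any NUL byte in this word? */
				data = prep_zero_mask(val, data, &constants);
				data = create_zero_mask(data);
				return len + find_zero(data);	/* byte index of the NUL */
			}
		}
	}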
@@ -37,7 +37,7 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
 obj-y	:= head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
-obj-y	+= sysinfo.o lgr.o os_info.o
+obj-y	+= sysinfo.o lgr.o os_info.o ctlreg.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y	+= entry.o reipl.o kdebugfs.o alternative.o
 obj-y	+= nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
@@ -498,7 +498,7 @@ static int get_mem_chunk_cnt(void)
 /*
  * Initialize ELF loads (new kernel)
  */
-static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr)
 {
 	phys_addr_t start, end;
 	u64 idx;
@@ -507,7 +507,7 @@ static void loads_init(Elf64_Phdr *phdr)
 	phdr->p_filesz = end - start;
 	phdr->p_type = PT_LOAD;
 	phdr->p_offset = start;
-	phdr->p_vaddr = start;
+	phdr->p_vaddr = (unsigned long)__va(start);
 	phdr->p_paddr = start;
 	phdr->p_memsz = end - start;
 	phdr->p_flags = PF_R | PF_W | PF_X;
@@ -612,7 +612,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
 	/* Init loads */
 	hdr_off = PTR_DIFF(ptr, hdr);
-	loads_init(phdr_loads, hdr_off);
+	loads_init(phdr_loads);
 	*addr = (unsigned long long) hdr;
 	*size = (unsigned long long) hdr_off;
 	BUG_ON(elfcorehdr_size > alloc_size);
arch/s390/kernel/ctlreg.c (new file, 121 lines)
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+
+#include <linux/irqflags.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cache.h>
+#include <asm/abs_lowcore.h>
+#include <asm/ctlreg.h>
+
+/*
+ * ctl_lock guards access to global control register contents which
+ * are kept in the control register save area within absolute lowcore
+ * at physical address zero.
+ */
+static DEFINE_SPINLOCK(system_ctl_lock);
+
+void system_ctlreg_lock(void)
+	__acquires(&system_ctl_lock)
+{
+	spin_lock(&system_ctl_lock);
+}
+
+void system_ctlreg_unlock(void)
+	__releases(&system_ctl_lock)
+{
+	spin_unlock(&system_ctl_lock);
+}
+
+static bool system_ctlreg_area_init __ro_after_init;
+
+void __init system_ctlreg_init_save_area(struct lowcore *lc)
+{
+	struct lowcore *abs_lc;
+
+	abs_lc = get_abs_lowcore();
+	__local_ctl_store(0, 15, lc->cregs_save_area);
+	__local_ctl_store(0, 15, abs_lc->cregs_save_area);
+	put_abs_lowcore(abs_lc);
+	system_ctlreg_area_init = true;
+}
+
+struct ctlreg_parms {
+	unsigned long andval;
+	unsigned long orval;
+	unsigned long val;
+	int request;
+	int cr;
+};
+
+static void ctlreg_callback(void *info)
+{
+	struct ctlreg_parms *pp = info;
+	struct ctlreg regs[16];
+
+	__local_ctl_store(0, 15, regs);
+	if (pp->request == CTLREG_LOAD) {
+		regs[pp->cr].val = pp->val;
+	} else {
+		regs[pp->cr].val &= pp->andval;
+		regs[pp->cr].val |= pp->orval;
+	}
+	__local_ctl_load(0, 15, regs);
+}
+
+static void system_ctlreg_update(void *info)
+{
+	unsigned long flags;
+
+	if (system_state == SYSTEM_BOOTING) {
+		/*
+		 * For very early calls do not call on_each_cpu()
+		 * since not everything might be setup.
+		 */
+		local_irq_save(flags);
+		ctlreg_callback(info);
+		local_irq_restore(flags);
+	} else {
+		on_each_cpu(ctlreg_callback, info, 1);
+	}
+}
+
+void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
+{
+	struct ctlreg_parms pp = { .cr = cr, .request = request, };
+	struct lowcore *abs_lc;
+
+	switch (request) {
+	case CTLREG_SET_BIT:
+		pp.orval  = 1UL << data;
+		pp.andval = -1UL;
+		break;
+	case CTLREG_CLEAR_BIT:
+		pp.orval  = 0;
+		pp.andval = ~(1UL << data);
+		break;
+	case CTLREG_LOAD:
+		pp.val = data;
+		break;
+	}
+	if (system_ctlreg_area_init) {
+		system_ctlreg_lock();
+		abs_lc = get_abs_lowcore();
+		if (request == CTLREG_LOAD) {
+			abs_lc->cregs_save_area[cr].val = pp.val;
+		} else {
+			abs_lc->cregs_save_area[cr].val &= pp.andval;
+			abs_lc->cregs_save_area[cr].val |= pp.orval;
+		}
+		put_abs_lowcore(abs_lc);
+		system_ctlreg_update(&pp);
+		system_ctlreg_unlock();
+	} else {
+		system_ctlreg_update(&pp);
+	}
+}
+EXPORT_SYMBOL(system_ctlreg_modify);
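Annotation: the local/system split now has a single choke point. system_ctlreg_modify() keeps the copies coherent: the live register on every online CPU plus the save areas in lowcore and absolute lowcore that machine-check recovery and CPU bring-up reload from. Roughly, a system_ctl_set_bit(cr, bit) call boils down to the following flow (a sketch of the code above, not literal source):

	system_ctlreg_lock();				/* serialize system-wide updates  */
	abs_lc->cregs_save_area[cr].val |= 1UL << bit;	/* newly started CPUs see the bit */
	system_ctlreg_update(&pp);			/* on_each_cpu(): live CRs now,   */
							/* or just this CPU while booting */
	system_ctlreg_unlock();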
@@ -245,6 +245,7 @@ EXPORT_SYMBOL(diag8c);
 
 int diag224(void *ptr)
 {
+	unsigned long addr = __pa(ptr);
 	int rc = -EOPNOTSUPP;
 
 	diag_stat_inc(DIAG_STAT_X224);
@@ -253,7 +254,7 @@ int diag224(void *ptr)
 		"0:	lhi	%0,0x0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
-		: "+d" (rc) :"d" (0), "d" (ptr) : "memory");
+		: "+d" (rc) :"d" (0), "d" (addr) : "memory");
 	return rc;
 }
 EXPORT_SYMBOL(diag224);
@@ -216,7 +216,7 @@ static __init void detect_machine_facilities(void)
 {
 	if (test_facility(8)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
-		__ctl_set_bit(0, 23);
+		system_ctl_set_bit(0, CR0_EDAT_BIT);
 	}
 	if (test_facility(78))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
@@ -224,13 +224,13 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
 	if (test_facility(50) && test_facility(73)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
-		__ctl_set_bit(0, 55);
+		system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
 	}
 	if (test_facility(51))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-		__ctl_set_bit(0, 17);
+		system_ctl_set_bit(0, CR0_VECTOR_BIT);
 	}
 	if (test_facility(130))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
@@ -240,7 +240,7 @@ static __init void detect_machine_facilities(void)
 		/* Enabled signed clock comparator comparisons */
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
 		clock_comparator_max = -1ULL >> 1;
-		__ctl_set_bit(0, 53);
+		system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
 	}
 	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
@@ -258,15 +258,9 @@ static inline void save_vector_registers(void)
 #endif
 }
 
-static inline void setup_control_registers(void)
+static inline void setup_low_address_protection(void)
 {
-	unsigned long reg;
-
-	__ctl_store(reg, 0, 0);
-	reg |= CR0_LOW_ADDRESS_PROTECTION;
-	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
-	reg |= CR0_EXTERNAL_CALL_SUBMASK;
-	__ctl_load(reg, 0, 0);
+	system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
 }
 
 static inline void setup_access_registers(void)
@@ -279,7 +273,7 @@ static inline void setup_access_registers(void)
 static int __init disable_vector_extension(char *str)
 {
 	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
-	__ctl_clear_bit(0, 17);
+	system_ctl_clear_bit(0, CR0_VECTOR_BIT);
 	return 0;
 }
 early_param("novx", disable_vector_extension);
@@ -314,7 +308,7 @@ void __init startup_init(void)
 	save_vector_registers();
 	setup_topology();
 	sclp_early_detect();
-	setup_control_registers();
+	setup_low_address_protection();
 	setup_access_registers();
 	lockdep_on();
 }
@@ -28,7 +28,7 @@ static int gs_enable(void)
 		return -ENOMEM;
 	gs_cb->gsd = 25;
 	preempt_disable();
-	__ctl_set_bit(2, 4);
+	local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
 	load_gs_cb(gs_cb);
 	current->thread.gs_cb = gs_cb;
 	preempt_enable();
@@ -42,7 +42,7 @@ static int gs_disable(void)
 		preempt_disable();
 		kfree(current->thread.gs_cb);
 		current->thread.gs_cb = NULL;
-		__ctl_clear_bit(2, 4);
+		local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
 		preempt_enable();
 	}
 	return 0;
@@ -84,7 +84,7 @@ void gs_load_bc_cb(struct pt_regs *regs)
 	if (gs_cb) {
 		kfree(current->thread.gs_cb);
 		current->thread.gs_bc_cb = NULL;
-		__ctl_set_bit(2, 4);
+		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
 		load_gs_cb(gs_cb);
 		current->thread.gs_cb = gs_cb;
 	}
@@ -2381,7 +2381,7 @@ void s390_reset_system(void)
 	set_prefix(0);
 
 	/* Disable lowcore protection */
-	__ctl_clear_bit(0, 28);
+	local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
 	diag_amode31_ops.diag308_reset();
 }
 
@@ -385,7 +385,7 @@ void irq_subclass_register(enum irq_subclass subclass)
 {
 	spin_lock(&irq_subclass_lock);
 	if (!irq_subclass_refcount[subclass])
-		ctl_set_bit(0, subclass);
+		system_ctl_set_bit(0, subclass);
 	irq_subclass_refcount[subclass]++;
 	spin_unlock(&irq_subclass_lock);
 }
@@ -396,7 +396,7 @@ void irq_subclass_unregister(enum irq_subclass subclass)
 	spin_lock(&irq_subclass_lock);
 	irq_subclass_refcount[subclass]--;
 	if (!irq_subclass_refcount[subclass])
-		ctl_clear_bit(0, subclass);
+		system_ctl_clear_bit(0, subclass);
 	spin_unlock(&irq_subclass_lock);
 }
 EXPORT_SYMBOL(irq_subclass_unregister);
@@ -224,20 +224,27 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
 			      struct pt_regs *regs,
 			      unsigned long ip)
 {
-	struct per_regs per_kprobe;
+	union {
+		struct ctlreg regs[3];
+		struct {
+			struct ctlreg control;
+			struct ctlreg start;
+			struct ctlreg end;
+		};
+	} per_kprobe;
 
 	/* Set up the PER control registers %cr9-%cr11 */
-	per_kprobe.control = PER_EVENT_IFETCH;
-	per_kprobe.start = ip;
-	per_kprobe.end = ip;
+	per_kprobe.control.val = PER_EVENT_IFETCH;
+	per_kprobe.start.val = ip;
+	per_kprobe.end.val = ip;
 
 	/* Save control regs and psw mask */
-	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+	__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
 	kcb->kprobe_saved_imask = regs->psw.mask &
 				  (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
 
 	/* Set PER control regs, turns on single step for the given address */
-	__ctl_load(per_kprobe, 9, 11);
+	__local_ctl_load(9, 11, per_kprobe.regs);
 	regs->psw.mask |= PSW_MASK_PER;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 	regs->psw.addr = ip;
@@ -249,7 +256,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
 			       unsigned long ip)
 {
 	/* Restore control regs and psw mask, set new psw address */
-	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	__local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
 	regs->psw.mask &= ~PSW_MASK_PER;
 	regs->psw.mask |= kcb->kprobe_saved_imask;
 	regs->psw.addr = ip;
@@ -94,12 +94,12 @@ static noinline void __machine_kdump(void *image)
 	if (MACHINE_HAS_VX)
 		save_vx_regs((__vector128 *) mcesa->vector_save_area);
 	if (MACHINE_HAS_GS) {
-		__ctl_store(cr2_old.val, 2, 2);
+		local_ctl_store(2, &cr2_old.reg);
 		cr2_new = cr2_old;
 		cr2_new.gse = 1;
-		__ctl_load(cr2_new.val, 2, 2);
+		local_ctl_load(2, &cr2_new.reg);
 		save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
-		__ctl_load(cr2_old.val, 2, 2);
+		local_ctl_load(2, &cr2_old.reg);
 	}
 	/*
 	 * To create a good backchain for this CPU in the dump store_status
@@ -22,13 +22,13 @@
 #include <linux/kvm_host.h>
 #include <linux/export.h>
 #include <asm/lowcore.h>
+#include <asm/ctlreg.h>
 #include <asm/smp.h>
 #include <asm/stp.h>
 #include <asm/cputime.h>
 #include <asm/nmi.h>
 #include <asm/crw.h>
 #include <asm/switch_to.h>
-#include <asm/ctl_reg.h>
 #include <asm/asm-offsets.h>
 #include <asm/pai.h>
 #include <asm/vx-insn.h>
@@ -131,10 +131,10 @@ static notrace void s390_handle_damage(void)
 	 * Disable low address protection and make machine check new PSW a
 	 * disabled wait PSW. Any additional machine check cannot be handled.
 	 */
-	__ctl_store(cr0.val, 0, 0);
+	local_ctl_store(0, &cr0.reg);
 	cr0_new = cr0;
 	cr0_new.lap = 0;
-	__ctl_load(cr0_new.val, 0, 0);
+	local_ctl_load(0, &cr0_new.reg);
 	psw_save = S390_lowcore.mcck_new_psw;
 	psw_bits(S390_lowcore.mcck_new_psw).io = 0;
 	psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +146,7 @@ static notrace void s390_handle_damage(void)
 	 * values. This makes possible system dump analysis easier.
 	 */
 	S390_lowcore.mcck_new_psw = psw_save;
-	__ctl_load(cr0.val, 0, 0);
+	local_ctl_load(0, &cr0.reg);
 	disabled_wait();
 	while (1);
 }
@@ -185,7 +185,7 @@ void s390_handle_mcck(void)
 		static int mchchk_wng_posted = 0;
 
 		/* Use single cpu clear, as we cannot handle smp here. */
-		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
+		local_ctl_clear_bit(14, CR14_WARNING_SUBMASK_BIT);
 		if (xchg(&mchchk_wng_posted, 1) == 0)
 			kill_cad_pid(SIGPWR, 1);
 	}
@@ -269,9 +269,9 @@ static int notrace s390_validate_registers(union mci mci)
 		 */
 		if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
 			kill_task = 1;
-		cr0.val = S390_lowcore.cregs_save_area[0];
+		cr0.reg = S390_lowcore.cregs_save_area[0];
 		cr0.afp = cr0.vx = 1;
-		__ctl_load(cr0.val, 0, 0);
+		local_ctl_load(0, &cr0.reg);
 		asm volatile(
 			"	la	1,%0\n"
 			"	VLM	0,15,0,1\n"
@@ -279,7 +279,7 @@ static int notrace s390_validate_registers(union mci mci)
 			:
 			: "Q" (*(struct vx_array *)mcesa->vector_save_area)
 			: "1");
-		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+		local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);
 	}
 	/* Validate access registers */
 	asm volatile(
@@ -290,7 +290,7 @@ static int notrace s390_validate_registers(union mci mci)
 	if (!mci.ar)
 		kill_task = 1;
 	/* Validate guarded storage registers */
-	cr2.val = S390_lowcore.cregs_save_area[2];
+	cr2.reg = S390_lowcore.cregs_save_area[2];
 	if (cr2.gse) {
 		if (!mci.gs) {
 			/*
@@ -505,9 +505,9 @@ NOKPROBE_SYMBOL(s390_do_machine_check);
 
 static int __init machine_check_init(void)
 {
-	ctl_set_bit(14, 25);	/* enable external damage MCH */
-	ctl_set_bit(14, 27);	/* enable system recovery MCH */
-	ctl_set_bit(14, 24);	/* enable warning MCH */
+	system_ctl_set_bit(14, CR14_EXTERNAL_DAMAGE_SUBMASK_BIT);
+	system_ctl_set_bit(14, CR14_RECOVERY_SUBMASK_BIT);
+	system_ctl_set_bit(14, CR14_WARNING_SUBMASK_BIT);
 	return 0;
 }
 early_initcall(machine_check_init);
@@ -1193,7 +1193,7 @@ static int __init cpumf_pmu_init(void)
 	 * Clear bit 15 of cr0 to unauthorize problem-state to
 	 * extract measurement counters
 	 */
-	ctl_clear_bit(0, 48);
+	system_ctl_clear_bit(0, CR0_CPUMF_EXTRACTION_AUTH_BIT);
 
 	/* register handler for measurement-alert interruptions */
 	rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
@ -16,8 +16,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
||||
#include <asm/ctl_reg.h>
|
||||
#include <asm/ctlreg.h>
|
||||
#include <asm/pai.h>
|
||||
#include <asm/debug.h>
|
||||
|
||||
@ -41,7 +40,43 @@ struct paicrypt_map {
|
||||
struct perf_event *event; /* Perf event for sampling */
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);
|
||||
struct paicrypt_mapptr {
|
||||
struct paicrypt_map *mapptr;
|
||||
};
|
||||
|
||||
static struct paicrypt_root { /* Anchor to per CPU data */
|
||||
refcount_t refcnt; /* Overall active events */
|
||||
struct paicrypt_mapptr __percpu *mapptr;
|
||||
} paicrypt_root;
|
||||
|
||||
/* Free per CPU data when the last event is removed. */
|
||||
static void paicrypt_root_free(void)
|
||||
{
|
||||
if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
|
||||
free_percpu(paicrypt_root.mapptr);
|
||||
paicrypt_root.mapptr = NULL;
|
||||
}
|
||||
debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
|
||||
refcount_read(&paicrypt_root.refcnt));
|
||||
}
|
||||
|
||||
/*
|
||||
* On initialization of first event also allocate per CPU data dynamically.
|
||||
* Start with an array of pointers, the array size is the maximum number of
|
||||
* CPUs possible, which might be larger than the number of CPUs currently
|
||||
* online.
|
||||
*/
|
||||
static int paicrypt_root_alloc(void)
|
||||
{
|
||||
if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
|
||||
/* The memory is already zeroed. */
|
||||
paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
|
||||
if (!paicrypt_root.mapptr)
|
||||
return -ENOMEM;
|
||||
refcount_set(&paicrypt_root.refcnt, 1);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Release the PMU if event is the last perf event */
|
||||
static DEFINE_MUTEX(pai_reserve_mutex);
|
||||
@ -51,7 +86,9 @@ static DEFINE_MUTEX(pai_reserve_mutex);
|
||||
*/
|
||||
static void paicrypt_event_destroy(struct perf_event *event)
|
||||
{
|
||||
struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
|
||||
struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
|
||||
event->cpu);
|
||||
struct paicrypt_map *cpump = mp->mapptr;
|
||||
|
||||
cpump->event = NULL;
|
||||
static_branch_dec(&pai_key);
|
||||
@ -66,11 +103,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
|
||||
__func__, (unsigned long)cpump->page,
|
||||
cpump->save);
|
||||
free_page((unsigned long)cpump->page);
|
||||
cpump->page = NULL;
|
||||
kvfree(cpump->save);
|
||||
cpump->save = NULL;
|
||||
cpump->mode = PAI_MODE_NONE;
|
||||
kfree(cpump);
|
||||
mp->mapptr = NULL;
|
||||
}
|
||||
paicrypt_root_free();
|
||||
mutex_unlock(&pai_reserve_mutex);
|
||||
}
|
||||
|
||||
@ -86,7 +123,8 @@ static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
|
||||
*/
|
||||
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
|
||||
{
|
||||
struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
|
||||
struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
|
||||
struct paicrypt_map *cpump = mp->mapptr;
|
||||
u64 sum = 0;
|
||||
int i;
|
||||
|
||||
@ -132,11 +170,31 @@ static u64 paicrypt_getall(struct perf_event *event)
|
||||
*
|
||||
* Allocate the memory for the event.
|
||||
*/
|
||||
static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
|
||||
static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
|
||||
{
|
||||
int rc = 0;
|
||||
struct perf_event_attr *a = &event->attr;
|
||||
struct paicrypt_map *cpump = NULL;
|
||||
struct paicrypt_mapptr *mp;
|
||||
int rc;
|
||||
|
||||
mutex_lock(&pai_reserve_mutex);
|
||||
|
||||
/* Allocate root node */
|
||||
rc = paicrypt_root_alloc();
|
||||
if (rc)
|
||||
goto unlock;
|
||||
|
||||
/* Allocate node for this event */
|
||||
mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
|
||||
cpump = mp->mapptr;
|
||||
if (!cpump) { /* Paicrypt_map allocated? */
|
||||
cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
|
||||
if (!cpump) {
|
||||
rc = -ENOMEM;
|
||||
goto free_root;
|
||||
}
|
||||
}
|
||||
|
||||
if (a->sample_period) { /* Sampling requested */
|
||||
if (cpump->mode != PAI_MODE_NONE)
|
||||
rc = -EBUSY; /* ... sampling/counting active */
|
||||
@ -144,8 +202,15 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
|
||||
if (cpump->mode == PAI_MODE_SAMPLING)
|
||||
rc = -EBUSY; /* ... and sampling active */
|
||||
}
|
||||
/*
|
||||
* This error case triggers when there is a conflict:
|
||||
* Either sampling requested and counting already active, or visa
|
||||
* versa. Therefore the struct paicrypto_map for this CPU is
|
||||
* needed or the error could not have occurred. Only adjust root
|
||||
* node refcount.
|
||||
*/
|
||||
if (rc)
|
||||
goto unlock;
|
||||
goto free_root;
|
||||
|
||||
/* Allocate memory for counter page and counter extraction.
|
||||
* Only the first counting event has to allocate a page.
|
||||
@@ -158,30 +223,36 @@ static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
    rc = -ENOMEM;
    cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
    if (!cpump->page)
        goto unlock;
        goto free_paicrypt_map;
    cpump->save = kvmalloc_array(paicrypt_cnt + 1,
                     sizeof(struct pai_userdata), GFP_KERNEL);
    if (!cpump->save) {
        free_page((unsigned long)cpump->page);
        cpump->page = NULL;
        goto unlock;
        goto free_paicrypt_map;
    }

    /* Set mode and reference count */
    rc = 0;
    refcount_set(&cpump->refcnt, 1);

unlock:
    /* If rc is non-zero, do not set mode and reference count */
    if (!rc) {
        cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
                       : PAI_MODE_COUNTING;
    }
    cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
    mp->mapptr = cpump;
    debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
                " mode %d refcnt %u page %#lx save %p rc %d\n",
                __func__, a->sample_period, cpump->active_events,
                cpump->mode, refcount_read(&cpump->refcnt),
                (unsigned long)cpump->page, cpump->save, rc);
    goto unlock;

free_paicrypt_map:
    kfree(cpump);
    mp->mapptr = NULL;
free_root:
    paicrypt_root_free();

unlock:
    mutex_unlock(&pai_reserve_mutex);
    return rc;
    return rc ? ERR_PTR(rc) : cpump;
}

/* Might be called on different CPU than the one the event is intended for. */
@@ -189,7 +260,6 @@ static int paicrypt_event_init(struct perf_event *event)
{
    struct perf_event_attr *a = &event->attr;
    struct paicrypt_map *cpump;
    int rc;

    /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
    if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
@@ -199,16 +269,15 @@ static int paicrypt_event_init(struct perf_event *event)
        a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
        return -EINVAL;
    /* Allow only CPU wide operation, no process context for now. */
    if (event->hw.target || event->cpu == -1)
    if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
        return -ENOENT;
    /* Allow only CRYPTO_ALL for sampling. */
    if (a->sample_period && a->config != PAI_CRYPTO_BASE)
        return -EINVAL;

    cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
    rc = paicrypt_busy(a, cpump);
    if (rc)
        return rc;
    cpump = paicrypt_busy(event);
    if (IS_ERR(cpump))
        return PTR_ERR(cpump);

    /* Event initialization sets last_tag to 0. When later on the events
     * are deleted and re-added, do not reset the event count value to zero.
@@ -216,7 +285,6 @@ static int paicrypt_event_init(struct perf_event *event)
     * are active at the same time.
     */
    event->hw.last_tag = 0;
    cpump->event = event;
    event->destroy = paicrypt_event_destroy;

    if (a->sample_period) {
@@ -253,20 +321,20 @@ static void paicrypt_start(struct perf_event *event, int flags)
    if (!event->hw.last_tag) {
        event->hw.last_tag = 1;
        sum = paicrypt_getall(event); /* Get current value */
        local64_set(&event->count, 0);
        local64_set(&event->hw.prev_count, sum);
    }
}

static int paicrypt_add(struct perf_event *event, int flags)
{
    struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
    struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
    struct paicrypt_map *cpump = mp->mapptr;
    unsigned long ccd;

    if (++cpump->active_events == 1) {
        ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
        WRITE_ONCE(S390_lowcore.ccd, ccd);
        __ctl_set_bit(0, 50);
        local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
    }
    cpump->event = event;
    if (flags & PERF_EF_START && !event->attr.sample_period) {
@@ -287,7 +355,8 @@ static void paicrypt_stop(struct perf_event *event, int flags)

static void paicrypt_del(struct perf_event *event, int flags)
{
    struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
    struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
    struct paicrypt_map *cpump = mp->mapptr;

    if (event->attr.sample_period)
        perf_sched_cb_dec(event->pmu);
@@ -295,7 +364,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
    /* Only counting needs to read counter */
    paicrypt_stop(event, PERF_EF_UPDATE);
    if (--cpump->active_events == 0) {
        __ctl_clear_bit(0, 50);
        local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
        WRITE_ONCE(S390_lowcore.ccd, 0);
    }
}
@@ -329,7 +398,8 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,

static int paicrypt_push_sample(void)
{
    struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
    struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
    struct paicrypt_map *cpump = mp->mapptr;
    struct perf_event *event = cpump->event;
    struct perf_sample_data data;
    struct perf_raw_record raw;
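
The paicrypt_busy() hunks above replace the int return code with the kernel's ERR_PTR convention, so the function hands back either the per-CPU map or an encoded errno through a single pointer. A minimal sketch of that convention; the demo_* names are illustrative stand-ins, not taken from the patch:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_map {
    unsigned long *page;
};

static struct demo_map *demo_map_alloc(void)
{
    struct demo_map *map = kzalloc(sizeof(*map), GFP_KERNEL);

    if (!map)
        return ERR_PTR(-ENOMEM);    /* encode the errno in the pointer */
    return map;
}

/* Callers unpack the result with IS_ERR()/PTR_ERR(), as paicrypt_event_init() now does. */
static int demo_init(void)
{
    struct demo_map *map = demo_map_alloc();

    if (IS_ERR(map))
        return PTR_ERR(map);
    kfree(map);
    return 0;
}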

@@ -17,8 +17,7 @@
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>

#include <asm/ctl_reg.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

@@ -249,7 +248,7 @@ static int paiext_event_init(struct perf_event *event)
    if (rc)
        return rc;
    /* Allow only CPU wide operation, no process context for now. */
    if (event->hw.target || event->cpu == -1)
    if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
        return -ENOENT;
    /* Allow only event NNPA_ALL for sampling. */
    if (a->sample_period && a->config != PAI_NNPA_BASE)
@@ -327,7 +326,6 @@ static void paiext_start(struct perf_event *event, int flags)
        event->hw.last_tag = 1;
        sum = paiext_getall(event); /* Get current value */
        local64_set(&event->hw.prev_count, sum);
        local64_set(&event->count, 0);
    }
}

static int paiext_add(struct perf_event *event, int flags)
@@ -340,7 +338,7 @@ static int paiext_add(struct perf_event *event, int flags)
        S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
        pcb->acc = virt_to_phys(cpump->area) | 0x1;
        /* Enable CPU instruction lookup for PAIE1 control block */
        __ctl_set_bit(0, 49);
        local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
        debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
                    __func__, S390_lowcore.aicd, pcb->acc);
    }
@@ -376,7 +374,7 @@ static void paiext_del(struct perf_event *event, int flags)
    }
    if (--cpump->active_events == 0) {
        /* Disable CPU instruction lookup for PAIE1 control block */
        __ctl_clear_bit(0, 49);
        local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
        pcb->acc = 0;
        S390_lowcore.aicd = 0;
        debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
@@ -41,13 +41,20 @@ void update_cr_regs(struct task_struct *task)
{
    struct pt_regs *regs = task_pt_regs(task);
    struct thread_struct *thread = &task->thread;
    struct per_regs old, new;
    union ctlreg0 cr0_old, cr0_new;
    union ctlreg2 cr2_old, cr2_new;
    int cr0_changed, cr2_changed;
    union {
        struct ctlreg regs[3];
        struct {
            struct ctlreg control;
            struct ctlreg start;
            struct ctlreg end;
        };
    } old, new;

    __ctl_store(cr0_old.val, 0, 0);
    __ctl_store(cr2_old.val, 2, 2);
    local_ctl_store(0, &cr0_old.reg);
    local_ctl_store(2, &cr2_old.reg);
    cr0_new = cr0_old;
    cr2_new = cr2_old;
    /* Take care of the enable/disable of transactional execution. */
@@ -75,38 +82,38 @@ void update_cr_regs(struct task_struct *task)
    cr0_changed = cr0_new.val != cr0_old.val;
    cr2_changed = cr2_new.val != cr2_old.val;
    if (cr0_changed)
        __ctl_load(cr0_new.val, 0, 0);
        local_ctl_load(0, &cr0_new.reg);
    if (cr2_changed)
        __ctl_load(cr2_new.val, 2, 2);
        local_ctl_load(2, &cr2_new.reg);
    /* Copy user specified PER registers */
    new.control = thread->per_user.control;
    new.start = thread->per_user.start;
    new.end = thread->per_user.end;
    new.control.val = thread->per_user.control;
    new.start.val = thread->per_user.start;
    new.end.val = thread->per_user.end;

    /* merge TIF_SINGLE_STEP into user specified PER registers. */
    if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
        test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
        if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
            new.control |= PER_EVENT_BRANCH;
            new.control.val |= PER_EVENT_BRANCH;
        else
            new.control |= PER_EVENT_IFETCH;
        new.control |= PER_CONTROL_SUSPENSION;
        new.control |= PER_EVENT_TRANSACTION_END;
            new.control.val |= PER_EVENT_IFETCH;
        new.control.val |= PER_CONTROL_SUSPENSION;
        new.control.val |= PER_EVENT_TRANSACTION_END;
        if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
            new.control |= PER_EVENT_IFETCH;
        new.start = 0;
        new.end = -1UL;
            new.control.val |= PER_EVENT_IFETCH;
        new.start.val = 0;
        new.end.val = -1UL;
    }

    /* Take care of the PER enablement bit in the PSW. */
    if (!(new.control & PER_EVENT_MASK)) {
    if (!(new.control.val & PER_EVENT_MASK)) {
        regs->psw.mask &= ~PSW_MASK_PER;
        return;
    }
    regs->psw.mask |= PSW_MASK_PER;
    __ctl_store(old, 9, 11);
    __local_ctl_store(9, 11, old.regs);
    if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
        __ctl_load(new, 9, 11);
        __local_ctl_load(9, 11, new.regs);
}

void user_enable_single_step(struct task_struct *task)
@@ -1107,7 +1114,7 @@ static int s390_gs_cb_set(struct task_struct *target,
    target->thread.gs_cb = data;
    *target->thread.gs_cb = gs_cb;
    if (target == current) {
        __ctl_set_bit(2, 4);
        local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
        restore_gs_cb(target->thread.gs_cb);
    }
    preempt_enable();
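
The ptrace hunks above convert raw __ctl_store()/__ctl_load() calls to the typed struct ctlreg interface, so control-register contents can no longer be confused with arbitrary unsigned longs and every bit update has to go through the .val member explicitly. A simplified stand-in for the real definition in asm/ctlreg.h, to show the idea; demo_* names are illustrative:

struct demo_ctlreg {
    unsigned long val;
};

static inline void demo_ctl_set_event(struct demo_ctlreg *control, unsigned long event_bit)
{
    control->val |= event_bit;    /* explicit .val access, as in update_cr_regs() */
}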

@@ -305,7 +305,7 @@ static void __init setup_zfcpdump(void)
        return;
    if (oldmem_data.start)
        return;
    strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
    strlcat(boot_command_line, " cio_ignore=all,!ipldev,!condev", COMMAND_LINE_SIZE);
    console_loglevel = 2;
}
#else
@@ -381,12 +381,6 @@ void stack_free(unsigned long stack)
#endif
}

void __init __noreturn arch_call_rest_init(void)
{
    smp_reinit_ipl_cpu();
    rest_init();
}

static unsigned long __init stack_alloc_early(void)
{
    unsigned long stack;
@@ -455,7 +449,6 @@ static void __init setup_lowcore(void)
    lc->restart_fn = (unsigned long) do_restart;
    lc->restart_data = 0;
    lc->restart_source = -1U;
    __ctl_store(lc->cregs_save_area, 0, 15);
    lc->spinlock_lockval = arch_spin_lockval(0);
    lc->spinlock_index = 0;
    arch_spin_lock_setup(0);
@@ -465,6 +458,7 @@ static void __init setup_lowcore(void)
    lc->kernel_asce = S390_lowcore.kernel_asce;
    lc->user_asce = S390_lowcore.user_asce;

    system_ctlreg_init_save_area(lc);
    abs_lc = get_abs_lowcore();
    abs_lc->restart_stack = lc->restart_stack;
    abs_lc->restart_fn = lc->restart_fn;
@@ -472,7 +466,6 @@ static void __init setup_lowcore(void)
    abs_lc->restart_source = lc->restart_source;
    abs_lc->restart_psw = lc->restart_psw;
    abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
    memcpy(abs_lc->cregs_save_area, lc->cregs_save_area, sizeof(abs_lc->cregs_save_area));
    abs_lc->program_new_psw = lc->program_new_psw;
    abs_lc->mcesad = lc->mcesad;
    put_abs_lowcore(abs_lc);
@@ -797,15 +790,15 @@ static void __init setup_cr(void)
    __ctl_duct[4] = (unsigned long)__ctl_duald;

    /* Update control registers CR2, CR5 and CR15 */
    __ctl_store(cr2.val, 2, 2);
    __ctl_store(cr5.val, 5, 5);
    __ctl_store(cr15.val, 15, 15);
    local_ctl_store(2, &cr2.reg);
    local_ctl_store(5, &cr5.reg);
    local_ctl_store(15, &cr15.reg);
    cr2.ducto = (unsigned long)__ctl_duct >> 6;
    cr5.pasteo = (unsigned long)__ctl_duct >> 6;
    cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
    __ctl_load(cr2.val, 2, 2);
    __ctl_load(cr5.val, 5, 5);
    __ctl_load(cr15.val, 15, 15);
    system_ctl_load(2, &cr2.reg);
    system_ctl_load(5, &cr5.reg);
    system_ctl_load(15, &cr15.reg);
}

/*
@@ -37,6 +37,7 @@
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -567,54 +568,6 @@ void arch_irq_work_raise(void)
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
    unsigned long orval;
    unsigned long andval;
    int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
    struct ec_creg_mask_parms *pp = info;
    unsigned long cregs[16];

    __ctl_store(cregs, 0, 15);
    cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
    __ctl_load(cregs, 0, 15);
}

static DEFINE_SPINLOCK(ctl_lock);

void smp_ctl_set_clear_bit(int cr, int bit, bool set)
{
    struct ec_creg_mask_parms parms = { .cr = cr, };
    struct lowcore *abs_lc;
    u64 ctlreg;

    if (set) {
        parms.orval = 1UL << bit;
        parms.andval = -1UL;
    } else {
        parms.orval = 0;
        parms.andval = ~(1UL << bit);
    }
    spin_lock(&ctl_lock);
    abs_lc = get_abs_lowcore();
    ctlreg = abs_lc->cregs_save_area[cr];
    ctlreg = (ctlreg & parms.andval) | parms.orval;
    abs_lc->cregs_save_area[cr] = ctlreg;
    put_abs_lowcore(abs_lc);
    on_each_cpu(smp_ctl_bit_callback, &parms, 1);
    spin_unlock(&ctl_lock);
}
EXPORT_SYMBOL(smp_ctl_set_clear_bit);

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
@@ -935,14 +888,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
     * Make sure global control register contents do not change
     * until new CPU has initialized control registers.
     */
    spin_lock(&ctl_lock);
    system_ctlreg_lock();
    pcpu_prepare_secondary(pcpu, cpu);
    pcpu_attach_task(pcpu, tidle);
    pcpu_start_fn(pcpu, smp_start_secondary, NULL);
    /* Wait until cpu puts itself in the online & active maps */
    while (!cpu_online(cpu))
        cpu_relax();
    spin_unlock(&ctl_lock);
    system_ctlreg_unlock();
    return 0;
}

@@ -957,7 +910,7 @@ early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
    unsigned long cregs[16];
    struct ctlreg cregs[16];
    int cpu;

    /* Handle possible pending IPIs */
@@ -969,11 +922,11 @@ int __cpu_disable(void)
    /* Disable pseudo page faults on this cpu. */
    pfault_fini();
    /* Disable interrupt sources via control register. */
    __ctl_store(cregs, 0, 15);
    cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
    cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
    cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
    __ctl_load(cregs, 0, 15);
    __local_ctl_store(0, 15, cregs);
    cregs[0].val &= ~0x0000ee70UL; /* disable all external interrupts */
    cregs[6].val &= ~0xff000000UL; /* disable all I/O interrupts */
    cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
    __local_ctl_load(0, 15, cregs);
    clear_cpu_flag(CIF_NOHZ_DELAY);
    return 0;
}
@@ -1013,12 +966,12 @@ void __init smp_fill_possible_mask(void)

void __init smp_prepare_cpus(unsigned int max_cpus)
{
    /* request the 0x1201 emergency signal external interrupt */
    if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
        panic("Couldn't request external interrupt 0x1201");
    /* request the 0x1202 external call external interrupt */
    system_ctl_set_bit(0, 14);
    if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
        panic("Couldn't request external interrupt 0x1202");
    system_ctl_set_bit(0, 13);
}

void __init smp_prepare_boot_cpu(void)
@@ -1076,11 +1029,9 @@ static ssize_t cpu_configure_store(struct device *dev,
    cpus_read_lock();
    mutex_lock(&smp_cpu_state_mutex);
    rc = -EBUSY;
    /* disallow configuration changes of online cpus and cpu 0 */
    /* disallow configuration changes of online cpus */
    cpu = dev->id;
    cpu = smp_get_base_cpu(cpu);
    if (cpu == 0)
        goto out;
    for (i = 0; i <= smp_cpu_mtid; i++)
        if (cpu_online(cpu + i))
            goto out;
@@ -1180,7 +1131,7 @@ static int smp_add_present_cpu(int cpu)
        return -ENOMEM;
    per_cpu(cpu_device, cpu) = c;
    s = &c->dev;
    c->hotpluggable = 1;
    c->hotpluggable = !!cpu;
    rc = register_cpu(c, cpu);
    if (rc)
        goto out;
@@ -1258,60 +1209,3 @@ static int __init s390_smp_init(void)
    return rc;
}
subsys_initcall(s390_smp_init);

static __always_inline void set_new_lowcore(struct lowcore *lc)
{
    union register_pair dst, src;
    u32 pfx;

    src.even = (unsigned long) &S390_lowcore;
    src.odd = sizeof(S390_lowcore);
    dst.even = (unsigned long) lc;
    dst.odd = sizeof(*lc);
    pfx = __pa(lc);

    asm volatile(
        " mvcl %[dst],%[src]\n"
        " spx %[pfx]\n"
        : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
        : [pfx] "Q" (pfx)
        : "memory", "cc");
}

int __init smp_reinit_ipl_cpu(void)
{
    unsigned long async_stack, nodat_stack, mcck_stack;
    struct lowcore *lc, *lc_ipl;
    unsigned long flags, cr0;
    u64 mcesad;

    lc_ipl = lowcore_ptr[0];
    lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
    nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
    async_stack = stack_alloc();
    mcck_stack = stack_alloc();
    if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
        panic("Couldn't allocate memory");

    local_irq_save(flags);
    local_mcck_disable();
    set_new_lowcore(lc);
    S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
    S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
    S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
    __ctl_store(cr0, 0, 0);
    __ctl_clear_bit(0, 28); /* disable lowcore protection */
    S390_lowcore.mcesad = mcesad;
    __ctl_load(cr0, 0, 0);
    if (abs_lowcore_map(0, lc, false))
        panic("Couldn't remap absolute lowcore");
    lowcore_ptr[0] = lc;
    local_mcck_enable();
    local_irq_restore(flags);

    memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
    memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
    memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
    memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
    return 0;
}
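
The removed smp_ctl_set_clear_bit() above and its replacement system_ctl_set_bit() implement the same system-wide control-register update scheme: update the boot-time save area under a lock (so CPUs brought online later inherit the bit) and then broadcast the change to every online CPU. A compressed sketch of that scheme, with demo_ placeholders standing in for the lowcore plumbing:

#include <linux/spinlock.h>
#include <linux/smp.h>

static DEFINE_SPINLOCK(demo_ctl_lock);
static unsigned long demo_cregs_save_area[16];    /* inherited by new CPUs */

static void demo_ctl_bit_callback(void *info)
{
    unsigned long *parms = info;    /* parms[0] = cr, parms[1] = mask */
    /* each CPU ORs parms[1] into its own control register parms[0] here */
}

static void demo_system_ctl_set_bit(int cr, int bit)
{
    unsigned long parms[2] = { cr, 1UL << bit };

    spin_lock(&demo_ctl_lock);
    demo_cregs_save_area[cr] |= 1UL << bit;    /* future CPUs */
    on_each_cpu(demo_ctl_bit_callback, parms, 1);    /* current CPUs */
    spin_unlock(&demo_ctl_lock);
}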

@@ -173,10 +173,10 @@ void init_cpu_timer(void)
    clockevents_register_device(cd);

    /* Enable clock comparator timer interrupt. */
    __ctl_set_bit(0,11);
    local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SUBMASK_BIT);

    /* Always allow the timing alert external interrupt. */
    __ctl_set_bit(0, 4);
    local_ctl_set_bit(0, CR0_ETR_SUBMASK_BIT);
}

static void clock_comparator_interrupt(struct ext_code ext_code,
@@ -11,7 +11,7 @@
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>

#include <asm/fault.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -466,23 +466,6 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
    return 0;
}

struct trans_exc_code_bits {
    unsigned long addr : 52; /* Translation-exception Address */
    unsigned long fsi : 2;   /* Access Exception Fetch/Store Indication */
    unsigned long : 2;
    unsigned long b56 : 1;
    unsigned long : 3;
    unsigned long b60 : 1;
    unsigned long b61 : 1;
    unsigned long as : 2;    /* ASCE Identifier */
};

enum {
    FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
    FSI_STORE   = 1, /* Exception was due to store operation */
    FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

enum prot_type {
    PROT_TYPE_LA   = 0,
    PROT_TYPE_KEYC = 1,
@@ -497,11 +480,11 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
            enum gacc_mode mode, enum prot_type prot, bool terminate)
{
    struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
    struct trans_exc_code_bits *tec;
    union teid *teid;

    memset(pgm, 0, sizeof(*pgm));
    pgm->code = code;
    tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
    teid = (union teid *)&pgm->trans_exc_code;

    switch (code) {
    case PGM_PROTECTION:
@@ -511,25 +494,25 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
            WARN_ON_ONCE(1);
            break;
        case PROT_TYPE_IEP:
            tec->b61 = 1;
            teid->b61 = 1;
            fallthrough;
        case PROT_TYPE_LA:
            tec->b56 = 1;
            teid->b56 = 1;
            break;
        case PROT_TYPE_KEYC:
            tec->b60 = 1;
            teid->b60 = 1;
            break;
        case PROT_TYPE_ALC:
            tec->b60 = 1;
            teid->b60 = 1;
            fallthrough;
        case PROT_TYPE_DAT:
            tec->b61 = 1;
            teid->b61 = 1;
            break;
        }
        if (terminate) {
            tec->b56 = 0;
            tec->b60 = 0;
            tec->b61 = 0;
            teid->b56 = 0;
            teid->b60 = 0;
            teid->b61 = 0;
        }
        fallthrough;
    case PGM_ASCE_TYPE:
@@ -543,9 +526,9 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
         * exc_access_id has to be set to 0 for some instructions. Both
         * cases have to be handled by the caller.
         */
        tec->addr = gva >> PAGE_SHIFT;
        tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
        tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
        teid->addr = gva >> PAGE_SHIFT;
        teid->fsi = mode == GACC_STORE ? TEID_FSI_STORE : TEID_FSI_FETCH;
        teid->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
        fallthrough;
    case PGM_ALEN_TRANSLATION:
    case PGM_ALE_SEQUENCE:
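
The hunks above drop the local trans_exc_code_bits bitfield in favor of the shared union teid. The layout below mirrors the removed struct and shows how a translation-exception identification word decodes into plain member accesses instead of casts and masks; demo_teid is an illustrative stand-in for the real definition in asm/fault.h (bitfields fill from the most significant bit on big-endian s390, so the widths below sum to 64):

union demo_teid {
    unsigned long val;
    struct {
        unsigned long addr : 52;    /* failing page number */
        unsigned long fsi  : 2;     /* fetch/store indication */
        unsigned long      : 2;
        unsigned long b56  : 1;
        unsigned long      : 3;
        unsigned long b60  : 1;
        unsigned long b61  : 1;
        unsigned long as   : 2;     /* address-space identifier */
    };
};

/* Decoding becomes direct member access, as in the new trans_exc_ending(). */
static unsigned long demo_fault_address(unsigned long trans_exc_code)
{
    union demo_teid teid = { .val = trans_exc_code };

    return teid.addr * 4096;    /* page number -> byte address */
}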

@@ -4927,7 +4927,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
    }
    if (MACHINE_HAS_GS) {
        preempt_disable();
        __ctl_set_bit(2, 4);
        local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
        if (current->thread.gs_cb) {
            vcpu->arch.host_gscb = current->thread.gs_cb;
            save_gs_cb(vcpu->arch.host_gscb);
@@ -5004,13 +5004,13 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
    kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
    if (MACHINE_HAS_GS) {
        preempt_disable();
        __ctl_set_bit(2, 4);
        local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
        if (vcpu->arch.gs_enabled)
            save_gs_cb(current->thread.gs_cb);
        current->thread.gs_cb = vcpu->arch.host_gscb;
        restore_gs_cb(vcpu->arch.host_gscb);
        if (!vcpu->arch.host_gscb)
            __ctl_clear_bit(2, 4);
            local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
        vcpu->arch.host_gscb = NULL;
        preempt_enable();
    }
@@ -57,7 +57,7 @@ static int handle_gs(struct kvm_vcpu *vcpu)
    if (test_kvm_facility(vcpu->kvm, 133)) {
        VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
        preempt_disable();
        __ctl_set_bit(2, 4);
        local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
        current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
        restore_gs_cb(current->thread.gs_cb);
        preempt_enable();

@@ -12,21 +12,22 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
#include <asm/ctlreg.h>

#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
    unsigned long cr1, cr7;
    struct ctlreg cr1, cr7;

    __ctl_store(cr1, 1, 1);
    __ctl_store(cr7, 7, 7);
    if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
    local_ctl_store(1, &cr1);
    local_ctl_store(7, &cr7);
    if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
        return;
    panic("incorrect ASCE on kernel %s\n"
          "cr1: %016lx cr7: %016lx\n"
          "kernel: %016llx user: %016llx\n",
          exit ? "exit" : "entry", cr1, cr7,
          S390_lowcore.kernel_asce, S390_lowcore.user_asce);
          "kernel: %016lx user: %016lx\n",
          exit ? "exit" : "entry", cr1.val, cr7.val,
          S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */

@@ -287,7 +287,7 @@ static int pt_dump_init(void)
     * kernel ASCE. We need this to keep the page table walker functions
     * from accessing non-existent entries.
     */
    max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
    max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
    max_addr = 1UL << (max_addr * 11 + 31);
    address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
    address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;

@@ -61,6 +61,22 @@ static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
    return true;
}

static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt_regs *regs)
{
    unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
    unsigned int reg_data = FIELD_GET(EX_DATA_REG_ERR, ex->data);
    unsigned long data, addr, offset;

    addr = regs->gprs[reg_addr];
    offset = addr & (sizeof(unsigned long) - 1);
    addr &= ~(sizeof(unsigned long) - 1);
    data = *(unsigned long *)addr;
    data <<= BITS_PER_BYTE * offset;
    regs->gprs[reg_data] = data;
    regs->psw.addr = extable_fixup(ex);
    return true;
}

bool fixup_exception(struct pt_regs *regs)
{
    const struct exception_table_entry *ex;
@@ -81,6 +97,8 @@ bool fixup_exception(struct pt_regs *regs)
        return ex_handler_ua_load_reg(ex, false, regs);
    case EX_TYPE_UA_LOAD_REGPAIR:
        return ex_handler_ua_load_reg(ex, true, regs);
    case EX_TYPE_ZEROPAD:
        return ex_handler_zeropad(ex, regs);
    }
    panic("invalid exception table entry");
}
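
The zeropad handler added above recovers from a word-at-a-time load that ran past the end of mapped memory: it redoes the access as an aligned full-word read and shifts the bytes that were actually readable to the most significant end, zero-filling the rest. A user-space sketch of the same arithmetic, assuming big-endian byte order as on s390:

#include <stdint.h>

static uint64_t demo_zeropad_load(const void *p)
{
    uintptr_t addr = (uintptr_t)p;
    unsigned int offset = addr & (sizeof(uint64_t) - 1);
    /* aligned read that cannot cross into the next (unmapped) word */
    uint64_t data = *(const uint64_t *)(addr & ~(uintptr_t)(sizeof(uint64_t) - 1));

    return data << (8 * offset);    /* readable bytes left-aligned, rest zero */
}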

@@ -3,17 +3,19 @@
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Hartmut Penner (hp@de.ibm.com)
 *              Ulrich Weigand (uweigand@de.ibm.com)
 *              Ulrich Weigand (uweigand@de.ibm.com)
 *
 * Derived from "arch/i386/mm/fault.c"
 *   Copyright (C) 1995 Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -34,38 +36,27 @@
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L

/*
 * Allocate private vm_fault_reason from top. Please make sure it won't
 * collide with vm_fault_reason.
 */
#define VM_FAULT_BADCONTEXT ((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL ((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT ((__force vm_fault_t)0x8000000)

enum fault_type {
    KERNEL_FAULT,
    USER_FAULT,
    GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;
static DEFINE_STATIC_KEY_FALSE(have_store_indication);

static int __init fault_init(void)
{
    if (test_facility(75))
        store_indication = 0xc00;
        static_branch_enable(&have_store_indication);
    return 0;
}
early_initcall(fault_init);
@@ -75,11 +66,9 @@ early_initcall(fault_init);
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
    unsigned long trans_exc_code;
    union teid teid = { .val = regs->int_parm_long };

    trans_exc_code = regs->int_parm_long & 3;
    if (likely(trans_exc_code == 0)) {
        /* primary space exception */
    if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
        if (user_mode(regs))
            return USER_FAULT;
        if (!IS_ENABLED(CONFIG_PGSTE))
@@ -88,83 +77,77 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
            return GMAP_FAULT;
        return KERNEL_FAULT;
    }
    if (trans_exc_code == 2)
    if (teid.as == PSW_BITS_AS_SECONDARY)
        return USER_FAULT;
    if (trans_exc_code == 1) {
        /* access register mode, not used in the kernel */
    /* Access register mode, not used in the kernel */
    if (teid.as == PSW_BITS_AS_ACCREG)
        return USER_FAULT;
    }
    /* home space exception -> access via kernel ASCE */
    /* Home space -> access via kernel ASCE */
    return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
    unsigned long trans_exc_code = regs->int_parm_long;
    union teid teid = { .val = regs->int_parm_long };

    return trans_exc_code & __FAIL_ADDR_MASK;
    return teid.addr * PAGE_SIZE;
}

static bool fault_is_write(struct pt_regs *regs)
static __always_inline bool fault_is_write(struct pt_regs *regs)
{
    unsigned long trans_exc_code = regs->int_parm_long;
    union teid teid = { .val = regs->int_parm_long };

    return (trans_exc_code & store_indication) == 0x400;
}

static int bad_address(void *p)
{
    unsigned long dummy;

    return get_kernel_nofault(dummy, (unsigned long *)p);
    if (static_branch_likely(&have_store_indication))
        return teid.fsi == TEID_FSI_STORE;
    return false;
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
    unsigned long *table = __va(asce & _ASCE_ORIGIN);
    unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

    pr_alert("AS:%016lx ", asce);
    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
        if (bad_address(table))
        if (get_kernel_nofault(entry, table))
            goto bad;
        pr_cont("R1:%016lx ", *table);
        if (*table & _REGION_ENTRY_INVALID)
        pr_cont("R1:%016lx ", entry);
        if (entry & _REGION_ENTRY_INVALID)
            goto out;
        table = __va(*table & _REGION_ENTRY_ORIGIN);
        table = __va(entry & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_REGION2:
        table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
        if (bad_address(table))
        if (get_kernel_nofault(entry, table))
            goto bad;
        pr_cont("R2:%016lx ", *table);
        if (*table & _REGION_ENTRY_INVALID)
        pr_cont("R2:%016lx ", entry);
        if (entry & _REGION_ENTRY_INVALID)
            goto out;
        table = __va(*table & _REGION_ENTRY_ORIGIN);
        table = __va(entry & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_REGION3:
        table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
        if (bad_address(table))
        if (get_kernel_nofault(entry, table))
            goto bad;
        pr_cont("R3:%016lx ", *table);
        if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
        pr_cont("R3:%016lx ", entry);
        if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
            goto out;
        table = __va(*table & _REGION_ENTRY_ORIGIN);
        table = __va(entry & _REGION_ENTRY_ORIGIN);
        fallthrough;
    case _ASCE_TYPE_SEGMENT:
        table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
        if (bad_address(table))
        if (get_kernel_nofault(entry, table))
            goto bad;
        pr_cont("S:%016lx ", *table);
        if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
        pr_cont("S:%016lx ", entry);
        if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
            goto out;
        table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
        table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
    }
    table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
    if (bad_address(table))
    if (get_kernel_nofault(entry, table))
        goto bad;
    pr_cont("P:%016lx ", *table);
    pr_cont("P:%016lx ", entry);
out:
    pr_cont("\n");
    return;
@@ -174,173 +157,113 @@ static void dump_pagetable(unsigned long asce, unsigned long address)

static void dump_fault_info(struct pt_regs *regs)
{
    union teid teid = { .val = regs->int_parm_long };
    unsigned long asce;

    pr_alert("Failing address: %016lx TEID: %016lx\n",
         regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
         get_fault_address(regs), teid.val);
    pr_alert("Fault in ");
    switch (regs->int_parm_long & 3) {
    case 3:
    switch (teid.as) {
    case PSW_BITS_AS_HOME:
        pr_cont("home space ");
        break;
    case 2:
    case PSW_BITS_AS_SECONDARY:
        pr_cont("secondary space ");
        break;
    case 1:
    case PSW_BITS_AS_ACCREG:
        pr_cont("access register ");
        break;
    case 0:
    case PSW_BITS_AS_PRIMARY:
        pr_cont("primary space ");
        break;
    }
    pr_cont("mode while using ");
    switch (get_fault_type(regs)) {
    case USER_FAULT:
        asce = S390_lowcore.user_asce;
        asce = S390_lowcore.user_asce.val;
        pr_cont("user ");
        break;
    case GMAP_FAULT:
        asce = ((struct gmap *) S390_lowcore.gmap)->asce;
        asce = ((struct gmap *)S390_lowcore.gmap)->asce;
        pr_cont("gmap ");
        break;
    case KERNEL_FAULT:
        asce = S390_lowcore.kernel_asce;
        asce = S390_lowcore.kernel_asce.val;
        pr_cont("kernel ");
        break;
    default:
        unreachable();
    }
    pr_cont("ASCE.\n");
    dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
    dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
    static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

    if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
        return;
    if (!unhandled_signal(current, signr))
        return;
    if (!printk_ratelimit())
    if (!__ratelimit(&rs))
        return;
    printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
           regs->int_code & 0xffff, regs->int_code >> 17);
    pr_alert("User process fault: interruption code %04x ilc:%d ",
         regs->int_code & 0xffff, regs->int_code >> 17);
    print_vma_addr(KERN_CONT "in ", regs->psw.addr);
    printk(KERN_CONT "\n");
    pr_cont("\n");
    if (is_mm_fault)
        dump_fault_info(regs);
    show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
static void do_sigsegv(struct pt_regs *regs, int si_code)
{
    report_user_fault(regs, SIGSEGV, 1);
    force_sig_fault(SIGSEGV, si_code,
            (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
    force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
    enum fault_type fault_type;
    unsigned long address;
    bool is_write;

    if (user_mode(regs)) {
        if (WARN_ON_ONCE(!si_code))
            si_code = SEGV_MAPERR;
        return do_sigsegv(regs, si_code);
    }
    if (fixup_exception(regs))
        return;
    fault_type = get_fault_type(regs);
    if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
    if (fault_type == KERNEL_FAULT) {
        address = get_fault_address(regs);
        is_write = fault_is_write(regs);
        if (kfence_handle_page_fault(address, is_write, regs))
            return;
    }
    /*
     * Oops. The kernel tried to access some bad page. We'll have to
     * terminate things with extreme prejudice.
     */
    if (fault_type == KERNEL_FAULT)
        printk(KERN_ALERT "Unable to handle kernel pointer dereference"
               " in virtual kernel address space\n");
        pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
    else
        printk(KERN_ALERT "Unable to handle kernel paging request"
               " in virtual user address space\n");
        pr_alert("Unable to handle kernel paging request in virtual user address space\n");
    dump_fault_info(regs);
    die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
static void handle_fault_error(struct pt_regs *regs, int si_code)
{
    /* Low-address protection hit in kernel mode means
       NULL pointer write access in kernel mode. */
    if (regs->psw.mask & PSW_MASK_PSTATE) {
        /* Low-address protection hit in user mode 'cannot happen'. */
        die (regs, "Low-address protection");
    }
    struct mm_struct *mm = current->mm;

    do_no_context(regs, VM_FAULT_BADACCESS);
    mmap_read_unlock(mm);
    handle_fault_error_nolock(regs, si_code);
}

static noinline void do_sigbus(struct pt_regs *regs)
static void do_sigbus(struct pt_regs *regs)
{
    /*
     * Send a sigbus, regardless of whether we were in kernel
     * or user mode.
     */
    force_sig_fault(SIGBUS, BUS_ADRERR,
            (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
    int si_code;

    switch (fault) {
    case VM_FAULT_BADACCESS:
    case VM_FAULT_BADMAP:
        /* Bad memory access. Check if it is kernel or user space. */
        if (user_mode(regs)) {
            /* User mode accesses just cause a SIGSEGV */
            si_code = (fault == VM_FAULT_BADMAP) ?
                SEGV_MAPERR : SEGV_ACCERR;
            do_sigsegv(regs, si_code);
            break;
        }
        fallthrough;
    case VM_FAULT_BADCONTEXT:
    case VM_FAULT_PFAULT:
        do_no_context(regs, fault);
        break;
    case VM_FAULT_SIGNAL:
        if (!user_mode(regs))
            do_no_context(regs, fault);
        break;
    default: /* fault & VM_FAULT_ERROR */
        if (fault & VM_FAULT_OOM) {
            if (!user_mode(regs))
                do_no_context(regs, fault);
            else
                pagefault_out_of_memory();
        } else if (fault & VM_FAULT_SIGSEGV) {
            /* Kernel mode? Handle exceptions or die */
            if (!user_mode(regs))
                do_no_context(regs, fault);
            else
                do_sigsegv(regs, SEGV_MAPERR);
        } else if (fault & VM_FAULT_SIGBUS) {
            /* Kernel mode? Handle exceptions or die */
            if (!user_mode(regs))
                do_no_context(regs, fault);
            else
                do_sigbus(regs);
        } else
            BUG();
        break;
    }
    force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
@@ -349,20 +272,20 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
 * routines.
 *
 * interruption code (int_code):
 *   04 Protection          -> Write-Protection (suppression)
 *   10 Segment translation -> Not present      (nullification)
 *   11 Page translation    -> Not present      (nullification)
 *   3b Region third trans. -> Not present      (nullification)
 *   04 Protection          -> Write-Protection (suppression)
 *   10 Segment translation -> Not present      (nullification)
 *   11 Page translation    -> Not present      (nullification)
 *   3b Region third trans. -> Not present      (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
static void do_exception(struct pt_regs *regs, int access)
{
    struct gmap *gmap;
    struct task_struct *tsk;
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    enum fault_type type;
    struct task_struct *tsk;
    unsigned long address;
    struct mm_struct *mm;
    enum fault_type type;
    unsigned int flags;
    struct gmap *gmap;
    vm_fault_t fault;
    bool is_write;

@@ -372,31 +295,21 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
     * been nullified. Don't signal single step via SIGTRAP.
     */
    clear_thread_flag(TIF_PER_TRAP);

    if (kprobe_page_fault(regs, 14))
        return 0;

        return;
    mm = tsk->mm;
    address = get_fault_address(regs);
    is_write = fault_is_write(regs);

    /*
     * Verify that the fault happened in user space, that
     * we are not in an interrupt and that there is a
     * user context.
     */
    fault = VM_FAULT_BADCONTEXT;
    type = get_fault_type(regs);
    switch (type) {
    case KERNEL_FAULT:
        goto out;
        return handle_fault_error_nolock(regs, 0);
    case USER_FAULT:
    case GMAP_FAULT:
        if (faulthandler_disabled() || !mm)
            goto out;
            return handle_fault_error_nolock(regs, 0);
        break;
    }

    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
    flags = FAULT_FLAG_DEFAULT;
    if (user_mode(regs))
@@ -419,125 +332,117 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        vma_end_read(vma);
        if (!(fault & VM_FAULT_RETRY)) {
            count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
            if (likely(!(fault & VM_FAULT_ERROR)))
                fault = 0;
            goto out;
            if (unlikely(fault & VM_FAULT_ERROR))
                goto error;
            return;
        }
        count_vm_vma_lock_event(VMA_LOCK_RETRY);
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
            fault = VM_FAULT_SIGNAL;
            goto out;
            if (!user_mode(regs))
                handle_fault_error_nolock(regs, 0);
            return;
        }
lock_mmap:
    mmap_read_lock(mm);

    gmap = NULL;
    if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
        gmap = (struct gmap *) S390_lowcore.gmap;
        gmap = (struct gmap *)S390_lowcore.gmap;
        current->thread.gmap_addr = address;
        current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
        current->thread.gmap_int_code = regs->int_code & 0xffff;
        address = __gmap_translate(gmap, address);
        if (address == -EFAULT) {
            fault = VM_FAULT_BADMAP;
            goto out_up;
        }
        if (address == -EFAULT)
            return handle_fault_error(regs, SEGV_MAPERR);
        if (gmap->pfault_enabled)
            flags |= FAULT_FLAG_RETRY_NOWAIT;
    }

retry:
    fault = VM_FAULT_BADMAP;
    vma = find_vma(mm, address);
    if (!vma)
        goto out_up;

        return handle_fault_error(regs, SEGV_MAPERR);
    if (unlikely(vma->vm_start > address)) {
        if (!(vma->vm_flags & VM_GROWSDOWN))
            goto out_up;
            return handle_fault_error(regs, SEGV_MAPERR);
        vma = expand_stack(mm, address);
        if (!vma)
            goto out;
            return handle_fault_error_nolock(regs, SEGV_MAPERR);
    }

    /*
     * Ok, we have a good vm_area for this memory access, so
     * we can handle it..
     */
    fault = VM_FAULT_BADACCESS;
    if (unlikely(!(vma->vm_flags & access)))
        goto out_up;

    /*
     * If for any reason at all we couldn't handle the fault,
     * make sure we exit gracefully rather than endlessly redo
     * the fault.
     */
        return handle_fault_error(regs, SEGV_ACCERR);
    fault = handle_mm_fault(vma, address, flags, regs);
    if (fault_signal_pending(fault, regs)) {
        fault = VM_FAULT_SIGNAL;
        if (flags & FAULT_FLAG_RETRY_NOWAIT)
            goto out_up;
        goto out;
            mmap_read_unlock(mm);
        if (!user_mode(regs))
            handle_fault_error_nolock(regs, 0);
        return;
    }

    /* The fault is fully completed (including releasing mmap lock) */
    if (fault & VM_FAULT_COMPLETED) {
        if (gmap) {
            mmap_read_lock(mm);
            goto out_gmap;
            goto gmap;
        }
        fault = 0;
        goto out;
        return;
    }
    if (unlikely(fault & VM_FAULT_ERROR)) {
        mmap_read_unlock(mm);
        goto error;
    }

    if (unlikely(fault & VM_FAULT_ERROR))
        goto out_up;

    if (fault & VM_FAULT_RETRY) {
        if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
            (flags & FAULT_FLAG_RETRY_NOWAIT)) {
        if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
            /*
             * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
             * not been released
             * FAULT_FLAG_RETRY_NOWAIT has been set,
             * mmap_lock has not been released
             */
            current->thread.gmap_pfault = 1;
            fault = VM_FAULT_PFAULT;
            goto out_up;
            return handle_fault_error(regs, 0);
        }
        flags &= ~FAULT_FLAG_RETRY_NOWAIT;
        flags |= FAULT_FLAG_TRIED;
        mmap_read_lock(mm);
        goto retry;
    }
out_gmap:
gmap:
    if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
        address = __gmap_link(gmap, current->thread.gmap_addr,
                      address);
        if (address == -EFAULT) {
            fault = VM_FAULT_BADMAP;
            goto out_up;
        }
        if (address == -EFAULT)
            return handle_fault_error(regs, SEGV_MAPERR);
        if (address == -ENOMEM) {
            fault = VM_FAULT_OOM;
            goto out_up;
            mmap_read_unlock(mm);
            goto error;
        }
    }
    fault = 0;
out_up:
    mmap_read_unlock(mm);
out:
    return fault;
    return;
error:
    if (fault & VM_FAULT_OOM) {
        if (!user_mode(regs))
            handle_fault_error_nolock(regs, 0);
        else
            pagefault_out_of_memory();
    } else if (fault & VM_FAULT_SIGSEGV) {
        if (!user_mode(regs))
            handle_fault_error_nolock(regs, 0);
        else
            do_sigsegv(regs, SEGV_MAPERR);
    } else if (fault & VM_FAULT_SIGBUS) {
        if (!user_mode(regs))
            handle_fault_error_nolock(regs, 0);
        else
            do_sigbus(regs);
    } else {
        BUG();
    }
}

void do_protection_exception(struct pt_regs *regs)
{
    unsigned long trans_exc_code;
    int access;
    vm_fault_t fault;
    union teid teid = { .val = regs->int_parm_long };

    trans_exc_code = regs->int_parm_long;
    /*
     * Protection exceptions are suppressing, decrement psw address.
     * The exception to this rule are aborted transactions, for these
@@ -550,33 +455,28 @@ void do_protection_exception(struct pt_regs *regs)
     * as a special case because the translation exception code
     * field is not guaranteed to contain valid data in this case.
     */
    if (unlikely(!(trans_exc_code & 4))) {
        do_low_address(regs);
        return;
    if (unlikely(!teid.b61)) {
        if (user_mode(regs)) {
            /* Low-address protection in user mode: cannot happen */
            die(regs, "Low-address protection");
        }
        /*
         * Low-address protection in kernel mode means
         * NULL pointer write access in kernel mode.
         */
        return handle_fault_error_nolock(regs, 0);
    }
    if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
        regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
                      (regs->psw.addr & PAGE_MASK);
        access = VM_EXEC;
        fault = VM_FAULT_BADACCESS;
    } else {
        access = VM_WRITE;
        fault = do_exception(regs, access);
    if (unlikely(MACHINE_HAS_NX && teid.b56)) {
        regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
        return handle_fault_error_nolock(regs, SEGV_ACCERR);
    }
    if (unlikely(fault))
        do_fault_error(regs, fault);
    do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
    int access;
    vm_fault_t fault;

    access = VM_ACCESS_FLAGS;
    fault = do_exception(regs, access);
    if (unlikely(fault))
        do_fault_error(regs, fault);
    do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

@@ -584,7 +484,8 @@ NOKPROBE_SYMBOL(do_dat_exception);

void do_secure_storage_access(struct pt_regs *regs)
{
    unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
    union teid teid = { .val = regs->int_parm_long };
    unsigned long addr = get_fault_address(regs);
    struct vm_area_struct *vma;
    struct mm_struct *mm;
    struct page *page;
@@ -592,14 +493,12 @@ void do_secure_storage_access(struct pt_regs *regs)
    int rc;

    /*
     * bit 61 tells us if the address is valid, if it's not we
     * have a major problem and should stop the kernel or send a
     * SIGSEGV to the process. Unfortunately bit 61 is not
     * reliable without the misc UV feature so we need to check
     * for that as well.
     * Bit 61 indicates if the address is valid, if it is not the
     * kernel should be stopped or SIGSEGV should be sent to the
     * process. Bit 61 is not reliable without the misc UV feature,
     * therefore this needs to be checked too.
     */
    if (uv_has_feature(BIT_UV_FEAT_MISC) &&
        !test_bit_inv(61, &regs->int_parm_long)) {
    if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
        /*
         * When this happens, userspace did something that it
         * was not supposed to do, e.g. branching into secure
@@ -609,14 +508,12 @@ void do_secure_storage_access(struct pt_regs *regs)
            send_sig(SIGSEGV, current, 0);
            return;
        }

        /*
         * The kernel should never run into this case and we
         * have no way out of this situation.
         * The kernel should never run into this case and
         * there is no way out of this situation.
         */
        panic("Unexpected PGM 0x3d with TEID bit 61=0");
    }

    switch (get_fault_type(regs)) {
    case GMAP_FAULT:
        mm = current->mm;
@@ -624,20 +521,15 @@ void do_secure_storage_access(struct pt_regs *regs)
        mmap_read_lock(mm);
        addr = __gmap_translate(gmap, addr);
        mmap_read_unlock(mm);
        if (IS_ERR_VALUE(addr)) {
            do_fault_error(regs, VM_FAULT_BADMAP);
            break;
        }
        if (IS_ERR_VALUE(addr))
            return handle_fault_error_nolock(regs, SEGV_MAPERR);
        fallthrough;
    case USER_FAULT:
        mm = current->mm;
        mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        if (!vma) {
            mmap_read_unlock(mm);
            do_fault_error(regs, VM_FAULT_BADMAP);
            break;
        }
        if (!vma)
            return handle_fault_error(regs, SEGV_MAPERR);
        page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
        if (IS_ERR_OR_NULL(page)) {
            mmap_read_unlock(mm);
@@ -658,23 +550,18 @@ void do_secure_storage_access(struct pt_regs *regs)
        BUG();
        break;
    default:
        do_fault_error(regs, VM_FAULT_BADMAP);
        WARN_ON_ONCE(1);
        unreachable();
    }
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
    unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
    struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
    unsigned long gaddr = get_fault_address(regs);

    if (get_fault_type(regs) != GMAP_FAULT) {
        do_fault_error(regs, VM_FAULT_BADMAP);
        WARN_ON_ONCE(1);
        return;
    }

    if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
        return handle_fault_error_nolock(regs, SEGV_MAPERR);
    if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
        send_sig(SIGSEGV, current, 0);
}
@@ -682,8 +569,8 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
    unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
    struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
    unsigned long gaddr = get_fault_address(regs);

    /*
     * If the VM has been rebooted, its address space might still contain
@@ -699,9 +586,8 @@ void do_secure_storage_violation(struct pt_regs *regs)
     * This exception is only triggered when a guest 2 is running
     * and can therefore never occur in kernel context.
     */
    printk_ratelimited(KERN_WARNING
               "Secure storage violation in task: %s, pid %d\n",
               current->comm, current->pid);
    pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
                current->comm, current->pid);
    send_sig(SIGSEGV, current, 0);
}
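
The fault.c rework above removes the private VM_FAULT_* flags entirely: instead of encoding a failure in a flag and decoding it later in do_fault_error(), each failure site calls an error handler directly and passes the SIGSEGV si_code it already knows. A compressed sketch of the resulting control flow, simplified from the patch (the zero address passed to force_sig_fault() is a placeholder for the real TEID-derived fault address):

static void demo_handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
    if (user_mode(regs)) {
        /* user space: deliver the signal with the precise si_code */
        force_sig_fault(SIGSEGV, si_code, (void __user *)0);
        return;
    }
    if (fixup_exception(regs))    /* kernel space: try an extable fixup */
        return;
    die(regs, "Oops");    /* no recovery possible */
}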

@@ -21,10 +21,22 @@

#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/page.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

static struct page *gmap_alloc_crst(void)
{
    struct page *page;

    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    if (!page)
        return NULL;
    arch_set_page_dat(page, CRST_ALLOC_ORDER);
    return page;
}

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
    spin_lock_init(&gmap->guest_table_lock);
    spin_lock_init(&gmap->shadow_lock);
    refcount_set(&gmap->ref_count, 1);
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        goto out_free;
    page->index = 0;
@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
    unsigned long *new;

    /* since we dont free the gmap table until gmap_free we can unlock */
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        return -ENOMEM;
    new = page_to_virt(page);
@@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,

    BUG_ON(!gmap_is_shadow(sg));
    /* Allocate a shadow region second table */
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        return -ENOMEM;
    page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,

    BUG_ON(!gmap_is_shadow(sg));
    /* Allocate a shadow region second table */
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        return -ENOMEM;
    page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,

    BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
    /* Allocate a shadow segment table */
    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        return -ENOMEM;
    page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
    if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
        return -EINVAL;

    page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
    page = gmap_alloc_crst();
    if (!page)
        return -ENOMEM;
    page->index = 0;
@ -35,6 +35,7 @@
|
||||
#include <asm/processor.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/ctlreg.h>
|
||||
#include <asm/kfence.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/dma.h>
|
||||
@ -42,7 +43,6 @@
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/ctl_reg.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/kasan.h>
|
||||
@ -54,7 +54,7 @@
|
||||
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
|
||||
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
|
||||
|
||||
unsigned long __bootdata_preserved(s390_invalid_asce);
|
||||
struct ctlreg __bootdata_preserved(s390_invalid_asce);
|
||||
|
||||
unsigned long empty_zero_page, zero_page_mask;
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
@ -15,10 +15,10 @@
|
||||
#include <linux/uio.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/asm-extable.h>
|
||||
#include <asm/ctl_reg.h>
|
||||
#include <asm/abs_lowcore.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/maccess.h>
|
||||
#include <asm/ctlreg.h>
|
||||
|
||||
unsigned long __bootdata_preserved(__memcpy_real_area);
|
||||
pte_t *__bootdata_preserved(memcpy_real_ptep);
|
||||
|
@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
 			continue;
 		if (!pud_folded(*pud)) {
 			page = phys_to_page(pud_val(*pud));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pmd(pud, addr, next);
@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
 			continue;
 		if (!p4d_folded(*p4d)) {
 			page = phys_to_page(p4d_val(*p4d));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pud(p4d, addr, next);
@@ -151,24 +151,31 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)

 static void mark_kernel_pgd(void)
 {
-	unsigned long addr, next;
+	unsigned long addr, next, max_addr;
 	struct page *page;
 	pgd_t *pgd;
 	int i;

 	addr = 0;
+	/*
+	 * Figure out maximum virtual address accessible with the
+	 * kernel ASCE. This is required to keep the page table walker
+	 * from accessing non-existent entries.
+	 */
+	max_addr = (S390_lowcore.kernel_asce.val & _ASCE_TYPE_MASK) >> 2;
+	max_addr = 1UL << (max_addr * 11 + 31);
 	pgd = pgd_offset_k(addr);
 	do {
-		next = pgd_addr_end(addr, MODULES_END);
+		next = pgd_addr_end(addr, max_addr);
 		if (pgd_none(*pgd))
 			continue;
 		if (!pgd_folded(*pgd)) {
 			page = phys_to_page(pgd_val(*pgd));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_p4d(pgd, addr, next);
-	} while (pgd++, addr = next, addr != MODULES_END);
+	} while (pgd++, addr = next, addr != max_addr);
 }

 void __init cmma_init_nodat(void)
@@ -181,6 +188,12 @@ void __init cmma_init_nodat(void)
 		return;
 	/* Mark pages used in kernel page tables */
 	mark_kernel_pgd();
+	page = virt_to_page(&swapper_pg_dir);
+	for (i = 0; i < 4; i++)
+		set_bit(PG_arch_1, &page[i].flags);
+	page = virt_to_page(&invalid_pg_dir);
+	for (i = 0; i < 4; i++)
+		set_bit(PG_arch_1, &page[i].flags);

 	/* Set all kernel pages not used for page tables to stable/no-dat */
 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
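The two assignments computing max_addr pack the ASCE designation-type (DT) bits into a shift count: masking with _ASCE_TYPE_MASK and shifting right by 2 yields 0..3, and each deeper translation level contributes 11 more address bits (2048 entries per table) on top of the 2 GB a segment table maps. A quick check of the arithmetic, assuming the architected DT encoding:

/*
 *   DT 0 (segment table):       1UL << (0 * 11 + 31) =  2 GB
 *   DT 1 (region-third table):  1UL << (1 * 11 + 31) =  4 TB
 *   DT 2 (region-second table): 1UL << (2 * 11 + 31) =  8 PB
 *   DT 3 (region-first table):  shift count 64, i.e. the entire
 *                               64-bit address space
 */

This is what keeps the walker within the address range the kernel ASCE actually covers, which bounding the loop with MODULES_END did not guarantee.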
@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
 			break;
 		}
 		table = (unsigned long *)((unsigned long)old & mask);
-		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
 	} else if (MACHINE_HAS_IDTE) {
 		cspg(old, *old, new);
 	} else {
@@ -61,8 +61,8 @@ static void __crst_table_upgrade(void *arg)

 	/* change all active ASCEs to avoid the creation of new TLBs */
 	if (current->active_mm == mm) {
-		S390_lowcore.user_asce = mm->context.asce;
-		__ctl_load(S390_lowcore.user_asce, 7, 7);
+		S390_lowcore.user_asce.val = mm->context.asce;
+		local_ctl_load(7, &S390_lowcore.user_asce);
 	}
 	__tlb_flush_local();
 }
@@ -145,6 +145,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 	if (ptdesc) {
 		table = (u64 *)ptdesc_to_virt(ptdesc);
+		arch_set_page_dat(virt_to_page(table), 0);
 		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	}
@@ -487,11 +488,10 @@ static unsigned long *base_crst_alloc(unsigned long val)
 	unsigned long *table;
 	struct ptdesc *ptdesc;

-	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
+	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!ptdesc)
 		return NULL;
 	table = ptdesc_address(ptdesc);
-
 	crst_table_init(table, val);
 	return table;
 }
@@ -12,8 +12,10 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <asm/page-states.h>
 #include <asm/cacheflush.h>
 #include <asm/nospec-branch.h>
+#include <asm/ctlreg.h>
 #include <asm/pgalloc.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
@@ -45,8 +47,11 @@ void *vmem_crst_alloc(unsigned long val)
 	unsigned long *table;

 	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
-	if (table)
-		crst_table_init(table, val);
+	if (!table)
+		return NULL;
+	crst_table_init(table, val);
+	if (slab_is_available())
+		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
 	return table;
 }

@@ -498,6 +503,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return ret;
 }

+#ifdef CONFIG_MEMORY_HOTPLUG
+
 void vmemmap_free(unsigned long start, unsigned long end,
 		  struct vmem_altmap *altmap)
 {
@@ -506,6 +513,8 @@ void vmemmap_free(unsigned long start, unsigned long end,
 	mutex_unlock(&vmem_mutex);
 }

+#endif
+
 void vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	mutex_lock(&vmem_mutex);
@@ -659,7 +668,7 @@ void __init vmem_map_init(void)
 		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
 	}
 	if (MACHINE_HAS_NX)
-		ctl_set_bit(0, 20);
+		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
 	pr_info("Write protected kernel read-only data: %luk\n",
 		(unsigned long)(__end_rodata - _stext) >> 10);
 }
@@ -1094,7 +1094,7 @@ static int __init pci_base_init(void)

 	if (MACHINE_HAS_PCI_MIO) {
 		static_branch_enable(&have_mio);
-		ctl_set_bit(2, 5);
+		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
 	}

 	rc = zpci_debug_init();
@@ -16,7 +16,7 @@
 #include <linux/wait.h>
 #include <linux/string.h>
 #include <asm/asm-extable.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
 #include <asm/diag.h>

 #include "hmcdrv_ftp.h"
@@ -81,7 +81,7 @@ static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
 	struct sclp_trace_entry e;

 	memset(&e, 0, sizeof(e));
-	strncpy(e.id, id, sizeof(e.id));
+	strtomem(e.id, id);
 	e.a = a;
 	e.b = b;
 	debug_event(&sclp_debug, prio, &e, sizeof(e));
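strtomem() fits here because e.id is a fixed-size byte array that does not need NUL termination, which is exactly the case strncpy() was being (mis)used for. A short sketch of the pattern (struct and function names are illustrative, not from the tree):

#include <linux/string.h>

struct example_entry {
        char id[4];             /* fixed size, not necessarily NUL-terminated */
};

static void example_fill(struct example_entry *e, const char *id)
{
        memset(e, 0, sizeof(*e));       /* zero padding, as sclp_trace() does */
        strtomem(e->id, id);            /* copy at most sizeof(e->id) bytes, no terminator */
}

Where the destination must remain a C string, strscpy() is the replacement instead, as in the cio hunks further down.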
@@ -706,8 +706,8 @@ void
 sclp_sync_wait(void)
 {
 	unsigned long long old_tick;
+	struct ctlreg cr0, cr0_sync;
 	unsigned long flags;
-	unsigned long cr0, cr0_sync;
 	static u64 sync_count;
 	u64 timeout;
 	int irq_context;
@@ -732,10 +732,10 @@ sclp_sync_wait(void)
 	/* Enable service-signal interruption, disable timer interrupts */
 	old_tick = local_tick_disable();
 	trace_hardirqs_on();
-	__ctl_store(cr0, 0, 0);
-	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
-	cr0_sync |= 1UL << (63 - 54);
-	__ctl_load(cr0_sync, 0, 0);
+	local_ctl_store(0, &cr0);
+	cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+	cr0_sync.val |= 1UL << (63 - 54);
+	local_ctl_load(0, &cr0_sync);
 	__arch_local_irq_stosm(0x01);
 	/* Loop until driver state indicates finished request */
 	while (sclp_running_state != sclp_running_state_idle) {
@@ -745,7 +745,7 @@ sclp_sync_wait(void)
 		cpu_relax();
 	}
 	local_irq_disable();
-	__ctl_load(cr0, 0, 0);
+	local_ctl_load(0, &cr0);
 	if (!irq_context)
 		_local_bh_enable();
 	local_tick_enable(old_tick);
@@ -19,7 +19,7 @@
 #include <linux/mmzone.h>
 #include <linux/memory.h>
 #include <linux/module.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
 #include <asm/chpid.h>
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -353,7 +353,6 @@ static int sclp_mem_notifier(struct notifier_block *nb,
 		sclp_mem_change_state(start, size, 0);
 		break;
 	default:
-		rc = -EINVAL;
 		break;
 	}
 	mutex_unlock(&sclp_mem_mutex);
@@ -10,7 +10,7 @@

 #include <linux/errno.h>
 #include <linux/memblock.h>
-#include <asm/ctl_reg.h>
+#include <asm/ctlreg.h>
 #include <asm/sclp.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <asm/processor.h>
 #include <asm/lowcore.h>
+#include <asm/ctlreg.h>
 #include <asm/ebcdic.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
@@ -31,11 +32,11 @@ void sclp_early_wait_irq(void)
 	psw_t psw_ext_save, psw_wait;
 	union ctlreg0 cr0, cr0_new;

-	__ctl_store(cr0.val, 0, 0);
+	local_ctl_store(0, &cr0.reg);
 	cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
 	cr0_new.lap = 0;
 	cr0_new.sssm = 1;
-	__ctl_load(cr0_new.val, 0, 0);
+	local_ctl_load(0, &cr0_new.reg);

 	psw_ext_save = S390_lowcore.external_new_psw;
 	psw_mask = __extract_psw();
@@ -58,7 +59,7 @@ void sclp_early_wait_irq(void)
 	} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);

 	S390_lowcore.external_new_psw = psw_ext_save;
-	__ctl_load(cr0.val, 0, 0);
+	local_ctl_load(0, &cr0.reg);
 }

 int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
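Taken together, these hunks show the shape of the new control-register API: struct ctlreg wraps the raw 64-bit value, union ctlreg0 overlays named bits (.lap, .sssm) plus a .reg/.val view, and local_ctl_store()/local_ctl_load() replace the __ctl_store()/__ctl_load() macro pairs for a single register on the local CPU. A minimal sketch, assuming stctg/lctlg underneath as before (details are assumptions, not the in-tree header):

struct ctlreg {
        unsigned long val;
};

static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
{
        /* store control register cr into *reg */
        asm volatile("stctg %[cr],%[cr],%[reg]"
                     : [reg] "=Q" (*reg) : [cr] "i" (cr));
}

static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
{
        /* load control register cr from *reg */
        asm volatile("lctlg %[cr],%[cr],%[reg]"
                     : : [reg] "Q" (*reg), [cr] "i" (cr) : "memory");
}

The typed wrapper makes it a compile error to mix a raw unsigned long with a control-register value, which is what most of the mechanical conversions in this merge enforce.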
@@ -334,7 +334,7 @@ static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
 		return 0;
 	if (chp->cmg == -1) /* channel measurements not available */
 		return sprintf(buf, "unknown\n");
-	return sprintf(buf, "%x\n", chp->cmg);
+	return sprintf(buf, "%d\n", chp->cmg);
 }

 static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
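The format change is user-visible: a CMG value of 26, for example, was previously exported through this sysfs attribute as "1a" and now reads "26", so consumers no longer have to guess the base.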
@@ -393,8 +393,8 @@ static void format_node_data(char *params, char *id, struct node_descriptor *nd)
 	memset(id, 0, NODEID_LEN);

 	if (nd->validity != ND_VALIDITY_VALID) {
-		strncpy(params, "n/a", PARAMS_LEN - 1);
-		strncpy(id, "n/a", NODEID_LEN - 1);
+		strscpy(params, "n/a", PARAMS_LEN);
+		strscpy(id, "n/a", NODEID_LEN);
 		return;
 	}

@@ -881,8 +881,8 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
 	secm_area->request.code = 0x0016;

 	secm_area->key = PAGE_DEFAULT_KEY >> 4;
-	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
-	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+	secm_area->cub_addr1 = virt_to_phys(css->cub_addr1);
+	secm_area->cub_addr2 = virt_to_phys(css->cub_addr2);

 	secm_area->operation_code = enable ? 0 : 1;
@@ -12,8 +12,8 @@
 #include <linux/kthread.h>
 #include <linux/init.h>
 #include <linux/wait.h>
+#include <asm/ctlreg.h>
 #include <asm/crw.h>
-#include <asm/ctl_reg.h>
 #include "ioasm.h"

 static DEFINE_MUTEX(crw_handler_mutex);
@@ -156,7 +156,7 @@ static int __init crw_machine_check_init(void)
 	task = kthread_run(crw_collect_info, NULL, "kmcheck");
 	if (IS_ERR(task))
 		return PTR_ERR(task);
-	ctl_set_bit(14, 28); /* enable channel report MCH */
+	system_ctl_set_bit(14, CR14_CHANNEL_REPORT_SUBMASK_BIT);
 	return 0;
 }
 device_initcall(crw_machine_check_init);
@@ -33,7 +33,7 @@ void isc_register(unsigned int isc)

 	spin_lock(&isc_ref_lock);
 	if (isc_refs[isc] == 0)
-		ctl_set_bit(6, 31 - isc);
+		system_ctl_set_bit(6, 31 - isc);
 	isc_refs[isc]++;
 	spin_unlock(&isc_ref_lock);
 }
@@ -61,7 +61,7 @@ void isc_unregister(unsigned int isc)
 		goto out_unlock;
 	}
 	if (isc_refs[isc] == 1)
-		ctl_clear_bit(6, 31 - isc);
+		system_ctl_clear_bit(6, 31 - isc);
 	isc_refs[isc]--;
 out_unlock:
 	spin_unlock(&isc_ref_lock);
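The ctl_set_bit()/ctl_clear_bit() calls here and in crw.c above become system_ctl_set_bit()/system_ctl_clear_bit(), presumably keeping the old semantics of broadcasting the control-register update to all CPUs (the local_ctl_* helpers seen earlier act on the executing CPU only). A hypothetical caller changes in name only:

/* illustrative driver init; the CR14 constant is from the crw.c hunk above */
static int __init example_init(void)
{
        /* enable channel-report machine checks machine-wide */
        system_ctl_set_bit(14, CR14_CHANNEL_REPORT_SUBMASK_BIT);
        return 0;
}
device_initcall(example_init);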
@@ -1865,15 +1865,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
 			}
 			/* get it and thus adjust reference counter */
 			get_device(dev);
-			if (decfg)
+			if (decfg) {
 				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
 					    __func__, ac->id, dom);
-			else if (chkstop)
+			} else if (chkstop) {
 				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
 					    __func__, ac->id, dom);
-			else
+			} else {
+				/* nudge the queue's state machine */
+				ap_queue_init_state(aq);
 				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
 					    __func__, ac->id, dom);
+			}
 			goto put_dev_and_continue;
 		}
 		/* handle state changes on already existing queue device */
@@ -1895,10 +1898,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
 		} else if (!chkstop && aq->chkstop) {
 			/* checkstop off */
 			aq->chkstop = false;
-			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
-				aq->dev_state = AP_DEV_STATE_OPERATING;
-				aq->sm_state = AP_SM_STATE_RESET_START;
-			}
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+				_ap_queue_init_state(aq);
 			spin_unlock_bh(&aq->lock);
 			AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
 				   __func__, ac->id, dom);
@@ -1922,10 +1923,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
 		} else if (!decfg && !aq->config) {
 			/* config on this queue device */
 			aq->config = true;
-			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
-				aq->dev_state = AP_DEV_STATE_OPERATING;
-				aq->sm_state = AP_SM_STATE_RESET_START;
-			}
+			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+				_ap_queue_init_state(aq);
 			spin_unlock_bh(&aq->lock);
 			AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
 				   __func__, ac->id, dom);
@@ -207,6 +207,7 @@ struct ap_queue {
 	bool chkstop;			/* checkstop state */
 	ap_qid_t qid;			/* AP queue id. */
 	bool interrupt;			/* indicate if interrupts are enabled */
+	bool se_bound;			/* SE bound state */
 	unsigned int assoc_idx;		/* SE association index */
 	int queue_count;		/* # messages currently on AP queue. */
 	int pendingq_count;		/* # requests on pendingq list. */
@@ -271,6 +272,7 @@ enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
 void ap_flush_queue(struct ap_queue *aq);
+bool ap_queue_usable(struct ap_queue *aq);

 void *ap_airq_ptr(void);
 int ap_sb_available(void);
@@ -287,6 +289,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_init_state(struct ap_queue *aq);
+void _ap_queue_init_state(struct ap_queue *aq);

 struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
 			       int comp_type, unsigned int functions, int ml);
@@ -33,6 +33,11 @@ static inline bool ap_q_supports_assoc(struct ap_queue *aq)
 	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
 }

+static inline bool ap_q_needs_bind(struct ap_queue *aq)
+{
+	return ap_q_supports_bind(aq) && ap_sb_available();
+}
+
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @aq: The AP queue
@@ -304,6 +309,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
 		aq->interrupt = false;
 		aq->rapq_fbit = 0;
+		aq->se_bound = false;
 		return AP_SM_WAIT_LOW_TIMEOUT;
 	default:
 		aq->dev_state = AP_DEV_STATE_ERROR;
@@ -868,7 +874,12 @@ static ssize_t se_bind_store(struct device *dev,
 		}
 		status = ap_bapq(aq->qid);
 		spin_unlock_bh(&aq->lock);
-		if (status.response_code) {
+		if (!status.response_code) {
+			aq->se_bound = true;
+			AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
+				    AP_QID_CARD(aq->qid),
+				    AP_QID_QUEUE(aq->qid));
+		} else {
 			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
 				    __func__, status.response_code,
 				    AP_QID_CARD(aq->qid),
@@ -1073,6 +1084,42 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
 }
 EXPORT_SYMBOL(ap_queue_message);

+/**
+ * ap_queue_usable(): Check if queue is usable just now.
+ * @aq: The AP queue device to test for usability.
+ * This function is intended for the scheduler to query if it makes
+ * sense to enqueue a message into this AP queue device by calling
+ * ap_queue_message(). The perspective is very short-term as the
+ * state machine and device state(s) may change at any time.
+ */
+bool ap_queue_usable(struct ap_queue *aq)
+{
+	bool rc = true;
+
+	spin_lock_bh(&aq->lock);
+
+	/* check for not configured or checkstopped */
+	if (!aq->config || aq->chkstop) {
+		rc = false;
+		goto unlock_and_out;
+	}
+
+	/* device state needs to be ok */
+	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
+		rc = false;
+		goto unlock_and_out;
+	}
+
+	/* SE guest's queues additionally need to be bound */
+	if (ap_q_needs_bind(aq) && !aq->se_bound)
+		rc = false;
+
+unlock_and_out:
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+EXPORT_SYMBOL(ap_queue_usable);
+
 /**
  * ap_cancel_message(): Cancel a crypto request.
  * @aq: The AP device that has the message queued
@@ -1160,14 +1207,19 @@ void ap_queue_remove(struct ap_queue *aq)
 	spin_unlock_bh(&aq->lock);
 }

-void ap_queue_init_state(struct ap_queue *aq)
+void _ap_queue_init_state(struct ap_queue *aq)
 {
-	spin_lock_bh(&aq->lock);
 	aq->dev_state = AP_DEV_STATE_OPERATING;
 	aq->sm_state = AP_SM_STATE_RESET_START;
+	aq->last_err_rc = 0;
+	aq->assoc_idx = ASSOC_IDX_INVALID;
 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+}
+
+void ap_queue_init_state(struct ap_queue *aq)
+{
+	spin_lock_bh(&aq->lock);
+	_ap_queue_init_state(aq);
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_state);
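ap_queue_usable() centralizes the "worth dispatching to?" test under the queue lock, including the new secure-execution bind requirement. Since the answer is only a snapshot, callers treat it as advisory and simply skip the queue, which is what the zcrypt conversions below do; schematically:

        /* illustrative: skip queues that are not usable right now */
        if (!ap_queue_usable(zq->queue))
                continue;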
@@ -693,7 +693,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	for_each_zcrypt_queue(zq, zc) {
 		/* check if device is usable and eligible */
 		if (!zq->online || !zq->ops->rsa_modexpo ||
-		    !zq->queue->config || zq->queue->chkstop)
+		    !ap_queue_usable(zq->queue))
 			continue;
 		/* check if device node has admission for this queue */
 		if (!zcrypt_check_queue(perms,
@@ -798,7 +798,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	for_each_zcrypt_queue(zq, zc) {
 		/* check if device is usable and eligible */
 		if (!zq->online || !zq->ops->rsa_modexpo_crt ||
-		    !zq->queue->config || zq->queue->chkstop)
+		    !ap_queue_usable(zq->queue))
 			continue;
 		/* check if device node has admission for this queue */
 		if (!zcrypt_check_queue(perms,
@@ -916,7 +916,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
 	for_each_zcrypt_queue(zq, zc) {
 		/* check for device usable and eligible */
 		if (!zq->online || !zq->ops->send_cprb ||
-		    !zq->queue->config || zq->queue->chkstop ||
+		    !ap_queue_usable(zq->queue) ||
 		    (tdom != AUTOSEL_DOM &&
 		     tdom != AP_QID_QUEUE(zq->queue->qid)))
 			continue;
@@ -1087,7 +1087,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
 	for_each_zcrypt_queue(zq, zc) {
 		/* check if device is usable and eligible */
 		if (!zq->online || !zq->ops->send_ep11_cprb ||
-		    !zq->queue->config || zq->queue->chkstop ||
+		    !ap_queue_usable(zq->queue) ||
 		    (targets &&
 		     !is_desired_ep11_queue(zq->queue->qid,
 					    target_num, targets)))
 			continue;
@@ -1186,7 +1186,7 @@ static long zcrypt_rng(char *buffer)
 	for_each_zcrypt_queue(zq, zc) {
 		/* check if device is usable and eligible */
 		if (!zq->online || !zq->ops->rng ||
-		    !zq->queue->config || zq->queue->chkstop)
+		    !ap_queue_usable(zq->queue))
 			continue;
 		if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
 			continue;
@@ -279,7 +279,11 @@ static const struct {
 	{ 1, "BSI2009" },
 	{ 2, "FIPS2011" },
 	{ 3, "BSI2011" },
+	{ 4, "SIGG-IMPORT" },
+	{ 5, "SIGG" },
 	{ 6, "BSICC2017" },
+	{ 7, "FIPS2021" },
+	{ 8, "FIPS2024" },
 	{ 0, NULL }
 };
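This table maps EP11 operation-mode bit numbers to printable names; the hunk extends it with the SIGG entries and the newer FIPS modes. A hypothetical consumer (array and member names are assumptions, since the visible initializer does not show them) would render a card's mode bitmask by testing each listed bit:

static void print_op_modes(u64 op_mode)
{
        int i;

        /* the { 0, NULL } entry terminates the walk */
        for (i = 0; ep11_op_modes[i].mode_txt; i++)
                if (op_mode & (1ULL << ep11_op_modes[i].mode_bit))
                        pr_info("%s\n", ep11_op_modes[i].mode_txt);
}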
@@ -98,8 +98,22 @@ static inline int convert_error(struct zcrypt_queue *zq,
 	case REP88_ERROR_MESSAGE_MALFORMD:	/* 0x22 */
 	case REP88_ERROR_KEY_TYPE:		/* 0x34 */
 		/* RY indicates malformed request */
-		ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
-				__func__, card, queue, ehdr->reply_code);
+		if (ehdr->reply_code == REP82_ERROR_FILTERED_BY_HYPERVISOR &&
+		    ehdr->type == TYPE86_RSP_CODE) {
+			struct {
+				struct type86_hdr hdr;
+				struct type86_fmt2_ext fmt2;
+			} __packed * head = reply->msg;
+			unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+			ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x apfs=0x%x => rc=EINVAL\n",
+					__func__, card, queue,
+					ehdr->reply_code, apfs);
+		} else {
+			ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+					__func__, card, queue,
+					ehdr->reply_code);
+		}
 		return -EINVAL;
 	case REP82_ERROR_MACHINE_FAILURE:	/* 0x10 */
 	case REP82_ERROR_MESSAGE_TYPE:	/* 0x20 */
@@ -1823,7 +1823,7 @@ static int __init iucv_init(void)
 		rc = -EPROTONOSUPPORT;
 		goto out;
 	}
-	ctl_set_bit(0, 1);
+	system_ctl_set_bit(0, CR0_IUCV_BIT);
 	rc = iucv_query_maxconn();
 	if (rc)
 		goto out_ctl;
@@ -1871,7 +1871,7 @@ static int __init iucv_init(void)
 out_int:
 	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
 out_ctl:
-	ctl_clear_bit(0, 1);
+	system_ctl_clear_bit(0, 1);
 out:
 	return rc;
 }