Merge tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Optimize ftrace and kprobes code patching and avoid stop machine for
   kprobes if sequential instruction fetching facility is available

 - Add hiperdispatch feature to dynamically adjust CPU capacity in
   vertical polarization to improve scheduling efficiency and overall
   performance. Also add infrastructure for handling warning track
   interrupts (WTI), allowing for graceful CPU preemption

 - Rework the pkey module in the crypto code and split it into
   separate, independent modules for sysfs, PCKMO, CCA, and EP11, so
   that each module loads only when the relevant hardware is available

 - Add hardware acceleration for HMAC modes and the full AES-XTS cipher,
   utilizing message-security assist extensions (MSA) 10 and 11. It
   introduces new shash implementations for HMAC-SHA224/256/384/512 and
   registers the hardware-accelerated AES-XTS cipher as the preferred
   option. Also add clear key token support

 - Add MSA 10 and 11 processor activity instrumentation counters to perf
   and update PAI Extension 1 NNPA counters

 - Clean up CPU sampling facility code and rework debug/WARN_ON_ONCE
   statements

 - Add support for SHA3 performance enhancements introduced with MSA 12

 - Add support for the query authentication information feature of MSA
   13 and introduce the KDSA CPACF instruction. Provide query and query
   authentication information in sysfs, enabling tools like cpacfinfo to
   present this data in a human-readable form

 - Update kernel disassembler instructions

 - Always enable EXPOLINE_EXTERN if supported by the compiler to ensure
   kpatch compatibility

 - Add missing warning handling and relocated lowcore support to the
   early program check handler

 - Optimize ftrace_return_address() and avoid calling the unwinder

 - Make modules use kernel ftrace trampolines

 - Strip relocs from the final vmlinux ELF file, making it roughly half
   the size

 - Dump register contents and call trace for early crashes to the
   console

 - Generate ptdump address marker array dynamically

 - Fix rcu_sched stalls that might occur when adding or removing large
   amounts of pages at once to or from the CMM balloon

 - Fix deadlock caused by recursive lock of the AP bus scan mutex

 - Unify sync and async register save areas in entry code

 - Clean up debug prints in crypto code

 - Various cleanup and sanitizing patches for the decompressor

 - Various small ftrace cleanups

* tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (84 commits)
  s390/crypto: Display Query and Query Authentication Information in sysfs
  s390/crypto: Add Support for Query Authentication Information
  s390/crypto: Rework RRE and RRF CPACF inline functions
  s390/crypto: Add KDSA CPACF Instruction
  s390/disassembler: Remove duplicate instruction format RSY_RDRU
  s390/boot: Move boot_printk() code to own file
  s390/boot: Use boot_printk() instead of sclp_early_printk()
  s390/boot: Rename decompressor_printk() to boot_printk()
  s390/boot: Compile all files with the same march flag
  s390: Use MARCH_HAS_*_FEATURES defines
  s390: Provide MARCH_HAS_*_FEATURES defines
  s390/facility: Disable compile time optimization for decompressor code
  s390/boot: Increase minimum architecture to z10
  s390/als: Remove obsolete comment
  s390/sha3: Fix SHA3 selftests failures
  s390/pkey: Add AES xts and HMAC clear key token support
  s390/cpacf: Add MSA 10 and 11 new PCKMO functions
  s390/mm: Add cond_resched() to cmm_alloc/free_pages()
  s390/pai_ext: Update PAI extension 1 counters
  s390/pai_crypto: Add support for MSA 10 and 11 pai counters
  ...
Linus Torvalds 2024-09-21 09:02:54 -07:00
commit 1ec6d09789
92 changed files with 6269 additions and 3231 deletions

@@ -514,6 +514,26 @@ config SCHED_TOPOLOGY
making when dealing with machines that have multi-threading,
multiple cores or multiple books.
config SCHED_TOPOLOGY_VERTICAL
def_bool y
bool "Use vertical CPU polarization by default"
depends on SCHED_TOPOLOGY
help
Use vertical CPU polarization by default if available.
The default CPU polarization is horizontal.
config HIPERDISPATCH_ON
def_bool y
bool "Use hiperdispatch on vertical polarization by default"
depends on SCHED_TOPOLOGY
depends on PROC_SYSCTL
help
Hiperdispatch aims to improve the CPU scheduler's decision
making when using vertical polarization by adjusting CPU
capacities dynamically. Set this option to use hiperdispatch
on vertical polarization by default. This can be overwritten
by sysctl's s390.hiperdispatch attribute later on.
source "kernel/Kconfig.hz"
config CERT_STORE
@@ -558,17 +578,13 @@ config EXPOLINE
If unsure, say N.
config EXPOLINE_EXTERN
def_bool y if EXPOLINE
depends on EXPOLINE
depends on CC_IS_GCC && GCC_VERSION >= 110200
depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
prompt "Generate expolines as extern functions."
def_bool EXPOLINE && CC_IS_GCC && GCC_VERSION >= 110200 && \
$(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
help
This option is required for some tooling like kpatch. The kernel is
compiled with -mindirect-branch=thunk-extern and requires a newer
compiler.
If unsure, say N.
Generate expolines as external functions if the compiler supports it.
This option is required for some tooling like kpatch, if expolines
are enabled. The kernel is compiled with
-mindirect-branch=thunk-extern, which requires a newer compiler.
choice
prompt "Expoline default"
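
The new HIPERDISPATCH_ON option above refers to the s390.hiperdispatch
sysctl attribute. A minimal userspace sketch of toggling it, assuming
the standard sysctl-to-procfs mapping (the path and error handling are
illustrative, not part of this series):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/s390/hiperdispatch", "w");

	if (!f)
		return 1;	/* kernel built without HIPERDISPATCH/PROC_SYSCTL */
	fputc('1', f);		/* '1' enables, '0' disables hiperdispatch */
	return fclose(f) ? 1 : 0;
}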

@@ -0,0 +1,38 @@
# SPDX-License-Identifier: GPL-2.0
# ===========================================================================
# Post-link s390 pass
# ===========================================================================
#
# 1. Separate relocations from vmlinux into relocs.S.
# 2. Strip relocations from vmlinux.
PHONY := __archpost
__archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
CMD_RELOCS=arch/s390/tools/relocs
OUT_RELOCS = arch/s390/boot
quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S
cmd_relocs = \
mkdir -p $(OUT_RELOCS); \
$(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S
quiet_cmd_strip_relocs = RSTRIP $@
cmd_strip_relocs = \
$(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \
--remove-section='.rela.*' --remove-section='.rela__*' $@
vmlinux: FORCE
$(call cmd,relocs)
$(call cmd,strip_relocs)
clean:
@rm -f $(OUT_RELOCS)/relocs.S
PHONY += FORCE clean
FORCE:
.PHONY: $(PHONY)

@@ -11,35 +11,23 @@ KASAN_SANITIZE := n
KCSAN_SANITIZE := n
KMSAN_SANITIZE := n
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
#
# Use minimum architecture for als.c to be able to print an error
# Use minimum architecture level so it is possible to print an error
# message if the kernel is started on a machine which is too old
#
ifndef CONFIG_CC_IS_CLANG
CC_FLAGS_MARCH_MINIMUM := -march=z900
else
CC_FLAGS_MARCH_MINIMUM := -march=z10
endif
ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
endif
KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR))
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR))
KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o
obj-y += uv.o printk.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
@@ -109,11 +97,9 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
CMD_RELOCS=arch/s390/tools/relocs
quiet_cmd_relocs = RELOCS $@
cmd_relocs = $(CMD_RELOCS) $< > $@
$(obj)/relocs.S: vmlinux FORCE
$(call if_changed,relocs)
# relocs.S is created by the vmlinux postlink step.
$(obj)/relocs.S: vmlinux
@true
suffix-$(CONFIG_KERNEL_GZIP) := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2

@@ -9,42 +9,8 @@
#include <asm/sclp.h>
#include "boot.h"
/*
* The code within this file will be called very early. It may _not_
* access anything within the bss section, since that is not cleared
* yet and may contain data (e.g. initrd) that must be saved by other
* code.
* For temporary objects the stack (16k) should be used.
*/
static unsigned long als[] = { FACILITIES_ALS };
static void u16_to_hex(char *str, u16 val)
{
int i, num;
for (i = 1; i <= 4; i++) {
num = (val >> (16 - 4 * i)) & 0xf;
if (num >= 10)
num += 7;
*str++ = '0' + num;
}
*str = '\0';
}
static void print_machine_type(void)
{
static char mach_str[80] = "Detected machine-type number: ";
char type_str[5];
struct cpuid id;
get_cpu_id(&id);
u16_to_hex(type_str, id.machine);
strcat(mach_str, type_str);
strcat(mach_str, "\n");
sclp_early_printk(mach_str);
}
static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -80,8 +46,7 @@ void print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
strcat(als_str, "\n");
sclp_early_printk(als_str);
boot_printk("%s\n", als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@@ -89,16 +54,18 @@ void print_missing_facilities(void)
first = 0;
}
}
strcat(als_str, "\n");
sclp_early_printk(als_str);
boot_printk("%s\n", als_str);
}
static void facility_mismatch(void)
{
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
struct cpuid id;
get_cpu_id(&id);
boot_printk("The Linux kernel requires more recent processor hardware\n");
boot_printk("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities();
sclp_early_printk("See Principles of Operations for facility bits\n");
boot_printk("See Principles of Operations for facility bits\n");
disabled_wait();
}

@@ -70,7 +70,7 @@ void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);
void __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);

@@ -299,11 +299,11 @@ SYM_CODE_END(startup_normal)
# the save area and does disabled wait with a faulty address.
#
SYM_CODE_START_LOCAL(startup_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
stmg %r8,%r15,__LC_SAVE_AREA
la %r8,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA
mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
ni __LC_RETURN_PSW,0xfc # remove IO and EX bits

@@ -215,7 +215,7 @@ static void check_cleared_facilities(void)
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
}

@@ -32,7 +32,7 @@ struct prng_parm {
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
boot_printk("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))

@@ -11,131 +11,19 @@
#include <asm/uv.h>
#include "boot.h"
const char hex_asc[] = "0123456789abcdef";
static char *as_hex(char *dst, unsigned long val, int pad)
{
char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
for (*p-- = 0; p >= dst; val >>= 4)
*p-- = hex_asc[val & 0x0f];
return end;
}
static char *symstart(char *p)
{
while (*p)
p--;
return p + 1;
}
static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
{
/* symbol entries are in a form "10000 c4 startup\0" */
char *a = _decompressor_syms_start;
char *b = _decompressor_syms_end;
unsigned long start;
unsigned long size;
char *pivot;
char *endp;
while (a < b) {
pivot = symstart(a + (b - a) / 2);
start = simple_strtoull(pivot, &endp, 16);
size = simple_strtoull(endp + 1, &endp, 16);
if (ip < start) {
b = pivot;
continue;
}
if (ip > start + size) {
a = pivot + strlen(pivot) + 1;
continue;
}
*off = ip - start;
*len = size;
return endp + 1;
}
return NULL;
}
static noinline char *strsym(void *ip)
{
static char buf[64];
unsigned short off;
unsigned short len;
char *p;
p = findsym((unsigned long)ip, &off, &len);
if (p) {
strncpy(buf, p, sizeof(buf));
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
p = buf + strnlen(buf, sizeof(buf) - 15);
strcpy(p, "+0x");
p = as_hex(p + 3, off, 0);
strcpy(p, "/0x");
as_hex(p + 3, len, 0);
} else {
as_hex(buf, (unsigned long)ip, 16);
}
return buf;
}
void decompressor_printk(const char *fmt, ...)
{
char buf[1024] = { 0 };
char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
unsigned long pad;
char *p = buf;
va_list args;
va_start(args, fmt);
for (; p < end && *fmt; fmt++) {
if (*fmt != '%') {
*p++ = *fmt;
continue;
}
pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
switch (*fmt) {
case 's':
p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
break;
case 'p':
if (*++fmt != 'S')
goto out;
p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
break;
case 'l':
if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned long), pad);
break;
case 'x':
if (end - p <= max(sizeof(int) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned int), pad);
break;
default:
goto out;
}
}
out:
va_end(args);
sclp_early_printk(buf);
}
void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
bool first = true;
decompressor_printk("Call Trace:\n");
boot_printk("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp;
decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
" sp:%016lx [<%016lx>] %pS\n",
sp, sf->gprs[8], (void *)sf->gprs[8]);
boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
" sp:%016lx [<%016lx>] %pS\n",
sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp)
break;
sp = sf->back_chain;
@@ -148,34 +36,30 @@ void print_pgm_check_info(void)
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
decompressor_printk("Linux version %s\n", kernel_version);
boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
boot_printk("Kernel command line: %s\n", early_command_line);
boot_printk("Kernel fault: interruption code %04x ilc:%x\n",
get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
if (kaslr_enabled()) {
decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
boot_printk("Kernel random base: %lx\n", __kaslr_offset);
boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
}
decompressor_printk("PSW : %016lx %016lx (%pS)\n",
get_lowcore()->psw_save_area.mask,
get_lowcore()->psw_save_area.addr,
(void *)get_lowcore()->psw_save_area.addr);
decompressor_printk(
boot_printk("PSW : %016lx %016lx (%pS)\n",
get_lowcore()->psw_save_area.mask,
get_lowcore()->psw_save_area.addr,
(void *)get_lowcore()->psw_save_area.addr);
boot_printk(
" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
psw->eaba);
decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n",
gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
decompressor_printk(" %016lx %016lx %016lx %016lx\n",
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]);
decompressor_printk("Last Breaking-Event-Address:\n");
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
(void *)get_lowcore()->pgm_last_break);
boot_printk("Last Breaking-Event-Address:\n");
boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
(void *)get_lowcore()->pgm_last_break);
}

@@ -190,27 +190,27 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
enum reserved_range_type t;
int i;
decompressor_printk("Linux version %s\n", kernel_version);
boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
decompressor_printk("Kernel command line: %s\n", early_command_line);
decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
size, align, min, max);
decompressor_printk("Reserved memory ranges:\n");
boot_printk("Kernel command line: %s\n", early_command_line);
boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
size, align, min, max);
boot_printk("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start;
}
decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
get_physmem_info_source(), physmem_info.info_source);
boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) {
decompressor_printk("%016lx %016lx\n", start, end);
boot_printk("%016lx %016lx\n", start, end);
total_mem += end - start;
}
decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
total_mem, total_reserved_mem,
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
total_mem, total_reserved_mem,
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address());
sclp_early_printk("\n\n -- System halted\n");
boot_printk("\n\n -- System halted\n");
disabled_wait();
}

arch/s390/boot/printk.c (new file)

@@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/stdarg.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <asm/stacktrace.h>
#include <asm/boot_data.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "boot.h"
const char hex_asc[] = "0123456789abcdef";
static char *as_hex(char *dst, unsigned long val, int pad)
{
char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
for (*p-- = 0; p >= dst; val >>= 4)
*p-- = hex_asc[val & 0x0f];
return end;
}
static char *symstart(char *p)
{
while (*p)
p--;
return p + 1;
}
static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
{
/* symbol entries are in a form "10000 c4 startup\0" */
char *a = _decompressor_syms_start;
char *b = _decompressor_syms_end;
unsigned long start;
unsigned long size;
char *pivot;
char *endp;
while (a < b) {
pivot = symstart(a + (b - a) / 2);
start = simple_strtoull(pivot, &endp, 16);
size = simple_strtoull(endp + 1, &endp, 16);
if (ip < start) {
b = pivot;
continue;
}
if (ip > start + size) {
a = pivot + strlen(pivot) + 1;
continue;
}
*off = ip - start;
*len = size;
return endp + 1;
}
return NULL;
}
static noinline char *strsym(void *ip)
{
static char buf[64];
unsigned short off;
unsigned short len;
char *p;
p = findsym((unsigned long)ip, &off, &len);
if (p) {
strncpy(buf, p, sizeof(buf));
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
p = buf + strnlen(buf, sizeof(buf) - 15);
strcpy(p, "+0x");
p = as_hex(p + 3, off, 0);
strcpy(p, "/0x");
as_hex(p + 3, len, 0);
} else {
as_hex(buf, (unsigned long)ip, 16);
}
return buf;
}
void boot_printk(const char *fmt, ...)
{
char buf[1024] = { 0 };
char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
unsigned long pad;
char *p = buf;
va_list args;
va_start(args, fmt);
for (; p < end && *fmt; fmt++) {
if (*fmt != '%') {
*p++ = *fmt;
continue;
}
pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
switch (*fmt) {
case 's':
p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
break;
case 'p':
if (*++fmt != 'S')
goto out;
p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
break;
case 'l':
if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned long), pad);
break;
case 'x':
if (end - p <= max(sizeof(int) * 2, pad))
goto out;
p = as_hex(p, va_arg(args, unsigned int), pad);
break;
default:
goto out;
}
}
out:
va_end(args);
sclp_early_printk(buf);
}
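
The boot_printk() parser above understands only %s, %pS, %lx and %x
(the hex conversions take an optional decimal pad); an unknown
specifier ends formatting early. Calls quoted from the boot code in
this series illustrate the supported forms:

	boot_printk("Linux version %s\n", kernel_version);
	boot_printk("Kernel fault: interruption code %04x ilc:%x\n",
		    get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
	boot_printk("PSW : %016lx %016lx (%pS)\n",
		    get_lowcore()->psw_save_area.mask,
		    get_lowcore()->psw_save_area.addr,
		    (void *)get_lowcore()->psw_save_area.addr);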

@@ -39,10 +39,7 @@ struct machine_info machine;
void error(char *x)
{
sclp_early_printk("\n\n");
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
boot_printk("\n\n%s\n\n -- System halted", x);
disabled_wait();
}
@@ -296,7 +293,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
boot_printk("The kernel base address is forced to %lx\n", kernel_start);
} else {
kernel_start = __NO_KASLR_START_KERNEL;
}

@@ -794,8 +794,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_PKEY_CCA=m
CONFIG_PKEY_EP11=m
CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y

@@ -781,8 +781,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_PKEY_CCA=m
CONFIG_PKEY_EP11=m
CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y

@@ -132,4 +132,14 @@ config CRYPTO_CHACHA_S390
It is available as of z13.
config CRYPTO_HMAC_S390
tristate "Keyed-hash message authentication code: HMAC"
depends on S390
select CRYPTO_HASH
help
s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and
SHA512.
Architecture: s390
endmenu

@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
obj-y += arch_random.o
crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o

@@ -51,8 +51,13 @@ struct s390_aes_ctx {
};
struct s390_xts_ctx {
u8 key[32];
u8 pcc_key[32];
union {
u8 keys[64];
struct {
u8 key[32];
u8 pcc_key[32];
};
};
int key_len;
unsigned long fc;
struct crypto_skcipher *fallback;
@@ -526,6 +531,108 @@ static struct skcipher_alg xts_aes_alg = {
.decrypt = xts_aes_decrypt,
};
static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
int err;
err = xts_fallback_setkey(tfm, in_key, key_len);
if (err)
return err;
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
(key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
/* Check if the function code is available */
xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!xts_ctx->fc)
return 0;
/* Store double-key */
memcpy(xts_ctx->keys, in_key, key_len);
xts_ctx->key_len = key_len;
return 0;
}
static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
unsigned int offset, nbytes, n;
struct skcipher_walk walk;
int ret;
struct {
__u8 key[64];
__u8 tweak[16];
__u8 nap[16];
} fxts_param = {
.nap = {0},
};
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
*subreq = *req;
skcipher_request_set_tfm(subreq, xts_ctx->fallback);
return (modifier & CPACF_DECRYPT) ?
crypto_skcipher_decrypt(subreq) :
crypto_skcipher_encrypt(subreq);
}
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
offset = xts_ctx->key_len & 0x20;
memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
walk.dst.virt.addr, walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
memzero_explicit(&fxts_param, sizeof(fxts_param));
return ret;
}
static int fullxts_aes_encrypt(struct skcipher_request *req)
{
return fullxts_aes_crypt(req, 0);
}
static int fullxts_aes_decrypt(struct skcipher_request *req)
{
return fullxts_aes_crypt(req, CPACF_DECRYPT);
}
static struct skcipher_alg fullxts_aes_alg = {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "full-xts-aes-s390",
.base.cra_priority = 403, /* aes-xts-s390 + 1 */
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
.base.cra_module = THIS_MODULE,
.init = xts_fallback_init,
.exit = xts_fallback_exit,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = fullxts_aes_set_key,
.encrypt = fullxts_aes_encrypt,
.decrypt = fullxts_aes_decrypt,
};
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -955,7 +1062,7 @@ static struct aead_alg gcm_aes_aead = {
};
static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
@@ -1012,6 +1119,13 @@ static int __init aes_s390_init(void)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
ret = aes_s390_register_skcipher(&fullxts_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
ret = aes_s390_register_skcipher(&xts_aes_alg);

@@ -0,0 +1,359 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2024
*
* s390 specific HMAC support.
*/
#define KMSG_COMPONENT "hmac_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/cpacf.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
/*
* KMAC param block layout for sha2 function codes:
* The layout of the param block for the KMAC instruction depends on the
* blocksize of the used hashing sha2-algorithm function codes. The param block
* contains the hash chaining value (cv), the input message bit-length (imbl)
* and the hmac-secret (key). To prevent code duplication, the sizes of all
* these are calculated based on the blocksize.
*
* param-block:
* +-------+
* | cv |
* +-------+
* | imbl |
* +-------+
* | key |
* +-------+
*
* sizes:
* part | sh2-alg | calculation | size | type
* -----+---------+-------------+------+--------
* cv | 224/256 | blocksize/2 | 32 | u64[8]
* | 384/512 | | 64 | u128[8]
* imbl | 224/256 | blocksize/8 | 8 | u64
* | 384/512 | | 16 | u128
* key | 224/256 | blocksize | 64 | u8[64]
* | 384/512 | | 128 | u8[128]
*/
#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
#define MAX_IMBL_SIZE sizeof(u128)
#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define SHA2_CV_SIZE(bs) ((bs) >> 1)
#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
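/*
 * Worked example, derived from the macros above: for the SHA-512 block
 * size of 128 bytes, cv occupies param bytes 0..63, imbl bytes 64..79
 * and the key bytes 80..207.
 */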
struct s390_hmac_ctx {
u8 key[MAX_BLOCK_SIZE];
};
union s390_kmac_gr0 {
unsigned long reg;
struct {
unsigned long : 48;
unsigned long ikp : 1;
unsigned long iimp : 1;
unsigned long ccup : 1;
unsigned long : 6;
unsigned long fc : 7;
};
};
struct s390_kmac_sha2_ctx {
u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
union s390_kmac_gr0 gr0;
u8 buf[MAX_BLOCK_SIZE];
unsigned int buflen;
};
/*
* kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
*/
static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen,
unsigned int blocksize)
{
u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
switch (blocksize) {
case SHA256_BLOCK_SIZE:
*(u64 *)imbl = (u64)buflen * BITS_PER_BYTE;
break;
case SHA512_BLOCK_SIZE:
*(u128 *)imbl = (u128)buflen * BITS_PER_BYTE;
break;
default:
break;
}
}
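/*
 * Example, derived from the code above: hmac(sha256) has a 64 byte
 * block size, so a 100 byte message stores *(u64 *)(param + 32) = 800,
 * the message bit-length placed directly behind the 32 byte cv.
 */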
static int hash_key(const u8 *in, unsigned int inlen,
u8 *digest, unsigned int digestsize)
{
unsigned long func;
union {
struct sha256_paramblock {
u32 h[8];
u64 mbl;
} sha256;
struct sha512_paramblock {
u64 h[8];
u128 mbl;
} sha512;
} __packed param;
#define PARAM_INIT(x, y, z) \
param.sha##x.h[0] = SHA##y ## _H0; \
param.sha##x.h[1] = SHA##y ## _H1; \
param.sha##x.h[2] = SHA##y ## _H2; \
param.sha##x.h[3] = SHA##y ## _H3; \
param.sha##x.h[4] = SHA##y ## _H4; \
param.sha##x.h[5] = SHA##y ## _H5; \
param.sha##x.h[6] = SHA##y ## _H6; \
param.sha##x.h[7] = SHA##y ## _H7; \
param.sha##x.mbl = (z)
switch (digestsize) {
case SHA224_DIGEST_SIZE:
func = CPACF_KLMD_SHA_256;
PARAM_INIT(256, 224, inlen * 8);
break;
case SHA256_DIGEST_SIZE:
func = CPACF_KLMD_SHA_256;
PARAM_INIT(256, 256, inlen * 8);
break;
case SHA384_DIGEST_SIZE:
func = CPACF_KLMD_SHA_512;
PARAM_INIT(512, 384, inlen * 8);
break;
case SHA512_DIGEST_SIZE:
func = CPACF_KLMD_SHA_512;
PARAM_INIT(512, 512, inlen * 8);
break;
default:
return -EINVAL;
}
#undef PARAM_INIT
cpacf_klmd(func, &param, in, inlen);
memcpy(digest, &param, digestsize);
return 0;
}
static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
unsigned int bs = crypto_shash_blocksize(tfm);
memset(tfm_ctx, 0, sizeof(*tfm_ctx));
if (keylen > bs)
return hash_key(key, keylen, tfm_ctx->key, ds);
memcpy(tfm_ctx->key, key, keylen);
return 0;
}
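/*
 * Note on the standard HMAC key handling above: a key longer than the
 * block size is replaced by its hash, a shorter key stays zero-padded,
 * so tfm_ctx->key always holds exactly one block.
 */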
static int s390_hmac_sha2_init(struct shash_desc *desc)
{
struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm);
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->key, bs);
ctx->buflen = 0;
ctx->gr0.reg = 0;
switch (crypto_shash_digestsize(desc->tfm)) {
case SHA224_DIGEST_SIZE:
ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224;
break;
case SHA256_DIGEST_SIZE:
ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256;
break;
case SHA384_DIGEST_SIZE:
ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384;
break;
case SHA512_DIGEST_SIZE:
ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512;
break;
default:
return -EINVAL;
}
return 0;
}
static int s390_hmac_sha2_update(struct shash_desc *desc,
const u8 *data, unsigned int len)
{
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
unsigned int offset, n;
/* check current buffer */
offset = ctx->buflen % bs;
ctx->buflen += len;
if (offset + len < bs)
goto store;
/* process one stored block */
if (offset) {
n = bs - offset;
memcpy(ctx->buf + offset, data, n);
ctx->gr0.iimp = 1;
_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
data += n;
len -= n;
offset = 0;
}
/* process as many blocks as possible */
if (len >= bs) {
n = (len / bs) * bs;
ctx->gr0.iimp = 1;
_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
data += n;
len -= n;
}
store:
/* store incomplete block in buffer */
if (len)
memcpy(ctx->buf + offset, data, len);
return 0;
}
static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
{
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int bs = crypto_shash_blocksize(desc->tfm);
ctx->gr0.iimp = 0;
kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs);
_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs);
memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
return 0;
}
static int s390_hmac_sha2_digest(struct shash_desc *desc,
const u8 *data, unsigned int len, u8 *out)
{
struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
unsigned int ds = crypto_shash_digestsize(desc->tfm);
int rc;
rc = s390_hmac_sha2_init(desc);
if (rc)
return rc;
ctx->gr0.iimp = 0;
kmac_sha2_set_imbl(ctx->param, len,
crypto_shash_blocksize(desc->tfm));
_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len);
memcpy(out, ctx->param, ds);
return 0;
}
#define S390_HMAC_SHA2_ALG(x) { \
.fc = CPACF_KMAC_HMAC_SHA_##x, \
.alg = { \
.init = s390_hmac_sha2_init, \
.update = s390_hmac_sha2_update, \
.final = s390_hmac_sha2_final, \
.digest = s390_hmac_sha2_digest, \
.setkey = s390_hmac_sha2_setkey, \
.descsize = sizeof(struct s390_kmac_sha2_ctx), \
.halg = { \
.digestsize = SHA##x##_DIGEST_SIZE, \
.base = { \
.cra_name = "hmac(sha" #x ")", \
.cra_driver_name = "hmac_s390_sha" #x, \
.cra_blocksize = SHA##x##_BLOCK_SIZE, \
.cra_priority = 400, \
.cra_ctxsize = sizeof(struct s390_hmac_ctx), \
.cra_module = THIS_MODULE, \
}, \
}, \
}, \
}
static struct s390_hmac_alg {
bool registered;
unsigned int fc;
struct shash_alg alg;
} s390_hmac_algs[] = {
S390_HMAC_SHA2_ALG(224),
S390_HMAC_SHA2_ALG(256),
S390_HMAC_SHA2_ALG(384),
S390_HMAC_SHA2_ALG(512),
};
static __always_inline void _s390_hmac_algs_unregister(void)
{
struct s390_hmac_alg *hmac;
int i;
for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) {
hmac = &s390_hmac_algs[i];
if (!hmac->registered)
continue;
crypto_unregister_shash(&hmac->alg);
}
}
static int __init hmac_s390_init(void)
{
struct s390_hmac_alg *hmac;
int i, rc = -ENODEV;
if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
return -ENODEV;
if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) {
hmac = &s390_hmac_algs[i];
if (!cpacf_query_func(CPACF_KMAC, hmac->fc))
continue;
rc = crypto_register_shash(&hmac->alg);
if (rc) {
pr_err("unable to register %s\n",
hmac->alg.halg.base.cra_name);
goto out;
}
hmac->registered = true;
pr_debug("registered %s\n", hmac->alg.halg.base.cra_name);
}
return rc;
out:
_s390_hmac_algs_unregister();
return rc;
}
static void __exit hmac_s390_exit(void)
{
_s390_hmac_algs_unregister();
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, hmac_s390_init);
module_exit(hmac_s390_exit);
MODULE_DESCRIPTION("S390 HMAC driver");
MODULE_LICENSE("GPL");

@@ -133,8 +133,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
if (msleep_interruptible(1000))
return -EINTR;
}
ret = pkey_keyblob2pkey(kb->key, kb->keylen,
pk->protkey, &pk->len, &pk->type);
ret = pkey_key2protkey(kb->key, kb->keylen,
pk->protkey, &pk->len, &pk->type);
}
return ret;

@@ -25,6 +25,7 @@ struct s390_sha_ctx {
u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
u8 buf[SHA_MAX_BLOCK_SIZE];
int func; /* KIMD function to use */
int first_message_part;
};
struct shash_desc;

@@ -21,9 +21,11 @@ static int sha3_256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
if (!test_facility(86)) /* msa 12 */
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_256;
sctx->first_message_part = 1;
return 0;
}
@@ -36,6 +38,7 @@ static int sha3_256_export(struct shash_desc *desc, void *out)
octx->rsiz = sctx->count;
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
octx->partial = sctx->first_message_part;
return 0;
}
@@ -48,6 +51,7 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
@@ -61,6 +65,7 @@ static int sha3_224_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_224;
return 0;
@@ -88,9 +93,11 @@ static int sha3_224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
if (!test_facility(86)) /* msa 12 */
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_224;
sctx->first_message_part = 1;
return 0;
}

@@ -20,9 +20,11 @@ static int sha3_512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
if (!test_facility(86)) /* msa 12 */
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_512;
sctx->first_message_part = 1;
return 0;
}
@@ -37,6 +39,7 @@ static int sha3_512_export(struct shash_desc *desc, void *out)
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
octx->partial = sctx->first_message_part;
return 0;
}
@@ -52,6 +55,7 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
@@ -68,6 +72,7 @@ static int sha3_384_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_384;
return 0;
@@ -97,9 +102,11 @@ static int sha3_384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
if (!test_facility(86)) /* msa 12 */
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_384;
sctx->first_message_part = 1;
return 0;
}

@@ -18,6 +18,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
unsigned int index, n;
int fc;
/* how much is already in the buffer? */
index = ctx->count % bsize;
@@ -26,10 +27,16 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if ((index + len) < bsize)
goto store;
fc = ctx->func;
if (ctx->first_message_part)
fc |= test_facility(86) ? CPACF_KIMD_NIP : 0;
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
cpacf_kimd(fc, ctx->state, ctx->buf, bsize);
ctx->first_message_part = 0;
fc &= ~CPACF_KIMD_NIP;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -38,7 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
n = (len / bsize) * bsize;
cpacf_kimd(ctx->func, ctx->state, data, n);
cpacf_kimd(fc, ctx->state, data, n);
ctx->first_message_part = 0;
data += n;
len -= n;
}
@@ -75,7 +83,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int n;
int mbl_offset;
int mbl_offset, fc;
n = ctx->count % bsize;
bits = ctx->count * 8;
@@ -109,7 +117,11 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
return -EINVAL;
}
cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
fc = ctx->func;
fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0;
if (ctx->first_message_part)
fc |= CPACF_KLMD_NIP;
cpacf_klmd(fc, ctx->state, ctx->buf, n);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));

@@ -78,7 +78,6 @@ struct hypfs_dbfs_file {
struct dentry *dentry;
};
extern void hypfs_dbfs_exit(void);
extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);

@@ -29,8 +29,6 @@ static enum diag204_format diag204_info_type; /* used diag 204 data format */
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
static struct dentry *dbfs_d204_file;
enum diag204_format diag204_get_info_type(void)
{
return diag204_info_type;
@@ -214,16 +212,13 @@ __init int hypfs_diag_init(void)
hypfs_dbfs_create_file(&dbfs_file_d204);
rc = hypfs_diag_fs_init();
if (rc) {
if (rc)
pr_err("The hardware system does not provide all functions required by hypfs\n");
debugfs_remove(dbfs_d204_file);
}
return rc;
}
void hypfs_diag_exit(void)
{
debugfs_remove(dbfs_d204_file);
hypfs_diag_fs_exit();
diag204_free_buffer();
hypfs_dbfs_remove_file(&dbfs_file_d204);

@@ -4,6 +4,7 @@
#define _ASM_S390_ARCH_HWEIGHT_H
#include <linux/types.h>
#include <asm/march.h>
static __always_inline unsigned long popcnt_z196(unsigned long w)
{
@@ -29,9 +30,9 @@ static __always_inline unsigned long popcnt_z15(unsigned long w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 32;
w += w >> 16;
@@ -43,9 +44,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 16;
w += w >> 8;
@@ -56,9 +57,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
static __always_inline unsigned int __arch_hweight16(unsigned int w)
{
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15((unsigned short)w);
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 8;
return w & 0xff;
@@ -68,7 +69,7 @@ static __always_inline unsigned int __arch_hweight16(unsigned int w)
static __always_inline unsigned int __arch_hweight8(unsigned int w)
{
if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES))
if (__is_defined(MARCH_HAS_Z196_FEATURES))
return popcnt_z196((unsigned char)w);
return __sw_hweight8(w);
}
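/*
 * Sketch of the folding above: without the z15 facility, popcnt_z196()
 * yields a per-byte population count, so __arch_hweight32() sums the
 * four byte counts into the low byte via the >>16 and >>8 steps.
 */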

@@ -9,6 +9,7 @@
#define __ARCH_S390_ATOMIC_OPS__
#include <linux/limits.h>
#include <asm/march.h>
static __always_inline int __atomic_read(const atomic_t *v)
{
@@ -56,7 +57,7 @@ static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
}
}
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifdef MARCH_HAS_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline op_type op_name(op_type val, op_type *ptr) \
@@ -107,7 +108,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#else /* MARCH_HAS_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
static __always_inline int op_name(int val, int *ptr) \
@@ -166,7 +167,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* MARCH_HAS_Z196_FEATURES */
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{

@@ -8,13 +8,15 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#include <asm/march.h>
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else

@@ -54,6 +54,8 @@
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
#define CPACF_KM_XTS_128_FULL 0x52
#define CPACF_KM_XTS_256_FULL 0x54
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -121,23 +123,31 @@
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
#define CPACF_KMAC_HMAC_SHA_224 0x70
#define CPACF_KMAC_HMAC_SHA_256 0x71
#define CPACF_KMAC_HMAC_SHA_384 0x72
#define CPACF_KMAC_HMAC_SHA_512 0x73
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
#define CPACF_PCKMO_QUERY 0x00
#define CPACF_PCKMO_ENC_DES_KEY 0x01
#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
#define CPACF_PCKMO_QUERY 0x00
#define CPACF_PCKMO_ENC_DES_KEY 0x01
#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15
#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16
#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76
#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -165,7 +175,40 @@
#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
/*
* Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST)
* instructions
*/
#define CPACF_KIMD_NIP 0x8000
#define CPACF_KLMD_DUFOP 0x4000
#define CPACF_KLMD_NIP 0x8000
/*
* Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION)
* instruction
*/
#define CPACF_KDSA_QUERY 0x00
#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01
#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02
#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03
#define CPACF_KDSA_ECDSA_SIGN_P256 0x09
#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a
#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b
#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11
#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12
#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13
#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20
#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24
#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28
#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c
#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30
#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34
#define CPACF_FC_QUERY 0x00
#define CPACF_FC_QUERY_AUTH_INFO 0x7F
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
typedef struct { unsigned char bytes[256]; } cpacf_qai_t;
/*
* Prototype for a not existing function to produce a link
@@ -175,78 +218,83 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
void __cpacf_bad_opcode(void);
static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
cpacf_mask_t *mask)
u8 *pb, u8 fc)
{
asm volatile(
" la %%r1,%[mask]\n"
" xgr %%r0,%%r0\n"
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]\n"
: [mask] "=R" (*mask)
: [opc] "i" (opc),
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)
: "cc", "r0", "r1");
: "cc", "memory", "r0", "r1");
}
static __always_inline void __cpacf_query_rrf(u32 opc,
u8 r1, u8 r2, u8 r3, u8 m4,
cpacf_mask_t *mask)
static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
u8 m4, u8 *pb, u8 fc)
{
asm volatile(
" la %%r1,%[mask]\n"
" xgr %%r0,%%r0\n"
" la %%r1,%[pb]\n"
" lghi %%r0,%[fc]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
: [mask] "=R" (*mask)
: [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
[r3] "i" (r3), [m4] "i" (m4)
: "cc", "r0", "r1");
: [pb] "=R" (*pb)
: [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
[r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
: "cc", "memory", "r0", "r1");
}
static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb,
u8 fc)
{
switch (opcode) {
case CPACF_KDSA:
__cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc);
break;
case CPACF_KIMD:
__cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc);
break;
case CPACF_KLMD:
__cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc);
break;
case CPACF_KM:
__cpacf_query_rre(CPACF_KM, 2, 4, pb, fc);
break;
case CPACF_KMA:
__cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMAC:
__cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc);
break;
case CPACF_KMC:
__cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc);
break;
case CPACF_KMCTR:
__cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMF:
__cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc);
break;
case CPACF_KMO:
__cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc);
break;
case CPACF_PCC:
__cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc);
break;
case CPACF_PCKMO:
__cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc);
break;
case CPACF_PRNO:
__cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc);
break;
default:
__cpacf_bad_opcode();
}
}
static __always_inline void __cpacf_query(unsigned int opcode,
cpacf_mask_t *mask)
{
switch (opcode) {
case CPACF_KDSA:
__cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
break;
case CPACF_KIMD:
__cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
break;
case CPACF_KLMD:
__cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
break;
case CPACF_KM:
__cpacf_query_rre(CPACF_KM, 2, 4, mask);
break;
case CPACF_KMA:
__cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
break;
case CPACF_KMAC:
__cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
break;
case CPACF_KMC:
__cpacf_query_rre(CPACF_KMC, 2, 4, mask);
break;
case CPACF_KMCTR:
__cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
break;
case CPACF_KMF:
__cpacf_query_rre(CPACF_KMF, 2, 4, mask);
break;
case CPACF_KMO:
__cpacf_query_rre(CPACF_KMO, 2, 4, mask);
break;
case CPACF_PCC:
__cpacf_query_rre(CPACF_PCC, 0, 0, mask);
break;
case CPACF_PCKMO:
__cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
break;
case CPACF_PRNO:
__cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
break;
default:
__cpacf_bad_opcode();
}
__cpacf_query_insn(opcode, mask, CPACF_FC_QUERY);
}
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -269,6 +317,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(57); /* check for MSA5 */
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
case CPACF_KDSA:
return test_facility(155); /* check for MSA9 */
default:
__cpacf_bad_opcode();
return 0;
@@ -276,14 +326,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
}
/**
* cpacf_query() - check if a specific CPACF function is available
* cpacf_query() - Query the function code mask for this CPACF opcode
* @opcode: the opcode of the crypto instruction
* @func: the function code to test for
* @mask: ptr to struct cpacf_mask_t
*
* Executes the query function for the given crypto instruction @opcode
* and checks if @func is available
*
* Returns 1 if @func is available for @opcode, 0 otherwise
* On success 1 is returned and the mask is filled with the function
* code mask for this CPACF opcode, otherwise 0 is returned.
*/
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
@@ -300,7 +351,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
static __always_inline int cpacf_query_func(unsigned int opcode,
unsigned int func)
{
cpacf_mask_t mask;
@ -309,6 +361,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
return 0;
}
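For illustration, a minimal usage sketch of the query interface above (a hypothetical caller, not part of this patch; CPACF_KM and CPACF_KM_AES_256 are existing defines from this header):

/* Hypothetical sketch: probe whether KM supports AES-256. */
static int example_have_km_aes256(void)
{
	cpacf_mask_t mask;

	/* cpacf_query() fills @mask with the function code mask for KM. */
	if (!cpacf_query(CPACF_KM, &mask))
		return 0;
	/* cpacf_test_func() tests a single function code bit in the mask. */
	return cpacf_test_func(&mask, CPACF_KM_AES_256);
}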
static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
{
__cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO);
}
/**
* cpacf_qai() - Get the query authentication information for a CPACF opcode
* @opcode: the opcode of the crypto instruction
* @qai: pointer to struct cpacf_qai_t
*
* Executes the query authentication information function for the given
* crypto instruction @opcode.
*
* On success 1 is returned and the mask is filled with the query authentication
* information for this CPACF opcode, otherwise 0 is returned.
*/
static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
{
if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) {
__cpacf_qai(opcode, qai);
return 1;
}
memset(qai, 0, sizeof(*qai));
return 0;
}
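A similar hedged sketch for the new query authentication information interface (hypothetical caller; on machines without the MSA 13 QAI function the struct is simply zeroed):

/* Hypothetical sketch: fetch the QAI data for the KM instruction. */
static void example_show_km_qai(void)
{
	cpacf_qai_t qai;

	if (cpacf_qai(CPACF_KM, &qai))
		pr_info("KM query authentication information available\n");
	else
		pr_info("KM QAI not supported, qai zeroed\n");
}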
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
@ -391,7 +469,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
@ -416,7 +494,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
"0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@ -424,10 +502,41 @@ static inline void cpacf_klmd(unsigned long func, void *param,
: "cc", "memory", "0", "1");
}
/**
* _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
* instruction and updates flags in gr0
* @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
static inline int _cpacf_kmac(unsigned long *gr0, void *param,
const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
" lgr 0,%[r0]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
" lgr %[r0],0\n"
: [r0] "+d" (*gr0), [src] "+&d" (s.pair)
: [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
return src_len - s.odd;
}
/**
* cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
* instruction
* @func: the function code passed to KM; see CPACF_KMAC_xxx defines
* instruction
* @func: function code passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
@ -437,21 +546,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
static inline int cpacf_kmac(unsigned long func, void *param,
const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
return src_len - s.odd;
return _cpacf_kmac(&func, param, src, src_len);
}
/**


@ -202,8 +202,9 @@ union ctlreg0 {
unsigned long : 3;
unsigned long ccc : 1; /* Cryptography counter control */
unsigned long pec : 1; /* PAI extension control */
unsigned long : 17;
unsigned long : 3;
unsigned long : 15;
unsigned long wti : 1; /* Warning-track */
unsigned long : 4;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */


@ -38,6 +38,7 @@ enum diag_stat_enum {
DIAG_STAT_X308,
DIAG_STAT_X318,
DIAG_STAT_X320,
DIAG_STAT_X49C,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@ -363,4 +364,12 @@ void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
/* diag 49c subcodes */
enum diag49c_sc {
DIAG49C_SUBC_ACK = 0,
DIAG49C_SUBC_REG = 1
};
int diag49c(unsigned long subcode);
#endif /* _ASM_S390_DIAG_H */


@ -6,8 +6,23 @@
#define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__
#include <asm/stacktrace.h>
unsigned long return_address(unsigned int n);
static __always_inline unsigned long return_address(unsigned int n)
{
struct stack_frame *sf;
if (!n)
return (unsigned long)__builtin_return_address(0);
sf = (struct stack_frame *)current_frame_address();
do {
sf = (struct stack_frame *)sf->back_chain;
if (!sf)
return 0;
} while (--n);
return sf->gprs[8];
}
#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
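A hedged sketch of how this inline is meant to be used (hypothetical caller in regular kernel C; n counts stack frames above the current one):

/* Hypothetical sketch: print the caller one level up the back chain. */
static void example_show_caller(void)
{
	/* n == 0 maps to __builtin_return_address(0); n > 0 walks the
	 * stack frame back chain and reads the saved r14 (gprs[8]). */
	unsigned long addr = ftrace_return_address(1);

	pr_debug("called from %pS\n", (void *)addr);
}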


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2024
*/
#ifndef _ASM_HIPERDISPATCH_H
#define _ASM_HIPERDISPATCH_H
void hd_reset_state(void);
void hd_add_core(int cpu);
void hd_disable_hiperdispatch(void);
int hd_enable_hiperdispatch(void);
#endif /* _ASM_HIPERDISPATCH_H */


@ -47,6 +47,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_FTP,
IRQEXT_WTI,
IRQIO_CIO,
IRQIO_DAS,
IRQIO_C15,
@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
IRQ_SUBCLASS_WARNING_TRACK = 33,
};
#define CR0_IRQ_SUBCLASS_MASK \


@ -98,8 +98,8 @@ struct lowcore {
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
__u64 save_area_sync[8]; /* 0x0200 */
__u64 save_area_async[8]; /* 0x0240 */
__u64 save_area[8]; /* 0x0200 */
__u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
__u64 pcpu; /* 0x0288 */


@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_S390_MARCH_H
#define __ASM_S390_MARCH_H
#include <linux/kconfig.h>
#define MARCH_HAS_Z10_FEATURES 1
#ifndef __DECOMPRESSOR
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define MARCH_HAS_Z196_FEATURES 1
#endif
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
#define MARCH_HAS_ZEC12_FEATURES 1
#endif
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
#define MARCH_HAS_Z13_FEATURES 1
#endif
#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
#define MARCH_HAS_Z14_FEATURES 1
#endif
#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
#define MARCH_HAS_Z15_FEATURES 1
#endif
#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES
#define MARCH_HAS_Z16_FEATURES 1
#endif
#endif /* __DECOMPRESSOR */
#endif /* __ASM_S390_MARCH_H */


@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
#include <asm/march.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
@ -50,7 +51,7 @@
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifndef MARCH_HAS_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
@ -61,7 +62,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#else /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
@ -129,7 +130,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \


@ -48,30 +48,6 @@ struct perf_sf_sde_regs {
unsigned long reserved:63; /* reserved */
};
/* Perf PMU definitions for the counter facility */
#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
/* Perf PMU definitions for the sampling facility */
#define PERF_CPUM_SF_MAX_CTR 2
#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
#define REG_OVERFLOW 1
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \


@ -22,7 +22,7 @@
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
int pkey_keyblob2pkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
int pkey_key2protkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */


@ -5,8 +5,9 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
#include <asm/march.h>
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifdef MARCH_HAS_Z196_FEATURES
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
@ -75,7 +76,7 @@ static __always_inline bool should_resched(int preempt_offset)
preempt_offset);
}
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#else /* MARCH_HAS_Z196_FEATURES */
#define PREEMPT_ENABLED (0)
@ -123,7 +124,7 @@ static __always_inline bool should_resched(int preempt_offset)
tif_need_resched());
}
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#endif /* MARCH_HAS_Z196_FEATURES */
#define init_task_preempt_count(p) do { } while (0)
/* Deferred to CPU bringup time */


@ -44,6 +44,7 @@ struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
unsigned long flags; /* per CPU flags */
unsigned long capacity; /* cpu capacity for scheduler */
signed char state; /* physical cpu state */
signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */


@ -72,6 +72,7 @@ struct sclp_info {
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
unsigned char has_wti : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned char has_64bscao : 1;


@ -34,6 +34,7 @@
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@ -95,6 +96,7 @@ extern unsigned long mio_wb_bit_mask;
#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
@ -115,6 +117,8 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
void register_early_console(void);
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else


@ -12,6 +12,7 @@
#include <asm/processor.h>
#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
@ -34,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_cpu_set_capacity(int cpu, unsigned long val);
extern void smp_set_core_capacity(int cpu, unsigned long val);
extern unsigned long smp_cpu_get_capacity(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);


@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
#define SD_BOOK_INIT SD_CPU_INIT
#ifdef CONFIG_NUMA


@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Tracepoint header for hiperdispatch
*
* Copyright IBM Corp. 2024
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM s390
#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_S390_HIPERDISPATCH_H
#include <linux/tracepoint.h>
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH asm/trace
#define TRACE_INCLUDE_FILE hiperdispatch
TRACE_EVENT(s390_hd_work_fn,
TP_PROTO(int steal_time_percentage,
int entitled_core_count,
int highcap_core_count),
TP_ARGS(steal_time_percentage,
entitled_core_count,
highcap_core_count),
TP_STRUCT__entry(__field(int, steal_time_percentage)
__field(int, entitled_core_count)
__field(int, highcap_core_count)),
TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage;
__entry->entitled_core_count = entitled_core_count;
__entry->highcap_core_count = highcap_core_count;),
TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d",
__entry->steal_time_percentage,
__entry->entitled_core_count,
__entry->highcap_core_count)
);
TRACE_EVENT(s390_hd_rebuild_domains,
TP_PROTO(int current_highcap_core_count,
int new_highcap_core_count),
TP_ARGS(current_highcap_core_count,
new_highcap_core_count),
TP_STRUCT__entry(__field(int, current_highcap_core_count)
__field(int, new_highcap_core_count)),
TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count;
__entry->new_highcap_core_count = new_highcap_core_count),
TP_printk("change highcap_core_count: %u -> %u",
__entry->current_highcap_core_count,
__entry->new_highcap_core_count)
);
#endif /* _TRACE_S390_HIPERDISPATCH_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -41,6 +41,10 @@
#define PKEY_KEYTYPE_ECC_P521 7
#define PKEY_KEYTYPE_ECC_ED25519 8
#define PKEY_KEYTYPE_ECC_ED448 9
#define PKEY_KEYTYPE_AES_XTS_128 10
#define PKEY_KEYTYPE_AES_XTS_256 11
#define PKEY_KEYTYPE_HMAC_512 12
#define PKEY_KEYTYPE_HMAC_1024 13
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
@ -50,6 +54,7 @@ enum pkey_key_type {
PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
PKEY_TYPE_EP11_AES = (__u32) 6,
PKEY_TYPE_EP11_ECC = (__u32) 7,
PKEY_TYPE_PROTKEY = (__u32) 8,
};
/* the newer ioctls use a pkey_key_size enum for key size information */


@ -36,22 +36,23 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o
extra-y += vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
obj-$(CONFIG_SYSFS) += cpacf.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o


@ -112,8 +112,7 @@ int main(void)
OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
/* software defined lowcore locations 0x200 - 0xdff*/
OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA, lowcore, save_area);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_PCPU, lowcore, pcpu);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);

arch/s390/kernel/cpacf.c (new file)

@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "cpacf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <asm/cpacf.h>
#define CPACF_QUERY(name, instruction) \
static ssize_t name##_query_raw_read(struct file *fp, \
struct kobject *kobj, \
struct bin_attribute *attr, \
char *buf, loff_t offs, \
size_t count) \
{ \
cpacf_mask_t mask; \
\
if (!cpacf_query(CPACF_##instruction, &mask)) \
return -EOPNOTSUPP; \
return memory_read_from_buffer(buf, count, &offs, &mask, sizeof(mask)); \
} \
static BIN_ATTR_RO(name##_query_raw, sizeof(cpacf_mask_t))
CPACF_QUERY(km, KM);
CPACF_QUERY(kmc, KMC);
CPACF_QUERY(kimd, KIMD);
CPACF_QUERY(klmd, KLMD);
CPACF_QUERY(kmac, KMAC);
CPACF_QUERY(pckmo, PCKMO);
CPACF_QUERY(kmf, KMF);
CPACF_QUERY(kmctr, KMCTR);
CPACF_QUERY(kmo, KMO);
CPACF_QUERY(pcc, PCC);
CPACF_QUERY(prno, PRNO);
CPACF_QUERY(kma, KMA);
CPACF_QUERY(kdsa, KDSA);
#define CPACF_QAI(name, instruction) \
static ssize_t name##_query_auth_info_raw_read( \
struct file *fp, struct kobject *kobj, \
struct bin_attribute *attr, char *buf, loff_t offs, \
size_t count) \
{ \
cpacf_qai_t qai; \
\
if (!cpacf_qai(CPACF_##instruction, &qai)) \
return -EOPNOTSUPP; \
return memory_read_from_buffer(buf, count, &offs, &qai, \
sizeof(qai)); \
} \
static BIN_ATTR_RO(name##_query_auth_info_raw, sizeof(cpacf_qai_t))
CPACF_QAI(km, KM);
CPACF_QAI(kmc, KMC);
CPACF_QAI(kimd, KIMD);
CPACF_QAI(klmd, KLMD);
CPACF_QAI(kmac, KMAC);
CPACF_QAI(pckmo, PCKMO);
CPACF_QAI(kmf, KMF);
CPACF_QAI(kmctr, KMCTR);
CPACF_QAI(kmo, KMO);
CPACF_QAI(pcc, PCC);
CPACF_QAI(prno, PRNO);
CPACF_QAI(kma, KMA);
CPACF_QAI(kdsa, KDSA);
static struct bin_attribute *cpacf_attrs[] = {
&bin_attr_km_query_raw,
&bin_attr_kmc_query_raw,
&bin_attr_kimd_query_raw,
&bin_attr_klmd_query_raw,
&bin_attr_kmac_query_raw,
&bin_attr_pckmo_query_raw,
&bin_attr_kmf_query_raw,
&bin_attr_kmctr_query_raw,
&bin_attr_kmo_query_raw,
&bin_attr_pcc_query_raw,
&bin_attr_prno_query_raw,
&bin_attr_kma_query_raw,
&bin_attr_kdsa_query_raw,
&bin_attr_km_query_auth_info_raw,
&bin_attr_kmc_query_auth_info_raw,
&bin_attr_kimd_query_auth_info_raw,
&bin_attr_klmd_query_auth_info_raw,
&bin_attr_kmac_query_auth_info_raw,
&bin_attr_pckmo_query_auth_info_raw,
&bin_attr_kmf_query_auth_info_raw,
&bin_attr_kmctr_query_auth_info_raw,
&bin_attr_kmo_query_auth_info_raw,
&bin_attr_pcc_query_auth_info_raw,
&bin_attr_prno_query_auth_info_raw,
&bin_attr_kma_query_auth_info_raw,
&bin_attr_kdsa_query_auth_info_raw,
NULL,
};
static const struct attribute_group cpacf_attr_grp = {
.name = "cpacf",
.bin_attrs = cpacf_attrs,
};
static int __init cpacf_init(void)
{
struct device *cpu_root;
int rc = 0;
cpu_root = bus_get_dev_root(&cpu_subsys);
if (cpu_root) {
rc = sysfs_create_group(&cpu_root->kobj, &cpacf_attr_grp);
put_device(cpu_root);
}
return rc;
}
device_initcall(cpacf_init);
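The raw masks exported here can be read from userspace, which is what tools like cpacfinfo build on. A hedged sketch (the sysfs path is derived from the "cpacf" attribute group under the cpu subsystem root; cpacf_mask_t is a 16-byte bit mask):

/* Hypothetical userspace sketch: hex-dump the raw KM query mask. */
#include <stdio.h>

int main(void)
{
	unsigned char mask[16];
	FILE *f = fopen("/sys/devices/system/cpu/cpacf/km_query_raw", "rb");

	if (!f) {
		perror("open");
		return 1;
	}
	if (fread(mask, 1, sizeof(mask), f) != sizeof(mask)) {
		perror("read");
		fclose(f);
		return 1;
	}
	fclose(f);
	for (unsigned int i = 0; i < sizeof(mask); i++)
		printf("%02x", mask[i]);
	printf("\n");
	return 0;
}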


@ -52,6 +52,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
[DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
[DIAG_STAT_X49C] = { .code = 0x49c, .name = "Warning-Track Interruption" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
@ -303,3 +304,19 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode)
return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode);
}
EXPORT_SYMBOL(diag26c);
int diag49c(unsigned long subcode)
{
int rc;
diag_stat_inc(DIAG_STAT_X49C);
asm volatile(
" diag %[subcode],0,0x49c\n"
" ipm %[rc]\n"
" srl %[rc],28\n"
: [rc] "=d" (rc)
: [subcode] "d" (subcode)
: "cc");
return rc;
}
EXPORT_SYMBOL(diag49c);
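A hedged sketch of how a warning-track handler might use this (the real consumer is the new wti.c, which is not shown in this excerpt; the subcodes come from the diag49c_sc enum added above):

/* Hypothetical sketch: acknowledge a warning-track interruption so the
 * hypervisor knows this CPU has reached a preemptable state in time. */
static void example_wti_ack(void)
{
	if (diag49c(DIAG49C_SUBC_ACK))
		pr_warn_ratelimited("diag 49c acknowledge failed\n");
}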


@ -122,6 +122,7 @@ enum {
U8_32, /* 8 bit unsigned value starting at 32 */
U12_16, /* 12 bit unsigned value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
U16_20, /* 16 bit unsigned value starting at 20 */
U16_32, /* 16 bit unsigned value starting at 32 */
U32_16, /* 32 bit unsigned value starting at 16 */
VX_12, /* Vector index register starting at position 12 */
@ -184,6 +185,7 @@ static const struct s390_operand operands[] = {
[U8_32] = { 8, 32, 0 },
[U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
[U16_20] = { 16, 20, 0 },
[U16_32] = { 16, 32, 0 },
[U32_16] = { 32, 16, 0 },
[VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
@ -257,7 +259,6 @@ static const unsigned char formats[][6] = {
[INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
[INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
[INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
@ -300,14 +301,17 @@ static const unsigned char formats[][6] = {
[INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
[INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
[INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
[INSTR_VRI_VV0UU] = { V_8, V_12, U8_28, U4_24, 0, 0 },
[INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
[INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
[INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
[INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
[INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
[INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
[INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
[INSTR_VRI_VVV0UV] = { V_8, V_12, V_16, V_32, U8_24, 0 },
[INSTR_VRR_0V0U] = { V_12, U16_20, 0, 0, 0, 0 },
[INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
[INSTR_VRR_0VVU] = { V_12, V_16, U16_20, 0, 0, 0 },
[INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 },
[INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
[INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
@ -455,21 +459,21 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
if (separator)
ptr += sprintf(ptr, "%c", separator);
if (operand->flags & OPERAND_GPR)
ptr += sprintf(ptr, "%%r%i", value);
ptr += sprintf(ptr, "%%r%u", value);
else if (operand->flags & OPERAND_FPR)
ptr += sprintf(ptr, "%%f%i", value);
ptr += sprintf(ptr, "%%f%u", value);
else if (operand->flags & OPERAND_AR)
ptr += sprintf(ptr, "%%a%i", value);
ptr += sprintf(ptr, "%%a%u", value);
else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value);
ptr += sprintf(ptr, "%%c%u", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
ptr += sprintf(ptr, "%%v%u", value);
else if (operand->flags & OPERAND_PCREL) {
void *pcrel = (void *)((int)value + addr);
ptr += sprintf(ptr, "%px", pcrel);
} else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
ptr += sprintf(ptr, "%i", (int)value);
else
ptr += sprintf(ptr, "%u", value);
if (operand->flags & OPERAND_DISP)


@ -7,6 +7,7 @@
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/sched/debug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@ -175,20 +176,45 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
void __do_early_pgm_check(struct pt_regs *regs)
void __init __do_early_pgm_check(struct pt_regs *regs)
{
if (!fixup_exception(regs))
disabled_wait();
struct lowcore *lc = get_lowcore();
unsigned long ip;
regs->int_code = lc->pgm_int_code;
regs->int_parm_long = lc->trans_exc_code;
ip = __rewind_psw(regs->psw, regs->int_code >> 16);
/* Monitor Event? Might be a warning */
if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) {
if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN)
return;
}
if (fixup_exception(regs))
return;
/*
* Unhandled exception - system cannot continue but try to get some
* helpful messages to the console. Use early_printk() to print
* some basic information in case it is too early for printk().
*/
register_early_console();
early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n",
regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
show_regs(regs);
disabled_wait();
}
static noinline __init void setup_lowcore_early(void)
{
struct lowcore *lc = get_lowcore();
psw_t psw;
psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_KERNEL_BITS;
get_lowcore()->program_new_psw = psw;
get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
lc->program_new_psw = psw;
lc->preempt_count = INIT_PREEMPT_COUNT;
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
}
static __init void detect_diag9c(void)
@ -242,6 +268,8 @@ static __init void detect_machine_facilities(void)
}
if (test_facility(194))
get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
if (test_facility(85))
get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
}
static inline void save_vector_registers(void)


@ -6,6 +6,7 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/setup.h>
#include <asm/sclp.h>
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
@ -20,6 +21,16 @@ static struct console sclp_early_console = {
.index = -1,
};
void __init register_early_console(void)
{
if (early_console)
return;
if (!sclp.has_linemode && !sclp.has_vt220)
return;
early_console = &sclp_early_console;
register_console(early_console);
}
static int __init setup_early_printk(char *buf)
{
if (early_console)
@ -27,10 +38,7 @@ static int __init setup_early_printk(char *buf)
/* Accept only "earlyprintk" and "earlyprintk=sclp" */
if (buf && !str_has_prefix(buf, "sclp"))
return 0;
if (!sclp.has_linemode && !sclp.has_vt220)
return 0;
early_console = &sclp_early_console;
register_console(early_console);
register_early_console();
return 0;
}
early_param("earlyprintk", setup_early_printk);


@ -1,23 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2006, 2007
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
SYM_CODE_START(early_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
lgr %r2,%r11
brasl %r14,__do_early_pgm_check
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW
SYM_CODE_END(early_pgm_check_handler)


@ -264,7 +264,7 @@ EXPORT_SYMBOL(sie_exit)
*/
SYM_CODE_START(system_call)
STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@ -287,7 +287,7 @@ SYM_CODE_START(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13)
mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
MBEAR %r2,%r13
lgr %r3,%r14
brasl %r14,__do_syscall
@ -323,7 +323,7 @@ SYM_CODE_END(ret_from_fork)
*/
SYM_CODE_START(pgm_check_handler)
STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@ -338,16 +338,16 @@ SYM_CODE_START(pgm_check_handler)
jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13
2: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f
CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3: lg %r15,__LC_KERNEL_STACK(%r13)
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
@ -398,7 +398,7 @@ SYM_CODE_END(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC
STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stckf __LC_INT_CLOCK(%r13)
stpt __LC_SYS_ENTER_TIMER(%r13)
@ -414,7 +414,7 @@ SYM_CODE_START(\name)
BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13
0: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
@ -432,7 +432,7 @@ SYM_CODE_START(\name)
xgr %r7,%r7
xgr %r10,%r10
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
MBEAR %r11,%r13
stmg %r8,%r9,__PT_PSW(%r11)
lgr %r2,%r11 # pass pointer to pt_regs
@ -599,6 +599,24 @@ SYM_CODE_START(restart_int_handler)
3: j 3b
SYM_CODE_END(restart_int_handler)
__INIT
SYM_CODE_START(early_pgm_check_handler)
STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
lgr %r2,%r11
brasl %r14,__do_early_pgm_check
mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
__FINIT
.section .kprobes.text, "ax"
#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)


@ -50,10 +50,6 @@ struct ftrace_insn {
s32 disp;
} __packed;
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
const char *tstart, *tend;
@ -73,19 +69,20 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
bool ftrace_need_init_nop(void)
{
return true;
return !MACHINE_HAS_SEQ_INSN;
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
__ftrace_hotpatch_trampolines_start;
static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 };
static struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_hotpatch_trampoline **next_trampoline;
struct ftrace_hotpatch_trampoline *trampolines_end;
struct ftrace_hotpatch_trampoline tmp;
struct ftrace_insn *insn;
struct ftrace_insn old;
const char *shared;
s32 disp;
@ -99,7 +96,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
if (mod) {
next_trampoline = &mod->arch.next_trampoline;
trampolines_end = mod->arch.trampolines_end;
shared = ftrace_plt;
}
#endif
@ -107,8 +103,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return -ENOMEM;
trampoline = (*next_trampoline)++;
if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
return -EFAULT;
/* Check for the compiler-generated fentry nop (brcl 0, .). */
if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old))))
return -EINVAL;
/* Generate the trampoline. */
@ -144,8 +142,35 @@ static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrac
return trampoline;
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
static inline struct ftrace_insn
ftrace_generate_branch_insn(unsigned long ip, unsigned long target)
{
/* brasl r0,target or brcl 0,0 */
return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004,
.disp = target ? (target - ip) / 2 : 0 };
}
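To make the encoding concrete, a short worked example with assumed addresses:

/* Illustration (assumed values): patching the site at ip == 0x3000 to
 * call target == 0x5000 yields opc == 0xc005 (brasl %r0,...) and
 * disp == (0x5000 - 0x3000) / 2 == 0x1000, since brasl counts its
 * signed 32-bit displacement in halfwords. A zero target encodes the
 * brcl 0,0 nop instead. */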
static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target,
unsigned long target)
{
struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target);
struct ftrace_insn new = ftrace_generate_branch_insn(ip, target);
struct ftrace_insn old;
if (!IS_ALIGNED(ip, 8))
return -EINVAL;
if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old)))
return -EFAULT;
/* Verify that the code to be replaced matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
s390_kernel_write((void *)ip, &new, sizeof(new));
return 0;
}
static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
unsigned long old_addr,
unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
u64 old;
@ -161,6 +186,15 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
if (MACHINE_HAS_SEQ_INSN)
return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
else
return ftrace_modify_trampoline_call(rec, old_addr, addr);
}
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
u16 old;
@ -179,11 +213,14 @@ static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
/* Expect brcl 0xf,... */
return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
/* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */
if (MACHINE_HAS_SEQ_INSN)
return ftrace_patch_branch_insn(rec->ip, addr, 0);
else
return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
@ -195,6 +232,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
if (MACHINE_HAS_SEQ_INSN)
return ftrace_patch_branch_insn(rec->ip, 0, addr);
else
return ftrace_make_trampoline_call(rec, addr);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
@ -215,25 +260,6 @@ void ftrace_arch_code_modify_post_process(void)
text_poke_sync_lock();
}
#ifdef CONFIG_MODULES
static int __init ftrace_plt_init(void)
{
const char *start, *end;
ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
if (!ftrace_plt)
panic("cannot allocate ftrace plt\n");
start = ftrace_shared_hotpatch_trampoline(&end);
memcpy(ftrace_plt, start, end - start);
set_memory_rox((unsigned long)ftrace_plt, 1);
return 0;
}
device_initcall(ftrace_plt_init);
#endif /* CONFIG_MODULES */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it onto the stack of return addresses
@ -264,26 +290,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
int rc;
/* Expect brc 0xf,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
}
int ftrace_disable_ftrace_graph_caller(void)
{
int rc;
/* Expect brc 0x0,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


@ -18,7 +18,5 @@ extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
extern const char ftrace_plt_template[];
extern const char ftrace_plt_template_end[];
#endif /* _FTRACE_H */


@ -0,0 +1,430 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "hd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
/*
* Hiperdispatch:
* Dynamically calculates the optimum number of high capacity COREs
* by considering the state the system is in. When hiperdispatch decides
* that a capacity update is necessary, it schedules a topology update.
* During topology updates the CPU capacities are always re-adjusted.
*
* There are two places where CPU capacities are accessed within
* hiperdispatch:
* -> hiperdispatch's recurring work function reads CPU capacities to
* determine the high capacity CPU count.
* -> during a topology update hiperdispatch's adjustment function
* updates CPU capacities.
* These two can run on different CPUs in parallel, which can cause
* hiperdispatch to make wrong decisions. This can potentially cause
* some overhead by leading to extra rebuild_sched_domains() calls
* for correction. Access to capacities within hiperdispatch has to be
* serialized to prevent this overhead.
*
* Hiperdispatch decision making revolves around steal time.
* HD_STEAL_THRESHOLD value is taken as reference. Whenever steal time
* crosses the threshold value hiperdispatch falls back to giving high
* capacities to entitled CPUs. When steal time drops below the
* threshold boundary, hiperdispatch utilizes all CPUs by giving all
* of them high capacity.
*
* The theory behind HD_STEAL_THRESHOLD is related to SMP thread
* performance. Comparing the throughput of:
* - a single CORE, with N threads, running N tasks
* - N separate COREs running N tasks,
* using individual COREs for individual tasks yields better
* performance. This performance difference is roughly ~30% (it can vary
* between machine generations).
*
* Hiperdispatch tries to hint the scheduler to use individual COREs for
* each task, as long as steal time on those COREs is less than 30%,
* thereby delaying the throughput loss caused by using SMP threads.
*/
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/kstrtox.h>
#include <linux/ktime.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <asm/hiperdispatch.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/topology.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/hiperdispatch.h>
#define HD_DELAY_FACTOR (4)
#define HD_DELAY_INTERVAL (HZ / 4)
#define HD_STEAL_THRESHOLD 30
#define HD_STEAL_AVG_WEIGHT 16
static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */
static cpumask_t hd_vmvl_cpumask; /* Mask containing vertical medium and low CPUs */
static int hd_high_capacity_cores; /* Current CORE count with high capacity */
static int hd_entitled_cores; /* Total vertical high and medium CORE count */
static int hd_online_cores; /* Current online CORE count */
static unsigned long hd_previous_steal; /* Previous iteration's CPU steal timer total */
static unsigned long hd_high_time; /* Total time spent while all cpus have high capacity */
static unsigned long hd_low_time; /* Total time spent while vl cpus have low capacity */
static atomic64_t hd_adjustments; /* Total occurrence count of hiperdispatch adjustments */
static unsigned int hd_steal_threshold = HD_STEAL_THRESHOLD;
static unsigned int hd_delay_factor = HD_DELAY_FACTOR;
static int hd_enabled;
static void hd_capacity_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn);
static int hd_set_hiperdispatch_mode(int enable)
{
if (!MACHINE_HAS_TOPOLOGY)
enable = 0;
if (hd_enabled == enable)
return 0;
hd_enabled = enable;
return 1;
}
void hd_reset_state(void)
{
cpumask_clear(&hd_vl_coremask);
cpumask_clear(&hd_vmvl_cpumask);
hd_entitled_cores = 0;
hd_online_cores = 0;
}
void hd_add_core(int cpu)
{
const struct cpumask *siblings;
int polarization;
hd_online_cores++;
polarization = smp_cpu_get_polarization(cpu);
siblings = topology_sibling_cpumask(cpu);
switch (polarization) {
case POLARIZATION_VH:
hd_entitled_cores++;
break;
case POLARIZATION_VM:
hd_entitled_cores++;
cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
break;
case POLARIZATION_VL:
cpumask_set_cpu(cpu, &hd_vl_coremask);
cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
break;
}
}
/* Serialize update and read operations of debug counters. */
static DEFINE_MUTEX(hd_counter_mutex);
static void hd_update_times(void)
{
static ktime_t prev;
ktime_t now;
/*
* Check if hiperdispatch is active; if not, set prev to 0.
* This makes it possible to differentiate the first update iteration after
* enabling hiperdispatch.
*/
if (hd_entitled_cores == 0 || hd_enabled == 0) {
prev = ktime_set(0, 0);
return;
}
now = ktime_get();
if (ktime_after(prev, 0)) {
if (hd_high_capacity_cores == hd_online_cores)
hd_high_time += ktime_ms_delta(now, prev);
else
hd_low_time += ktime_ms_delta(now, prev);
}
prev = now;
}
static void hd_update_capacities(void)
{
int cpu, upscaling_cores;
unsigned long capacity;
upscaling_cores = hd_high_capacity_cores - hd_entitled_cores;
capacity = upscaling_cores > 0 ? CPU_CAPACITY_HIGH : CPU_CAPACITY_LOW;
hd_high_capacity_cores = hd_entitled_cores;
for_each_cpu(cpu, &hd_vl_coremask) {
smp_set_core_capacity(cpu, capacity);
if (capacity != CPU_CAPACITY_HIGH)
continue;
hd_high_capacity_cores++;
upscaling_cores--;
if (upscaling_cores == 0)
capacity = CPU_CAPACITY_LOW;
}
}
void hd_disable_hiperdispatch(void)
{
cancel_delayed_work_sync(&hd_capacity_work);
hd_high_capacity_cores = hd_online_cores;
hd_previous_steal = 0;
}
int hd_enable_hiperdispatch(void)
{
mutex_lock(&hd_counter_mutex);
hd_update_times();
mutex_unlock(&hd_counter_mutex);
if (hd_enabled == 0)
return 0;
if (hd_entitled_cores == 0)
return 0;
if (hd_online_cores <= hd_entitled_cores)
return 0;
mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
hd_update_capacities();
return 1;
}
static unsigned long hd_steal_avg(unsigned long new)
{
static unsigned long steal;
steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT;
return steal;
}
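A worked illustration of this weighted average (assumed readings):

/* With HD_STEAL_AVG_WEIGHT == 16 and a constant new reading of 32, a
 * zero-initialized average evolves as 0 -> 2 -> 3 -> 4 -> 5 -> ...,
 * i.e. an exponential moving average that needs several
 * HD_DELAY_INTERVAL iterations to cross a threshold, smoothing out
 * short steal-time spikes. */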
static unsigned long hd_calculate_steal_percentage(void)
{
unsigned long time_delta, steal_delta, steal, percentage;
static ktime_t prev;
int cpus, cpu;
ktime_t now;
cpus = 0;
steal = 0;
percentage = 0;
for_each_cpu(cpu, &hd_vmvl_cpumask) {
steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
cpus++;
}
/*
* If there are no vertical medium or low CPUs, steal time
* is 0, as vertical high CPUs shouldn't experience steal time.
*/
if (cpus == 0)
return percentage;
now = ktime_get();
time_delta = ktime_to_ns(ktime_sub(now, prev));
if (steal > hd_previous_steal && hd_previous_steal != 0) {
steal_delta = (steal - hd_previous_steal) * 100 / time_delta;
percentage = steal_delta / cpus;
}
hd_previous_steal = steal;
prev = now;
return percentage;
}
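For intuition, a worked example with assumed numbers:

/* If the vertical medium/low CPUs accumulated 150ms of combined steal
 * time over a 250ms interval across 3 CPUs, the result is
 * 150 * 100 / 250 / 3 == 20, i.e. an average of 20% steal per CPU,
 * below the default HD_STEAL_THRESHOLD of 30. */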
static void hd_capacity_work_fn(struct work_struct *work)
{
unsigned long steal_percentage, new_cores;
mutex_lock(&smp_cpu_state_mutex);
/*
* If the online core count is less than or equal to the entitled core
* count, hiperdispatch does not need to make any adjustments; call a
* topology update to disable hiperdispatch.
* Normally this check is handled on topology update, but during CPU
* hot-unplug, topology and cpu mask updates are done in reverse
* order, causing hd_enable_hiperdispatch() to see stale data.
*/
if (hd_online_cores <= hd_entitled_cores) {
topology_schedule_update();
mutex_unlock(&smp_cpu_state_mutex);
return;
}
steal_percentage = hd_steal_avg(hd_calculate_steal_percentage());
if (steal_percentage < hd_steal_threshold)
new_cores = hd_online_cores;
else
new_cores = hd_entitled_cores;
if (hd_high_capacity_cores != new_cores) {
trace_s390_hd_rebuild_domains(hd_high_capacity_cores, new_cores);
hd_high_capacity_cores = new_cores;
atomic64_inc(&hd_adjustments);
topology_schedule_update();
}
trace_s390_hd_work_fn(steal_percentage, hd_entitled_cores, hd_high_capacity_cores);
mutex_unlock(&smp_cpu_state_mutex);
schedule_delayed_work(&hd_capacity_work, HD_DELAY_INTERVAL);
}
static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int hiperdispatch;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &hiperdispatch,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
hiperdispatch = hd_enabled;
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
mutex_lock(&smp_cpu_state_mutex);
if (hd_set_hiperdispatch_mode(hiperdispatch))
topology_schedule_update();
mutex_unlock(&smp_cpu_state_mutex);
return 0;
}
static struct ctl_table hiperdispatch_ctl_table[] = {
{
.procname = "hiperdispatch",
.mode = 0644,
.proc_handler = hiperdispatch_ctl_handler,
},
};
static ssize_t hd_steal_threshold_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%u\n", hd_steal_threshold);
}
static ssize_t hd_steal_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
unsigned int val;
int rc;
rc = kstrtouint(buf, 0, &val);
if (rc)
return rc;
if (val > 100)
return -ERANGE;
hd_steal_threshold = val;
return count;
}
static DEVICE_ATTR_RW(hd_steal_threshold);
static ssize_t hd_delay_factor_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%u\n", hd_delay_factor);
}
static ssize_t hd_delay_factor_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
unsigned int val;
int rc;
rc = kstrtouint(buf, 0, &val);
if (rc)
return rc;
if (!val)
return -ERANGE;
hd_delay_factor = val;
return count;
}
static DEVICE_ATTR_RW(hd_delay_factor);
static struct attribute *hd_attrs[] = {
&dev_attr_hd_steal_threshold.attr,
&dev_attr_hd_delay_factor.attr,
NULL,
};
static const struct attribute_group hd_attr_group = {
.name = "hiperdispatch",
.attrs = hd_attrs,
};
static int hd_greedy_time_get(void *unused, u64 *val)
{
mutex_lock(&hd_counter_mutex);
hd_update_times();
*val = hd_high_time;
mutex_unlock(&hd_counter_mutex);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(hd_greedy_time_fops, hd_greedy_time_get, NULL, "%llu\n");
static int hd_conservative_time_get(void *unused, u64 *val)
{
mutex_lock(&hd_counter_mutex);
hd_update_times();
*val = hd_low_time;
mutex_unlock(&hd_counter_mutex);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(hd_conservative_time_fops, hd_conservative_time_get, NULL, "%llu\n");
static int hd_adjustment_count_get(void *unused, u64 *val)
{
*val = atomic64_read(&hd_adjustments);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(hd_adjustments_fops, hd_adjustment_count_get, NULL, "%llu\n");
static void __init hd_create_debugfs_counters(void)
{
struct dentry *dir;
dir = debugfs_create_dir("hiperdispatch", arch_debugfs_dir);
debugfs_create_file("conservative_time_ms", 0400, dir, NULL, &hd_conservative_time_fops);
debugfs_create_file("greedy_time_ms", 0400, dir, NULL, &hd_greedy_time_fops);
debugfs_create_file("adjustment_count", 0400, dir, NULL, &hd_adjustments_fops);
}
static void __init hd_create_attributes(void)
{
struct device *dev;
dev = bus_get_dev_root(&cpu_subsys);
if (!dev)
return;
if (sysfs_create_group(&dev->kobj, &hd_attr_group))
pr_warn("Unable to create hiperdispatch attribute group\n");
put_device(dev);
}
static int __init hd_init(void)
{
if (IS_ENABLED(CONFIG_HIPERDISPATCH_ON)) {
hd_set_hiperdispatch_mode(1);
topology_schedule_update();
}
if (!register_sysctl("s390", hiperdispatch_ctl_table))
pr_warn("Failed to register s390.hiperdispatch sysctl attribute\n");
hd_create_debugfs_counters();
hd_create_attributes();
return 0;
}
late_initcall(hd_init);
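Runtime control goes through the new sysctl. A hedged userspace sketch (the path follows from register_sysctl("s390", ...) and the "hiperdispatch" procname):

/* Hypothetical userspace sketch: enable hiperdispatch at runtime. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/s390/hiperdispatch", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* 0 disables, 1 enables */
	return fclose(f) ? 1 : 0;
}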


@ -76,6 +76,7 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQEXT_WTI, .name = "WTI", .desc = "[EXT] Warning Track"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},


@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/execmem.h>
#include <asm/text-patching.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
@ -152,7 +153,12 @@ void arch_arm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
stop_machine_cpuslocked(swap_instruction, &args, NULL);
if (MACHINE_HAS_SEQ_INSN) {
swap_instruction(&args);
text_poke_sync();
} else {
stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
@ -160,7 +166,12 @@ void arch_disarm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
stop_machine_cpuslocked(swap_instruction, &args, NULL);
if (MACHINE_HAS_SEQ_INSN) {
swap_instruction(&args);
text_poke_sync();
} else {
stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);


@ -9,6 +9,7 @@
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/march.h>
#define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@ -88,7 +89,7 @@ SYM_CODE_START(ftrace_caller)
SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_common)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifdef MARCH_HAS_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_func
@ -115,7 +116,7 @@ SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
.Lftrace_graph_caller_end:
#endif
lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#ifdef MARCH_HAS_Z196_FEATURES
ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
locgrz %r1,%r0
#else


@ -22,6 +22,10 @@
#include <asm/hwctrset.h>
#include <asm/debug.h>
/* Perf PMU definitions for the counter facility */
#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
enum cpumf_ctr_set {
CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */


@ -24,6 +24,22 @@
#include <asm/timex.h>
#include <linux/io.h>
/* Perf PMU definitions for the sampling facility */
#define PERF_CPUM_SF_MAX_CTR 2
#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
* A single table contains up to 511 pointers to sample-data-blocks.
@ -113,17 +129,6 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
/* Return TOD timestamp contained in an trailer entry */
static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
{
/* TOD in STCKE format */
if (te->header.t)
return *((unsigned long long *)&te->timestamp[1]);
/* TOD in STCK format */
return *((unsigned long long *)&te->timestamp[0]);
}
/* Return pointer to trailer entry of a sample data block */
static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v)
{
@ -154,12 +159,12 @@ static inline unsigned long *get_next_sdbt(unsigned long *s)
/*
* sf_disable() - Switch off sampling facility
*/
static int sf_disable(void)
static void sf_disable(void)
{
struct hws_lsctl_request_block sreq;
memset(&sreq, 0, sizeof(sreq));
return lsctl(&sreq);
lsctl(&sreq);
}
/*
@ -208,8 +213,6 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
(unsigned long)sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@ -265,10 +268,8 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
debug_sprintf_event(sfdbg, 3, "%s: "
"sampling buffer is not linked: origin %#lx"
" tail %#lx\n", __func__,
(unsigned long)sfb->sdbt,
debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n",
__func__, (unsigned long)sfb->sdbt,
(unsigned long)tail);
return -EINVAL;
}
@ -318,9 +319,6 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*tail = virt_to_phys(sfb->sdbt) + 1;
sfb->tail = tail;
debug_sprintf_event(sfdbg, 4, "%s: new buffer"
" settings: sdbt %lu sdb %lu\n", __func__,
sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@ -357,15 +355,8 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
/* Allocate requested number of sample-data-blocks */
rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
if (rc) {
if (rc)
free_sampling_buffer(sfb);
debug_sprintf_event(sfdbg, 4, "%s: "
"realloc_sampling_buffer failed with rc %i\n",
__func__, rc);
} else
debug_sprintf_event(sfdbg, 4,
"%s: tear %#lx dear %#lx\n", __func__,
(unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
return rc;
}
@ -377,8 +368,8 @@ static void sfb_set_limits(unsigned long min, unsigned long max)
CPUM_SF_MAX_SDB = max;
memset(&si, 0, sizeof(si));
if (!qsi(&si))
CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
qsi(&si);
CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}
static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
@ -397,12 +388,6 @@ static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
return 0;
}
static int sfb_has_pending_allocs(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
return sfb_pending_allocs(sfb, hwc) > 0;
}
static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
/* Limit the number of SDBs to not exceed the maximum */
@ -426,7 +411,6 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq;
size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
@ -457,7 +441,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
@ -473,12 +456,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
if (sf_buffer_available(cpuhw))
return 0;
debug_sprintf_event(sfdbg, 3,
"%s: rate %lu f %lu sdb %lu/%lu"
" sample_size %lu cpuhw %p\n", __func__,
SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
sample_size, cpuhw);
return alloc_sampling_buffer(&cpuhw->sfb,
sfb_pending_allocs(&cpuhw->sfb, hwc));
}
@ -535,8 +512,6 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
__func__, OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@ -554,13 +529,11 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
static void extend_sampling_buffer(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
unsigned long num, num_old;
int rc;
unsigned long num;
num = sfb_pending_allocs(sfb, hwc);
if (!num)
return;
num_old = sfb->num_sdb;
/* Disable the sampling facility to reset any states and also
* clear pending measurement alerts.
@ -572,51 +545,33 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
* called by perf. Because this is a reallocation, it is fine if the
* new SDB-request cannot be satisfied immediately.
*/
rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
if (rc)
debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
__func__, rc);
if (sfb_has_pending_allocs(sfb, hwc))
debug_sprintf_event(sfdbg, 5, "%s: "
"req %lu alloc %lu remaining %lu\n",
__func__, num, sfb->num_sdb - num_old,
sfb_pending_allocs(sfb, hwc));
realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
}
/* Number of perf events counting hardware events */
static atomic_t num_events;
static refcount_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_INIT 0
#define PMC_RELEASE 1
#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
int err = 0;
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
switch (*((int *)flags)) {
case PMC_INIT:
memset(cpusf, 0, sizeof(*cpusf));
err = qsi(&cpusf->qsi);
if (err)
break;
cpusf->flags |= PMU_F_RESERVED;
err = sf_disable();
memset(cpuhw, 0, sizeof(*cpuhw));
qsi(&cpuhw->qsi);
cpuhw->flags |= PMU_F_RESERVED;
sf_disable();
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
if (!err)
deallocate_buffers(cpusf);
cpuhw->flags &= ~PMU_F_RESERVED;
sf_disable();
deallocate_buffers(cpuhw);
break;
}
if (err) {
*((int *)flags) |= PMC_FAILURE;
pr_err("Switching off the sampling facility failed with rc %i\n", err);
}
}
static void release_pmc_hardware(void)
@ -627,27 +582,19 @@ static void release_pmc_hardware(void)
on_each_cpu(setup_pmc_cpu, &flags, 1);
}
static int reserve_pmc_hardware(void)
static void reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
on_each_cpu(setup_pmc_cpu, &flags, 1);
if (flags & PMC_FAILURE) {
release_pmc_hardware();
return -ENODEV;
}
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
return 0;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Release PMC if this is the last perf event */
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
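
The refcount_t conversion in this destroy path pairs with the acquire side in __hw_perf_event_init further down. Stripped of the PMU specifics, the get/put pattern the patch introduces looks like this (a minimal sketch with placeholder names, not part of the patch):

        static refcount_t refs;        /* zero means: no users, hardware released */
        static DEFINE_MUTEX(lock);

        static void get_ref(void)
        {
                mutex_lock(&lock);
                if (!refcount_inc_not_zero(&refs)) {
                        /* first user: reserve the hardware, then publish the reference */
                        refcount_set(&refs, 1);
                }
                mutex_unlock(&lock);
        }

        static void put_ref(void)
        {
                /* takes the mutex only when the last reference is dropped */
                if (refcount_dec_and_mutex_lock(&refs, &lock)) {
                        /* last user: release the hardware */
                        mutex_unlock(&lock);
                }
        }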
@ -751,9 +698,6 @@ static unsigned long getrate(bool freq, unsigned long sample,
*/
if (sample_rate_to_freq(si, rate) >
sysctl_perf_event_sample_rate) {
debug_sprintf_event(sfdbg, 1, "%s: "
"Sampling rate exceeds maximum "
"perf sample rate\n", __func__);
rate = 0;
}
}
@ -798,9 +742,6 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
attr->sample_period = rate;
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
__func__, event->cpu, event->attr.sample_period,
event->attr.freq, SAMPLE_FREQ_MODE(hwc));
return 0;
}
@ -810,23 +751,17 @@ static int __hw_perf_event_init(struct perf_event *event)
struct hws_qsi_info_block si;
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int cpu, err;
int cpu, err = 0;
/* Reserve CPU-measurement sampling facility */
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mutex_lock(&pmc_reserve_mutex);
if (!refcount_inc_not_zero(&num_events)) {
reserve_pmc_hardware();
refcount_set(&num_events, 1);
}
mutex_unlock(&pmc_reserve_mutex);
event->destroy = hw_perf_event_destroy;
if (err)
goto out;
/* Access per-CPU sampling information (query sampling info) */
/*
* The event->cpu value can be -1 to count on every CPU, for example,
@ -838,9 +773,9 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
memset(&si, 0, sizeof(si));
cpuhw = NULL;
if (event->cpu == -1)
if (event->cpu == -1) {
qsi(&si);
else {
} else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
*/
@ -881,10 +816,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (err)
goto out;
/* Initialize sample data overflow accounting */
hwc->extra_reg.reg = REG_OVERFLOW;
OVERFLOW_REG(hwc) = 0;
/* Use AUX buffer. No need to allocate it by ourself */
if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
return 0;
@ -1007,7 +938,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
extend_sampling_buffer(&cpuhw->sfb, hwc);
}
/* Rate may be adjusted with ioctl() */
cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
cpuhw->lsctl.interval = SAMPL_RATE(hwc);
}
/* (Re)enable the PMU and sampling facility */
@ -1023,12 +954,6 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&get_lowcore()->lpp);
debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
"interval %#lx tear %#lx dear %#lx\n", __func__,
cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
cpuhw->lsctl.cd, cpuhw->lsctl.interval,
cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}
static void cpumsf_pmu_disable(struct pmu *pmu)
@ -1055,21 +980,18 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
return;
}
/* Save state of TEAR and DEAR register contents */
err = qsi(&si);
if (!err) {
/* TEAR/DEAR values are valid only if the sampling facility is
* enabled. Note that cpumsf_pmu_disable() might be called even
* for a disabled sampling facility because cpumsf_pmu_enable()
* controls the enable/disable state.
*/
if (si.es) {
cpuhw->lsctl.tear = si.tear;
cpuhw->lsctl.dear = si.dear;
}
} else
debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
__func__, err);
/*
* Save state of TEAR and DEAR register contents.
* TEAR/DEAR values are valid only if the sampling facility is
* enabled. Note that cpumsf_pmu_disable() might be called even
* for a disabled sampling facility because cpumsf_pmu_enable()
* controls the enable/disable state.
*/
qsi(&si);
if (si.es) {
cpuhw->lsctl.tear = si.tear;
cpuhw->lsctl.dear = si.dear;
}
cpuhw->flags &= ~PMU_F_ENABLED;
}
@ -1235,11 +1157,6 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
/* Count discarded samples */
*overflow += 1;
} else {
debug_sprintf_event(sfdbg, 4,
"%s: Found unknown"
" sampling data entry: te->f %i"
" basic.def %#4x (%p)\n", __func__,
te->header.f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@ -1284,7 +1201,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* AUX buffer is used when in diagnostic sampling mode.
* No perf events/samples are created.
*/
if (SAMPL_DIAG_MODE(&event->hw))
if (SAMPL_DIAG_MODE(hwc))
return;
sdbt = (unsigned long *)TEAR_REG(hwc);
@ -1309,13 +1226,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
sampl_overflow += te->header.overflow;
/* Timestamps are valid for full sample-data-blocks only */
debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx "
"overflow %llu timestamp %#llx\n",
__func__, sdb, (unsigned long)sdbt,
te->header.overflow,
(te->header.f) ? trailer_timestamp(te) : 0ULL);
/* Collect all samples from a single sample-data-block and
* flag if an (perf) event overflow happened. If so, the PMU
* is stopped and remaining samples will be discarded.
@ -1340,7 +1250,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sdbt = get_next_sdbt(sdbt);
/* Update event hardware registers */
TEAR_REG(hwc) = (unsigned long) sdbt;
TEAR_REG(hwc) = (unsigned long)sdbt;
/* Stop processing sample-data if all samples of the current
* sample-data-block were flushed even if it was not full.
@ -1362,19 +1272,8 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* are dropped.
* Slightly increase the interval to avoid hitting this limit.
*/
if (event_overflow) {
if (event_overflow)
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
__func__,
DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
}
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "%s: "
"overflows: sample %llu event %llu"
" total %llu num_sdb %llu\n",
__func__, sampl_overflow, event_overflow,
OVERFLOW_REG(hwc), num_sdb);
}
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
@ -1442,9 +1341,6 @@ static void aux_output_end(struct perf_output_handle *handle)
/* Remove alert indicators in the buffer */
te = aux_sdb_trailer(aux, aux->alert_mark);
te->header.a = 0;
debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
__func__, i, range_scan, aux->head);
}
/*
@ -1463,7 +1359,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
unsigned long range, i, range_scan, idx, head, base, offset;
struct hws_trailer_entry *te;
if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
if (handle->head & ~PAGE_MASK)
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
@ -1475,10 +1371,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
* SDBs between aux->head and aux->empty_mark are already ready
* for new data. range_scan is num of SDBs not within them.
*/
debug_sprintf_event(sfdbg, 6,
"%s: range %ld head %ld alert %ld empty %ld\n",
__func__, range, aux->head, aux->alert_mark,
aux->empty_mark);
if (range > aux_sdb_num_empty(aux)) {
range_scan = range - aux_sdb_num_empty(aux);
idx = aux->empty_mark + 1;
@ -1504,12 +1396,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long);
cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]);
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
"index %ld tear %#lx dear %#lx\n", __func__,
aux->head, aux->alert_mark, aux->empty_mark,
head / CPUM_SF_SDB_PER_TABLE,
cpuhw->lsctl.tear, cpuhw->lsctl.dear);
return 0;
}
@ -1571,14 +1457,11 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
unsigned long long *overflow)
{
unsigned long i, range_scan, idx, idx_old;
union hws_trailer_header old, prev, new;
unsigned long i, range_scan, idx;
unsigned long long orig_overflow;
struct hws_trailer_entry *te;
debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
"empty %ld\n", __func__, range, aux->head,
aux->alert_mark, aux->empty_mark);
if (range <= aux_sdb_num_empty(aux))
/*
* No need to scan. All SDBs in range are marked as empty.
@ -1601,7 +1484,7 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
* indicator fall into this range, set it.
*/
range_scan = range - aux_sdb_num_empty(aux);
idx_old = idx = aux->empty_mark + 1;
idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
@ -1623,9 +1506,6 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
/* Update empty_mark to new position */
aux->empty_mark = aux->head + range - 1;
debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
"empty %ld\n", __func__, range_scan, idx_old,
idx - 1, aux->empty_mark);
return true;
}
@ -1642,12 +1522,12 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long num_sdb;
aux = perf_get_aux(handle);
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Inform user space new data arrived */
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__,
size >> PAGE_SHIFT);
perf_aux_output_end(handle, size);
@ -1661,7 +1541,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
num_sdb);
break;
}
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Update head and alert_mark to new position */
@ -1681,23 +1561,11 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", aux->sfb.num_sdb);
debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
"overflow %lld\n", __func__,
aux->head, range, overflow);
} else {
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
"already full, try another\n",
__func__,
aux->head, aux->alert_mark);
}
}
if (done)
debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
"empty %ld\n", __func__, aux->head,
aux->alert_mark, aux->empty_mark);
}
/*
@ -1719,8 +1587,6 @@ static void aux_buffer_free(void *data)
kfree(aux->sdbt_index);
kfree(aux->sdb_index);
kfree(aux);
debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
}
static void aux_sdb_init(unsigned long sdb)
@ -1828,9 +1694,6 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
aux->empty_mark = sfb->num_sdb - 1;
debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
sfb->num_sdbt, sfb->num_sdb);
return aux;
no_sdbt:
@ -1863,8 +1726,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
memset(&si, 0, sizeof(si));
if (event->cpu == -1) {
if (qsi(&si))
return -ENODEV;
qsi(&si);
} else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
@ -1874,7 +1736,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
si = cpuhw->qsi;
}
do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
do_freq = !!SAMPL_FREQ_MODE(&event->hw);
rate = getrate(do_freq, value, &si);
if (!rate)
return -EINVAL;
@ -1882,10 +1744,6 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
event->attr.sample_period = rate;
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
debug_sprintf_event(sfdbg, 4, "%s:"
" cpu %d value %#llx period %#llx freq %d\n",
__func__, event->cpu, value,
event->attr.sample_period, do_freq);
return 0;
}
@ -1896,12 +1754,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;
@ -1936,7 +1790,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct aux_buffer *aux;
int err;
int err = 0;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
@ -1944,7 +1798,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
err = 0;
perf_pmu_disable(event->pmu);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@ -2115,7 +1968,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Report measurement alerts only for non-PRA codes */
if (alert != CPU_MF_INT_SF_PRA)
debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__,
alert);
/* Sampling authorization change request */
@ -2143,7 +1996,7 @@ static int cpusf_pmu_setup(unsigned int cpu, int flags)
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
if (!atomic_read(&num_events))
if (!refcount_read(&num_events))
return 0;
local_irq_disable();
@ -2205,10 +2058,12 @@ static const struct kernel_param_ops param_ops_sfb_size = {
.get = param_get_sfb_size,
};
#define RS_INIT_FAILURE_QSI 0x0001
#define RS_INIT_FAILURE_BSDES 0x0002
#define RS_INIT_FAILURE_ALRT 0x0003
#define RS_INIT_FAILURE_PERF 0x0004
enum {
RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */
RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */
RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */
};
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
@ -2224,11 +2079,7 @@ static int __init init_cpum_sampling_pmu(void)
return -ENODEV;
memset(&si, 0, sizeof(si));
if (qsi(&si)) {
pr_cpumsf_err(RS_INIT_FAILURE_QSI);
return -ENODEV;
}
qsi(&si);
if (!si.as && !si.ad)
return -ENODEV;


@ -738,6 +738,22 @@ static const char * const paicrypt_ctrnames[] = {
[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
[155] = "IBM_RESERVED_155",
[156] = "IBM_RESERVED_156",
[157] = "KM_FULL_XTS_AES_128",
[158] = "KM_FULL_XTS_AES_256",
[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
[161] = "KMAC_HMAC_SHA_224",
[162] = "KMAC_HMAC_SHA_256",
[163] = "KMAC_HMAC_SHA_384",
[164] = "KMAC_HMAC_SHA_512",
[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
[171] = "PCKMO_ENCRYPT_AES_XTS_128",
[172] = "PCKMO_ENCRYPT_AES_XTS_256",
};
static void __init attr_event_free(struct attribute **attrs, int num)


@ -635,6 +635,15 @@ static const char * const paiext_ctrnames[] = {
[25] = "NNPA_1MFRAME",
[26] = "NNPA_2GFRAME",
[27] = "NNPA_ACCESSEXCEPT",
[28] = "NNPA_TRANSFORM",
[29] = "NNPA_GELU",
[30] = "NNPA_MOMENTS",
[31] = "NNPA_LAYERNORM",
[32] = "NNPA_MATMUL_OP_BCAST1",
[33] = "NNPA_SQRT",
[34] = "NNPA_INVSQRT",
[35] = "NNPA_NORM",
[36] = "NNPA_REDUCE",
};
static void __init attr_event_free(struct attribute **attrs, int num)


@ -671,6 +671,25 @@ int smp_cpu_get_polarization(int cpu)
return per_cpu(pcpu_devices, cpu).polarization;
}
void smp_cpu_set_capacity(int cpu, unsigned long val)
{
per_cpu(pcpu_devices, cpu).capacity = val;
}
unsigned long smp_cpu_get_capacity(int cpu)
{
return per_cpu(pcpu_devices, cpu).capacity;
}
void smp_set_core_capacity(int cpu, unsigned long val)
{
int i;
cpu = smp_get_base_cpu(cpu);
for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
smp_cpu_set_capacity(i, val);
}
int smp_cpu_get_cpu_address(int cpu)
{
return per_cpu(pcpu_devices, cpu).address;
@ -719,6 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
else
pcpu->state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
set_cpu_present(cpu, true);
if (!early && arch_register_cpu(cpu))
set_cpu_present(cpu, false);
@ -961,6 +981,7 @@ void __init smp_prepare_boot_cpu(void)
ipl_pcpu->state = CPU_STATE_CONFIGURED;
lc->pcpu = (unsigned long)ipl_pcpu;
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}
void __init smp_setup_processor_id(void)


@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
{
arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
unsigned long return_address(unsigned int n)
{
struct unwind_state state;
unsigned long addr;
/* Increment to skip current stack entry */
n++;
unwind_for_each_frame(&state, NULL, NULL, 0) {
addr = unwind_get_return_address(&state);
if (!addr)
break;
if (!n--)
return addr;
}
return 0;
}
EXPORT_SYMBOL_GPL(return_address);


@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/hiperdispatch.h>
#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
@ -47,6 +48,7 @@ static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static int cpu_management;
static DECLARE_WORK(topology_work, topology_work_fn);
@ -144,6 +146,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(cpu, &book->mask);
cpumask_set_cpu(cpu, &socket->mask);
smp_cpu_set_polarization(cpu, tl_core->pp);
smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
}
}
}
@ -270,6 +273,7 @@ void update_cpu_masks(void)
topo->drawer_id = id;
}
}
hd_reset_state();
for_each_online_cpu(cpu) {
topo = &cpu_topology[cpu];
pkg_first = cpumask_first(&topo->core_mask);
@ -278,8 +282,10 @@ void update_cpu_masks(void)
for_each_cpu(sibling, &topo->core_mask) {
topo_sibling = &cpu_topology[sibling];
smt_first = cpumask_first(&topo_sibling->thread_mask);
if (sibling == smt_first)
if (sibling == smt_first) {
topo_package->booted_cores++;
hd_add_core(sibling);
}
}
} else {
topo->booted_cores = topo_package->booted_cores;
@ -303,8 +309,10 @@ static void __arch_update_dedicated_flag(void *arg)
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
int rc = 0;
int rc, hd_status;
hd_status = 0;
rc = 0;
mutex_lock(&smp_cpu_state_mutex);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
@ -314,7 +322,11 @@ static int __arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
if (cpu_management == 1)
hd_status = hd_enable_hiperdispatch();
mutex_unlock(&smp_cpu_state_mutex);
if (hd_status == 0)
hd_disable_hiperdispatch();
return rc;
}
@ -374,7 +386,24 @@ void topology_expect_change(void)
set_topology_timer();
}
static int cpu_management;
static int set_polarization(int polarization)
{
int rc = 0;
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
if (cpu_management == polarization)
goto out;
rc = topology_set_cpu_management(polarization);
if (rc)
goto out;
cpu_management = polarization;
topology_expect_change();
out:
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock();
return rc;
}
static ssize_t dispatching_show(struct device *dev,
struct device_attribute *attr,
@ -400,19 +429,7 @@ static ssize_t dispatching_store(struct device *dev,
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (rc)
goto out;
cpu_management = val;
topology_expect_change();
out:
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock();
rc = set_polarization(val);
return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
@ -624,12 +641,37 @@ static int topology_ctl_handler(const struct ctl_table *ctl, int write,
return rc;
}
static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int polarization;
int rc;
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &polarization,
.maxlen = sizeof(int),
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
};
polarization = cpu_management;
rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
return set_polarization(polarization);
}
static struct ctl_table topology_ctl_table[] = {
{
.procname = "topology",
.mode = 0644,
.proc_handler = topology_ctl_handler,
},
{
.procname = "polarization",
.mode = 0644,
.proc_handler = polarization_ctl_handler,
},
};
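
Once topology_init() below registers this table under "s390", the new knob should appear as /proc/sys/s390/polarization next to the existing topology entry (assuming the usual sysctl proc mount); writing 0 or 1 — e.g. echo 1 > /proc/sys/s390/polarization — selects horizontal or vertical polarization through set_polarization() above, with the SYSCTL_ZERO/SYSCTL_ONE bounds rejecting anything else.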
static int __init topology_init(void)
@ -642,6 +684,8 @@ static int __init topology_init(void)
set_topology_timer();
else
topology_update_polarization_simple();
if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL))
set_polarization(1);
register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys);

arch/s390/kernel/wti.c (new file)

@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Support for warning track interruption
*
* Copyright IBM Corp. 2023
*/
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/irq.h>
#include <uapi/linux/sched/types.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/sclp.h>
#define WTI_DBF_LEN 64
struct wti_debug {
unsigned long missed;
unsigned long addr;
pid_t pid;
};
struct wti_state {
/* debug data for s390dbf */
struct wti_debug dbg;
/*
* Represents the real-time thread responsible for acknowledging
* the warning-track interrupt and triggering preliminary and
* postliminary precautions.
*/
struct task_struct *thread;
/*
* If pending is true, the real-time thread must be scheduled.
* If not, a wake-up of that thread will remain a no-op.
*/
bool pending;
};
static DEFINE_PER_CPU(struct wti_state, wti_state);
static debug_info_t *wti_dbg;
/*
* During a warning-track grace period, interrupts are disabled
* to prevent delays of the warning-track acknowledgment.
*
* Once the CPU is physically dispatched again, interrupts are
* re-enabled.
*/
static void wti_irq_disable(void)
{
unsigned long flags;
struct ctlreg cr6;
local_irq_save(flags);
local_ctl_store(6, &cr6);
/* disable all I/O interrupts */
cr6.val &= ~0xff000000UL;
local_ctl_load(6, &cr6);
local_irq_restore(flags);
}
static void wti_irq_enable(void)
{
unsigned long flags;
struct ctlreg cr6;
local_irq_save(flags);
local_ctl_store(6, &cr6);
/* enable all I/O interrupts */
cr6.val |= 0xff000000UL;
local_ctl_load(6, &cr6);
local_irq_restore(flags);
}
static void store_debug_data(struct wti_state *st)
{
struct pt_regs *regs = get_irq_regs();
st->dbg.pid = current->pid;
st->dbg.addr = 0;
if (!user_mode(regs))
st->dbg.addr = regs->psw.addr;
}
static void wti_interrupt(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct wti_state *st = this_cpu_ptr(&wti_state);
inc_irq_stat(IRQEXT_WTI);
wti_irq_disable();
store_debug_data(st);
st->pending = true;
wake_up_process(st->thread);
}
static int wti_pending(unsigned int cpu)
{
struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
return st->pending;
}
static void wti_dbf_grace_period(struct wti_state *st)
{
struct wti_debug *wdi = &st->dbg;
char buf[WTI_DBF_LEN];
if (wdi->addr)
snprintf(buf, sizeof(buf), "%d %pS", wdi->pid, (void *)wdi->addr);
else
snprintf(buf, sizeof(buf), "%d <user>", wdi->pid);
debug_text_event(wti_dbg, 2, buf);
wdi->missed++;
}
static int wti_show(struct seq_file *seq, void *v)
{
struct wti_state *st;
int cpu;
cpus_read_lock();
seq_puts(seq, " ");
for_each_online_cpu(cpu)
seq_printf(seq, "CPU%-8d", cpu);
seq_putc(seq, '\n');
for_each_online_cpu(cpu) {
st = per_cpu_ptr(&wti_state, cpu);
seq_printf(seq, " %10lu", st->dbg.missed);
}
seq_putc(seq, '\n');
cpus_read_unlock();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wti);
static void wti_thread_fn(unsigned int cpu)
{
struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
st->pending = false;
/*
* Yield the CPU voluntarily to the hypervisor. Control
* resumes when the hypervisor decides to dispatch the CPU
* to this LPAR again.
*/
if (diag49c(DIAG49C_SUBC_ACK))
wti_dbf_grace_period(st);
wti_irq_enable();
}
static struct smp_hotplug_thread wti_threads = {
.store = &wti_state.thread,
.thread_should_run = wti_pending,
.thread_fn = wti_thread_fn,
.thread_comm = "cpuwti/%u",
.selfparking = false,
};
static int __init wti_init(void)
{
struct sched_param wti_sched_param = { .sched_priority = MAX_RT_PRIO - 1 };
struct dentry *wti_dir;
struct wti_state *st;
int cpu, rc;
rc = -EOPNOTSUPP;
if (!sclp.has_wti)
goto out;
rc = smpboot_register_percpu_thread(&wti_threads);
if (WARN_ON(rc))
goto out;
for_each_online_cpu(cpu) {
st = per_cpu_ptr(&wti_state, cpu);
sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param);
}
rc = register_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
if (rc) {
pr_warn("Couldn't request external interrupt 0x1007\n");
goto out_thread;
}
irq_subclass_register(IRQ_SUBCLASS_WARNING_TRACK);
rc = diag49c(DIAG49C_SUBC_REG);
if (rc) {
pr_warn("Failed to register warning track interrupt through DIAG 49C\n");
rc = -EOPNOTSUPP;
goto out_subclass;
}
wti_dir = debugfs_create_dir("wti", arch_debugfs_dir);
debugfs_create_file("stat", 0400, wti_dir, NULL, &wti_fops);
wti_dbg = debug_register("wti", 1, 1, WTI_DBF_LEN);
if (!wti_dbg) {
rc = -ENOMEM;
goto out_debug_register;
}
rc = debug_register_view(wti_dbg, &debug_hex_ascii_view);
if (rc)
goto out_debug_register;
goto out;
out_debug_register:
debug_unregister(wti_dbg);
out_subclass:
irq_subclass_unregister(IRQ_SUBCLASS_WARNING_TRACK);
unregister_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
out_thread:
smpboot_unregister_percpu_thread(&wti_threads);
out:
return rc;
}
late_initcall(wti_init);
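
Once loaded on a machine whose SCLP facility bits report warning-track support, the per-CPU counts of missed grace periods collected above become readable via the debugfs "stat" file created in the "wti" directory — i.e. /sys/kernel/debug/s390/wti/stat on a default debugfs mount (the path prefix is an assumption based on the arch_debugfs_dir usage above).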


@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
cond_resched();
}
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
long inc = 0;
while (nr) {
inc = min(256L, nr);
nr -= inc;
inc = __cmm_free_pages(inc, counter, list);
if (inc)
break;
cond_resched();
}
return nr + inc;
}
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{


@ -18,89 +18,12 @@ static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
unsigned long size;
const char *name;
};
enum address_markers_idx {
KVA_NR = 0,
LOWCORE_START_NR,
LOWCORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
KERNEL_END_NR,
#ifdef CONFIG_KFENCE
KFENCE_START_NR,
KFENCE_END_NR,
#endif
IDENTITY_START_NR,
IDENTITY_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
VMALLOC_END_NR,
#ifdef CONFIG_KMSAN
KMSAN_VMALLOC_SHADOW_START_NR,
KMSAN_VMALLOC_SHADOW_END_NR,
KMSAN_VMALLOC_ORIGIN_START_NR,
KMSAN_VMALLOC_ORIGIN_END_NR,
KMSAN_MODULES_SHADOW_START_NR,
KMSAN_MODULES_SHADOW_END_NR,
KMSAN_MODULES_ORIGIN_START_NR,
KMSAN_MODULES_ORIGIN_END_NR,
#endif
MODULES_NR,
MODULES_END_NR,
ABS_LOWCORE_NR,
ABS_LOWCORE_END_NR,
MEMCPY_REAL_NR,
MEMCPY_REAL_END_NR,
#ifdef CONFIG_KASAN
KASAN_SHADOW_START_NR,
KASAN_SHADOW_END_NR,
#endif
};
static struct addr_marker address_markers[] = {
[KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
[LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
[LOWCORE_END_NR] = {0, 0, "Lowcore End"},
[IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
[IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
[AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
[KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KFENCE
[KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, 0, "KFence Pool End"},
#endif
[VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
[VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
#ifdef CONFIG_KMSAN
[KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
#endif
[MODULES_NR] = {1, 0, "Modules Area Start"},
[MODULES_END_NR] = {0, 0, "Modules Area End"},
[ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
#ifdef CONFIG_KASAN
[KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
{1, -1UL, NULL}
};
static struct addr_marker *markers;
static unsigned int markers_cnt;
struct pg_state {
struct ptdump_state ptdump;
@ -173,7 +96,8 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr, unsi
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
st->marker->is_start ? "Start" : "End");
}
st->start_address = addr;
st->current_prot = prot;
@ -202,7 +126,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
if (level == -1)
addr = max_addr;
if (st->level == -1) {
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
@ -276,7 +200,7 @@ static int ptdump_show(struct seq_file *m, void *v)
.check_wx = false,
.wx_pages = 0,
.start_address = 0,
.marker = address_markers,
.marker = markers,
};
get_online_mems();
@ -299,10 +223,23 @@ static int ptdump_cmp(const void *a, const void *b)
if (ama->start_address < amb->start_address)
return -1;
/*
* If the start addresses of two markers are identical consider the
* marker which defines the start of an area higher than the one which
* defines the end of an area. This keeps pairs of markers sorted.
* If the start addresses of two markers are identical, sort markers in an
* order that considers areas contained within other areas correctly.
*/
if (ama->is_start && amb->is_start) {
if (ama->size > amb->size)
return -1;
if (ama->size < amb->size)
return 1;
return 0;
}
if (!ama->is_start && !amb->is_start) {
if (ama->size > amb->size)
return 1;
if (ama->size < amb->size)
return -1;
return 0;
}
if (ama->is_start)
return 1;
if (amb->is_start)
@ -310,12 +247,41 @@ static int ptdump_cmp(const void *a, const void *b)
return 0;
}
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
size_t oldsize, newsize;
oldsize = markers_cnt * sizeof(*markers);
newsize = oldsize + 2 * sizeof(*markers);
if (!oldsize)
markers = kvmalloc(newsize, GFP_KERNEL);
else
markers = kvrealloc(markers, newsize, GFP_KERNEL);
if (!markers)
goto error;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
markers[markers_cnt].name = name;
markers_cnt++;
markers[markers_cnt].is_start = 0;
markers[markers_cnt].start_address = end;
markers[markers_cnt].size = end - start;
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
error:
markers_cnt = 0;
return -ENOMEM;
}
static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
int rc;
/*
* Figure out the maximum virtual address being accessible with the
@ -324,41 +290,38 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[LOWCORE_START_NR].start_address = lowcore;
address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
address_markers[IDENTITY_START_NR].start_address = __identity_base;
address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
address_markers[MODULES_END_NR].start_address = MODULES_END;
address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
/* start + end markers - must be added first */
rc = add_marker(0, -1UL, NULL);
rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image");
rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore");
rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping");
rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area");
rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area");
rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area");
rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area");
rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area");
rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area");
#ifdef CONFIG_KFENCE
address_markers[KFENCE_START_NR].start_address = kfence_start;
address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool");
#endif
#ifdef CONFIG_KMSAN
address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow");
rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins");
rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow");
rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins");
#endif
sort(address_markers, ARRAY_SIZE(address_markers) - 1,
sizeof(address_markers[0]), ptdump_cmp, NULL);
#ifdef CONFIG_KASAN
rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow");
#endif
if (rc)
goto error;
sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
return 0;
error:
kvfree(markers);
return -ENOMEM;
}
device_initcall(pt_dump_init);


@ -527,9 +527,9 @@ b938 sortl RRE_RR
b939 dfltcc RRF_R0RR2
b93a kdsa RRE_RR
b93b nnpa RRE_00
b93c ppno RRE_RR
b93e kimd RRE_RR
b93f klmd RRE_RR
b93c prno RRE_RR
b93e kimd RRF_U0RR
b93f klmd RRF_U0RR
b941 cfdtr RRF_UURF
b942 clgdtr RRF_UURF
b943 clfdtr RRF_UURF
@ -549,6 +549,10 @@ b964 nngrk RRF_R0RR2
b965 ocgrk RRF_R0RR2
b966 nogrk RRF_R0RR2
b967 nxgrk RRF_R0RR2
b968 clzg RRE_RR
b969 ctzg RRE_RR
b96c bextg RRF_R0RR2
b96d bdepg RRF_R0RR2
b972 crt RRF_U0RR
b973 clrt RRF_U0RR
b974 nnrk RRF_R0RR2
@ -796,6 +800,16 @@ e35b sy RXY_RRRD
e35c mfy RXY_RRRD
e35e aly RXY_RRRD
e35f sly RXY_RRRD
e360 lxab RXY_RRRD
e361 llxab RXY_RRRD
e362 lxah RXY_RRRD
e363 llxah RXY_RRRD
e364 lxaf RXY_RRRD
e365 llxaf RXY_RRRD
e366 lxag RXY_RRRD
e367 llxag RXY_RRRD
e368 lxaq RXY_RRRD
e369 llxaq RXY_RRRD
e370 sthy RXY_RRRD
e371 lay RXY_RRRD
e372 stcy RXY_RRRD
@ -880,6 +894,8 @@ e63c vupkz VSI_URDV
e63d vstrl VSI_URDV
e63f vstrlr VRS_RRDV
e649 vlip VRI_V0UU2
e64a vcvdq VRI_VV0UU
e64e vcvbq VRR_VV0U2
e650 vcvb VRR_RV0UU
e651 vclzdp VRR_VV0U2
e652 vcvbg VRR_RV0UU
@ -893,7 +909,7 @@ e65b vpsop VRI_VVUUU2
e65c vupkzl VRR_VV0U2
e65d vcfn VRR_VV0UU2
e65e vclfnl VRR_VV0UU2
e65f vtp VRR_0V
e65f vtp VRR_0V0U
e670 vpkzr VRI_VVV0UU2
e671 vap VRI_VVV0UU2
e672 vsrpr VRI_VVV0UU2
@ -908,6 +924,7 @@ e67b vrp VRI_VVV0UU2
e67c vscshp VRR_VVV
e67d vcsph VRR_VVV0U0
e67e vsdp VRI_VVV0UU2
e67f vtz VRR_0VVU
e700 vleb VRX_VRRDU
e701 vleh VRX_VRRDU
e702 vleg VRX_VRRDU
@ -948,6 +965,7 @@ e74d vrep VRI_VVUU
e750 vpopct VRR_VV0U
e752 vctz VRR_VV0U
e753 vclz VRR_VV0U
e754 vgem VRR_VV0U
e756 vlr VRX_VV
e75c vistr VRR_VV0U0U
e75f vseg VRR_VV0U
@ -985,6 +1003,8 @@ e784 vpdi VRR_VVV0U
e785 vbperm VRR_VVV
e786 vsld VRI_VVV0U
e787 vsrd VRI_VVV0U
e788 veval VRI_VVV0UV
e789 vblend VRR_VVVU0V
e78a vstrc VRR_VVVUU0V
e78b vstrs VRR_VVVUU0V
e78c vperm VRR_VVV0V
@ -1010,6 +1030,10 @@ e7ac vmale VRR_VVVU0V
e7ad vmalo VRR_VVVU0V
e7ae vmae VRR_VVVU0V
e7af vmao VRR_VVVU0V
e7b0 vdl VRR_VVV0UU
e7b1 vrl VRR_VVV0UU
e7b2 vd VRR_VVV0UU
e7b3 vr VRR_VVV0UU
e7b4 vgfm VRR_VVV0U
e7b8 vmsl VRR_VVVUU0V
e7b9 vaccc VRR_VVVU0V
@ -1017,12 +1041,12 @@ e7bb vac VRR_VVVU0V
e7bc vgfma VRR_VVVU0V
e7bd vsbcbi VRR_VVVU0V
e7bf vsbi VRR_VVVU0V
e7c0 vclgd VRR_VV0UUU
e7c1 vcdlg VRR_VV0UUU
e7c2 vcgd VRR_VV0UUU
e7c3 vcdg VRR_VV0UUU
e7c4 vlde VRR_VV0UU2
e7c5 vled VRR_VV0UUU
e7c0 vclfp VRR_VV0UUU
e7c1 vcfpl VRR_VV0UUU
e7c2 vcsfp VRR_VV0UUU
e7c3 vcfps VRR_VV0UUU
e7c4 vfll VRR_VV0UU2
e7c5 vflr VRR_VV0UUU
e7c7 vfi VRR_VV0UUU
e7ca wfk VRR_VV0UU2
e7cb wfc VRR_VV0UU2
@ -1094,9 +1118,9 @@ eb54 niy SIY_URD
eb55 cliy SIY_URD
eb56 oiy SIY_URD
eb57 xiy SIY_URD
eb60 lric RSY_RDRU
eb61 stric RSY_RDRU
eb62 mric RSY_RDRU
eb60 lric RSY_RURD2
eb61 stric RSY_RURD2
eb62 mric RSY_RURD2
eb6a asi SIY_IRD
eb6e alsi SIY_IRD
eb71 lpswey SIY_RD
@ -1104,7 +1128,7 @@ eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD
eb81 icmy RSY_RURD
eb8a sqbs RSY_RDRU
eb8a sqbs RSY_RURD2
eb8e mvclu RSY_RRRD
eb8f clclu RSY_RRRD
eb90 stmy RSY_RRRD


@ -21,7 +21,7 @@ config CRYPTO_DEV_PADLOCK
(so called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast cryptographic
operations with supported algorithms.
The instructions are used only when the CPU supports them.
Otherwise software encryption is used.
@ -78,18 +78,79 @@ config ZCRYPT
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
depends on ZCRYPT
help
With this option enabled the pkey kernel module provides an API
With this option enabled the pkey kernel modules provide an API
for creation and handling of protected keys. Other parts of the
kernel or userspace applications may use these functions.
Select this option if you want to enable the kernel and userspace
API for proteced key handling.
The protected key support is split into the following modules:
- A pkey base and API kernel module (pkey.ko) which offers the
infrastructure for the pkey handler kernel modules, the ioctl
and the sysfs API and the in-kernel API to the crypto cipher
implementations using protected key.
- A pkey pckmo kernel module (pkey-pckmo.ko) which is automatically
loaded when pckmo support (that is, generation of protected keys
from clear key values) is available.
- A pkey CCA kernel module (pkey-cca.ko) which is automatically
loaded when a CEX crypto card is available.
- A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
loaded when a CEX crypto card is available.
Please note that creation of protected keys from secure keys
requires at least one CEX card in coprocessor mode to be
available at runtime.
Select this option if you want to enable the kernel and userspace
API for protected key handling.
config PKEY_CCA
tristate "PKEY CCA support handler"
depends on PKEY
depends on ZCRYPT
help
This is the CCA support handler for deriving protected keys
from CCA (secure) keys. This handler also provides an alternate
way to make protected keys from clear key values.
The PKEY CCA support handler needs a Crypto Express card (CEX)
in CCA mode.
If you have selected the PKEY option then you should also enable
this option unless you are sure you never need to derive protected
keys from CCA key material.
config PKEY_EP11
tristate "PKEY EP11 support handler"
depends on PKEY
depends on ZCRYPT
help
This is the EP11 support handler for deriving protected keys
from EP11 (secure) keys. This handler also provides an alternate
way to make protected keys from clear key values.
The PKEY EP11 support handler needs a Crypto Express card (CEX)
in EP11 mode.
If you have selected the PKEY option then you should also enable
this option unless you are sure you never need to derive protected
keys from EP11 key material.
config PKEY_PCKMO
tristate "PKEY PCKMO support handler"
depends on PKEY
help
This is the PCKMO support handler for deriving protected keys
from clear key values via invoking the PCKMO instruction.
The PCKMO instruction can be enabled and disabled in the crypto
settings at the LPAR profile. This handler checks for availability
during initialization and, if built as a kernel module, unloads
itself if PCKMO is disabled.
The PCKMO way of deriving protected keys from clear key material
is used especially during the self-test of protected key ciphers like
PAES, but the CCA and EP11 handlers provide alternate ways to
generate protected keys from clear key values.
If you have selected the PKEY option then you should also enable
this option unless you are sure you never need to derive protected
keys from clear key values directly via PCKMO.
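
Taken together, a configuration that wants the full protected-key stack with every handler loaded on demand might carry a .config fragment like this (illustrative only, not part of the patch):

        CONFIG_ZCRYPT=m
        CONFIG_PKEY=m
        CONFIG_PKEY_CCA=m
        CONFIG_PKEY_EP11=m
        CONFIG_PKEY_PCKMO=m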
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"


@ -44,6 +44,7 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_wti = !!(sccb->fac119 & 0x40);
sclp.has_kss = !!(sccb->fac98 & 0x01);
sclp.has_aisii = !!(sccb->fac118 & 0x40);
sclp.has_aeni = !!(sccb->fac118 & 0x20);


@ -13,10 +13,22 @@ obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
# pkey base and api module
pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o
obj-$(CONFIG_PKEY) += pkey.o
# pkey cca handler module
pkey-cca-objs := pkey_cca.o
obj-$(CONFIG_PKEY_CCA) += pkey-cca.o
# pkey ep11 handler module
pkey-ep11-objs := pkey_ep11.o
obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
# pkey pckmo handler module
pkey-pckmo-objs := pkey_pckmo.o
obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o


@ -107,6 +107,7 @@ debug_info_t *ap_dbf_info;
static bool ap_scan_bus(void);
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
static int ap_scan_bus_time = AP_CONFIG_TIME;
static struct timer_list ap_scan_bus_timer;
@ -733,7 +734,7 @@ static void ap_check_bindings_complete(void)
if (!completion_done(&ap_apqn_bindings_complete)) {
complete_all(&ap_apqn_bindings_complete);
ap_send_bindings_complete_uevent();
pr_debug("%s all apqn bindings complete\n", __func__);
pr_debug("all apqn bindings complete\n");
}
}
}
@ -768,7 +769,7 @@ int ap_wait_apqn_bindings_complete(unsigned long timeout)
else if (l == 0 && timeout)
rc = -ETIME;
pr_debug("%s rc=%d\n", __func__, rc);
pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
@ -795,8 +796,7 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
pr_debug("%s reprobing queue=%02x.%04x\n",
__func__, card, queue);
pr_debug("reprobing queue=%02x.%04x\n", card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@ -1000,17 +1000,31 @@ bool ap_bus_force_rescan(void)
unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
bool rc = false;
pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
pr_debug("> scan counter=%lu\n", scan_counter);
/* Only trigger AP bus scans after the initial scan is done */
if (scan_counter <= 0)
goto out;
/*
* There is one unlikely but nevertheless valid scenario where the
* thread holding the mutex may try to send some crypto load but
* all cards are offline, so a rescan is triggered, which causes
* a recursive call of ap_bus_force_rescan(). A simple return if
* the mutex is already locked by this thread solves this.
*/
if (mutex_is_locked(&ap_scan_bus_mutex)) {
if (ap_scan_bus_task == current)
goto out;
}
/* Try to acquire the AP scan bus mutex */
if (mutex_trylock(&ap_scan_bus_mutex)) {
/* mutex acquired, run the AP bus scan */
ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
rc = ap_scan_bus_result;
ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
goto out;
}
@ -1029,7 +1043,7 @@ bool ap_bus_force_rescan(void)
mutex_unlock(&ap_scan_bus_mutex);
out:
pr_debug("%s rc=%d\n", __func__, rc);
pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@ -1043,7 +1057,7 @@ static int ap_bus_cfg_chg(struct notifier_block *nb,
if (action != CHSC_NOTIFY_AP_CFG)
return NOTIFY_DONE;
pr_debug("%s config change, forcing bus rescan\n", __func__);
pr_debug("config change, forcing bus rescan\n");
ap_bus_force_rescan();
@ -1900,8 +1914,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
pr_debug("%s(%d,%d) queue dev checkstop on\n",
__func__, ac->id, dom);
pr_debug("(%d,%d) queue dev checkstop on\n",
ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@ -1911,8 +1925,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
pr_debug("%s(%d,%d) queue dev checkstop off\n",
__func__, ac->id, dom);
pr_debug("(%d,%d) queue dev checkstop off\n",
ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@ -1924,8 +1938,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
pr_debug("%s(%d,%d) queue dev config off\n",
__func__, ac->id, dom);
pr_debug("(%d,%d) queue dev config off\n",
ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@ -1936,8 +1950,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
pr_debug("%s(%d,%d) queue dev config on\n",
__func__, ac->id, dom);
pr_debug("(%d,%d) queue dev config on\n",
ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@ -2009,8 +2023,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
pr_debug("%s(%d) no type info (no APQN found), ignored\n",
__func__, ap);
pr_debug("(%d) no type info (no APQN found), ignored\n",
ap);
}
return;
}
@ -2022,8 +2036,7 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
pr_debug("%s(%d) no valid type (0) info, ignored\n",
__func__, ap);
pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
return;
}
@ -2202,7 +2215,7 @@ static bool ap_scan_bus(void)
bool config_changed;
int ap;
pr_debug(">%s\n", __func__);
pr_debug(">\n");
/* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
@ -2243,7 +2256,7 @@ static bool ap_scan_bus(void)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
pr_debug("%s init scan complete\n", __func__);
pr_debug("init scan complete\n");
ap_send_init_scan_done_uevent();
}
@ -2251,7 +2264,7 @@ static bool ap_scan_bus(void)
mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
pr_debug("<%s config_changed=%d\n", __func__, config_changed);
pr_debug("< config_changed=%d\n", config_changed);
return config_changed;
}
@ -2284,7 +2297,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused)
* system_long_wq which invokes this function here again.
*/
if (mutex_trylock(&ap_scan_bus_mutex)) {
ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
}
}


@ -171,8 +171,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
__func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
@ -453,8 +453,8 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
pr_debug("%s queue 0x%02x.%04x associated with %u\n",
__func__, AP_QID_CARD(aq->qid),
pr_debug("queue 0x%02x.%04x associated with %u\n",
AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
@ -697,8 +697,8 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@ -853,8 +853,8 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@ -981,8 +981,8 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}

(file diff suppressed because it is too large)


@ -0,0 +1,362 @@
// SPDX-License-Identifier: GPL-2.0
/*
* pkey base: debug feature, pkey handler registry
*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include "pkey_base.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key base and api");
/*
* pkey debug feature
*/
debug_info_t *pkey_dbf_info;
EXPORT_SYMBOL(pkey_dbf_info);
/*
* pkey handler registry
*/
static DEFINE_SPINLOCK(handler_list_write_lock);
static LIST_HEAD(handler_list);
int pkey_handler_register(struct pkey_handler *handler)
{
const struct pkey_handler *h;
if (!handler ||
!handler->is_supported_key ||
!handler->is_supported_keytype)
return -EINVAL;
if (!try_module_get(handler->module))
return -ENXIO;
spin_lock(&handler_list_write_lock);
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (h == handler) {
rcu_read_unlock();
spin_unlock(&handler_list_write_lock);
module_put(handler->module);
return -EEXIST;
}
}
rcu_read_unlock();
list_add_rcu(&handler->list, &handler_list);
spin_unlock(&handler_list_write_lock);
synchronize_rcu();
module_put(handler->module);
PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__,
handler->name ?: "<no name>");
return 0;
}
EXPORT_SYMBOL(pkey_handler_register);
int pkey_handler_unregister(struct pkey_handler *handler)
{
spin_lock(&handler_list_write_lock);
list_del_rcu(&handler->list);
INIT_LIST_HEAD_RCU(&handler->list);
spin_unlock(&handler_list_write_lock);
synchronize_rcu();
PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__,
handler->name ?: "<no name>");
return 0;
}
EXPORT_SYMBOL(pkey_handler_unregister);
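
For orientation, a handler module built on this registry boils down to something like the following (an illustrative sketch only: the struct pkey_handler field names are taken from their uses in this file, the callback signatures are inferred from the call sites below, and everything named demo_* is hypothetical):

        static bool demo_is_supported_key(const u8 *key, u32 keylen)
        {
                /* a real handler would inspect the key header here */
                return false;
        }

        static bool demo_is_supported_keytype(enum pkey_key_type kt)
        {
                return false;
        }

        static struct pkey_handler demo_handler = {
                .module = THIS_MODULE,
                .name = "demo pkey handler",
                .is_supported_key = demo_is_supported_key,
                .is_supported_keytype = demo_is_supported_keytype,
                /* optional ops such as .key_to_protkey would be filled in here */
        };

        static int __init demo_init(void)
        {
                return pkey_handler_register(&demo_handler);
        }

        static void __exit demo_exit(void)
        {
                pkey_handler_unregister(&demo_handler);
        }

        module_init(demo_init);
        module_exit(demo_exit);

The try_module_get()/module_put() pairs in the lookup functions below ensure such a module cannot be unloaded while one of its callbacks is in use.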
/*
* Handler invocation functions.
*/
const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen)
{
const struct pkey_handler *h;
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (!try_module_get(h->module))
continue;
if (h->is_supported_key(key, keylen)) {
rcu_read_unlock();
return h;
}
module_put(h->module);
}
rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL(pkey_handler_get_keybased);
const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt)
{
const struct pkey_handler *h;
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (!try_module_get(h->module))
continue;
if (h->is_supported_keytype(kt)) {
rcu_read_unlock();
return h;
}
module_put(h->module);
}
rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL(pkey_handler_get_keytypebased);
void pkey_handler_put(const struct pkey_handler *handler)
{
const struct pkey_handler *h;
if (!handler)
return;
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (h == handler) {
module_put(h->module);
break;
}
}
rcu_read_unlock();
}
EXPORT_SYMBOL(pkey_handler_put);
int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keybased(key, keylen);
if (h && h->key_to_protkey) {
rc = h->key_to_protkey(apqns, nr_apqns, key, keylen,
protkey, protkeylen,
protkeytype);
}
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_key_to_protkey);
/*
* This handler invocation is special as there may be more than
* one handler providing support for the very same key (type).
* Also, a handler may not respond true on is_supported_key(),
* so simply try each handler and check the return value here.
*/
int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
u32 *protkeytype)
{
const struct pkey_handler *h, *htmp[10];
int i, n = 0, rc = -ENODEV;
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (!try_module_get(h->module))
continue;
if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp))
htmp[n++] = h;
else
module_put(h->module);
}
rcu_read_unlock();
for (i = 0; i < n; i++) {
h = htmp[i];
if (rc)
rc = h->slowpath_key_to_protkey(apqns, nr_apqns,
key, keylen,
protkey, protkeylen,
protkeytype);
module_put(h->module);
}
return rc;
}
EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey);
int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keytypebased(keysubtype);
if (h && h->gen_key) {
rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype,
keybitsize, flags,
keybuf, keybuflen, keyinfo);
}
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_gen_key);
int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keytypebased(keysubtype);
if (h && h->clr_to_key) {
rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype,
keybitsize, flags, clrkey, clrkeylen,
keybuf, keybuflen, keyinfo);
}
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_clr_to_key);
int pkey_handler_verify_key(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
u32 *keytype, u32 *keybitsize, u32 *flags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keybased(key, keylen);
if (h && h->verify_key) {
rc = h->verify_key(key, keylen, card, dom,
keytype, keybitsize, flags);
}
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_verify_key);
int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keybased(key, keylen);
if (h && h->apqns_for_key)
rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns);
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_apqns_for_key);
int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keytypebased(keysubtype);
if (h && h->apqns_for_keytype) {
rc = h->apqns_for_keytype(keysubtype,
cur_mkvp, alt_mkvp, flags,
apqns, nr_apqns);
}
pkey_handler_put(h);
return rc;
}
EXPORT_SYMBOL(pkey_handler_apqns_for_keytype);
void pkey_handler_request_modules(void)
{
#ifdef CONFIG_MODULES
static const char * const pkey_handler_modules[] = {
"pkey_cca", "pkey_ep11", "pkey_pckmo" };
int i;
for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
const struct pkey_handler *h;
bool found = false;
rcu_read_lock();
list_for_each_entry_rcu(h, &handler_list, list) {
if (h->module &&
!strcmp(h->module->name, pkey_handler_modules[i])) {
found = true;
break;
}
}
rcu_read_unlock();
if (!found) {
pr_debug("request_module(%s)\n", pkey_handler_modules[i]);
request_module(pkey_handler_modules[i]);
}
}
#endif
}
EXPORT_SYMBOL(pkey_handler_request_modules);
/*
* Module init
*/
static int __init pkey_init(void)
{
int rc;
/* init debug feature */
pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
debug_register_view(pkey_dbf_info, &debug_sprintf_view);
debug_set_level(pkey_dbf_info, 4);
/* the handler registry does not need any init */
rc = pkey_api_init();
if (rc)
debug_unregister(pkey_dbf_info);
return rc;
}
/*
* Module exit
*/
static void __exit pkey_exit(void)
{
pkey_api_exit();
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
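
For orientation, this is how a consumer drives the registry: look up a matching handler, invoke it, and let the wrappers above handle the module reference counting. A purely illustrative, hypothetical in-kernel caller (the function name is invented; the retry-on-ENODEV policy mirrors what pkey_sysfs.c below does for key generation) might look like this:

/*
 * Hypothetical consumer sketch, not part of the patch: convert a key
 * blob of any registered type into a protected key via the pkey base.
 */
#include "pkey_base.h"

static int example_blob_to_protkey(const u8 *blob, u32 bloblen)
{
	u8 protkey[PROTKEYBLOBBUFSIZE];
	u32 protkeylen = sizeof(protkey);
	u32 protkeytype = 0;
	int rc;

	/* NULL/0 APQNs let the selected handler pick suitable APQNs */
	rc = pkey_handler_key_to_protkey(NULL, 0, blob, bloblen,
					 protkey, &protkeylen, &protkeytype);
	if (rc == -ENODEV) {
		/* no fitting handler bound yet: load modules and retry */
		pkey_handler_request_modules();
		rc = pkey_handler_key_to_protkey(NULL, 0, blob, bloblen,
						 protkey, &protkeylen,
						 &protkeytype);
	}
	return rc;
}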

View file: drivers/s390/crypto/pkey_base.h (new file)

@@ -0,0 +1,195 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright IBM Corp. 2024
*
* Pkey base: debug feature, defines and structs
* common to all pkey code.
*/
#ifndef _PKEY_BASE_H_
#define _PKEY_BASE_H_
#include <linux/types.h>
#include <asm/debug.h>
#include <asm/pkey.h>
/*
* pkey debug feature
*/
extern debug_info_t *pkey_dbf_info;
#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
/*
* common defines and common structs
*/
#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internally */
#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
/* inside view of a generic protected key token */
struct protkeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3];
u8 version; /* should be 0x01 for protected key token */
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
u32 len; /* bytes actually stored in protkey[] */
u8 protkey[]; /* the protected key blob */
} __packed;
/* inside view of a protected AES key token */
struct protaeskeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3];
u8 version; /* should be 0x01 for protected key token */
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
u32 len; /* bytes actually stored in protkey[] */
u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
} __packed;
/* inside view of a clear key token (type 0x00 version 0x02) */
struct clearkeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
u8 res0[3];
u8 version; /* 0x02 for clear key token */
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
u32 len; /* bytes actually stored in clearkey[] */
u8 clearkey[]; /* clear key value */
} __packed;
/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */
static inline u32 pkey_keytype_aes_to_size(u32 keytype)
{
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
return 16;
case PKEY_KEYTYPE_AES_192:
return 24;
case PKEY_KEYTYPE_AES_256:
return 32;
default:
return 0;
}
}
/* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */
static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
{
switch (keybitsize) {
case 128:
return PKEY_KEYTYPE_AES_128;
case 192:
return PKEY_KEYTYPE_AES_192;
case 256:
return PKEY_KEYTYPE_AES_256;
default:
return 0;
}
}
/*
* pkey_api.c:
*/
int __init pkey_api_init(void);
void __exit pkey_api_exit(void);
/*
* pkey_sysfs.c:
*/
extern const struct attribute_group *pkey_attr_groups[];
/*
* pkey handler registry
*/
struct pkey_handler {
struct module *module;
const char *name;
/*
* is_supported_key() and is_supported_keytype() are called
* within an rcu_read_lock() scope and thus must not sleep!
*/
bool (*is_supported_key)(const u8 *key, u32 keylen);
bool (*is_supported_keytype)(enum pkey_key_type);
int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
u32 *protkeytype);
int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
int (*verify_key)(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
u32 *keytype, u32 *keybitsize, u32 *flags);
int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns);
int (*apqns_for_keytype)(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns);
/* used internally by the pkey base */
struct list_head list;
};
int pkey_handler_register(struct pkey_handler *handler);
int pkey_handler_unregister(struct pkey_handler *handler);
/*
* invocation function for the registered pkey handlers
*/
const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen);
const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt);
void pkey_handler_put(const struct pkey_handler *handler);
int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
u32 *protkeytype);
int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
int pkey_handler_verify_key(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
u32 *keytype, u32 *keybitsize, u32 *flags);
int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns);
int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns);
/*
* Unconditional try to load all handler modules
*/
void pkey_handler_request_modules(void);
#endif /* _PKEY_BASE_H_ */
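
The handler contract distilled from this struct: the two classifier callbacks are mandatory and must not sleep (they run under rcu_read_lock()), all other ops are optional and are only invoked when non-NULL. A stripped-down, purely illustrative skeleton of a handler module (all "skel_" names invented), which the real CCA, EP11 and PCKMO handlers below follow in shape:

/*
 * Hypothetical handler skeleton, not part of the patch.
 */
#include <linux/module.h>
#include "pkey_base.h"

/* classifier callbacks run under rcu_read_lock() and must not sleep */
static bool skel_is_supported_key(const u8 *key, u32 keylen)
{
	return false;
}

static bool skel_is_supported_keytype(enum pkey_key_type kt)
{
	return false;
}

static struct pkey_handler skel_handler = {
	.module			= THIS_MODULE,
	.name			= "PKEY skeleton handler",
	.is_supported_key	= skel_is_supported_key,
	.is_supported_keytype	= skel_is_supported_keytype,
	/* optional ops (.key_to_protkey, .gen_key, ...) only as needed */
};

static int __init skel_init(void)
{
	return pkey_handler_register(&skel_handler);
}

static void __exit skel_exit(void)
{
	pkey_handler_unregister(&skel_handler);
}

module_init(skel_init);
module_exit(skel_exit);
MODULE_LICENSE("GPL");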

View file: drivers/s390/crypto/pkey_cca.c (new file)

@@ -0,0 +1,629 @@
// SPDX-License-Identifier: GPL-2.0
/*
* pkey cca specific code
*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "pkey_base.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key CCA handler");
#if IS_MODULE(CONFIG_PKEY_CCA)
static struct ap_device_id pkey_cca_card_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4 },
{ .dev_type = AP_DEVICE_TYPE_CEX5 },
{ .dev_type = AP_DEVICE_TYPE_CEX6 },
{ .dev_type = AP_DEVICE_TYPE_CEX7 },
{ .dev_type = AP_DEVICE_TYPE_CEX8 },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids);
#endif
/*
* Check key blob for known and supported CCA key.
*/
static bool is_cca_key(const u8 *key, u32 keylen)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
if (keylen < sizeof(*hdr))
return false;
switch (hdr->type) {
case TOKTYPE_CCA_INTERNAL:
switch (hdr->version) {
case TOKVER_CCA_AES:
case TOKVER_CCA_VLSC:
return true;
default:
return false;
}
case TOKTYPE_CCA_INTERNAL_PKA:
return true;
default:
return false;
}
}
static bool is_cca_keytype(enum pkey_key_type key_type)
{
switch (key_type) {
case PKEY_TYPE_CCA_DATA:
case PKEY_TYPE_CCA_CIPHER:
case PKEY_TYPE_CCA_ECC:
return true;
default:
return false;
}
}
static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (!flags)
flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP;
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
zcrypt_wait_api_operational();
if (hdr->type == TOKTYPE_CCA_INTERNAL) {
u64 cur_mkvp = 0, old_mkvp = 0;
int minhwtype = ZCRYPT_CEX3C;
if (hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
} else if (hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
minhwtype = ZCRYPT_CEX6;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp0;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp0;
} else {
/* unknown CCA internal token type */
return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
u64 cur_mkvp = 0, old_mkvp = 0;
if (t->secid == 0x20) {
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
} else {
/* unknown CCA internal PKA token type */
return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else {
PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
__func__, hdr->type, hdr->version);
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_apqns4type(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
u32 _nr_apqns, *_apqns = NULL;
int rc;
zcrypt_wait_api_operational();
if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
u64 cur_mkvp = 0, old_mkvp = 0;
int minhwtype = ZCRYPT_CEX3C;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = *((u64 *)alt_mkvp);
if (ktype == PKEY_TYPE_CCA_CIPHER)
minhwtype = ZCRYPT_CEX6;
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else if (ktype == PKEY_TYPE_CCA_ECC) {
u64 cur_mkvp = 0, old_mkvp = 0;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = *((u64 *)alt_mkvp);
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else {
PKEY_DBF_ERR("%s unknown/unsupported key type %d",
__func__, (int)ktype);
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
struct pkey_apqn *local_apqns = NULL;
int i, rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
if (cca_check_secaescipherkey(pkey_dbf_info,
3, key, 0, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
/* CCA ECC (private) key */
if (keylen < sizeof(struct eccprivkeytoken))
return -EINVAL;
if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else {
PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
__func__, hdr->type, hdr->version);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
rc = cca_sec2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
protkeylen, protkeytype);
} else {
rc = -EINVAL;
break;
}
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Generate CCA secure key.
* As of now only CCA AES Data or Cipher secure keys are
* supported.
* keytype is one of the PKEY_KEYTYPE_* constants,
* subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
* keybitsize is the bit size of the key (may be 0 for
* keytype PKEY_KEYTYPE_AES_*).
*/
static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
{
struct pkey_apqn *local_apqns = NULL;
int i, len, rc;
/* check keytype, subtype, keybitsize */
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
len = pkey_keytype_aes_to_size(keytype);
if (keybitsize && keybitsize != 8 * len) {
PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
keybitsize = 8 * len;
switch (subtype) {
case PKEY_TYPE_CCA_DATA:
case PKEY_TYPE_CCA_CIPHER:
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
__func__, subtype);
return -EINVAL;
}
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = cca_apqns4type(subtype, NULL, NULL, 0,
local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (subtype == PKEY_TYPE_CCA_CIPHER) {
rc = cca_gencipherkey(apqns[i].card, apqns[i].domain,
keybitsize, flags,
keybuf, keybuflen);
} else {
/* PKEY_TYPE_CCA_DATA */
rc = cca_genseckey(apqns[i].card, apqns[i].domain,
keybitsize, keybuf);
*keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
}
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Generate CCA secure key with given clear key value.
* As of now only CCA AES Data or Cipher secure keys are
* supported.
* keytype is one of the PKEY_KEYTYPE_* constants,
* subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
* keybitsize is the bit size of the key (may be 0 for
* keytype PKEY_KEYTYPE_AES_*).
*/
static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
{
struct pkey_apqn *local_apqns = NULL;
int i, len, rc;
/* check keytype, subtype, clrkeylen, keybitsize */
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
len = pkey_keytype_aes_to_size(keytype);
if (keybitsize && keybitsize != 8 * len) {
PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
keybitsize = 8 * len;
if (clrkeylen != len) {
PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
__func__, clrkeylen, len);
return -EINVAL;
}
switch (subtype) {
case PKEY_TYPE_CCA_DATA:
case PKEY_TYPE_CCA_CIPHER:
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
__func__, subtype);
return -EINVAL;
}
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = cca_apqns4type(subtype, NULL, NULL, 0,
local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (subtype == PKEY_TYPE_CCA_CIPHER) {
rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain,
keybitsize, flags, clrkey,
keybuf, keybuflen);
} else {
/* PKEY_TYPE_CCA_DATA */
rc = cca_clr2seckey(apqns[i].card, apqns[i].domain,
keybitsize, clrkey, keybuf);
*keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
}
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_verifykey(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
u32 *keytype, u32 *keybitsize, u32 *flags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 nr_apqns, *apqns = NULL;
int rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
zcrypt_wait_api_operational();
if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
if (rc)
goto out;
*keytype = PKEY_TYPE_CCA_DATA;
*keybitsize = t->bitsize;
rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX3C, AES_MK_SET,
t->mkvp, 0, 1);
if (!rc)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX3C, AES_MK_SET,
0, t->mkvp, 1);
if (!rc)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
if (rc)
goto out;
*card = ((struct pkey_apqn *)apqns)->card;
*dom = ((struct pkey_apqn *)apqns)->domain;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
if (rc)
goto out;
*keytype = PKEY_TYPE_CCA_CIPHER;
*keybitsize = PKEY_SIZE_UNKNOWN;
if (!t->plfver && t->wpllen == 512)
*keybitsize = PKEY_SIZE_AES_128;
else if (!t->plfver && t->wpllen == 576)
*keybitsize = PKEY_SIZE_AES_192;
else if (!t->plfver && t->wpllen == 640)
*keybitsize = PKEY_SIZE_AES_256;
rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX6, AES_MK_SET,
t->mkvp0, 0, 1);
if (!rc)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX6, AES_MK_SET,
0, t->mkvp0, 1);
if (!rc)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
if (rc)
goto out;
*card = ((struct pkey_apqn *)apqns)->card;
*dom = ((struct pkey_apqn *)apqns)->domain;
} else {
/* unknown/unsupported key blob */
rc = -EINVAL;
}
out:
kfree(apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* This function provides an alternate but usually slow way
* to convert a 'clear key token' with AES key material into
* a protected key. This is done via an intermediate step
* which creates a CCA AES DATA secure key first and then
* derives the protected key from this secure key.
*/
static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
u32 *protkeytype)
{
const struct keytoken_header *hdr = (const struct keytoken_header *)key;
const struct clearkeytoken *t = (const struct clearkeytoken *)key;
u32 tmplen, keysize = 0;
u8 *tmpbuf;
int i, rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_CLEAR_KEY)
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize || t->len != keysize)
return -EINVAL;
/* alloc tmp key buffer */
tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC);
if (!tmpbuf)
return -ENOMEM;
/* try two times in case of failure */
for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
tmplen = SECKEYBLOBSIZE;
rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA,
8 * keysize, 0, t->clearkey, t->len,
tmpbuf, &tmplen, NULL);
pr_debug("cca_clr2key()=%d\n", rc);
if (rc)
continue;
rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen,
protkey, protkeylen, protkeytype);
pr_debug("cca_key2protkey()=%d\n", rc);
}
kfree(tmpbuf);
pr_debug("rc=%d\n", rc);
return rc;
}
static struct pkey_handler cca_handler = {
.module = THIS_MODULE,
.name = "PKEY CCA handler",
.is_supported_key = is_cca_key,
.is_supported_keytype = is_cca_keytype,
.key_to_protkey = cca_key2protkey,
.slowpath_key_to_protkey = cca_slowpath_key2protkey,
.gen_key = cca_gen_key,
.clr_to_key = cca_clr2key,
.verify_key = cca_verifykey,
.apqns_for_key = cca_apqns4key,
.apqns_for_keytype = cca_apqns4type,
};
/*
* Module init
*/
static int __init pkey_cca_init(void)
{
/* register this module as pkey handler for all the cca stuff */
return pkey_handler_register(&cca_handler);
}
/*
* Module exit
*/
static void __exit pkey_cca_exit(void)
{
/* unregister this module as pkey handler */
pkey_handler_unregister(&cca_handler);
}
module_init(pkey_cca_init);
module_exit(pkey_cca_exit);
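
A pattern worth noting here, and repeated in the EP11 handler below: when the caller passes no APQN list (or only the 0xFFFF/0xFFFF wildcard entry), the handler builds its own candidate list of up to MAXAPQNSINLIST entries, and the for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) loops then try each APQN in turn until one succeeds, so a single offline card or queue does not fail the whole request.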

View file: drivers/s390/crypto/pkey_ep11.c (new file)

@@ -0,0 +1,578 @@
// SPDX-License-Identifier: GPL-2.0
/*
* pkey ep11 specific code
*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
#include "pkey_base.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key EP11 handler");
#if IS_MODULE(CONFIG_PKEY_EP11)
static struct ap_device_id pkey_ep11_card_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4 },
{ .dev_type = AP_DEVICE_TYPE_CEX5 },
{ .dev_type = AP_DEVICE_TYPE_CEX6 },
{ .dev_type = AP_DEVICE_TYPE_CEX7 },
{ .dev_type = AP_DEVICE_TYPE_CEX8 },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids);
#endif
/*
* Check key blob for known and supported EP11 key.
*/
static bool is_ep11_key(const u8 *key, u32 keylen)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
if (keylen < sizeof(*hdr))
return false;
switch (hdr->type) {
case TOKTYPE_NON_CCA:
switch (hdr->version) {
case TOKVER_EP11_AES:
case TOKVER_EP11_AES_WITH_HEADER:
case TOKVER_EP11_ECC_WITH_HEADER:
return true;
default:
return false;
}
default:
return false;
}
}
static bool is_ep11_keytype(enum pkey_key_type key_type)
{
switch (key_type) {
case PKEY_TYPE_EP11:
case PKEY_TYPE_EP11_AES:
case PKEY_TYPE_EP11_ECC:
return true;
default:
return false;
}
}
static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 _nr_apqns, *_apqns = NULL;
int rc;
if (!flags)
flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL;
zcrypt_wait_api_operational();
if (hdr->type == TOKTYPE_NON_CCA &&
(hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
struct ep11keyblob *kb = (struct ep11keyblob *)
(key + sizeof(struct ep11kblob_header));
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
if (rc)
goto out;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int minhwtype = 0, api = 0;
if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
if (rc)
goto out;
} else {
PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
__func__, hdr->type, hdr->version);
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_apqns4type(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
struct pkey_apqn *apqns, size_t *nr_apqns)
{
u32 _nr_apqns, *_apqns = NULL;
int rc;
zcrypt_wait_api_operational();
if (ktype == PKEY_TYPE_EP11 ||
ktype == PKEY_TYPE_EP11_AES ||
ktype == PKEY_TYPE_EP11_ECC) {
u8 *wkvp = NULL;
int api;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
wkvp = cur_mkvp;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, api, wkvp);
if (rc)
goto out;
} else {
PKEY_DBF_ERR("%s unknown/unsupported key type %d\n",
__func__, (int)ktype);
return -EINVAL;
}
if (apqns) {
if (*nr_apqns < _nr_apqns)
rc = -ENOSPC;
else
memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
*nr_apqns = _nr_apqns;
out:
kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
struct pkey_apqn *local_apqns = NULL;
int i, rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else {
PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
__func__, hdr->type, hdr->version);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
protkeylen, protkeytype);
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
protkeylen, protkeytype);
} else {
rc = -EINVAL;
break;
}
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Generate EP11 secure key.
* As of now only EP11 AES secure keys are supported.
* keytype is one of the PKEY_KEYTYPE_* constants,
* subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
* or 0 (results in subtype PKEY_TYPE_EP11_AES),
* keybitsize is the bit size of the key (may be 0 for
* keytype PKEY_KEYTYPE_AES_*).
*/
static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
{
struct pkey_apqn *local_apqns = NULL;
int i, len, rc;
/* check keytype, subtype, keybitsize */
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
len = pkey_keytype_aes_to_size(keytype);
if (keybitsize && keybitsize != 8 * len) {
PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
keybitsize = 8 * len;
switch (subtype) {
case PKEY_TYPE_EP11:
case PKEY_TYPE_EP11_AES:
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
__func__, subtype);
return -EINVAL;
}
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = ep11_apqns4type(subtype, NULL, NULL, 0,
local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
rc = ep11_genaeskey(apqns[i].card, apqns[i].domain,
keybitsize, flags,
keybuf, keybuflen, subtype);
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Generate EP11 secure key with given clear key value.
* As of now only EP11 AES secure keys are supported.
* keytype is one of the PKEY_KEYTYPE_* constants,
* subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
* or 0 (assumes PKEY_TYPE_EP11_AES then).
* keybitsize is the bit size of the key (may be 0 for
* keytype PKEY_KEYTYPE_AES_*).
*/
static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
{
struct pkey_apqn *local_apqns = NULL;
int i, len, rc;
/* check keytype, subtype, clrkeylen, keybitsize */
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
len = pkey_keytype_aes_to_size(keytype);
if (keybitsize && keybitsize != 8 * len) {
PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
__func__, keybitsize);
return -EINVAL;
}
keybitsize = 8 * len;
if (clrkeylen != len) {
PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
__func__, clrkeylen, len);
return -EINVAL;
}
switch (subtype) {
case PKEY_TYPE_EP11:
case PKEY_TYPE_EP11_AES:
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
__func__, subtype);
return -EINVAL;
}
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
zcrypt_wait_api_operational();
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
GFP_KERNEL);
if (!local_apqns)
return -ENOMEM;
rc = ep11_apqns4type(subtype, NULL, NULL, 0,
local_apqns, &nr_apqns);
if (rc)
goto out;
apqns = local_apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain,
keybitsize, flags, clrkey,
keybuf, keybuflen, subtype);
}
out:
kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_verifykey(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
u32 *keytype, u32 *keybitsize, u32 *flags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
u32 nr_apqns, *apqns = NULL;
int rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
zcrypt_wait_api_operational();
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int api;
rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
*keytype = PKEY_TYPE_EP11;
*keybitsize = kb->head.bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX7, api,
ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
*card = ((struct pkey_apqn *)apqns)->card;
*dom = ((struct pkey_apqn *)apqns)->domain;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
int api;
rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
3, key, keylen, 1);
if (rc)
goto out;
*keytype = PKEY_TYPE_EP11_AES;
*keybitsize = kh->bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX7, api,
ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
*card = ((struct pkey_apqn *)apqns)->card;
*dom = ((struct pkey_apqn *)apqns)->domain;
} else {
/* unknown/unsupported key blob */
rc = -EINVAL;
}
out:
kfree(apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* This function provides an alternate but usually slow way
* to convert a 'clear key token' with AES key material into
* a protected key. That is done via an intermediate step
* which creates an EP11 AES secure key first and then derives
* the protected key from this secure key.
*/
static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
u32 *protkeytype)
{
const struct keytoken_header *hdr = (const struct keytoken_header *)key;
const struct clearkeytoken *t = (const struct clearkeytoken *)key;
u32 tmplen, keysize = 0;
u8 *tmpbuf;
int i, rc;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_CLEAR_KEY)
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize || t->len != keysize)
return -EINVAL;
/* alloc tmp key buffer */
tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC);
if (!tmpbuf)
return -ENOMEM;
/* try two times in case of failure */
for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
tmplen = MAXEP11AESKEYBLOBSIZE;
rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11,
8 * keysize, 0, t->clearkey, t->len,
tmpbuf, &tmplen, NULL);
pr_debug("ep11_clr2key()=%d\n", rc);
if (rc)
continue;
rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen,
protkey, protkeylen, protkeytype);
pr_debug("ep11_key2protkey()=%d\n", rc);
}
kfree(tmpbuf);
pr_debug("rc=%d\n", rc);
return rc;
}
static struct pkey_handler ep11_handler = {
.module = THIS_MODULE,
.name = "PKEY EP11 handler",
.is_supported_key = is_ep11_key,
.is_supported_keytype = is_ep11_keytype,
.key_to_protkey = ep11_key2protkey,
.slowpath_key_to_protkey = ep11_slowpath_key2protkey,
.gen_key = ep11_gen_key,
.clr_to_key = ep11_clr2key,
.verify_key = ep11_verifykey,
.apqns_for_key = ep11_apqns4key,
.apqns_for_keytype = ep11_apqns4type,
};
/*
* Module init
*/
static int __init pkey_ep11_init(void)
{
/* register this module as pkey handler for all the ep11 stuff */
return pkey_handler_register(&ep11_handler);
}
/*
* Module exit
*/
static void __exit pkey_ep11_exit(void)
{
/* unregister this module as pkey handler */
pkey_handler_unregister(&ep11_handler);
}
module_init(pkey_ep11_init);
module_exit(pkey_ep11_exit);
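
Compared to the CCA handler, the EP11 paths differ mainly in card selection: blobs flagged EP11_BLOB_PKEY_EXTRACTABLE require at least a CEX7, and the minimum EP11 API level depends on the runtime environment, EP11_API_V6 when running as a secure execution guest and EP11_API_V4 otherwise (see the ap_is_se_guest() checks above).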

View file: drivers/s390/crypto/pkey_pckmo.c (new file)

@@ -0,0 +1,557 @@
// SPDX-License-Identifier: GPL-2.0
/*
* pkey pckmo specific code
*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#include <crypto/aes.h>
#include <linux/random.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "pkey_base.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key PCKMO handler");
/*
* Check key blob for a key known and supported by this handler.
*/
static bool is_pckmo_key(const u8 *key, u32 keylen)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
struct clearkeytoken *t = (struct clearkeytoken *)key;
if (keylen < sizeof(*hdr))
return false;
switch (hdr->type) {
case TOKTYPE_NON_CCA:
switch (hdr->version) {
case TOKVER_CLEAR_KEY:
switch (t->keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
case PKEY_KEYTYPE_ECC_P256:
case PKEY_KEYTYPE_ECC_P384:
case PKEY_KEYTYPE_ECC_P521:
case PKEY_KEYTYPE_ECC_ED25519:
case PKEY_KEYTYPE_ECC_ED448:
case PKEY_KEYTYPE_AES_XTS_128:
case PKEY_KEYTYPE_AES_XTS_256:
case PKEY_KEYTYPE_HMAC_512:
case PKEY_KEYTYPE_HMAC_1024:
return true;
default:
return false;
}
case TOKVER_PROTECTED_KEY:
return true;
default:
return false;
}
default:
return false;
}
}
static bool is_pckmo_keytype(enum pkey_key_type keytype)
{
switch (keytype) {
case PKEY_TYPE_PROTKEY:
return true;
default:
return false;
}
}
/*
* Create a protected key from a clear key value via PCKMO instruction.
*/
static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
/* mask of available pckmo subfunctions */
static cpacf_mask_t pckmo_functions;
int keysize, rc = -EINVAL;
u8 paramblock[160];
u32 pkeytype;
long fc;
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
/* 16 byte key, 32 byte aes wkvp, total 48 bytes */
keysize = 16;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_128_KEY;
break;
case PKEY_KEYTYPE_AES_192:
/* 24 byte key, 32 byte aes wkvp, total 56 bytes */
keysize = 24;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_192_KEY;
break;
case PKEY_KEYTYPE_AES_256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = keytype;
fc = CPACF_PCKMO_ENC_AES_256_KEY;
break;
case PKEY_KEYTYPE_ECC_P256:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
break;
case PKEY_KEYTYPE_ECC_P384:
/* 48 byte key, 32 byte aes wkvp, total 80 bytes */
keysize = 48;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
break;
case PKEY_KEYTYPE_ECC_P521:
/* 80 byte key, 32 byte aes wkvp, total 112 bytes */
keysize = 80;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
break;
case PKEY_KEYTYPE_ECC_ED25519:
/* 32 byte key, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
break;
case PKEY_KEYTYPE_ECC_ED448:
/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
keysize = 64;
pkeytype = PKEY_KEYTYPE_ECC;
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
case PKEY_KEYTYPE_AES_XTS_128:
/* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */
keysize = 32;
pkeytype = PKEY_KEYTYPE_AES_XTS_128;
fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
break;
case PKEY_KEYTYPE_AES_XTS_256:
/* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */
keysize = 64;
pkeytype = PKEY_KEYTYPE_AES_XTS_256;
fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
break;
case PKEY_KEYTYPE_HMAC_512:
/* 64 byte key, 32 byte aes wkvp, total 96 bytes */
keysize = 64;
pkeytype = PKEY_KEYTYPE_HMAC_512;
fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
break;
case PKEY_KEYTYPE_HMAC_1024:
/* 128 byte key, 32 byte aes wkvp, total 160 bytes */
keysize = 128;
pkeytype = PKEY_KEYTYPE_HMAC_1024;
fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
__func__, keytype);
goto out;
}
if (clrkeylen && clrkeylen < keysize) {
PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
__func__, clrkeylen, keysize);
goto out;
}
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
__func__, *protkeylen, keysize + AES_WK_VP_SIZE);
goto out;
}
/* Did we already check for PCKMO ? */
if (!pckmo_functions.bytes[0]) {
/* no, so check now */
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) {
PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__);
rc = -ENODEV;
goto out;
}
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
rc = -ENODEV;
goto out;
}
/* prepare param block */
memset(paramblock, 0, sizeof(paramblock));
memcpy(paramblock, clrkey, keysize);
/* call the pckmo instruction */
cpacf_pckmo(fc, paramblock);
/* copy created protected key to key buffer including the wkvp block */
*protkeylen = keysize + AES_WK_VP_SIZE;
memcpy(protkey, paramblock, *protkeylen);
*protkeytype = pkeytype;
rc = 0;
out:
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Verify a raw protected key blob.
* Currently only AES protected keys are supported.
*/
static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
u32 protkeytype)
{
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[MAXPROTKEYSIZE];
} param;
u8 null_msg[AES_BLOCK_SIZE];
u8 dest_buf[AES_BLOCK_SIZE];
unsigned int k, pkeylen;
unsigned long fc;
int rc = -EINVAL;
switch (protkeytype) {
case PKEY_KEYTYPE_AES_128:
pkeylen = 16 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_128;
break;
case PKEY_KEYTYPE_AES_192:
pkeylen = 24 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_192;
break;
case PKEY_KEYTYPE_AES_256:
pkeylen = 32 + AES_WK_VP_SIZE;
fc = CPACF_KMC_PAES_256;
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
protkeytype);
goto out;
}
if (protkeylen != pkeylen) {
PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
__func__, protkeylen, protkeytype);
goto out;
}
memset(null_msg, 0, sizeof(null_msg));
memset(param.iv, 0, sizeof(param.iv));
memcpy(param.key, protkey, protkeylen);
k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
sizeof(null_msg));
if (k != sizeof(null_msg)) {
PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
rc = -EKEYREJECTED;
goto out;
}
rc = 0;
out:
pr_debug("rc=%d\n", rc);
return rc;
}
static int pckmo_key2protkey(const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc = -EINVAL;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type != TOKTYPE_NON_CCA)
return -EINVAL;
switch (hdr->version) {
case TOKVER_PROTECTED_KEY: {
struct protkeytoken *t = (struct protkeytoken *)key;
if (keylen < sizeof(*t))
goto out;
switch (t->keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
if (keylen != sizeof(struct protaeskeytoken))
goto out;
rc = pckmo_verify_protkey(t->protkey, t->len,
t->keytype);
if (rc)
goto out;
break;
case PKEY_KEYTYPE_AES_XTS_128:
if (t->len != 64 || keylen != sizeof(*t) + t->len)
goto out;
break;
case PKEY_KEYTYPE_AES_XTS_256:
case PKEY_KEYTYPE_HMAC_512:
if (t->len != 96 || keylen != sizeof(*t) + t->len)
goto out;
break;
case PKEY_KEYTYPE_HMAC_1024:
if (t->len != 160 || keylen != sizeof(*t) + t->len)
goto out;
break;
default:
PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
__func__, t->keytype);
goto out;
}
memcpy(protkey, t->protkey, t->len);
*protkeylen = t->len;
*protkeytype = t->keytype;
break;
}
case TOKVER_CLEAR_KEY: {
struct clearkeytoken *t = (struct clearkeytoken *)key;
u32 keysize = 0;
if (keylen < sizeof(struct clearkeytoken) ||
keylen != sizeof(*t) + t->len)
goto out;
switch (t->keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
keysize = pkey_keytype_aes_to_size(t->keytype);
break;
case PKEY_KEYTYPE_ECC_P256:
keysize = 32;
break;
case PKEY_KEYTYPE_ECC_P384:
keysize = 48;
break;
case PKEY_KEYTYPE_ECC_P521:
keysize = 80;
break;
case PKEY_KEYTYPE_ECC_ED25519:
keysize = 32;
break;
case PKEY_KEYTYPE_ECC_ED448:
keysize = 64;
break;
case PKEY_KEYTYPE_AES_XTS_128:
keysize = 32;
break;
case PKEY_KEYTYPE_AES_XTS_256:
keysize = 64;
break;
case PKEY_KEYTYPE_HMAC_512:
keysize = 64;
break;
case PKEY_KEYTYPE_HMAC_1024:
keysize = 128;
break;
default:
break;
}
if (!keysize) {
PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
__func__, t->keytype);
goto out;
}
if (t->len != keysize) {
PKEY_DBF_ERR("%s clear key token: invalid key len %u\n",
__func__, t->len);
goto out;
}
rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len,
protkey, protkeylen, protkeytype);
break;
}
default:
PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
__func__, hdr->version);
break;
}
out:
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Generate a random protected key.
* Currently only the generation of AES protected keys
* is supported.
*/
static int pckmo_gen_protkey(u32 keytype, u32 subtype,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u8 clrkey[128];
int keysize;
int rc;
switch (keytype) {
case PKEY_KEYTYPE_AES_128:
case PKEY_KEYTYPE_AES_192:
case PKEY_KEYTYPE_AES_256:
keysize = pkey_keytype_aes_to_size(keytype);
break;
case PKEY_KEYTYPE_AES_XTS_128:
keysize = 32;
break;
case PKEY_KEYTYPE_AES_XTS_256:
case PKEY_KEYTYPE_HMAC_512:
keysize = 64;
break;
case PKEY_KEYTYPE_HMAC_1024:
keysize = 128;
break;
default:
PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
__func__, keytype);
return -EINVAL;
}
if (subtype != PKEY_TYPE_PROTKEY) {
PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
__func__, subtype);
return -EINVAL;
}
/* generate a dummy random clear key */
get_random_bytes(clrkey, keysize);
/* convert it to a dummy protected key */
rc = pckmo_clr2protkey(keytype, clrkey, keysize,
protkey, protkeylen, protkeytype);
if (rc)
goto out;
/* replace the key part of the protected key with random bytes */
get_random_bytes(protkey, keysize);
out:
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Verify a protected key token blob.
* Currently only AES protected keys are supported.
*/
static int pckmo_verify_key(const u8 *key, u32 keylen)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc = -EINVAL;
if (keylen < sizeof(*hdr))
return -EINVAL;
if (hdr->type != TOKTYPE_NON_CCA)
return -EINVAL;
switch (hdr->version) {
case TOKVER_PROTECTED_KEY: {
struct protaeskeytoken *t;
if (keylen != sizeof(struct protaeskeytoken))
goto out;
t = (struct protaeskeytoken *)key;
rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
break;
}
default:
PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
__func__, hdr->version);
break;
}
out:
pr_debug("rc=%d\n", rc);
return rc;
}
/*
* Wrapper functions used for the pkey handler struct
*/
static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
size_t _nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *keyinfo)
{
return pckmo_key2protkey(key, keylen,
protkey, protkeylen, keyinfo);
}
static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
u32 keytype, u32 keysubtype,
u32 _keybitsize, u32 _flags,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
{
return pckmo_gen_protkey(keytype, keysubtype,
keybuf, keybuflen, keyinfo);
}
static int pkey_pckmo_verifykey(const u8 *key, u32 keylen,
u16 *_card, u16 *_dom,
u32 *_keytype, u32 *_keybitsize, u32 *_flags)
{
return pckmo_verify_key(key, keylen);
}
static struct pkey_handler pckmo_handler = {
.module = THIS_MODULE,
.name = "PKEY PCKMO handler",
.is_supported_key = is_pckmo_key,
.is_supported_keytype = is_pckmo_keytype,
.key_to_protkey = pkey_pckmo_key2protkey,
.gen_key = pkey_pckmo_gen_key,
.verify_key = pkey_pckmo_verifykey,
};
/*
* Module init
*/
static int __init pkey_pckmo_init(void)
{
cpacf_mask_t func_mask;
/*
* The pckmo instruction should be available - even if we don't
* actually invoke it. This instruction comes with MSA 3 which
* is also the minimum level for the kmc instructions which
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &func_mask))
return -ENODEV;
/* register this module as pkey handler for all the pckmo stuff */
return pkey_handler_register(&pckmo_handler);
}
/*
* Module exit
*/
static void __exit pkey_pckmo_exit(void)
{
/* unregister this module as pkey handler */
pkey_handler_unregister(&pckmo_handler);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init);
module_exit(pkey_pckmo_exit);
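
Unlike the CCA and EP11 handlers, the PCKMO handler needs no crypto card at all: conversion happens directly on the CPU via the CPACF PCKMO instruction. That is why the APQN parameters of the wrapper functions are deliberately unused (hence the leading-underscore names), why there is no AP device table in this file, and why the module is gated on S390_CPU_FEATURE_MSA via module_cpu_feature_match() rather than a plain module_init().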

View file: drivers/s390/crypto/pkey_sysfs.c (new file)

@@ -0,0 +1,648 @@
// SPDX-License-Identifier: GPL-2.0
/*
* pkey module sysfs related functions
*
* Copyright IBM Corp. 2024
*/
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/sysfs.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
#include "pkey_base.h"
/*
* Wrapper around pkey_handler_gen_key() which deals with the
* ENODEV return code by triggering a pkey handler module load
* and retrying once.
*/
static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
{
int rc;
rc = pkey_handler_gen_key(NULL, 0,
keytype, keysubtype,
keybitsize, flags,
keybuf, keybuflen, keyinfo);
if (rc == -ENODEV) {
pkey_handler_request_modules();
rc = pkey_handler_gen_key(NULL, 0,
keytype, keysubtype,
keybitsize, flags,
keybuf, keybuflen, keyinfo);
}
return rc;
}
/*
* Sysfs attribute read function for all protected key binary attributes.
* The implementation cannot deal with partial reads, because a new random
* protected key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
struct protaeskeytoken protkeytoken;
struct pkey_protkey protkey;
int rc;
if (off != 0 || count < sizeof(protkeytoken))
return -EINVAL;
if (is_xts)
if (count < 2 * sizeof(protkeytoken))
return -EINVAL;
memset(&protkeytoken, 0, sizeof(protkeytoken));
protkeytoken.type = TOKTYPE_NON_CCA;
protkeytoken.version = TOKVER_PROTECTED_KEY;
protkeytoken.keytype = keytype;
protkey.len = sizeof(protkey.protkey);
rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
protkey.protkey, &protkey.len,
&protkey.type);
if (rc)
return rc;
protkeytoken.len = protkey.len;
memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
memcpy(buf, &protkeytoken, sizeof(protkeytoken));
if (is_xts) {
/* xts needs a second protected key, reuse protkey struct */
protkey.len = sizeof(protkey.protkey);
rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
protkey.protkey, &protkey.len,
&protkey.type);
if (rc)
return rc;
protkeytoken.len = protkey.len;
memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
memcpy(buf + sizeof(protkeytoken), &protkeytoken,
sizeof(protkeytoken));
return 2 * sizeof(protkeytoken);
}
return sizeof(protkeytoken);
}
/*
* Sysfs attribute read function for the AES XTS prot key binary attributes.
* The implementation cannot deal with partial reads, because a new random
* protected key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf,
loff_t off, size_t count)
{
struct protkeytoken *t = (struct protkeytoken *)buf;
u32 protlen, prottype;
int rc;
switch (keytype) {
case PKEY_KEYTYPE_AES_XTS_128:
protlen = 64;
break;
case PKEY_KEYTYPE_AES_XTS_256:
protlen = 96;
break;
default:
return -EINVAL;
}
if (off != 0 || count < sizeof(*t) + protlen)
return -EINVAL;
memset(t, 0, sizeof(*t) + protlen);
t->type = TOKTYPE_NON_CCA;
t->version = TOKVER_PROTECTED_KEY;
t->keytype = keytype;
rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
t->protkey, &protlen, &prottype);
if (rc)
return rc;
t->len = protlen;
return sizeof(*t) + protlen;
}
/*
* Sysfs attribute read function for the HMAC prot key binary attributes.
* The implementation cannot deal with partial reads, because a new random
* protected key blob is generated with each read. In case of partial reads
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
*/
static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
loff_t off, size_t count)
{
struct protkeytoken *t = (struct protkeytoken *)buf;
u32 protlen, prottype;
int rc;
switch (keytype) {
case PKEY_KEYTYPE_HMAC_512:
protlen = 96;
break;
case PKEY_KEYTYPE_HMAC_1024:
protlen = 160;
break;
default:
return -EINVAL;
}
if (off != 0 || count < sizeof(*t) + protlen)
return -EINVAL;
memset(t, 0, sizeof(*t) + protlen);
t->type = TOKTYPE_NON_CCA;
t->version = TOKVER_PROTECTED_KEY;
t->keytype = keytype;
rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
t->protkey, &protlen, &prottype);
if (rc)
return rc;
t->len = protlen;
return sizeof(*t) + protlen;
}
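
The protected key lengths hard-coded above line up with the CPACF
protected key layout, assuming the usual wrapped key material followed by
a 32-byte wrapping key verification pattern (an inference, not spelled
out in the patch):

/*
 *   AES-XTS-128: 2 * 16 bytes key material + 32 byte WKVP =  64
 *   AES-XTS-256: 2 * 32 bytes key material + 32 byte WKVP =  96
 *   HMAC-512:        64 bytes key material + 32 byte WKVP =  96
 *   HMAC-1024:      128 bytes key material + 32 byte WKVP = 160
 */
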
static ssize_t protkey_aes_128_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
                                          off, count);
}

static ssize_t protkey_aes_192_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
                                          off, count);
}

static ssize_t protkey_aes_256_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
                                          off, count);
}

static ssize_t protkey_aes_128_xts_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
                                          off, count);
}

static ssize_t protkey_aes_256_xts_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
                                          off, count);
}

static ssize_t protkey_aes_xts_128_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128,
                                              buf, off, count);
}

static ssize_t protkey_aes_xts_256_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256,
                                              buf, off, count);
}

static ssize_t protkey_hmac_512_read(struct file *filp,
                                     struct kobject *kobj,
                                     struct bin_attribute *attr,
                                     char *buf, loff_t off,
                                     size_t count)
{
        return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512,
                                           buf, off, count);
}

static ssize_t protkey_hmac_1024_read(struct file *filp,
                                      struct kobject *kobj,
                                      struct bin_attribute *attr,
                                      char *buf, loff_t off,
                                      size_t count)
{
        return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024,
                                           buf, off, count);
}
static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);

static struct bin_attribute *protkey_attrs[] = {
        &bin_attr_protkey_aes_128,
        &bin_attr_protkey_aes_192,
        &bin_attr_protkey_aes_256,
        &bin_attr_protkey_aes_128_xts,
        &bin_attr_protkey_aes_256_xts,
        &bin_attr_protkey_aes_xts_128,
        &bin_attr_protkey_aes_xts_256,
        &bin_attr_protkey_hmac_512,
        &bin_attr_protkey_hmac_1024,
        NULL
};

static struct attribute_group protkey_attr_group = {
        .name      = "protkey",
        .bin_attrs = protkey_attrs,
};
/*
 * Sysfs attribute read function for all secure key ccadata binary attributes.
 * The implementation cannot deal with partial reads, because a new random
 * secure key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
                                          loff_t off, size_t count)
{
        struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
        u32 buflen;
        int rc;

        if (off != 0 || count < sizeof(struct secaeskeytoken))
                return -EINVAL;
        if (is_xts)
                if (count < 2 * sizeof(struct secaeskeytoken))
                        return -EINVAL;

        buflen = sizeof(seckey->seckey);
        rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
                                      seckey->seckey, &buflen, NULL);
        if (rc)
                return rc;

        if (is_xts) {
                seckey++;
                buflen = sizeof(seckey->seckey);
                rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
                                              seckey->seckey, &buflen, NULL);
                if (rc)
                        return rc;

                return 2 * sizeof(struct secaeskeytoken);
        }

        return sizeof(struct secaeskeytoken);
}
static ssize_t ccadata_aes_128_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
                                          off, count);
}

static ssize_t ccadata_aes_192_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
                                          off, count);
}

static ssize_t ccadata_aes_256_read(struct file *filp,
                                    struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t off,
                                    size_t count)
{
        return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
                                          off, count);
}

static ssize_t ccadata_aes_128_xts_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
                                          off, count);
}

static ssize_t ccadata_aes_256_xts_read(struct file *filp,
                                        struct kobject *kobj,
                                        struct bin_attribute *attr,
                                        char *buf, loff_t off,
                                        size_t count)
{
        return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
                                          off, count);
}

static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));

static struct bin_attribute *ccadata_attrs[] = {
        &bin_attr_ccadata_aes_128,
        &bin_attr_ccadata_aes_192,
        &bin_attr_ccadata_aes_256,
        &bin_attr_ccadata_aes_128_xts,
        &bin_attr_ccadata_aes_256_xts,
        NULL
};

static struct attribute_group ccadata_attr_group = {
        .name      = "ccadata",
        .bin_attrs = ccadata_attrs,
};
#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)

/*
 * Sysfs attribute read function for all secure key ccacipher binary attributes.
 * The implementation cannot deal with partial reads, because a new random
 * secure key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
                                            bool is_xts, char *buf, loff_t off,
                                            size_t count)
{
        u32 keysize = CCACIPHERTOKENSIZE;
        int rc;

        if (off != 0 || count < CCACIPHERTOKENSIZE)
                return -EINVAL;
        if (is_xts)
                if (count < 2 * CCACIPHERTOKENSIZE)
                        return -EINVAL;

        memset(buf, 0, is_xts ? 2 * keysize : keysize);

        rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
                                      PKEY_TYPE_CCA_CIPHER, keybits, 0,
                                      buf, &keysize, NULL);
        if (rc)
                return rc;

        if (is_xts) {
                keysize = CCACIPHERTOKENSIZE;
                buf += CCACIPHERTOKENSIZE;
                rc = sys_pkey_handler_gen_key(
                        pkey_aes_bitsize_to_keytype(keybits),
                        PKEY_TYPE_CCA_CIPHER, keybits, 0,
                        buf, &keysize, NULL);
                if (rc)
                        return rc;

                return 2 * CCACIPHERTOKENSIZE;
        }

        return CCACIPHERTOKENSIZE;
}
static ssize_t ccacipher_aes_128_read(struct file *filp,
                                      struct kobject *kobj,
                                      struct bin_attribute *attr,
                                      char *buf, loff_t off,
                                      size_t count)
{
        return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
                                            off, count);
}

static ssize_t ccacipher_aes_192_read(struct file *filp,
                                      struct kobject *kobj,
                                      struct bin_attribute *attr,
                                      char *buf, loff_t off,
                                      size_t count)
{
        return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
                                            off, count);
}

static ssize_t ccacipher_aes_256_read(struct file *filp,
                                      struct kobject *kobj,
                                      struct bin_attribute *attr,
                                      char *buf, loff_t off,
                                      size_t count)
{
        return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
                                            off, count);
}

static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
                                          struct kobject *kobj,
                                          struct bin_attribute *attr,
                                          char *buf, loff_t off,
                                          size_t count)
{
        return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
                                            off, count);
}

static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
                                          struct kobject *kobj,
                                          struct bin_attribute *attr,
                                          char *buf, loff_t off,
                                          size_t count)
{
        return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
                                            off, count);
}

static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);

static struct bin_attribute *ccacipher_attrs[] = {
        &bin_attr_ccacipher_aes_128,
        &bin_attr_ccacipher_aes_192,
        &bin_attr_ccacipher_aes_256,
        &bin_attr_ccacipher_aes_128_xts,
        &bin_attr_ccacipher_aes_256_xts,
        NULL
};

static struct attribute_group ccacipher_attr_group = {
        .name      = "ccacipher",
        .bin_attrs = ccacipher_attrs,
};
/*
 * Sysfs attribute read function for all ep11 aes key binary attributes.
 * The implementation cannot deal with partial reads, because a new random
 * secure key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 * This function and the sysfs attributes using it provide EP11 key blobs
 * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
 * 336 bytes.
 */
static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
                                       bool is_xts, char *buf, loff_t off,
                                       size_t count)
{
        u32 keysize = MAXEP11AESKEYBLOBSIZE;
        int rc;

        if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
                return -EINVAL;
        if (is_xts)
                if (count < 2 * MAXEP11AESKEYBLOBSIZE)
                        return -EINVAL;

        memset(buf, 0, is_xts ? 2 * keysize : keysize);

        rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
                                      PKEY_TYPE_EP11_AES, keybits, 0,
                                      buf, &keysize, NULL);
        if (rc)
                return rc;

        if (is_xts) {
                keysize = MAXEP11AESKEYBLOBSIZE;
                buf += MAXEP11AESKEYBLOBSIZE;
                rc = sys_pkey_handler_gen_key(
                        pkey_aes_bitsize_to_keytype(keybits),
                        PKEY_TYPE_EP11_AES, keybits, 0,
                        buf, &keysize, NULL);
                if (rc)
                        return rc;

                return 2 * MAXEP11AESKEYBLOBSIZE;
        }

        return MAXEP11AESKEYBLOBSIZE;
}
static ssize_t ep11_aes_128_read(struct file *filp,
                                 struct kobject *kobj,
                                 struct bin_attribute *attr,
                                 char *buf, loff_t off,
                                 size_t count)
{
        return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
                                       off, count);
}

static ssize_t ep11_aes_192_read(struct file *filp,
                                 struct kobject *kobj,
                                 struct bin_attribute *attr,
                                 char *buf, loff_t off,
                                 size_t count)
{
        return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
                                       off, count);
}

static ssize_t ep11_aes_256_read(struct file *filp,
                                 struct kobject *kobj,
                                 struct bin_attribute *attr,
                                 char *buf, loff_t off,
                                 size_t count)
{
        return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
                                       off, count);
}

static ssize_t ep11_aes_128_xts_read(struct file *filp,
                                     struct kobject *kobj,
                                     struct bin_attribute *attr,
                                     char *buf, loff_t off,
                                     size_t count)
{
        return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
                                       off, count);
}

static ssize_t ep11_aes_256_xts_read(struct file *filp,
                                     struct kobject *kobj,
                                     struct bin_attribute *attr,
                                     char *buf, loff_t off,
                                     size_t count)
{
        return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
                                       off, count);
}

static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);

static struct bin_attribute *ep11_attrs[] = {
        &bin_attr_ep11_aes_128,
        &bin_attr_ep11_aes_192,
        &bin_attr_ep11_aes_256,
        &bin_attr_ep11_aes_128_xts,
        &bin_attr_ep11_aes_256_xts,
        NULL
};

static struct attribute_group ep11_attr_group = {
        .name      = "ep11",
        .bin_attrs = ep11_attrs,
};

const struct attribute_group *pkey_attr_groups[] = {
        &protkey_attr_group,
        &ccadata_attr_group,
        &ccacipher_attr_group,
        &ep11_attr_group,
        NULL,
};
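
The NULL-terminated pkey_attr_groups array bundles the four sysfs
directories (protkey, ccadata, ccacipher, ep11) into a single handle. A
plausible way it gets wired up, assuming the pkey misc device
registration lives elsewhere in the series (the struct below is an
illustrative sketch, not a quote from the patch), is via the miscdevice
.groups pointer, so the directories appear underneath the pkey device
node when the device is registered:

static struct miscdevice pkey_dev = {
        .name   = "pkey",
        .minor  = MISC_DYNAMIC_MINOR,
        .groups = pkey_attr_groups,     /* sysfs dirs created on register */
};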


@@ -715,7 +715,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	spin_unlock(&zcrypt_list_lock);
 	if (!pref_zq) {
-		pr_debug("%s no matching queue found => ENODEV\n", __func__);
+		pr_debug("no matching queue found => ENODEV\n");
 		rc = -ENODEV;
 		goto out;
 	}
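
Every hunk in this file follows the same pattern: the explicit "%s",
__func__ pair is dropped from the pr_debug() calls. Two standard kernel
mechanisms keep that information recoverable without spelling it out at
each call site; which one this series relies on is not visible in the
hunks, so the sketch below is a generic illustration, not a quote from
the patch. Dynamic debug can decorate messages at runtime (its "f" flag
prepends the function name), and a file-local pr_fmt() can bake the name
in at compile time:

/* Generic sketch: with this pr_fmt() in place before the first include,
 * every pr_debug() in the file is automatically prefixed with module and
 * function name, so passing __func__ by hand becomes redundant.
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/printk.h>

static int example(int rc)
{
        pr_debug("rc=%d\n", rc);        /* logs "<module>: example: rc=..." */
        return rc;
}
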
@@ -819,7 +819,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	spin_unlock(&zcrypt_list_lock);
 	if (!pref_zq) {
-		pr_debug("%s no matching queue found => ENODEV\n", __func__);
+		pr_debug("no matching queue found => ENODEV\n");
 		rc = -ENODEV;
 		goto out;
 	}
@@ -940,8 +940,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
 	spin_unlock(&zcrypt_list_lock);
 	if (!pref_zq) {
-		pr_debug("%s no match for address %02x.%04x => ENODEV\n",
-			 __func__, xcrb->user_defined, *domain);
+		pr_debug("no match for address %02x.%04x => ENODEV\n",
+			 xcrb->user_defined, *domain);
 		rc = -ENODEV;
 		goto out;
 	}
@@ -991,7 +991,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc)
-		pr_debug("%s rc=%d\n", __func__, rc);
+		pr_debug("rc=%d\n", rc);
 	return rc;
 }
@@ -1138,15 +1138,13 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
 	if (!pref_zq) {
 		if (targets && target_num == 1) {
-			pr_debug("%s no match for address %02x.%04x => ENODEV\n",
-				 __func__, (int)targets->ap_id,
-				 (int)targets->dom_id);
+			pr_debug("no match for address %02x.%04x => ENODEV\n",
+				 (int)targets->ap_id, (int)targets->dom_id);
 		} else if (targets) {
-			pr_debug("%s no match for %d target addrs => ENODEV\n",
-				 __func__, (int)target_num);
+			pr_debug("no match for %d target addrs => ENODEV\n",
+				 (int)target_num);
 		} else {
-			pr_debug("%s no match for address ff.ffff => ENODEV\n",
-				 __func__);
+			pr_debug("no match for address ff.ffff => ENODEV\n");
 		}
 		rc = -ENODEV;
 		goto out_free;
@@ -1195,7 +1193,7 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc)
-		pr_debug("%s rc=%d\n", __func__, rc);
+		pr_debug("rc=%d\n", rc);
 	return rc;
 }
@@ -1247,7 +1245,7 @@ static long zcrypt_rng(char *buffer)
 	spin_unlock(&zcrypt_list_lock);
 	if (!pref_zq) {
-		pr_debug("%s no matching queue found => ENODEV\n", __func__);
+		pr_debug("no matching queue found => ENODEV\n");
 		rc = -ENODEV;
 		goto out;
 	}
@@ -2037,8 +2035,7 @@ int zcrypt_wait_api_operational(void)
 			break;
 		default:
 			/* other failure */
-			pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
-				 __func__, rc);
+			pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
 			break;
 		}
 		break;


@@ -172,7 +172,7 @@ EXPORT_SYMBOL(cca_check_secaescipherkey);
  * key token. Returns 0 on success or errno value on failure.
  */
 int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
-			     const u8 *token, size_t keysize,
+			     const u8 *token, u32 keysize,
 			     int checkcpacfexport)
 {
 	struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
@@ -187,7 +187,7 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
 	}
 	if (t->len > keysize) {
 		if (dbg)
-			DBF("%s token check failed, len %d > keysize %zu\n",
+			DBF("%s token check failed, len %d > keysize %u\n",
 			    __func__, (int)t->len, keysize);
 		return -EINVAL;
 	}
@@ -737,7 +737,7 @@ static const u8 aes_cipher_key_skeleton[] = {
  * Generate (random) CCA AES CIPHER secure key.
 */
 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
-		     u8 *keybuf, size_t *keybufsize)
+		     u8 *keybuf, u32 *keybufsize)
 {
 	int rc;
 	u8 *mem, *ptr;
@@ -1085,7 +1085,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
  * Build CCA AES CIPHER secure key with a given clear key value.
 */
 int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
-		      const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+		      const u8 *clrkey, u8 *keybuf, u32 *keybufsize)
 {
 	int rc;
 	u8 *token;
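
The parameter retyping from size_t to u32 explains the paired format
string fixes in the same hunks: "%zu" is the printf length modifier for
size_t, while a u32 takes plain "%u", and gcc's -Wformat checking warns
on a mismatch. A standalone illustration (generic C, not taken from the
patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        size_t old_keysize = 64;        /* size_t is printed with %zu */
        uint32_t new_keysize = 64;      /* u32 is printed with %u */

        printf("len %zu\n", old_keysize);
        printf("len %u\n", new_keysize); /* %zu here would trip -Wformat */
        return 0;
}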


@@ -153,7 +153,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
  * key token. Returns 0 on success or errno value on failure.
  */
 int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
-			     const u8 *token, size_t keysize,
+			     const u8 *token, u32 keysize,
 			     int checkcpacfexport);
 
 /*
@@ -178,7 +178,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
  * Generate (random) CCA AES CIPHER secure key.
  */
 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
-		     u8 *keybuf, size_t *keybufsize);
+		     u8 *keybuf, u32 *keybufsize);
 
 /*
  * Derive proteced key from CCA AES cipher secure key.
@@ -190,7 +190,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
  * Build CCA AES CIPHER secure key with a given clear key value.
  */
 int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
-		      const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+		      const u8 *clrkey, u8 *keybuf, u32 *keybufsize);
 
 /*
  * Derive proteced key from CCA ECC secure private key.


@@ -203,7 +203,7 @@ static int ep11_kb_decode(const u8 *kb, size_t kblen,
  * For valid ep11 keyblobs, returns a reference to the wrappingkey verification
  * pattern. Otherwise NULL.
  */
-const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
+const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen)
 {
 	struct ep11keyblob *kb;
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ep11_kb_wkvp);
  * Simple check if the key blob is a valid EP11 AES key blob with header.
  */
 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
-				const u8 *key, size_t keylen, int checkcpacfexp)
+				const u8 *key, u32 keylen, int checkcpacfexp)
 {
 	struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
 	struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -225,7 +225,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
 
 	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
-		DBF("%s key check failed, keylen %zu < %zu\n",
+		DBF("%s key check failed, keylen %u < %zu\n",
 		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
 		return -EINVAL;
 	}
@@ -250,7 +250,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
 	}
 	if (hdr->len > keylen) {
 		if (dbg)
-			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			DBF("%s key check failed, header len %d keylen %u mismatch\n",
 			    __func__, (int)hdr->len, keylen);
 		return -EINVAL;
 	}
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
  * Simple check if the key blob is a valid EP11 ECC key blob with header.
 */
 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
-				const u8 *key, size_t keylen, int checkcpacfexp)
+				const u8 *key, u32 keylen, int checkcpacfexp)
 {
 	struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
 	struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -292,7 +292,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
 
 	if (keylen < sizeof(*hdr) + sizeof(*kb)) {
-		DBF("%s key check failed, keylen %zu < %zu\n",
+		DBF("%s key check failed, keylen %u < %zu\n",
 		    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
 		return -EINVAL;
 	}
@@ -317,7 +317,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
 	}
 	if (hdr->len > keylen) {
 		if (dbg)
-			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			DBF("%s key check failed, header len %d keylen %u mismatch\n",
 			    __func__, (int)hdr->len, keylen);
 		return -EINVAL;
 	}
@@ -352,14 +352,14 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
  * the header in the session field (old style EP11 AES key).
 */
 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
-		       const u8 *key, size_t keylen, int checkcpacfexp)
+		       const u8 *key, u32 keylen, int checkcpacfexp)
 {
 	struct ep11keyblob *kb = (struct ep11keyblob *)key;
 
 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
 
 	if (keylen < sizeof(*kb)) {
-		DBF("%s key check failed, keylen %zu < %zu\n",
+		DBF("%s key check failed, keylen %u < %zu\n",
 		    __func__, keylen, sizeof(*kb));
 		return -EINVAL;
 	}
@@ -378,7 +378,7 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
 	}
 	if (kb->head.len > keylen) {
 		if (dbg)
-			DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+			DBF("%s key check failed, header len %d keylen %u mismatch\n",
 			    __func__, (int)kb->head.len, keylen);
 		return -EINVAL;
 	}
@@ -932,7 +932,7 @@ static int _ep11_genaeskey(u16 card, u16 domain,
 }
 
 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
-		   u8 *keybuf, size_t *keybufsize, u32 keybufver)
+		   u8 *keybuf, u32 *keybufsize, u32 keybufver)
 {
 	struct ep11kblob_header *hdr;
 	size_t hdr_size, pl_size;
@@ -1256,7 +1256,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
 			  const u8 *enckey, size_t enckeysize,
 			  u32 mech, const u8 *iv,
 			  u32 keybitsize, u32 keygenflags,
-			  u8 *keybuf, size_t *keybufsize,
+			  u8 *keybuf, u32 *keybufsize,
 			  u8 keybufver)
 {
 	struct ep11kblob_header *hdr;
@@ -1412,7 +1412,7 @@ static int _ep11_wrapkey(u16 card, u16 domain,
 }
 
 int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+		     const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
 		     u32 keytype)
 {
 	int rc;
@@ -1471,7 +1471,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
 EXPORT_SYMBOL(ep11_clr2keyblob);
 
 int ep11_kblob2protkey(u16 card, u16 dom,
-		       const u8 *keyblob, size_t keybloblen,
+		       const u8 *keyblob, u32 keybloblen,
 		       u8 *protkey, u32 *protkeylen, u32 *protkeytype)
 {
 	struct ep11kblob_header *hdr;


@@ -54,7 +54,7 @@ static inline bool is_ep11_keyblob(const u8 *key)
  * For valid ep11 keyblobs, returns a reference to the wrappingkey verification
  * pattern. Otherwise NULL.
  */
-const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
+const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen);
 
 /*
  * Simple check if the key blob is a valid EP11 AES key blob with header.
@@ -63,7 +63,7 @@ const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
  * Returns 0 on success or errno value on failure.
 */
 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
-				const u8 *key, size_t keylen, int checkcpacfexp);
+				const u8 *key, u32 keylen, int checkcpacfexp);
 
 /*
  * Simple check if the key blob is a valid EP11 ECC key blob with header.
@@ -72,7 +72,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
  * Returns 0 on success or errno value on failure.
 */
 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
-				const u8 *key, size_t keylen, int checkcpacfexp);
+				const u8 *key, u32 keylen, int checkcpacfexp);
 
 /*
  * Simple check if the key blob is a valid EP11 AES key blob with
@@ -82,7 +82,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
  * Returns 0 on success or errno value on failure.
 */
 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
-		       const u8 *key, size_t keylen, int checkcpacfexp);
+		       const u8 *key, u32 keylen, int checkcpacfexp);
 
 /* EP11 card info struct */
 struct ep11_card_info {
@@ -115,13 +115,13 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
  * Generate (random) EP11 AES secure key.
 */
 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
-		   u8 *keybuf, size_t *keybufsize, u32 keybufver);
+		   u8 *keybuf, u32 *keybufsize, u32 keybufver);
 
 /*
  * Generate EP11 AES secure key with given clear key value.
 */
 int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
-		     const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+		     const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
 		     u32 keytype);
 
 /*
@@ -149,7 +149,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
 /*
  * Derive proteced key from EP11 key blob (AES and ECC keys).
 */
-int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen,
 		       u8 *protkey, u32 *protkeylen, u32 *protkeytype);
 
 void zcrypt_ep11misc_exit(void);


@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
 	len = t80h->len;
 	if (len > reply->bufsize || len > msg->bufsize ||
 	    len != reply->len) {
-		pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
+		pr_debug("len mismatch => EMSGSIZE\n");
 		msg->rc = -EMSGSIZE;
 		goto out;
 	}
@@ -487,8 +487,8 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
 out:
 	ap_msg->private = NULL;
 	if (rc)
-		pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
-			 __func__, AP_QID_CARD(zq->queue->qid),
+		pr_debug("send me cprb at dev=%02x.%04x rc=%d\n",
+			 AP_QID_CARD(zq->queue->qid),
 			 AP_QID_QUEUE(zq->queue->qid), rc);
 	return rc;
 }
@@ -537,8 +537,8 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
 out:
 	ap_msg->private = NULL;
 	if (rc)
-		pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
-			 __func__, AP_QID_CARD(zq->queue->qid),
+		pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n",
+			 AP_QID_CARD(zq->queue->qid),
 			 AP_QID_QUEUE(zq->queue->qid), rc);
 	return rc;
 }


@@ -437,9 +437,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
 		ap_msg->flags |= AP_MSG_FLAG_ADMIN;
 		break;
 	default:
-		pr_debug("%s unknown CPRB minor version '%c%c'\n",
-			 __func__, msg->cprbx.func_id[0],
-			 msg->cprbx.func_id[1]);
+		pr_debug("unknown CPRB minor version '%c%c'\n",
+			 msg->cprbx.func_id[0], msg->cprbx.func_id[1]);
 	}
 
 	/* copy data block */
@@ -629,9 +628,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
 	/* Copy CPRB to user */
 	if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
-		pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
-			 __func__, xcrb->reply_control_blk_length,
-			 msg->fmt2.count1);
+		pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n",
+			 xcrb->reply_control_blk_length, msg->fmt2.count1);
 		return -EMSGSIZE;
 	}
 	if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +640,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
 	/* Copy data buffer to user */
 	if (msg->fmt2.count2) {
 		if (xcrb->reply_data_length < msg->fmt2.count2) {
-			pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
-				 __func__, xcrb->reply_data_length,
-				 msg->fmt2.count2);
+			pr_debug("reply_data_length %u < required %u => EMSGSIZE\n",
+				 xcrb->reply_data_length, msg->fmt2.count2);
 			return -EMSGSIZE;
 		}
 		if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +670,8 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
 	char *data = reply->msg;
 
 	if (xcrb->resp_len < msg->fmt2.count1) {
-		pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
-			 __func__, (unsigned int)xcrb->resp_len,
-			 msg->fmt2.count1);
+		pr_debug("resp_len %u < required %u => EMSGSIZE\n",
+			 (unsigned int)xcrb->resp_len, msg->fmt2.count1);
 		return -EMSGSIZE;
 	}
@@ -875,8 +871,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
 		len = sizeof(struct type86x_reply) + t86r->length;
 		if (len > reply->bufsize || len > msg->bufsize ||
 		    len != reply->len) {
-			pr_debug("%s len mismatch => EMSGSIZE\n",
-				 __func__);
+			pr_debug("len mismatch => EMSGSIZE\n");
 			msg->rc = -EMSGSIZE;
 			goto out;
 		}
@@ -890,8 +885,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
 		len = t86r->fmt2.offset1 + t86r->fmt2.count1;
 		if (len > reply->bufsize || len > msg->bufsize ||
 		    len != reply->len) {
-			pr_debug("%s len mismatch => EMSGSIZE\n",
-				 __func__);
+			pr_debug("len mismatch => EMSGSIZE\n");
 			msg->rc = -EMSGSIZE;
 			goto out;
 		}
@@ -941,8 +935,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
 		len = t86r->fmt2.offset1 + t86r->fmt2.count1;
 		if (len > reply->bufsize || len > msg->bufsize ||
 		    len != reply->len) {
-			pr_debug("%s len mismatch => EMSGSIZE\n",
-				 __func__);
+			pr_debug("len mismatch => EMSGSIZE\n");
 			msg->rc = -EMSGSIZE;
 			goto out;
 		}
@@ -1154,8 +1147,8 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
 out:
 	if (rc)
-		pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
-			 __func__, AP_QID_CARD(zq->queue->qid),
+		pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+			 AP_QID_CARD(zq->queue->qid),
 			 AP_QID_QUEUE(zq->queue->qid), rc);
 	return rc;
 }
@@ -1277,8 +1270,8 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
 out:
 	if (rc)
-		pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
-			 __func__, AP_QID_CARD(zq->queue->qid),
+		pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+			 AP_QID_CARD(zq->queue->qid),
 			 AP_QID_QUEUE(zq->queue->qid), rc);
 	return rc;
 }