Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

- Optimize ftrace and kprobes code patching and avoid stop machine for
kprobes if sequential instruction fetching facility is available

- Add hiperdispatch feature to dynamically adjust CPU capacity in
vertical polarization to improve scheduling efficiency and overall
performance. Also add infrastructure for handling warning track
interrupts (WTI), allowing for graceful CPU preemption

- Rework crypto code pkey module and split it into separate,
independent modules for sysfs, PCKMO, CCA, and EP11, allowing modules
to load only when the relevant hardware is available

- Add hardware acceleration for HMAC modes and the full AES-XTS cipher,
utilizing message-security assist extensions (MSA) 10 and 11. It
introduces new shash implementations for HMAC-SHA224/256/384/512 and
registers the hardware-accelerated AES-XTS cipher as the preferred
option. Also add clear key token support

- Add MSA 10 and 11 processor activity instrumentation counters to perf
and update PAI Extension 1 NNPA counters

- Clean up CPU sampling facility code and rework debug/WARN_ON_ONCE
statements

- Add support for SHA3 performance enhancements introduced with MSA 12

- Add support for the query authentication information feature of MSA
13 and introduce the KDSA CPACF instruction. Provide query and query
authentication information in sysfs, enabling tools like cpacfinfo to
present this data in a human-readable form

- Update kernel disassembler instructions

- Always enable EXPOLINE_EXTERN if supported by the compiler to ensure
kpatch compatibility

- Add missing warning handling and relocated lowcore support to the
early program check handler

- Optimize ftrace_return_address() and avoid calling unwinder

- Make modules use kernel ftrace trampolines

- Strip relocs from the final vmlinux ELF file to roughly halve its
size

- Dump register contents and call trace for early crashes to the
console

- Generate ptdump address marker array dynamically

- Fix rcu_sched stalls that might occur when adding or removing large
amounts of pages at once to or from the CMM balloon

- Fix deadlock caused by recursive lock of the AP bus scan mutex

- Unify sync and async register save areas in entry code

- Clean up debug prints in crypto code

- Various cleanup and sanitizing patches for the decompressor

- Various small ftrace cleanups

* tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (84 commits)
s390/crypto: Display Query and Query Authentication Information in sysfs
s390/crypto: Add Support for Query Authentication Information
s390/crypto: Rework RRE and RRF CPACF inline functions
s390/crypto: Add KDSA CPACF Instruction
s390/disassembler: Remove duplicate instruction format RSY_RDRU
s390/boot: Move boot_printk() code to own file
s390/boot: Use boot_printk() instead of sclp_early_printk()
s390/boot: Rename decompressor_printk() to boot_printk()
s390/boot: Compile all files with the same march flag
s390: Use MARCH_HAS_*_FEATURES defines
s390: Provide MARCH_HAS_*_FEATURES defines
s390/facility: Disable compile time optimization for decompressor code
s390/boot: Increase minimum architecture to z10
s390/als: Remove obsolete comment
s390/sha3: Fix SHA3 selftests failures
s390/pkey: Add AES xts and HMAC clear key token support
s390/cpacf: Add MSA 10 and 11 new PCKMO functions
s390/mm: Add cond_resched() to cmm_alloc/free_pages()
s390/pai_ext: Update PAI extension 1 counters
s390/pai_crypto: Add support for MSA 10 and 11 pai counters
...

+6269 -3231
+26 -10
arch/s390/Kconfig
··· 514 514 making when dealing with machines that have multi-threading, 515 515 multiple cores or multiple books. 516 516 517 + config SCHED_TOPOLOGY_VERTICAL 518 + def_bool y 519 + bool "Use vertical CPU polarization by default" 520 + depends on SCHED_TOPOLOGY 521 + help 522 + Use vertical CPU polarization by default if available. 523 + The default CPU polarization is horizontal. 524 + 525 + config HIPERDISPATCH_ON 526 + def_bool y 527 + bool "Use hiperdispatch on vertical polarization by default" 528 + depends on SCHED_TOPOLOGY 529 + depends on PROC_SYSCTL 530 + help 531 + Hiperdispatch aims to improve the CPU scheduler's decision 532 + making when using vertical polarization by adjusting CPU 533 + capacities dynamically. Set this option to use hiperdispatch 534 + on vertical polarization by default. This can be overwritten 535 + by sysctl's s390.hiperdispatch attribute later on. 536 + 517 537 source "kernel/Kconfig.hz" 518 538 519 539 config CERT_STORE ··· 578 558 If unsure, say N. 579 559 580 560 config EXPOLINE_EXTERN 581 - def_bool y if EXPOLINE 582 - depends on EXPOLINE 583 - depends on CC_IS_GCC && GCC_VERSION >= 110200 584 - depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) 585 - prompt "Generate expolines as extern functions." 561 + def_bool EXPOLINE && CC_IS_GCC && GCC_VERSION >= 110200 && \ 562 + $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) 586 563 help 587 - This option is required for some tooling like kpatch. The kernel is 588 - compiled with -mindirect-branch=thunk-extern and requires a newer 589 - compiler. 590 - 591 - If unsure, say N. 564 + Generate expolines as external functions if the compiler supports it. 565 + This option is required for some tooling like kpatch, if expolines 566 + are enabled. The kernel is compiled with 567 + -mindirect-branch=thunk-extern, which requires a newer compiler. 592 568 593 569 choice 594 570 prompt "Expoline default"
+10 -24
arch/s390/boot/Makefile
··· 11 11 KCSAN_SANITIZE := n 12 12 KMSAN_SANITIZE := n 13 13 14 - KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) 15 - KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) 16 - 17 14 # 18 - # Use minimum architecture for als.c to be able to print an error 15 + # Use minimum architecture level so it is possible to print an error 19 16 # message if the kernel is started on a machine which is too old 20 17 # 21 - ifndef CONFIG_CC_IS_CLANG 22 - CC_FLAGS_MARCH_MINIMUM := -march=z900 23 - else 24 18 CC_FLAGS_MARCH_MINIMUM := -march=z10 25 - endif 26 19 27 - ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM)) 28 - AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH) 29 - AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM) 30 - AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH) 31 - AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM) 32 - CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH) 33 - CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM) 34 - CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH) 35 - CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM) 36 - endif 20 + KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR)) 21 + KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR)) 22 + KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM) 23 + KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM) 37 24 38 25 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char 39 26 40 27 obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o 41 28 obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o 42 - obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o 29 + obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o 30 + obj-y += uv.o printk.o 43 31 obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 44 32 obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 45 33 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o ··· 97 109 $(obj)/vmlinux.bin: vmlinux FORCE 98 110 $(call if_changed,objcopy) 99 111 100 - 
CMD_RELOCS=arch/s390/tools/relocs 101 - quiet_cmd_relocs = RELOCS $@ 102 - cmd_relocs = $(CMD_RELOCS) $< > $@ 103 - $(obj)/relocs.S: vmlinux FORCE 104 - $(call if_changed,relocs) 112 + # relocs.S is created by the vmlinux postlink step. 113 + $(obj)/relocs.S: vmlinux 114 + @true 105 115 106 116 suffix-$(CONFIG_KERNEL_GZIP) := .gz 107 117 suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
+8 -41
arch/s390/boot/als.c
··· 9 9 #include <asm/sclp.h> 10 10 #include "boot.h" 11 11 12 - /* 13 - * The code within this file will be called very early. It may _not_ 14 - * access anything within the bss section, since that is not cleared 15 - * yet and may contain data (e.g. initrd) that must be saved by other 16 - * code. 17 - * For temporary objects the stack (16k) should be used. 18 - */ 19 - 20 12 static unsigned long als[] = { FACILITIES_ALS }; 21 - 22 - static void u16_to_hex(char *str, u16 val) 23 - { 24 - int i, num; 25 - 26 - for (i = 1; i <= 4; i++) { 27 - num = (val >> (16 - 4 * i)) & 0xf; 28 - if (num >= 10) 29 - num += 7; 30 - *str++ = '0' + num; 31 - } 32 - *str = '\0'; 33 - } 34 - 35 - static void print_machine_type(void) 36 - { 37 - static char mach_str[80] = "Detected machine-type number: "; 38 - char type_str[5]; 39 - struct cpuid id; 40 - 41 - get_cpu_id(&id); 42 - u16_to_hex(type_str, id.machine); 43 - strcat(mach_str, type_str); 44 - strcat(mach_str, "\n"); 45 - sclp_early_printk(mach_str); 46 - } 47 13 48 14 static void u16_to_decimal(char *str, u16 val) 49 15 { ··· 46 80 * z/VM adds a four character prefix. 
47 81 */ 48 82 if (strlen(als_str) > 70) { 49 - strcat(als_str, "\n"); 50 - sclp_early_printk(als_str); 83 + boot_printk("%s\n", als_str); 51 84 *als_str = '\0'; 52 85 } 53 86 u16_to_decimal(val_str, i * BITS_PER_LONG + j); ··· 54 89 first = 0; 55 90 } 56 91 } 57 - strcat(als_str, "\n"); 58 - sclp_early_printk(als_str); 92 + boot_printk("%s\n", als_str); 59 93 } 60 94 61 95 static void facility_mismatch(void) 62 96 { 63 - sclp_early_printk("The Linux kernel requires more recent processor hardware\n"); 64 - print_machine_type(); 97 + struct cpuid id; 98 + 99 + get_cpu_id(&id); 100 + boot_printk("The Linux kernel requires more recent processor hardware\n"); 101 + boot_printk("Detected machine-type number: %4x\n", id.machine); 65 102 print_missing_facilities(); 66 - sclp_early_printk("See Principles of Operations for facility bits\n"); 103 + boot_printk("See Principles of Operations for facility bits\n"); 67 104 disabled_wait(); 68 105 } 69 106
+1 -1
arch/s390/boot/boot.h
··· 70 70 unsigned long randomize_within_range(unsigned long size, unsigned long align, 71 71 unsigned long min, unsigned long max); 72 72 void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit); 73 - void __printf(1, 2) decompressor_printk(const char *fmt, ...); 73 + void __printf(1, 2) boot_printk(const char *fmt, ...); 74 74 void print_stacktrace(unsigned long sp); 75 75 void error(char *m); 76 76 int get_random(unsigned long limit, unsigned long *value);
+2 -2
arch/s390/boot/head.S
··· 299 299 # the save area and does disabled wait with a faulty address. 300 300 # 301 301 SYM_CODE_START_LOCAL(startup_pgm_check_handler) 302 - stmg %r8,%r15,__LC_SAVE_AREA_SYNC 302 + stmg %r8,%r15,__LC_SAVE_AREA 303 303 la %r8,4095 304 304 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) 305 305 stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) 306 - mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC 306 + mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA 307 307 mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW 308 308 mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW 309 309 ni __LC_RETURN_PSW,0xfc # remove IO and EX bits
+1 -1
arch/s390/boot/ipl_parm.c
··· 215 215 216 216 for (i = 0; i < ARRAY_SIZE(als); i++) { 217 217 if ((stfle_fac_list[i] & als[i]) != als[i]) { 218 - sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); 218 + boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); 219 219 print_missing_facilities(); 220 220 break; 221 221 }
+1 -1
arch/s390/boot/kaslr.c
··· 32 32 static int check_prng(void) 33 33 { 34 34 if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) { 35 - sclp_early_printk("KASLR disabled: CPU has no PRNG\n"); 35 + boot_printk("KASLR disabled: CPU has no PRNG\n"); 36 36 return 0; 37 37 } 38 38 if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+22 -138
arch/s390/boot/pgm_check_info.c
··· 11 11 #include <asm/uv.h> 12 12 #include "boot.h" 13 13 14 - const char hex_asc[] = "0123456789abcdef"; 15 - 16 - static char *as_hex(char *dst, unsigned long val, int pad) 17 - { 18 - char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); 19 - 20 - for (*p-- = 0; p >= dst; val >>= 4) 21 - *p-- = hex_asc[val & 0x0f]; 22 - return end; 23 - } 24 - 25 - static char *symstart(char *p) 26 - { 27 - while (*p) 28 - p--; 29 - return p + 1; 30 - } 31 - 32 - static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len) 33 - { 34 - /* symbol entries are in a form "10000 c4 startup\0" */ 35 - char *a = _decompressor_syms_start; 36 - char *b = _decompressor_syms_end; 37 - unsigned long start; 38 - unsigned long size; 39 - char *pivot; 40 - char *endp; 41 - 42 - while (a < b) { 43 - pivot = symstart(a + (b - a) / 2); 44 - start = simple_strtoull(pivot, &endp, 16); 45 - size = simple_strtoull(endp + 1, &endp, 16); 46 - if (ip < start) { 47 - b = pivot; 48 - continue; 49 - } 50 - if (ip > start + size) { 51 - a = pivot + strlen(pivot) + 1; 52 - continue; 53 - } 54 - *off = ip - start; 55 - *len = size; 56 - return endp + 1; 57 - } 58 - return NULL; 59 - } 60 - 61 - static noinline char *strsym(void *ip) 62 - { 63 - static char buf[64]; 64 - unsigned short off; 65 - unsigned short len; 66 - char *p; 67 - 68 - p = findsym((unsigned long)ip, &off, &len); 69 - if (p) { 70 - strncpy(buf, p, sizeof(buf)); 71 - /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ 72 - p = buf + strnlen(buf, sizeof(buf) - 15); 73 - strcpy(p, "+0x"); 74 - p = as_hex(p + 3, off, 0); 75 - strcpy(p, "/0x"); 76 - as_hex(p + 3, len, 0); 77 - } else { 78 - as_hex(buf, (unsigned long)ip, 16); 79 - } 80 - return buf; 81 - } 82 - 83 - void decompressor_printk(const char *fmt, ...) 
84 - { 85 - char buf[1024] = { 0 }; 86 - char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ 87 - unsigned long pad; 88 - char *p = buf; 89 - va_list args; 90 - 91 - va_start(args, fmt); 92 - for (; p < end && *fmt; fmt++) { 93 - if (*fmt != '%') { 94 - *p++ = *fmt; 95 - continue; 96 - } 97 - pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0; 98 - switch (*fmt) { 99 - case 's': 100 - p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); 101 - break; 102 - case 'p': 103 - if (*++fmt != 'S') 104 - goto out; 105 - p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); 106 - break; 107 - case 'l': 108 - if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) 109 - goto out; 110 - p = as_hex(p, va_arg(args, unsigned long), pad); 111 - break; 112 - case 'x': 113 - if (end - p <= max(sizeof(int) * 2, pad)) 114 - goto out; 115 - p = as_hex(p, va_arg(args, unsigned int), pad); 116 - break; 117 - default: 118 - goto out; 119 - } 120 - } 121 - out: 122 - va_end(args); 123 - sclp_early_printk(buf); 124 - } 125 - 126 14 void print_stacktrace(unsigned long sp) 127 15 { 128 16 struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start, 129 17 (unsigned long)_stack_end }; 130 18 bool first = true; 131 19 132 - decompressor_printk("Call Trace:\n"); 20 + boot_printk("Call Trace:\n"); 133 21 while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) { 134 22 struct stack_frame *sf = (struct stack_frame *)sp; 135 23 136 - decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" : 137 - " sp:%016lx [<%016lx>] %pS\n", 138 - sp, sf->gprs[8], (void *)sf->gprs[8]); 24 + boot_printk(first ? 
"(sp:%016lx [<%016lx>] %pS)\n" : 25 + " sp:%016lx [<%016lx>] %pS\n", 26 + sp, sf->gprs[8], (void *)sf->gprs[8]); 139 27 if (sf->back_chain <= sp) 140 28 break; 141 29 sp = sf->back_chain; ··· 36 148 unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area; 37 149 struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area); 38 150 39 - decompressor_printk("Linux version %s\n", kernel_version); 151 + boot_printk("Linux version %s\n", kernel_version); 40 152 if (!is_prot_virt_guest() && early_command_line[0]) 41 - decompressor_printk("Kernel command line: %s\n", early_command_line); 42 - decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n", 43 - get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); 153 + boot_printk("Kernel command line: %s\n", early_command_line); 154 + boot_printk("Kernel fault: interruption code %04x ilc:%x\n", 155 + get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); 44 156 if (kaslr_enabled()) { 45 - decompressor_printk("Kernel random base: %lx\n", __kaslr_offset); 46 - decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); 157 + boot_printk("Kernel random base: %lx\n", __kaslr_offset); 158 + boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); 47 159 } 48 - decompressor_printk("PSW : %016lx %016lx (%pS)\n", 49 - get_lowcore()->psw_save_area.mask, 50 - get_lowcore()->psw_save_area.addr, 51 - (void *)get_lowcore()->psw_save_area.addr); 52 - decompressor_printk( 160 + boot_printk("PSW : %016lx %016lx (%pS)\n", 161 + get_lowcore()->psw_save_area.mask, 162 + get_lowcore()->psw_save_area.addr, 163 + (void *)get_lowcore()->psw_save_area.addr); 164 + boot_printk( 53 165 " R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n", 54 166 psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, 55 167 psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, 56 168 psw->eaba); 57 - decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n", 58 - gpregs[0], 
gpregs[1], gpregs[2], gpregs[3]); 59 - decompressor_printk(" %016lx %016lx %016lx %016lx\n", 60 - gpregs[4], gpregs[5], gpregs[6], gpregs[7]); 61 - decompressor_printk(" %016lx %016lx %016lx %016lx\n", 62 - gpregs[8], gpregs[9], gpregs[10], gpregs[11]); 63 - decompressor_printk(" %016lx %016lx %016lx %016lx\n", 64 - gpregs[12], gpregs[13], gpregs[14], gpregs[15]); 169 + boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]); 170 + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); 171 + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); 172 + boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]); 65 173 print_stacktrace(get_lowcore()->gpregs_save_area[15]); 66 - decompressor_printk("Last Breaking-Event-Address:\n"); 67 - decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, 68 - (void *)get_lowcore()->pgm_last_break); 174 + boot_printk("Last Breaking-Event-Address:\n"); 175 + boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, 176 + (void *)get_lowcore()->pgm_last_break); 69 177 }
+13 -13
arch/s390/boot/physmem_info.c
··· 190 190 enum reserved_range_type t; 191 191 int i; 192 192 193 - decompressor_printk("Linux version %s\n", kernel_version); 193 + boot_printk("Linux version %s\n", kernel_version); 194 194 if (!is_prot_virt_guest() && early_command_line[0]) 195 - decompressor_printk("Kernel command line: %s\n", early_command_line); 196 - decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", 197 - size, align, min, max); 198 - decompressor_printk("Reserved memory ranges:\n"); 195 + boot_printk("Kernel command line: %s\n", early_command_line); 196 + boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", 197 + size, align, min, max); 198 + boot_printk("Reserved memory ranges:\n"); 199 199 for_each_physmem_reserved_range(t, range, &start, &end) { 200 - decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); 200 + boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); 201 201 total_reserved_mem += end - start; 202 202 } 203 - decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n", 204 - get_physmem_info_source(), physmem_info.info_source); 203 + boot_printk("Usable online memory ranges (info source: %s [%x]):\n", 204 + get_physmem_info_source(), physmem_info.info_source); 205 205 for_each_physmem_usable_range(i, &start, &end) { 206 - decompressor_printk("%016lx %016lx\n", start, end); 206 + boot_printk("%016lx %016lx\n", start, end); 207 207 total_mem += end - start; 208 208 } 209 - decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", 210 - total_mem, total_reserved_mem, 211 - total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0); 209 + boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", 210 + total_mem, total_reserved_mem, 211 + total_mem > total_reserved_mem ? 
total_mem - total_reserved_mem : 0); 212 212 print_stacktrace(current_frame_address()); 213 - sclp_early_printk("\n\n -- System halted\n"); 213 + boot_printk("\n\n -- System halted\n"); 214 214 disabled_wait(); 215 215 } 216 216
+124
arch/s390/boot/printk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/kernel.h> 3 + #include <linux/stdarg.h> 4 + #include <linux/string.h> 5 + #include <linux/ctype.h> 6 + #include <asm/stacktrace.h> 7 + #include <asm/boot_data.h> 8 + #include <asm/lowcore.h> 9 + #include <asm/setup.h> 10 + #include <asm/sclp.h> 11 + #include <asm/uv.h> 12 + #include "boot.h" 13 + 14 + const char hex_asc[] = "0123456789abcdef"; 15 + 16 + static char *as_hex(char *dst, unsigned long val, int pad) 17 + { 18 + char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); 19 + 20 + for (*p-- = 0; p >= dst; val >>= 4) 21 + *p-- = hex_asc[val & 0x0f]; 22 + return end; 23 + } 24 + 25 + static char *symstart(char *p) 26 + { 27 + while (*p) 28 + p--; 29 + return p + 1; 30 + } 31 + 32 + static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len) 33 + { 34 + /* symbol entries are in a form "10000 c4 startup\0" */ 35 + char *a = _decompressor_syms_start; 36 + char *b = _decompressor_syms_end; 37 + unsigned long start; 38 + unsigned long size; 39 + char *pivot; 40 + char *endp; 41 + 42 + while (a < b) { 43 + pivot = symstart(a + (b - a) / 2); 44 + start = simple_strtoull(pivot, &endp, 16); 45 + size = simple_strtoull(endp + 1, &endp, 16); 46 + if (ip < start) { 47 + b = pivot; 48 + continue; 49 + } 50 + if (ip > start + size) { 51 + a = pivot + strlen(pivot) + 1; 52 + continue; 53 + } 54 + *off = ip - start; 55 + *len = size; 56 + return endp + 1; 57 + } 58 + return NULL; 59 + } 60 + 61 + static noinline char *strsym(void *ip) 62 + { 63 + static char buf[64]; 64 + unsigned short off; 65 + unsigned short len; 66 + char *p; 67 + 68 + p = findsym((unsigned long)ip, &off, &len); 69 + if (p) { 70 + strncpy(buf, p, sizeof(buf)); 71 + /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ 72 + p = buf + strnlen(buf, sizeof(buf) - 15); 73 + strcpy(p, "+0x"); 74 + p = as_hex(p + 3, off, 0); 75 + strcpy(p, "/0x"); 76 + as_hex(p + 3, len, 0); 77 + } else { 78 + 
as_hex(buf, (unsigned long)ip, 16); 79 + } 80 + return buf; 81 + } 82 + 83 + void boot_printk(const char *fmt, ...) 84 + { 85 + char buf[1024] = { 0 }; 86 + char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ 87 + unsigned long pad; 88 + char *p = buf; 89 + va_list args; 90 + 91 + va_start(args, fmt); 92 + for (; p < end && *fmt; fmt++) { 93 + if (*fmt != '%') { 94 + *p++ = *fmt; 95 + continue; 96 + } 97 + pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0; 98 + switch (*fmt) { 99 + case 's': 100 + p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); 101 + break; 102 + case 'p': 103 + if (*++fmt != 'S') 104 + goto out; 105 + p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); 106 + break; 107 + case 'l': 108 + if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) 109 + goto out; 110 + p = as_hex(p, va_arg(args, unsigned long), pad); 111 + break; 112 + case 'x': 113 + if (end - p <= max(sizeof(int) * 2, pad)) 114 + goto out; 115 + p = as_hex(p, va_arg(args, unsigned int), pad); 116 + break; 117 + default: 118 + goto out; 119 + } 120 + } 121 + out: 122 + va_end(args); 123 + sclp_early_printk(buf); 124 + }
+2 -5
arch/s390/boot/startup.c
··· 39 39 40 40 void error(char *x) 41 41 { 42 - sclp_early_printk("\n\n"); 43 - sclp_early_printk(x); 44 - sclp_early_printk("\n\n -- System halted"); 45 - 42 + boot_printk("\n\n%s\n\n -- System halted", x); 46 43 disabled_wait(); 47 44 } 48 45 ··· 293 296 kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); 294 297 } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { 295 298 kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); 296 - decompressor_printk("The kernel base address is forced to %lx\n", kernel_start); 299 + boot_printk("The kernel base address is forced to %lx\n", kernel_start); 297 300 } else { 298 301 kernel_start = __NO_KASLR_START_KERNEL; 299 302 }
+4
arch/s390/configs/debug_defconfig
··· 794 794 CONFIG_CRYPTO_AES_S390=m 795 795 CONFIG_CRYPTO_DES_S390=m 796 796 CONFIG_CRYPTO_CHACHA_S390=m 797 + CONFIG_CRYPTO_HMAC_S390=m 797 798 CONFIG_ZCRYPT=m 798 799 CONFIG_PKEY=m 800 + CONFIG_PKEY_CCA=m 801 + CONFIG_PKEY_EP11=m 802 + CONFIG_PKEY_PCKMO=m 799 803 CONFIG_CRYPTO_PAES_S390=m 800 804 CONFIG_CRYPTO_DEV_VIRTIO=m 801 805 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+4
arch/s390/configs/defconfig
··· 781 781 CONFIG_CRYPTO_AES_S390=m 782 782 CONFIG_CRYPTO_DES_S390=m 783 783 CONFIG_CRYPTO_CHACHA_S390=m 784 + CONFIG_CRYPTO_HMAC_S390=m 784 785 CONFIG_ZCRYPT=m 785 786 CONFIG_PKEY=m 787 + CONFIG_PKEY_CCA=m 788 + CONFIG_PKEY_EP11=m 789 + CONFIG_PKEY_PCKMO=m 786 790 CONFIG_CRYPTO_PAES_S390=m 787 791 CONFIG_CRYPTO_DEV_VIRTIO=m 788 792 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+10
arch/s390/crypto/Kconfig
··· 132 132 133 133 It is available as of z13. 134 134 135 + config CRYPTO_HMAC_S390 136 + tristate "Keyed-hash message authentication code: HMAC" 137 + depends on S390 138 + select CRYPTO_HASH 139 + help 140 + s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and 141 + SHA512. 142 + 143 + Architecture: s390 144 + 135 145 endmenu
+1
arch/s390/crypto/Makefile
··· 15 15 obj-$(CONFIG_S390_PRNG) += prng.o 16 16 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o 17 17 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o 18 + obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o 18 19 obj-y += arch_random.o 19 20 20 21 crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
+117 -3
arch/s390/crypto/aes_s390.c
··· 51 51 }; 52 52 53 53 struct s390_xts_ctx { 54 - u8 key[32]; 55 - u8 pcc_key[32]; 54 + union { 55 + u8 keys[64]; 56 + struct { 57 + u8 key[32]; 58 + u8 pcc_key[32]; 59 + }; 60 + }; 56 61 int key_len; 57 62 unsigned long fc; 58 63 struct crypto_skcipher *fallback; ··· 531 526 .decrypt = xts_aes_decrypt, 532 527 }; 533 528 529 + static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 530 + unsigned int key_len) 531 + { 532 + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); 533 + unsigned long fc; 534 + int err; 535 + 536 + err = xts_fallback_setkey(tfm, in_key, key_len); 537 + if (err) 538 + return err; 539 + 540 + /* Pick the correct function code based on the key length */ 541 + fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL : 542 + (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0; 543 + 544 + /* Check if the function code is available */ 545 + xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; 546 + if (!xts_ctx->fc) 547 + return 0; 548 + 549 + /* Store double-key */ 550 + memcpy(xts_ctx->keys, in_key, key_len); 551 + xts_ctx->key_len = key_len; 552 + return 0; 553 + } 554 + 555 + static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier) 556 + { 557 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 558 + struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm); 559 + unsigned int offset, nbytes, n; 560 + struct skcipher_walk walk; 561 + int ret; 562 + struct { 563 + __u8 key[64]; 564 + __u8 tweak[16]; 565 + __u8 nap[16]; 566 + } fxts_param = { 567 + .nap = {0}, 568 + }; 569 + 570 + if (req->cryptlen < AES_BLOCK_SIZE) 571 + return -EINVAL; 572 + 573 + if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) { 574 + struct skcipher_request *subreq = skcipher_request_ctx(req); 575 + 576 + *subreq = *req; 577 + skcipher_request_set_tfm(subreq, xts_ctx->fallback); 578 + return (modifier & CPACF_DECRYPT) ? 
579 + crypto_skcipher_decrypt(subreq) : 580 + crypto_skcipher_encrypt(subreq); 581 + } 582 + 583 + ret = skcipher_walk_virt(&walk, req, false); 584 + if (ret) 585 + return ret; 586 + 587 + offset = xts_ctx->key_len & 0x20; 588 + memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len); 589 + memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE); 590 + fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */ 591 + 592 + while ((nbytes = walk.nbytes) != 0) { 593 + /* only use complete blocks */ 594 + n = nbytes & ~(AES_BLOCK_SIZE - 1); 595 + cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset, 596 + walk.dst.virt.addr, walk.src.virt.addr, n); 597 + ret = skcipher_walk_done(&walk, nbytes - n); 598 + } 599 + memzero_explicit(&fxts_param, sizeof(fxts_param)); 600 + return ret; 601 + } 602 + 603 + static int fullxts_aes_encrypt(struct skcipher_request *req) 604 + { 605 + return fullxts_aes_crypt(req, 0); 606 + } 607 + 608 + static int fullxts_aes_decrypt(struct skcipher_request *req) 609 + { 610 + return fullxts_aes_crypt(req, CPACF_DECRYPT); 611 + } 612 + 613 + static struct skcipher_alg fullxts_aes_alg = { 614 + .base.cra_name = "xts(aes)", 615 + .base.cra_driver_name = "full-xts-aes-s390", 616 + .base.cra_priority = 403, /* aes-xts-s390 + 1 */ 617 + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, 618 + .base.cra_blocksize = AES_BLOCK_SIZE, 619 + .base.cra_ctxsize = sizeof(struct s390_xts_ctx), 620 + .base.cra_module = THIS_MODULE, 621 + .init = xts_fallback_init, 622 + .exit = xts_fallback_exit, 623 + .min_keysize = 2 * AES_MIN_KEY_SIZE, 624 + .max_keysize = 2 * AES_MAX_KEY_SIZE, 625 + .ivsize = AES_BLOCK_SIZE, 626 + .setkey = fullxts_aes_set_key, 627 + .encrypt = fullxts_aes_encrypt, 628 + .decrypt = fullxts_aes_decrypt, 629 + }; 630 + 534 631 static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 535 632 unsigned int key_len) 536 633 { ··· 1062 955 }; 1063 956 1064 957 static struct crypto_alg *aes_s390_alg; 1065 - static struct 
skcipher_alg *aes_s390_skcipher_algs[4]; 958 + static struct skcipher_alg *aes_s390_skcipher_algs[5]; 1066 959 static int aes_s390_skciphers_num; 1067 960 static struct aead_alg *aes_s390_aead_alg; 1068 961 ··· 1115 1008 cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) || 1116 1009 cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) { 1117 1010 ret = aes_s390_register_skcipher(&cbc_aes_alg); 1011 + if (ret) 1012 + goto out_err; 1013 + } 1014 + 1015 + if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) || 1016 + cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) { 1017 + ret = aes_s390_register_skcipher(&fullxts_aes_alg); 1118 1018 if (ret) 1119 1019 goto out_err; 1120 1020 }
+359
arch/s390/crypto/hmac_s390.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright IBM Corp. 2024 4 + * 5 + * s390 specific HMAC support. 6 + */ 7 + 8 + #define KMSG_COMPONENT "hmac_s390" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <asm/cpacf.h> 12 + #include <crypto/sha2.h> 13 + #include <crypto/internal/hash.h> 14 + #include <linux/cpufeature.h> 15 + #include <linux/module.h> 16 + 17 + /* 18 + * KMAC param block layout for sha2 function codes: 19 + * The layout of the param block for the KMAC instruction depends on the 20 + * blocksize of the used hashing sha2-algorithm function codes. The param block 21 + * contains the hash chaining value (cv), the input message bit-length (imbl) 22 + * and the hmac-secret (key). To prevent code duplication, the sizes of all 23 + * these are calculated based on the blocksize. 24 + * 25 + * param-block: 26 + * +-------+ 27 + * | cv | 28 + * +-------+ 29 + * | imbl | 30 + * +-------+ 31 + * | key | 32 + * +-------+ 33 + * 34 + * sizes: 35 + * part | sh2-alg | calculation | size | type 36 + * -----+---------+-------------+------+-------- 37 + * cv | 224/256 | blocksize/2 | 32 | u64[8] 38 + * | 384/512 | | 64 | u128[8] 39 + * imbl | 224/256 | blocksize/8 | 8 | u64 40 + * | 384/512 | | 16 | u128 41 + * key | 224/256 | blocksize | 64 | u8[64] 42 + * | 384/512 | | 128 | u8[128] 43 + */ 44 + 45 + #define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE 46 + #define MAX_IMBL_SIZE sizeof(u128) 47 + #define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE 48 + 49 + #define SHA2_CV_SIZE(bs) ((bs) >> 1) 50 + #define SHA2_IMBL_SIZE(bs) ((bs) >> 3) 51 + 52 + #define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs)) 53 + #define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs)) 54 + 55 + struct s390_hmac_ctx { 56 + u8 key[MAX_BLOCK_SIZE]; 57 + }; 58 + 59 + union s390_kmac_gr0 { 60 + unsigned long reg; 61 + struct { 62 + unsigned long : 48; 63 + unsigned long ikp : 1; 64 + unsigned long iimp : 1; 65 + unsigned long ccup : 1; 66 + unsigned long : 6; 67 + unsigned 
long fc : 7; 68 + }; 69 + }; 70 + 71 + struct s390_kmac_sha2_ctx { 72 + u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE]; 73 + union s390_kmac_gr0 gr0; 74 + u8 buf[MAX_BLOCK_SIZE]; 75 + unsigned int buflen; 76 + }; 77 + 78 + /* 79 + * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize 80 + */ 81 + static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen, 82 + unsigned int blocksize) 83 + { 84 + u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize); 85 + 86 + switch (blocksize) { 87 + case SHA256_BLOCK_SIZE: 88 + *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE; 89 + break; 90 + case SHA512_BLOCK_SIZE: 91 + *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE; 92 + break; 93 + default: 94 + break; 95 + } 96 + } 97 + 98 + static int hash_key(const u8 *in, unsigned int inlen, 99 + u8 *digest, unsigned int digestsize) 100 + { 101 + unsigned long func; 102 + union { 103 + struct sha256_paramblock { 104 + u32 h[8]; 105 + u64 mbl; 106 + } sha256; 107 + struct sha512_paramblock { 108 + u64 h[8]; 109 + u128 mbl; 110 + } sha512; 111 + } __packed param; 112 + 113 + #define PARAM_INIT(x, y, z) \ 114 + param.sha##x.h[0] = SHA##y ## _H0; \ 115 + param.sha##x.h[1] = SHA##y ## _H1; \ 116 + param.sha##x.h[2] = SHA##y ## _H2; \ 117 + param.sha##x.h[3] = SHA##y ## _H3; \ 118 + param.sha##x.h[4] = SHA##y ## _H4; \ 119 + param.sha##x.h[5] = SHA##y ## _H5; \ 120 + param.sha##x.h[6] = SHA##y ## _H6; \ 121 + param.sha##x.h[7] = SHA##y ## _H7; \ 122 + param.sha##x.mbl = (z) 123 + 124 + switch (digestsize) { 125 + case SHA224_DIGEST_SIZE: 126 + func = CPACF_KLMD_SHA_256; 127 + PARAM_INIT(256, 224, inlen * 8); 128 + break; 129 + case SHA256_DIGEST_SIZE: 130 + func = CPACF_KLMD_SHA_256; 131 + PARAM_INIT(256, 256, inlen * 8); 132 + break; 133 + case SHA384_DIGEST_SIZE: 134 + func = CPACF_KLMD_SHA_512; 135 + PARAM_INIT(512, 384, inlen * 8); 136 + break; 137 + case SHA512_DIGEST_SIZE: 138 + func = CPACF_KLMD_SHA_512; 139 + PARAM_INIT(512, 512, inlen * 8); 140 + break; 
141 + default: 142 + return -EINVAL; 143 + } 144 + 145 + #undef PARAM_INIT 146 + 147 + cpacf_klmd(func, &param, in, inlen); 148 + 149 + memcpy(digest, &param, digestsize); 150 + 151 + return 0; 152 + } 153 + 154 + static int s390_hmac_sha2_setkey(struct crypto_shash *tfm, 155 + const u8 *key, unsigned int keylen) 156 + { 157 + struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm); 158 + unsigned int ds = crypto_shash_digestsize(tfm); 159 + unsigned int bs = crypto_shash_blocksize(tfm); 160 + 161 + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); 162 + 163 + if (keylen > bs) 164 + return hash_key(key, keylen, tfm_ctx->key, ds); 165 + 166 + memcpy(tfm_ctx->key, key, keylen); 167 + return 0; 168 + } 169 + 170 + static int s390_hmac_sha2_init(struct shash_desc *desc) 171 + { 172 + struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm); 173 + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); 174 + unsigned int bs = crypto_shash_blocksize(desc->tfm); 175 + 176 + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), 177 + tfm_ctx->key, bs); 178 + 179 + ctx->buflen = 0; 180 + ctx->gr0.reg = 0; 181 + switch (crypto_shash_digestsize(desc->tfm)) { 182 + case SHA224_DIGEST_SIZE: 183 + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224; 184 + break; 185 + case SHA256_DIGEST_SIZE: 186 + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256; 187 + break; 188 + case SHA384_DIGEST_SIZE: 189 + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384; 190 + break; 191 + case SHA512_DIGEST_SIZE: 192 + ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512; 193 + break; 194 + default: 195 + return -EINVAL; 196 + } 197 + 198 + return 0; 199 + } 200 + 201 + static int s390_hmac_sha2_update(struct shash_desc *desc, 202 + const u8 *data, unsigned int len) 203 + { 204 + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); 205 + unsigned int bs = crypto_shash_blocksize(desc->tfm); 206 + unsigned int offset, n; 207 + 208 + /* check current buffer */ 209 + offset = ctx->buflen % bs; 210 + ctx->buflen += len; 211 + if (offset + len < bs) 212 + goto store; 213 + 214 + /* 
process one stored block */ 215 + if (offset) { 216 + n = bs - offset; 217 + memcpy(ctx->buf + offset, data, n); 218 + ctx->gr0.iimp = 1; 219 + _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); 220 + data += n; 221 + len -= n; 222 + offset = 0; 223 + } 224 + /* process as many blocks as possible */ 225 + if (len >= bs) { 226 + n = (len / bs) * bs; 227 + ctx->gr0.iimp = 1; 228 + _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n); 229 + data += n; 230 + len -= n; 231 + } 232 + store: 233 + /* store incomplete block in buffer */ 234 + if (len) 235 + memcpy(ctx->buf + offset, data, len); 236 + 237 + return 0; 238 + } 239 + 240 + static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out) 241 + { 242 + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); 243 + unsigned int bs = crypto_shash_blocksize(desc->tfm); 244 + 245 + ctx->gr0.iimp = 0; 246 + kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs); 247 + _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs); 248 + memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm)); 249 + 250 + return 0; 251 + } 252 + 253 + static int s390_hmac_sha2_digest(struct shash_desc *desc, 254 + const u8 *data, unsigned int len, u8 *out) 255 + { 256 + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); 257 + unsigned int ds = crypto_shash_digestsize(desc->tfm); 258 + int rc; 259 + 260 + rc = s390_hmac_sha2_init(desc); 261 + if (rc) 262 + return rc; 263 + 264 + ctx->gr0.iimp = 0; 265 + kmac_sha2_set_imbl(ctx->param, len, 266 + crypto_shash_blocksize(desc->tfm)); 267 + _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len); 268 + memcpy(out, ctx->param, ds); 269 + 270 + return 0; 271 + } 272 + 273 + #define S390_HMAC_SHA2_ALG(x) { \ 274 + .fc = CPACF_KMAC_HMAC_SHA_##x, \ 275 + .alg = { \ 276 + .init = s390_hmac_sha2_init, \ 277 + .update = s390_hmac_sha2_update, \ 278 + .final = s390_hmac_sha2_final, \ 279 + .digest = s390_hmac_sha2_digest, \ 280 + .setkey = s390_hmac_sha2_setkey, \ 281 + .descsize = sizeof(struct 
s390_kmac_sha2_ctx), \ 282 + .halg = { \ 283 + .digestsize = SHA##x##_DIGEST_SIZE, \ 284 + .base = { \ 285 + .cra_name = "hmac(sha" #x ")", \ 286 + .cra_driver_name = "hmac_s390_sha" #x, \ 287 + .cra_blocksize = SHA##x##_BLOCK_SIZE, \ 288 + .cra_priority = 400, \ 289 + .cra_ctxsize = sizeof(struct s390_hmac_ctx), \ 290 + .cra_module = THIS_MODULE, \ 291 + }, \ 292 + }, \ 293 + }, \ 294 + } 295 + 296 + static struct s390_hmac_alg { 297 + bool registered; 298 + unsigned int fc; 299 + struct shash_alg alg; 300 + } s390_hmac_algs[] = { 301 + S390_HMAC_SHA2_ALG(224), 302 + S390_HMAC_SHA2_ALG(256), 303 + S390_HMAC_SHA2_ALG(384), 304 + S390_HMAC_SHA2_ALG(512), 305 + }; 306 + 307 + static __always_inline void _s390_hmac_algs_unregister(void) 308 + { 309 + struct s390_hmac_alg *hmac; 310 + int i; 311 + 312 + for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) { 313 + hmac = &s390_hmac_algs[i]; 314 + if (!hmac->registered) 315 + continue; 316 + crypto_unregister_shash(&hmac->alg); 317 + } 318 + } 319 + 320 + static int __init hmac_s390_init(void) 321 + { 322 + struct s390_hmac_alg *hmac; 323 + int i, rc = -ENODEV; 324 + 325 + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256)) 326 + return -ENODEV; 327 + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512)) 328 + return -ENODEV; 329 + 330 + for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) { 331 + hmac = &s390_hmac_algs[i]; 332 + if (!cpacf_query_func(CPACF_KMAC, hmac->fc)) 333 + continue; 334 + 335 + rc = crypto_register_shash(&hmac->alg); 336 + if (rc) { 337 + pr_err("unable to register %s\n", 338 + hmac->alg.halg.base.cra_name); 339 + goto out; 340 + } 341 + hmac->registered = true; 342 + pr_debug("registered %s\n", hmac->alg.halg.base.cra_name); 343 + } 344 + return rc; 345 + out: 346 + _s390_hmac_algs_unregister(); 347 + return rc; 348 + } 349 + 350 + static void __exit hmac_s390_exit(void) 351 + { 352 + _s390_hmac_algs_unregister(); 353 + } 354 + 355 + module_cpu_feature_match(S390_CPU_FEATURE_MSA, 
hmac_s390_init); 356 + module_exit(hmac_s390_exit); 357 + 358 + MODULE_DESCRIPTION("S390 HMAC driver"); 359 + MODULE_LICENSE("GPL");
+2 -2
arch/s390/crypto/paes_s390.c
··· 133 133 if (msleep_interruptible(1000)) 134 134 return -EINTR; 135 135 } 136 - ret = pkey_keyblob2pkey(kb->key, kb->keylen, 137 - pk->protkey, &pk->len, &pk->type); 136 + ret = pkey_key2protkey(kb->key, kb->keylen, 137 + pk->protkey, &pk->len, &pk->type); 138 138 } 139 139 140 140 return ret;
+1
arch/s390/crypto/sha.h
··· 25 25 u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)]; 26 26 u8 buf[SHA_MAX_BLOCK_SIZE]; 27 27 int func; /* KIMD function to use */ 28 + int first_message_part; 28 29 }; 29 30 30 31 struct shash_desc;
+9 -2
arch/s390/crypto/sha3_256_s390.c
··· 21 21 { 22 22 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 23 23 24 - memset(sctx->state, 0, sizeof(sctx->state)); 24 + if (!test_facility(86)) /* msa 12 */ 25 + memset(sctx->state, 0, sizeof(sctx->state)); 25 26 sctx->count = 0; 26 27 sctx->func = CPACF_KIMD_SHA3_256; 28 + sctx->first_message_part = 1; 27 29 28 30 return 0; 29 31 } ··· 38 36 octx->rsiz = sctx->count; 39 37 memcpy(octx->st, sctx->state, sizeof(octx->st)); 40 38 memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); 39 + octx->partial = sctx->first_message_part; 41 40 42 41 return 0; 43 42 } ··· 51 48 sctx->count = ictx->rsiz; 52 49 memcpy(sctx->state, ictx->st, sizeof(ictx->st)); 53 50 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 51 + sctx->first_message_part = ictx->partial; 54 52 sctx->func = CPACF_KIMD_SHA3_256; 55 53 56 54 return 0; ··· 65 61 sctx->count = ictx->rsiz; 66 62 memcpy(sctx->state, ictx->st, sizeof(ictx->st)); 67 63 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 64 + sctx->first_message_part = ictx->partial; 68 65 sctx->func = CPACF_KIMD_SHA3_224; 69 66 70 67 return 0; ··· 93 88 { 94 89 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 95 90 96 - memset(sctx->state, 0, sizeof(sctx->state)); 91 + if (!test_facility(86)) /* msa 12 */ 92 + memset(sctx->state, 0, sizeof(sctx->state)); 97 93 sctx->count = 0; 98 94 sctx->func = CPACF_KIMD_SHA3_224; 95 + sctx->first_message_part = 1; 99 96 100 97 return 0; 101 98 }
+9 -2
arch/s390/crypto/sha3_512_s390.c
··· 20 20 { 21 21 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 22 22 23 - memset(sctx->state, 0, sizeof(sctx->state)); 23 + if (!test_facility(86)) /* msa 12 */ 24 + memset(sctx->state, 0, sizeof(sctx->state)); 24 25 sctx->count = 0; 25 26 sctx->func = CPACF_KIMD_SHA3_512; 27 + sctx->first_message_part = 1; 26 28 27 29 return 0; 28 30 } ··· 39 37 40 38 memcpy(octx->st, sctx->state, sizeof(octx->st)); 41 39 memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); 40 + octx->partial = sctx->first_message_part; 42 41 43 42 return 0; 44 43 } ··· 55 52 56 53 memcpy(sctx->state, ictx->st, sizeof(ictx->st)); 57 54 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 55 + sctx->first_message_part = ictx->partial; 58 56 sctx->func = CPACF_KIMD_SHA3_512; 59 57 60 58 return 0; ··· 72 68 73 69 memcpy(sctx->state, ictx->st, sizeof(ictx->st)); 74 70 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 71 + sctx->first_message_part = ictx->partial; 75 72 sctx->func = CPACF_KIMD_SHA3_384; 76 73 77 74 return 0; ··· 102 97 { 103 98 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 104 99 105 - memset(sctx->state, 0, sizeof(sctx->state)); 100 + if (!test_facility(86)) /* msa 12 */ 101 + memset(sctx->state, 0, sizeof(sctx->state)); 106 102 sctx->count = 0; 107 103 sctx->func = CPACF_KIMD_SHA3_384; 104 + sctx->first_message_part = 1; 108 105 109 106 return 0; 110 107 }
+16 -4
arch/s390/crypto/sha_common.c
··· 18 18 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); 19 19 unsigned int bsize = crypto_shash_blocksize(desc->tfm); 20 20 unsigned int index, n; 21 + int fc; 21 22 22 23 /* how much is already in the buffer? */ 23 24 index = ctx->count % bsize; ··· 27 26 if ((index + len) < bsize) 28 27 goto store; 29 28 29 + fc = ctx->func; 30 + if (ctx->first_message_part) 31 + fc |= test_facility(86) ? CPACF_KIMD_NIP : 0; 32 + 30 33 /* process one stored block */ 31 34 if (index) { 32 35 memcpy(ctx->buf + index, data, bsize - index); 33 - cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize); 36 + cpacf_kimd(fc, ctx->state, ctx->buf, bsize); 37 + ctx->first_message_part = 0; 38 + fc &= ~CPACF_KIMD_NIP; 34 39 data += bsize - index; 35 40 len -= bsize - index; 36 41 index = 0; ··· 45 38 /* process as many blocks as possible */ 46 39 if (len >= bsize) { 47 40 n = (len / bsize) * bsize; 48 - cpacf_kimd(ctx->func, ctx->state, data, n); 41 + cpacf_kimd(fc, ctx->state, data, n); 42 + ctx->first_message_part = 0; 49 43 data += n; 50 44 len -= n; 51 45 } ··· 83 75 unsigned int bsize = crypto_shash_blocksize(desc->tfm); 84 76 u64 bits; 85 77 unsigned int n; 86 - int mbl_offset; 78 + int mbl_offset, fc; 87 79 88 80 n = ctx->count % bsize; 89 81 bits = ctx->count * 8; ··· 117 109 return -EINVAL; 118 110 } 119 111 120 - cpacf_klmd(ctx->func, ctx->state, ctx->buf, n); 112 + fc = ctx->func; 113 + fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0; 114 + if (ctx->first_message_part) 115 + fc |= CPACF_KLMD_NIP; 116 + cpacf_klmd(fc, ctx->state, ctx->buf, n); 121 117 122 118 /* copy digest to out */ 123 119 memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
-1
arch/s390/hypfs/hypfs.h
··· 78 78 struct dentry *dentry; 79 79 }; 80 80 81 - extern void hypfs_dbfs_exit(void); 82 81 extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); 83 82 extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df); 84 83
+1 -6
arch/s390/hypfs/hypfs_diag.c
··· 29 29 static void *diag204_buf; /* 4K aligned buffer for diag204 data */ 30 30 static int diag204_buf_pages; /* number of pages for diag204 data */ 31 31 32 - static struct dentry *dbfs_d204_file; 33 - 34 32 enum diag204_format diag204_get_info_type(void) 35 33 { 36 34 return diag204_info_type; ··· 212 214 hypfs_dbfs_create_file(&dbfs_file_d204); 213 215 214 216 rc = hypfs_diag_fs_init(); 215 - if (rc) { 217 + if (rc) 216 218 pr_err("The hardware system does not provide all functions required by hypfs\n"); 217 - debugfs_remove(dbfs_d204_file); 218 - } 219 219 return rc; 220 220 } 221 221 222 222 void hypfs_diag_exit(void) 223 223 { 224 - debugfs_remove(dbfs_d204_file); 225 224 hypfs_diag_fs_exit(); 226 225 diag204_free_buffer(); 227 226 hypfs_dbfs_remove_file(&dbfs_file_d204);
+8 -7
arch/s390/include/asm/arch_hweight.h
··· 4 4 #define _ASM_S390_ARCH_HWEIGHT_H 5 5 6 6 #include <linux/types.h> 7 + #include <asm/march.h> 7 8 8 9 static __always_inline unsigned long popcnt_z196(unsigned long w) 9 10 { ··· 30 29 31 30 static __always_inline unsigned long __arch_hweight64(__u64 w) 32 31 { 33 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) 32 + if (__is_defined(MARCH_HAS_Z15_FEATURES)) 34 33 return popcnt_z15(w); 35 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { 34 + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { 36 35 w = popcnt_z196(w); 37 36 w += w >> 32; 38 37 w += w >> 16; ··· 44 43 45 44 static __always_inline unsigned int __arch_hweight32(unsigned int w) 46 45 { 47 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) 46 + if (__is_defined(MARCH_HAS_Z15_FEATURES)) 48 47 return popcnt_z15(w); 49 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { 48 + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { 50 49 w = popcnt_z196(w); 51 50 w += w >> 16; 52 51 w += w >> 8; ··· 57 56 58 57 static __always_inline unsigned int __arch_hweight16(unsigned int w) 59 58 { 60 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES)) 59 + if (__is_defined(MARCH_HAS_Z15_FEATURES)) 61 60 return popcnt_z15((unsigned short)w); 62 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) { 61 + if (__is_defined(MARCH_HAS_Z196_FEATURES)) { 63 62 w = popcnt_z196(w); 64 63 w += w >> 8; 65 64 return w & 0xff; ··· 69 68 70 69 static __always_inline unsigned int __arch_hweight8(unsigned int w) 71 70 { 72 - if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) 71 + if (__is_defined(MARCH_HAS_Z196_FEATURES)) 73 72 return popcnt_z196((unsigned char)w); 74 73 return __sw_hweight8(w); 75 74 }
+4 -3
arch/s390/include/asm/atomic_ops.h
··· 9 9 #define __ARCH_S390_ATOMIC_OPS__ 10 10 11 11 #include <linux/limits.h> 12 + #include <asm/march.h> 12 13 13 14 static __always_inline int __atomic_read(const atomic_t *v) 14 15 { ··· 57 56 } 58 57 } 59 58 60 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 59 + #ifdef MARCH_HAS_Z196_FEATURES 61 60 62 61 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \ 63 62 static __always_inline op_type op_name(op_type val, op_type *ptr) \ ··· 108 107 #undef __ATOMIC_CONST_OPS 109 108 #undef __ATOMIC_CONST_OP 110 109 111 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 110 + #else /* MARCH_HAS_Z196_FEATURES */ 112 111 113 112 #define __ATOMIC_OP(op_name, op_string) \ 114 113 static __always_inline int op_name(int val, int *ptr) \ ··· 167 166 #define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr) 168 167 #define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr) 169 168 170 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 169 + #endif /* MARCH_HAS_Z196_FEATURES */ 171 170 172 171 static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new) 173 172 {
+3 -1
arch/s390/include/asm/barrier.h
··· 8 8 #ifndef __ASM_BARRIER_H 9 9 #define __ASM_BARRIER_H 10 10 11 + #include <asm/march.h> 12 + 11 13 /* 12 14 * Force strict CPU ordering. 13 15 * And yes, this is required on UP too when we're talking 14 16 * to devices. 15 17 */ 16 18 17 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 19 + #ifdef MARCH_HAS_Z196_FEATURES 18 20 /* Fast-BCR without checkpoint synchronization */ 19 21 #define __ASM_BCR_SERIALIZE "bcr 14,0\n" 20 22 #else
+188 -93
arch/s390/include/asm/cpacf.h
··· 54 54 #define CPACF_KM_XTS_256 0x34 55 55 #define CPACF_KM_PXTS_128 0x3a 56 56 #define CPACF_KM_PXTS_256 0x3c 57 + #define CPACF_KM_XTS_128_FULL 0x52 58 + #define CPACF_KM_XTS_256_FULL 0x54 57 59 58 60 /* 59 61 * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) ··· 123 121 #define CPACF_KMAC_DEA 0x01 124 122 #define CPACF_KMAC_TDEA_128 0x02 125 123 #define CPACF_KMAC_TDEA_192 0x03 124 + #define CPACF_KMAC_HMAC_SHA_224 0x70 125 + #define CPACF_KMAC_HMAC_SHA_256 0x71 126 + #define CPACF_KMAC_HMAC_SHA_384 0x72 127 + #define CPACF_KMAC_HMAC_SHA_512 0x73 126 128 127 129 /* 128 130 * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) 129 131 * instruction 130 132 */ 131 - #define CPACF_PCKMO_QUERY 0x00 132 - #define CPACF_PCKMO_ENC_DES_KEY 0x01 133 - #define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 134 - #define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 135 - #define CPACF_PCKMO_ENC_AES_128_KEY 0x12 136 - #define CPACF_PCKMO_ENC_AES_192_KEY 0x13 137 - #define CPACF_PCKMO_ENC_AES_256_KEY 0x14 138 - #define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 139 - #define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 140 - #define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 141 - #define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 142 - #define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 133 + #define CPACF_PCKMO_QUERY 0x00 134 + #define CPACF_PCKMO_ENC_DES_KEY 0x01 135 + #define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 136 + #define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 137 + #define CPACF_PCKMO_ENC_AES_128_KEY 0x12 138 + #define CPACF_PCKMO_ENC_AES_192_KEY 0x13 139 + #define CPACF_PCKMO_ENC_AES_256_KEY 0x14 140 + #define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15 141 + #define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16 142 + #define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 143 + #define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 144 + #define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 145 + #define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 146 + #define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 147 + #define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76 148 + #define 
CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a 143 149 144 150 /* 145 151 * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION) ··· 175 165 #define CPACF_KMA_LAAD 0x200 /* Last-AAD */ 176 166 #define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */ 177 167 168 + /* 169 + * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST) 170 + * instructions 171 + */ 172 + #define CPACF_KIMD_NIP 0x8000 173 + #define CPACF_KLMD_DUFOP 0x4000 174 + #define CPACF_KLMD_NIP 0x8000 175 + 176 + /* 177 + * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION) 178 + * instruction 179 + */ 180 + #define CPACF_KDSA_QUERY 0x00 181 + #define CPACF_KDSA_ECDSA_VERIFY_P256 0x01 182 + #define CPACF_KDSA_ECDSA_VERIFY_P384 0x02 183 + #define CPACF_KDSA_ECDSA_VERIFY_P521 0x03 184 + #define CPACF_KDSA_ECDSA_SIGN_P256 0x09 185 + #define CPACF_KDSA_ECDSA_SIGN_P384 0x0a 186 + #define CPACF_KDSA_ECDSA_SIGN_P521 0x0b 187 + #define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11 188 + #define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12 189 + #define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13 190 + #define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20 191 + #define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24 192 + #define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28 193 + #define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c 194 + #define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30 195 + #define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34 196 + 197 + #define CPACF_FC_QUERY 0x00 198 + #define CPACF_FC_QUERY_AUTH_INFO 0x7F 199 + 178 200 typedef struct { unsigned char bytes[16]; } cpacf_mask_t; 201 + typedef struct { unsigned char bytes[256]; } cpacf_qai_t; 179 202 180 203 /* 181 204 * Prototype for a not existing function to produce a link ··· 218 175 void __cpacf_bad_opcode(void); 219 176 220 177 static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2, 221 - cpacf_mask_t *mask) 178 + u8 *pb, u8 fc) 222 179 { 223 180 asm volatile( 224 - " la %%r1,%[mask]\n" 225 - " xgr %%r0,%%r0\n" 181 + " la %%r1,%[pb]\n" 182 + " lghi %%r0,%[fc]\n" 226 183 " .insn 
rre,%[opc] << 16,%[r1],%[r2]\n" 227 - : [mask] "=R" (*mask) 228 - : [opc] "i" (opc), 184 + : [pb] "=R" (*pb) 185 + : [opc] "i" (opc), [fc] "i" (fc), 229 186 [r1] "i" (r1), [r2] "i" (r2) 230 - : "cc", "r0", "r1"); 187 + : "cc", "memory", "r0", "r1"); 231 188 } 232 189 233 - static __always_inline void __cpacf_query_rrf(u32 opc, 234 - u8 r1, u8 r2, u8 r3, u8 m4, 235 - cpacf_mask_t *mask) 190 + static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3, 191 + u8 m4, u8 *pb, u8 fc) 236 192 { 237 193 asm volatile( 238 - " la %%r1,%[mask]\n" 239 - " xgr %%r0,%%r0\n" 194 + " la %%r1,%[pb]\n" 195 + " lghi %%r0,%[fc]\n" 240 196 " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n" 241 - : [mask] "=R" (*mask) 242 - : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2), 243 - [r3] "i" (r3), [m4] "i" (m4) 244 - : "cc", "r0", "r1"); 197 + : [pb] "=R" (*pb) 198 + : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), 199 + [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4) 200 + : "cc", "memory", "r0", "r1"); 201 + } 202 + 203 + static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb, 204 + u8 fc) 205 + { 206 + switch (opcode) { 207 + case CPACF_KDSA: 208 + __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc); 209 + break; 210 + case CPACF_KIMD: 211 + __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc); 212 + break; 213 + case CPACF_KLMD: 214 + __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc); 215 + break; 216 + case CPACF_KM: 217 + __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc); 218 + break; 219 + case CPACF_KMA: 220 + __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc); 221 + break; 222 + case CPACF_KMAC: 223 + __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc); 224 + break; 225 + case CPACF_KMC: 226 + __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc); 227 + break; 228 + case CPACF_KMCTR: 229 + __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc); 230 + break; 231 + case CPACF_KMF: 232 + __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc); 233 + break; 234 + case CPACF_KMO: 235 + __cpacf_query_rre(CPACF_KMO, 2, 4, 
pb, fc); 236 + break; 237 + case CPACF_PCC: 238 + __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc); 239 + break; 240 + case CPACF_PCKMO: 241 + __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc); 242 + break; 243 + case CPACF_PRNO: 244 + __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc); 245 + break; 246 + default: 247 + __cpacf_bad_opcode(); 248 + } 245 249 } 246 250 247 251 static __always_inline void __cpacf_query(unsigned int opcode, 248 252 cpacf_mask_t *mask) 249 253 { 250 - switch (opcode) { 251 - case CPACF_KDSA: 252 - __cpacf_query_rre(CPACF_KDSA, 0, 2, mask); 253 - break; 254 - case CPACF_KIMD: 255 - __cpacf_query_rre(CPACF_KIMD, 0, 2, mask); 256 - break; 257 - case CPACF_KLMD: 258 - __cpacf_query_rre(CPACF_KLMD, 0, 2, mask); 259 - break; 260 - case CPACF_KM: 261 - __cpacf_query_rre(CPACF_KM, 2, 4, mask); 262 - break; 263 - case CPACF_KMA: 264 - __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask); 265 - break; 266 - case CPACF_KMAC: 267 - __cpacf_query_rre(CPACF_KMAC, 0, 2, mask); 268 - break; 269 - case CPACF_KMC: 270 - __cpacf_query_rre(CPACF_KMC, 2, 4, mask); 271 - break; 272 - case CPACF_KMCTR: 273 - __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask); 274 - break; 275 - case CPACF_KMF: 276 - __cpacf_query_rre(CPACF_KMF, 2, 4, mask); 277 - break; 278 - case CPACF_KMO: 279 - __cpacf_query_rre(CPACF_KMO, 2, 4, mask); 280 - break; 281 - case CPACF_PCC: 282 - __cpacf_query_rre(CPACF_PCC, 0, 0, mask); 283 - break; 284 - case CPACF_PCKMO: 285 - __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask); 286 - break; 287 - case CPACF_PRNO: 288 - __cpacf_query_rre(CPACF_PRNO, 2, 4, mask); 289 - break; 290 - default: 291 - __cpacf_bad_opcode(); 292 - } 254 + __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY); 293 255 } 294 256 295 257 static __always_inline int __cpacf_check_opcode(unsigned int opcode) ··· 317 269 return test_facility(57); /* check for MSA5 */ 318 270 case CPACF_KMA: 319 271 return test_facility(146); /* check for MSA8 */ 272 + case CPACF_KDSA: 273 + return test_facility(155); /* check 
for MSA9 */ 320 274 default: 321 275 __cpacf_bad_opcode(); 322 276 return 0; ··· 326 276 } 327 277 328 278 /** 329 - * cpacf_query() - check if a specific CPACF function is available 279 + * cpacf_query() - Query the function code mask for this CPACF opcode 330 280 * @opcode: the opcode of the crypto instruction 331 - * @func: the function code to test for 281 + * @mask: ptr to struct cpacf_mask_t 332 282 * 333 283 * Executes the query function for the given crypto instruction @opcode 334 284 * and checks if @func is available 335 285 * 336 - * Returns 1 if @func is available for @opcode, 0 otherwise 286 + * On success 1 is returned and the mask is filled with the function 287 + * code mask for this CPACF opcode, otherwise 0 is returned. 337 288 */ 338 289 static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask) 339 290 { ··· 351 300 return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; 352 301 } 353 302 354 - static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func) 303 + static __always_inline int cpacf_query_func(unsigned int opcode, 304 + unsigned int func) 355 305 { 356 306 cpacf_mask_t mask; 357 307 358 308 if (cpacf_query(opcode, &mask)) 359 309 return cpacf_test_func(&mask, func); 310 + return 0; 311 + } 312 + 313 + static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) 314 + { 315 + __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO); 316 + } 317 + 318 + /** 319 + * cpacf_qai() - Get the query authentication information for a CPACF opcode 320 + * @opcode: the opcode of the crypto instruction 321 + * @mask: ptr to struct cpacf_qai_t 322 + * 323 + * Executes the query authentication information function for the given crypto 324 + * instruction @opcode and checks if @func is available 325 + * 326 + * On success 1 is returned and the mask is filled with the query authentication 327 + * information for this CPACF opcode, otherwise 0 is returned. 
328 + */ 329 + static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) 330 + { 331 + if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) { 332 + __cpacf_qai(opcode, qai); 333 + return 1; 334 + } 335 + memset(qai, 0, sizeof(*qai)); 360 336 return 0; 361 337 } 362 338 ··· 469 391 asm volatile( 470 392 " lgr 0,%[fc]\n" 471 393 " lgr 1,%[pba]\n" 472 - "0: .insn rre,%[opc] << 16,0,%[src]\n" 394 + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" 473 395 " brc 1,0b\n" /* handle partial completion */ 474 396 : [src] "+&d" (s.pair) 475 397 : [fc] "d" (func), [pba] "d" ((unsigned long)(param)), ··· 494 416 asm volatile( 495 417 " lgr 0,%[fc]\n" 496 418 " lgr 1,%[pba]\n" 497 - "0: .insn rre,%[opc] << 16,0,%[src]\n" 419 + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" 498 420 " brc 1,0b\n" /* handle partial completion */ 499 421 : [src] "+&d" (s.pair) 500 422 : [fc] "d" (func), [pba] "d" ((unsigned long)param), ··· 503 425 } 504 426 505 427 /** 428 + * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 429 + * instruction and updates flags in gr0 430 + * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines 431 + * @param: address of parameter block; see POP for details on each func 432 + * @src: address of source memory area 433 + * @src_len: length of src operand in bytes 434 + * 435 + * Returns 0 for the query func, number of processed bytes for digest funcs 436 + */ 437 + static inline int _cpacf_kmac(unsigned long *gr0, void *param, 438 + const u8 *src, long src_len) 439 + { 440 + union register_pair s; 441 + 442 + s.even = (unsigned long)src; 443 + s.odd = (unsigned long)src_len; 444 + asm volatile( 445 + " lgr 0,%[r0]\n" 446 + " lgr 1,%[pba]\n" 447 + "0: .insn rre,%[opc] << 16,0,%[src]\n" 448 + " brc 1,0b\n" /* handle partial completion */ 449 + " lgr %[r0],0\n" 450 + : [r0] "+d" (*gr0), [src] "+&d" (s.pair) 451 + : [pba] "d" ((unsigned long)param), 452 + [opc] "i" (CPACF_KMAC) 453 + : "cc", "memory", "0", 
"1"); 454 + 455 + return src_len - s.odd; 456 + } 457 + 458 + /** 506 459 * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 507 - * instruction 508 - * @func: the function code passed to KM; see CPACF_KMAC_xxx defines 460 + * instruction 461 + * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines 509 462 * @param: address of parameter block; see POP for details on each func 510 463 * @src: address of source memory area 511 464 * @src_len: length of src operand in bytes ··· 546 437 static inline int cpacf_kmac(unsigned long func, void *param, 547 438 const u8 *src, long src_len) 548 439 { 549 - union register_pair s; 550 - 551 - s.even = (unsigned long)src; 552 - s.odd = (unsigned long)src_len; 553 - asm volatile( 554 - " lgr 0,%[fc]\n" 555 - " lgr 1,%[pba]\n" 556 - "0: .insn rre,%[opc] << 16,0,%[src]\n" 557 - " brc 1,0b\n" /* handle partial completion */ 558 - : [src] "+&d" (s.pair) 559 - : [fc] "d" (func), [pba] "d" ((unsigned long)param), 560 - [opc] "i" (CPACF_KMAC) 561 - : "cc", "memory", "0", "1"); 562 - 563 - return src_len - s.odd; 440 + return _cpacf_kmac(&func, param, src, src_len); 564 441 } 565 442 566 443 /**
+3 -2
arch/s390/include/asm/ctlreg.h
··· 202 202 unsigned long : 3; 203 203 unsigned long ccc : 1; /* Cryptography counter control */ 204 204 unsigned long pec : 1; /* PAI extension control */ 205 - unsigned long : 17; 206 - unsigned long : 3; 205 + unsigned long : 15; 206 + unsigned long wti : 1; /* Warning-track */ 207 + unsigned long : 4; 207 208 unsigned long lap : 1; /* Low-address-protection control */ 208 209 unsigned long : 4; 209 210 unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+9
arch/s390/include/asm/diag.h
··· 38 38 DIAG_STAT_X308, 39 39 DIAG_STAT_X318, 40 40 DIAG_STAT_X320, 41 + DIAG_STAT_X49C, 41 42 DIAG_STAT_X500, 42 43 NR_DIAG_STAT 43 44 }; ··· 363 362 void _diag0c_amode31(unsigned long rx); 364 363 void _diag308_reset_amode31(void); 365 364 int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len); 365 + 366 + /* diag 49c subcodes */ 367 + enum diag49c_sc { 368 + DIAG49C_SUBC_ACK = 0, 369 + DIAG49C_SUBC_REG = 1 370 + }; 371 + 372 + int diag49c(unsigned long subcode); 366 373 367 374 #endif /* _ASM_S390_DIAG_H */
+16 -1
arch/s390/include/asm/ftrace.h
··· 6 6 #define MCOUNT_INSN_SIZE 6 7 7 8 8 #ifndef __ASSEMBLY__ 9 + #include <asm/stacktrace.h> 9 10 10 - unsigned long return_address(unsigned int n); 11 + static __always_inline unsigned long return_address(unsigned int n) 12 + { 13 + struct stack_frame *sf; 14 + 15 + if (!n) 16 + return (unsigned long)__builtin_return_address(0); 17 + 18 + sf = (struct stack_frame *)current_frame_address(); 19 + do { 20 + sf = (struct stack_frame *)sf->back_chain; 21 + if (!sf) 22 + return 0; 23 + } while (--n); 24 + return sf->gprs[8]; 25 + } 11 26 #define ftrace_return_address(n) return_address(n) 12 27 13 28 void ftrace_caller(void);
+14
arch/s390/include/asm/hiperdispatch.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright IBM Corp. 2024 4 + */ 5 + 6 + #ifndef _ASM_HIPERDISPATCH_H 7 + #define _ASM_HIPERDISPATCH_H 8 + 9 + void hd_reset_state(void); 10 + void hd_add_core(int cpu); 11 + void hd_disable_hiperdispatch(void); 12 + int hd_enable_hiperdispatch(void); 13 + 14 + #endif /* _ASM_HIPERDISPATCH_H */
+2
arch/s390/include/asm/irq.h
··· 47 47 IRQEXT_CMS, 48 48 IRQEXT_CMC, 49 49 IRQEXT_FTP, 50 + IRQEXT_WTI, 50 51 IRQIO_CIO, 51 52 IRQIO_DAS, 52 53 IRQIO_C15, ··· 100 99 enum irq_subclass { 101 100 IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, 102 101 IRQ_SUBCLASS_SERVICE_SIGNAL = 9, 102 + IRQ_SUBCLASS_WARNING_TRACK = 33, 103 103 }; 104 104 105 105 #define CR0_IRQ_SUBCLASS_MASK \
+2 -2
arch/s390/include/asm/lowcore.h
··· 98 98 psw_t io_new_psw; /* 0x01f0 */ 99 99 100 100 /* Save areas. */ 101 - __u64 save_area_sync[8]; /* 0x0200 */ 102 - __u64 save_area_async[8]; /* 0x0240 */ 101 + __u64 save_area[8]; /* 0x0200 */ 102 + __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */ 103 103 __u64 save_area_restart[1]; /* 0x0280 */ 104 104 105 105 __u64 pcpu; /* 0x0288 */
+38
arch/s390/include/asm/march.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_S390_MARCH_H 4 + #define __ASM_S390_MARCH_H 5 + 6 + #include <linux/kconfig.h> 7 + 8 + #define MARCH_HAS_Z10_FEATURES 1 9 + 10 + #ifndef __DECOMPRESSOR 11 + 12 + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 13 + #define MARCH_HAS_Z196_FEATURES 1 14 + #endif 15 + 16 + #ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES 17 + #define MARCH_HAS_ZEC12_FEATURES 1 18 + #endif 19 + 20 + #ifdef CONFIG_HAVE_MARCH_Z13_FEATURES 21 + #define MARCH_HAS_Z13_FEATURES 1 22 + #endif 23 + 24 + #ifdef CONFIG_HAVE_MARCH_Z14_FEATURES 25 + #define MARCH_HAS_Z14_FEATURES 1 26 + #endif 27 + 28 + #ifdef CONFIG_HAVE_MARCH_Z15_FEATURES 29 + #define MARCH_HAS_Z15_FEATURES 1 30 + #endif 31 + 32 + #ifdef CONFIG_HAVE_MARCH_Z16_FEATURES 33 + #define MARCH_HAS_Z16_FEATURES 1 34 + #endif 35 + 36 + #endif /* __DECOMPRESSOR */ 37 + 38 + #endif /* __ASM_S390_MARCH_H */
+4 -3
arch/s390/include/asm/percpu.h
··· 4 4 5 5 #include <linux/preempt.h> 6 6 #include <asm/cmpxchg.h> 7 + #include <asm/march.h> 7 8 8 9 /* 9 10 * s390 uses its own implementation for per cpu data, the offset of ··· 51 50 #define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) 52 51 #define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) 53 52 54 - #ifndef CONFIG_HAVE_MARCH_Z196_FEATURES 53 + #ifndef MARCH_HAS_Z196_FEATURES 55 54 56 55 #define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) 57 56 #define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) ··· 62 61 #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) 63 62 #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) 64 63 65 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 64 + #else /* MARCH_HAS_Z196_FEATURES */ 66 65 67 66 #define arch_this_cpu_add(pcp, val, op1, op2, szcast) \ 68 67 { \ ··· 130 129 #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao") 131 130 #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog") 132 131 133 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 132 + #endif /* MARCH_HAS_Z196_FEATURES */ 134 133 135 134 #define arch_this_cpu_cmpxchg(pcp, oval, nval) \ 136 135 ({ \
-24
arch/s390/include/asm/perf_event.h
··· 48 48 unsigned long reserved:63; /* reserved */ 49 49 }; 50 50 51 - /* Perf PMU definitions for the counter facility */ 52 - #define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ 53 - 54 - /* Perf PMU definitions for the sampling facility */ 55 - #define PERF_CPUM_SF_MAX_CTR 2 56 - #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ 57 - #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ 58 - #define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ 59 - #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ 60 - #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ 61 - #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ 62 - PERF_CPUM_SF_DIAG_MODE) 63 - #define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ 64 - 65 - #define REG_NONE 0 66 - #define REG_OVERFLOW 1 67 - #define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) 68 - #define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) 69 - #define TEAR_REG(hwc) ((hwc)->last_tag) 70 - #define SAMPL_RATE(hwc) ((hwc)->event_base) 71 - #define SAMPL_FLAGS(hwc) ((hwc)->config_base) 72 - #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) 73 - #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) 74 - 75 51 #define perf_arch_fetch_caller_regs(regs, __ip) do { \ 76 52 (regs)->psw.addr = (__ip); \ 77 53 (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
+2 -2
arch/s390/include/asm/pkey.h
··· 22 22 * @param protkey pointer to buffer receiving the protected key 23 23 * @return 0 on success, negative errno value on failure 24 24 */ 25 - int pkey_keyblob2pkey(const u8 *key, u32 keylen, 26 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 25 + int pkey_key2protkey(const u8 *key, u32 keylen, 26 + u8 *protkey, u32 *protkeylen, u32 *protkeytype); 27 27 28 28 #endif /* _KAPI_PKEY_H */
+4 -3
arch/s390/include/asm/preempt.h
··· 5 5 #include <asm/current.h> 6 6 #include <linux/thread_info.h> 7 7 #include <asm/atomic_ops.h> 8 + #include <asm/march.h> 8 9 9 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 10 + #ifdef MARCH_HAS_Z196_FEATURES 10 11 11 12 /* We use the MSB mostly because its available */ 12 13 #define PREEMPT_NEED_RESCHED 0x80000000 ··· 76 75 preempt_offset); 77 76 } 78 77 79 - #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 78 + #else /* MARCH_HAS_Z196_FEATURES */ 80 79 81 80 #define PREEMPT_ENABLED (0) 82 81 ··· 124 123 tif_need_resched()); 125 124 } 126 125 127 - #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 126 + #endif /* MARCH_HAS_Z196_FEATURES */ 128 127 129 128 #define init_task_preempt_count(p) do { } while (0) 130 129 /* Deferred to CPU bringup time */
+1
arch/s390/include/asm/processor.h
··· 44 44 unsigned long ec_mask; /* bit mask for ec_xxx functions */ 45 45 unsigned long ec_clk; /* sigp timestamp for ec_xxx */ 46 46 unsigned long flags; /* per CPU flags */ 47 + unsigned long capacity; /* cpu capacity for scheduler */ 47 48 signed char state; /* physical cpu state */ 48 49 signed char polarization; /* physical polarization */ 49 50 u16 address; /* physical cpu address */
+1
arch/s390/include/asm/sclp.h
··· 72 72 unsigned char has_core_type : 1; 73 73 unsigned char has_sprp : 1; 74 74 unsigned char has_hvs : 1; 75 + unsigned char has_wti : 1; 75 76 unsigned char has_esca : 1; 76 77 unsigned char has_sief2 : 1; 77 78 unsigned char has_64bscao : 1;
+4
arch/s390/include/asm/setup.h
··· 34 34 #define MACHINE_FLAG_SCC BIT(17) 35 35 #define MACHINE_FLAG_PCI_MIO BIT(18) 36 36 #define MACHINE_FLAG_RDP BIT(19) 37 + #define MACHINE_FLAG_SEQ_INSN BIT(20) 37 38 38 39 #define LPP_MAGIC BIT(31) 39 40 #define LPP_PID_MASK _AC(0xffffffff, UL) ··· 96 95 #define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC) 97 96 #define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO) 98 97 #define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP) 98 + #define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN) 99 99 100 100 /* 101 101 * Console mode. Override with conmode= ··· 116 114 #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) 117 115 #define SET_CONSOLE_VT220 do { console_mode = 4; } while (0) 118 116 #define SET_CONSOLE_HVC do { console_mode = 5; } while (0) 117 + 118 + void register_early_console(void); 119 119 120 120 #ifdef CONFIG_VMCP 121 121 void vmcp_cma_reserve(void);
+4
arch/s390/include/asm/smp.h
··· 12 12 #include <asm/processor.h> 13 13 14 14 #define raw_smp_processor_id() (get_lowcore()->cpu_nr) 15 + #define arch_scale_cpu_capacity smp_cpu_get_capacity 15 16 16 17 extern struct mutex smp_cpu_state_mutex; 17 18 extern unsigned int smp_cpu_mt_shift; ··· 35 34 extern void smp_yield_cpu(int cpu); 36 35 extern void smp_cpu_set_polarization(int cpu, int val); 37 36 extern int smp_cpu_get_polarization(int cpu); 37 + extern void smp_cpu_set_capacity(int cpu, unsigned long val); 38 + extern void smp_set_core_capacity(int cpu, unsigned long val); 39 + extern unsigned long smp_cpu_get_capacity(int cpu); 38 40 extern int smp_cpu_get_cpu_address(int cpu); 39 41 extern void smp_fill_possible_mask(void); 40 42 extern void smp_detect_cpus(void);
+3
arch/s390/include/asm/topology.h
··· 67 67 #define POLARIZATION_VM (2) 68 68 #define POLARIZATION_VH (3) 69 69 70 + #define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE 71 + #define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3) 72 + 70 73 #define SD_BOOK_INIT SD_CPU_INIT 71 74 72 75 #ifdef CONFIG_NUMA
+58
arch/s390/include/asm/trace/hiperdispatch.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Tracepoint header for hiperdispatch 4 + * 5 + * Copyright IBM Corp. 2024 6 + */ 7 + 8 + #undef TRACE_SYSTEM 9 + #define TRACE_SYSTEM s390 10 + 11 + #if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ) 12 + #define _TRACE_S390_HIPERDISPATCH_H 13 + 14 + #include <linux/tracepoint.h> 15 + 16 + #undef TRACE_INCLUDE_PATH 17 + #undef TRACE_INCLUDE_FILE 18 + 19 + #define TRACE_INCLUDE_PATH asm/trace 20 + #define TRACE_INCLUDE_FILE hiperdispatch 21 + 22 + TRACE_EVENT(s390_hd_work_fn, 23 + TP_PROTO(int steal_time_percentage, 24 + int entitled_core_count, 25 + int highcap_core_count), 26 + TP_ARGS(steal_time_percentage, 27 + entitled_core_count, 28 + highcap_core_count), 29 + TP_STRUCT__entry(__field(int, steal_time_percentage) 30 + __field(int, entitled_core_count) 31 + __field(int, highcap_core_count)), 32 + TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage; 33 + __entry->entitled_core_count = entitled_core_count; 34 + __entry->highcap_core_count = highcap_core_count;), 35 + TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d", 36 + __entry->steal_time_percentage, 37 + __entry->entitled_core_count, 38 + __entry->highcap_core_count) 39 + ); 40 + 41 + TRACE_EVENT(s390_hd_rebuild_domains, 42 + TP_PROTO(int current_highcap_core_count, 43 + int new_highcap_core_count), 44 + TP_ARGS(current_highcap_core_count, 45 + new_highcap_core_count), 46 + TP_STRUCT__entry(__field(int, current_highcap_core_count) 47 + __field(int, new_highcap_core_count)), 48 + TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count; 49 + __entry->new_highcap_core_count = new_highcap_core_count), 50 + TP_printk("change highcap_core_count: %u -> %u", 51 + __entry->current_highcap_core_count, 52 + __entry->new_highcap_core_count) 53 + ); 54 + 55 + #endif /* _TRACE_S390_HIPERDISPATCH_H */ 56 + 57 + /* This part must be outside protection */ 58 + #include 
<trace/define_trace.h>
+5
arch/s390/include/uapi/asm/pkey.h
··· 41 41 #define PKEY_KEYTYPE_ECC_P521 7 42 42 #define PKEY_KEYTYPE_ECC_ED25519 8 43 43 #define PKEY_KEYTYPE_ECC_ED448 9 44 + #define PKEY_KEYTYPE_AES_XTS_128 10 45 + #define PKEY_KEYTYPE_AES_XTS_256 11 46 + #define PKEY_KEYTYPE_HMAC_512 12 47 + #define PKEY_KEYTYPE_HMAC_1024 13 44 48 45 49 /* the newer ioctls use a pkey_key_type enum for type information */ 46 50 enum pkey_key_type { ··· 54 50 PKEY_TYPE_CCA_ECC = (__u32) 0x1f, 55 51 PKEY_TYPE_EP11_AES = (__u32) 6, 56 52 PKEY_TYPE_EP11_ECC = (__u32) 7, 53 + PKEY_TYPE_PROTKEY = (__u32) 8, 57 54 }; 58 55 59 56 /* the newer ioctls use a pkey_key_size enum for key size information */
+4 -3
arch/s390/kernel/Makefile
··· 36 36 CFLAGS_dumpstack.o += -fno-optimize-sibling-calls 37 37 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls 38 38 39 - obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o 39 + obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o 40 40 obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 41 41 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o 42 42 obj-y += sysinfo.o lgr.o os_info.o ctlreg.o 43 43 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o 44 44 obj-y += entry.o reipl.o kdebugfs.o alternative.o 45 45 obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o 46 - obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o 46 + obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o 47 47 48 48 extra-y += vmlinux.lds 49 49 50 50 obj-$(CONFIG_SYSFS) += nospec-sysfs.o 51 51 CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) 52 52 53 + obj-$(CONFIG_SYSFS) += cpacf.o 53 54 obj-$(CONFIG_MODULES) += module.o 54 - obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o 55 + obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o 55 56 obj-$(CONFIG_NUMA) += numa.o 56 57 obj-$(CONFIG_AUDIT) += audit.o 57 58 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
+1 -2
arch/s390/kernel/asm-offsets.c
··· 112 112 OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw); 113 113 OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw); 114 114 /* software defined lowcore locations 0x200 - 0xdff*/ 115 - OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync); 116 - OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async); 115 + OFFSET(__LC_SAVE_AREA, lowcore, save_area); 117 116 OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart); 118 117 OFFSET(__LC_PCPU, lowcore, pcpu); 119 118 OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
+119
arch/s390/kernel/cpacf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright IBM Corp. 2024 4 + */ 5 + 6 + #define KMSG_COMPONENT "cpacf" 7 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + 9 + #include <linux/cpu.h> 10 + #include <linux/device.h> 11 + #include <linux/sysfs.h> 12 + #include <asm/cpacf.h> 13 + 14 + #define CPACF_QUERY(name, instruction) \ 15 + static ssize_t name##_query_raw_read(struct file *fp, \ 16 + struct kobject *kobj, \ 17 + struct bin_attribute *attr, \ 18 + char *buf, loff_t offs, \ 19 + size_t count) \ 20 + { \ 21 + cpacf_mask_t mask; \ 22 + \ 23 + if (!cpacf_query(CPACF_##instruction, &mask)) \ 24 + return -EOPNOTSUPP; \ 25 + return memory_read_from_buffer(buf, count, &offs, &mask, sizeof(mask)); \ 26 + } \ 27 + static BIN_ATTR_RO(name##_query_raw, sizeof(cpacf_mask_t)) 28 + 29 + CPACF_QUERY(km, KM); 30 + CPACF_QUERY(kmc, KMC); 31 + CPACF_QUERY(kimd, KIMD); 32 + CPACF_QUERY(klmd, KLMD); 33 + CPACF_QUERY(kmac, KMAC); 34 + CPACF_QUERY(pckmo, PCKMO); 35 + CPACF_QUERY(kmf, KMF); 36 + CPACF_QUERY(kmctr, KMCTR); 37 + CPACF_QUERY(kmo, KMO); 38 + CPACF_QUERY(pcc, PCC); 39 + CPACF_QUERY(prno, PRNO); 40 + CPACF_QUERY(kma, KMA); 41 + CPACF_QUERY(kdsa, KDSA); 42 + 43 + #define CPACF_QAI(name, instruction) \ 44 + static ssize_t name##_query_auth_info_raw_read( \ 45 + struct file *fp, struct kobject *kobj, \ 46 + struct bin_attribute *attr, char *buf, loff_t offs, \ 47 + size_t count) \ 48 + { \ 49 + cpacf_qai_t qai; \ 50 + \ 51 + if (!cpacf_qai(CPACF_##instruction, &qai)) \ 52 + return -EOPNOTSUPP; \ 53 + return memory_read_from_buffer(buf, count, &offs, &qai, \ 54 + sizeof(qai)); \ 55 + } \ 56 + static BIN_ATTR_RO(name##_query_auth_info_raw, sizeof(cpacf_qai_t)) 57 + 58 + CPACF_QAI(km, KM); 59 + CPACF_QAI(kmc, KMC); 60 + CPACF_QAI(kimd, KIMD); 61 + CPACF_QAI(klmd, KLMD); 62 + CPACF_QAI(kmac, KMAC); 63 + CPACF_QAI(pckmo, PCKMO); 64 + CPACF_QAI(kmf, KMF); 65 + CPACF_QAI(kmctr, KMCTR); 66 + CPACF_QAI(kmo, KMO); 67 + CPACF_QAI(pcc, PCC); 68 + CPACF_QAI(prno, 
PRNO); 69 + CPACF_QAI(kma, KMA); 70 + CPACF_QAI(kdsa, KDSA); 71 + 72 + static struct bin_attribute *cpacf_attrs[] = { 73 + &bin_attr_km_query_raw, 74 + &bin_attr_kmc_query_raw, 75 + &bin_attr_kimd_query_raw, 76 + &bin_attr_klmd_query_raw, 77 + &bin_attr_kmac_query_raw, 78 + &bin_attr_pckmo_query_raw, 79 + &bin_attr_kmf_query_raw, 80 + &bin_attr_kmctr_query_raw, 81 + &bin_attr_kmo_query_raw, 82 + &bin_attr_pcc_query_raw, 83 + &bin_attr_prno_query_raw, 84 + &bin_attr_kma_query_raw, 85 + &bin_attr_kdsa_query_raw, 86 + &bin_attr_km_query_auth_info_raw, 87 + &bin_attr_kmc_query_auth_info_raw, 88 + &bin_attr_kimd_query_auth_info_raw, 89 + &bin_attr_klmd_query_auth_info_raw, 90 + &bin_attr_kmac_query_auth_info_raw, 91 + &bin_attr_pckmo_query_auth_info_raw, 92 + &bin_attr_kmf_query_auth_info_raw, 93 + &bin_attr_kmctr_query_auth_info_raw, 94 + &bin_attr_kmo_query_auth_info_raw, 95 + &bin_attr_pcc_query_auth_info_raw, 96 + &bin_attr_prno_query_auth_info_raw, 97 + &bin_attr_kma_query_auth_info_raw, 98 + &bin_attr_kdsa_query_auth_info_raw, 99 + NULL, 100 + }; 101 + 102 + static const struct attribute_group cpacf_attr_grp = { 103 + .name = "cpacf", 104 + .bin_attrs = cpacf_attrs, 105 + }; 106 + 107 + static int __init cpacf_init(void) 108 + { 109 + struct device *cpu_root; 110 + int rc = 0; 111 + 112 + cpu_root = bus_get_dev_root(&cpu_subsys); 113 + if (cpu_root) { 114 + rc = sysfs_create_group(&cpu_root->kobj, &cpacf_attr_grp); 115 + put_device(cpu_root); 116 + } 117 + return rc; 118 + } 119 + device_initcall(cpacf_init);
+17
arch/s390/kernel/diag.c
··· 52 52 [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, 53 53 [DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" }, 54 54 [DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" }, 55 + [DIAG_STAT_X49C] = { .code = 0x49c, .name = "Warning-Track Interruption" }, 55 56 [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, 56 57 }; 57 58 ··· 304 303 return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode); 305 304 } 306 305 EXPORT_SYMBOL(diag26c); 306 + 307 + int diag49c(unsigned long subcode) 308 + { 309 + int rc; 310 + 311 + diag_stat_inc(DIAG_STAT_X49C); 312 + asm volatile( 313 + " diag %[subcode],0,0x49c\n" 314 + " ipm %[rc]\n" 315 + " srl %[rc],28\n" 316 + : [rc] "=d" (rc) 317 + : [subcode] "d" (subcode) 318 + : "cc"); 319 + return rc; 320 + } 321 + EXPORT_SYMBOL(diag49c);
+12 -8
arch/s390/kernel/dis.c
··· 122 122 U8_32, /* 8 bit unsigned value starting at 32 */ 123 123 U12_16, /* 12 bit unsigned value starting at 16 */ 124 124 U16_16, /* 16 bit unsigned value starting at 16 */ 125 + U16_20, /* 16 bit unsigned value starting at 20 */ 125 126 U16_32, /* 16 bit unsigned value starting at 32 */ 126 127 U32_16, /* 32 bit unsigned value starting at 16 */ 127 128 VX_12, /* Vector index register starting at position 12 */ ··· 185 184 [U8_32] = { 8, 32, 0 }, 186 185 [U12_16] = { 12, 16, 0 }, 187 186 [U16_16] = { 16, 16, 0 }, 187 + [U16_20] = { 16, 20, 0 }, 188 188 [U16_32] = { 16, 32, 0 }, 189 189 [U32_16] = { 32, 16, 0 }, 190 190 [VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR }, ··· 259 257 [INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 }, 260 258 [INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 }, 261 259 [INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 }, 262 - [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 }, 263 260 [INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 }, 264 261 [INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 }, 265 262 [INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 }, ··· 301 300 [INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 }, 302 301 [INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 }, 303 302 [INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 }, 303 + [INSTR_VRI_VV0UU] = { V_8, V_12, U8_28, U4_24, 0, 0 }, 304 304 [INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 }, 305 305 [INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 }, 306 306 [INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 }, 307 307 [INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 }, 308 308 [INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 }, 309 309 [INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 }, 310 - [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 }, 310 + [INSTR_VRI_VVV0UV] = { V_8, V_12, V_16, V_32, U8_24, 0 }, 311 + [INSTR_VRR_0V0U] = { V_12, U16_20, 0, 0, 0, 0 }, 311 312 [INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 }, 313 + 
[INSTR_VRR_0VVU] = { V_12, V_16, U16_20, 0, 0, 0 }, 312 314 [INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 }, 313 315 [INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 }, 314 316 [INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 }, ··· 459 455 if (separator) 460 456 ptr += sprintf(ptr, "%c", separator); 461 457 if (operand->flags & OPERAND_GPR) 462 - ptr += sprintf(ptr, "%%r%i", value); 458 + ptr += sprintf(ptr, "%%r%u", value); 463 459 else if (operand->flags & OPERAND_FPR) 464 - ptr += sprintf(ptr, "%%f%i", value); 460 + ptr += sprintf(ptr, "%%f%u", value); 465 461 else if (operand->flags & OPERAND_AR) 466 - ptr += sprintf(ptr, "%%a%i", value); 462 + ptr += sprintf(ptr, "%%a%u", value); 467 463 else if (operand->flags & OPERAND_CR) 468 - ptr += sprintf(ptr, "%%c%i", value); 464 + ptr += sprintf(ptr, "%%c%u", value); 469 465 else if (operand->flags & OPERAND_VR) 470 - ptr += sprintf(ptr, "%%v%i", value); 466 + ptr += sprintf(ptr, "%%v%u", value); 471 467 else if (operand->flags & OPERAND_PCREL) { 472 468 void *pcrel = (void *)((int)value + addr); 473 469 474 470 ptr += sprintf(ptr, "%px", pcrel); 475 471 } else if (operand->flags & OPERAND_SIGNED) 476 - ptr += sprintf(ptr, "%i", value); 472 + ptr += sprintf(ptr, "%i", (int)value); 477 473 else 478 474 ptr += sprintf(ptr, "%u", value); 479 475 if (operand->flags & OPERAND_DISP)
+33 -5
arch/s390/kernel/early.c
··· 7 7 #define KMSG_COMPONENT "setup" 8 8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 9 10 + #include <linux/sched/debug.h> 10 11 #include <linux/compiler.h> 11 12 #include <linux/init.h> 12 13 #include <linux/errno.h> ··· 176 175 topology_max_mnest = max_mnest; 177 176 } 178 177 179 - void __do_early_pgm_check(struct pt_regs *regs) 178 + void __init __do_early_pgm_check(struct pt_regs *regs) 180 179 { 181 - if (!fixup_exception(regs)) 182 - disabled_wait(); 180 + struct lowcore *lc = get_lowcore(); 181 + unsigned long ip; 182 + 183 + regs->int_code = lc->pgm_int_code; 184 + regs->int_parm_long = lc->trans_exc_code; 185 + ip = __rewind_psw(regs->psw, regs->int_code >> 16); 186 + 187 + /* Monitor Event? Might be a warning */ 188 + if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) { 189 + if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN) 190 + return; 191 + } 192 + if (fixup_exception(regs)) 193 + return; 194 + /* 195 + * Unhandled exception - system cannot continue but try to get some 196 + * helpful messages to the console. Use early_printk() to print 197 + * some basic information in case it is too early for printk(). 
198 + */ 199 + register_early_console(); 200 + early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n", 201 + regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr); 202 + show_regs(regs); 203 + disabled_wait(); 183 204 } 184 205 185 206 static noinline __init void setup_lowcore_early(void) 186 207 { 208 + struct lowcore *lc = get_lowcore(); 187 209 psw_t psw; 188 210 189 211 psw.addr = (unsigned long)early_pgm_check_handler; 190 212 psw.mask = PSW_KERNEL_BITS; 191 - get_lowcore()->program_new_psw = psw; 192 - get_lowcore()->preempt_count = INIT_PREEMPT_COUNT; 213 + lc->program_new_psw = psw; 214 + lc->preempt_count = INIT_PREEMPT_COUNT; 215 + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 216 + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 193 217 } 194 218 195 219 static __init void detect_diag9c(void) ··· 268 242 } 269 243 if (test_facility(194)) 270 244 get_lowcore()->machine_flags |= MACHINE_FLAG_RDP; 245 + if (test_facility(85)) 246 + get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN; 271 247 } 272 248 273 249 static inline void save_vector_registers(void)
+12 -4
arch/s390/kernel/early_printk.c
··· 6 6 #include <linux/console.h> 7 7 #include <linux/kernel.h> 8 8 #include <linux/init.h> 9 + #include <asm/setup.h> 9 10 #include <asm/sclp.h> 10 11 11 12 static void sclp_early_write(struct console *con, const char *s, unsigned int len) ··· 21 20 .index = -1, 22 21 }; 23 22 23 + void __init register_early_console(void) 24 + { 25 + if (early_console) 26 + return; 27 + if (!sclp.has_linemode && !sclp.has_vt220) 28 + return; 29 + early_console = &sclp_early_console; 30 + register_console(early_console); 31 + } 32 + 24 33 static int __init setup_early_printk(char *buf) 25 34 { 26 35 if (early_console) ··· 38 27 /* Accept only "earlyprintk" and "earlyprintk=sclp" */ 39 28 if (buf && !str_has_prefix(buf, "sclp")) 40 29 return 0; 41 - if (!sclp.has_linemode && !sclp.has_vt220) 42 - return 0; 43 - early_console = &sclp_early_console; 44 - register_console(early_console); 30 + register_early_console(); 45 31 return 0; 46 32 } 47 33 early_param("earlyprintk", setup_early_printk);
-23
arch/s390/kernel/earlypgm.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Copyright IBM Corp. 2006, 2007 4 - * Author(s): Michael Holzheu <holzheu@de.ibm.com> 5 - */ 6 - 7 - #include <linux/linkage.h> 8 - #include <asm/asm-offsets.h> 9 - 10 - SYM_CODE_START(early_pgm_check_handler) 11 - stmg %r8,%r15,__LC_SAVE_AREA_SYNC 12 - aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) 13 - la %r11,STACK_FRAME_OVERHEAD(%r15) 14 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 15 - stmg %r0,%r7,__PT_R0(%r11) 16 - mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW 17 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 18 - lgr %r2,%r11 19 - brasl %r14,__do_early_pgm_check 20 - mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 21 - lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 22 - lpswe __LC_RETURN_PSW 23 - SYM_CODE_END(early_pgm_check_handler)
+27 -9
arch/s390/kernel/entry.S
··· 264 264 */ 265 265 266 266 SYM_CODE_START(system_call) 267 - STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC 267 + STMG_LC %r8,%r15,__LC_SAVE_AREA 268 268 GET_LC %r13 269 269 stpt __LC_SYS_ENTER_TIMER(%r13) 270 270 BPOFF ··· 287 287 xgr %r10,%r10 288 288 xgr %r11,%r11 289 289 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 290 - mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13) 290 + mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13) 291 291 MBEAR %r2,%r13 292 292 lgr %r3,%r14 293 293 brasl %r14,__do_syscall ··· 323 323 */ 324 324 325 325 SYM_CODE_START(pgm_check_handler) 326 - STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC 326 + STMG_LC %r8,%r15,__LC_SAVE_AREA 327 327 GET_LC %r13 328 328 stpt __LC_SYS_ENTER_TIMER(%r13) 329 329 BPOFF ··· 338 338 jnz 2f # -> enabled, can't be a double fault 339 339 tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception 340 340 jnz .Lpgm_svcper # -> single stepped svc 341 - 2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13 341 + 2: CHECK_STACK __LC_SAVE_AREA,%r13 342 342 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 343 343 # CHECK_VMAP_STACK branches to stack_overflow or 4f 344 - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f 344 + CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f 345 345 3: lg %r15,__LC_KERNEL_STACK(%r13) 346 346 4: la %r11,STACK_FRAME_OVERHEAD(%r15) 347 347 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 348 348 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 349 349 stmg %r0,%r7,__PT_R0(%r11) 350 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13) 350 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) 351 351 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13) 352 352 stctg %c1,%c1,__PT_CR1(%r11) 353 353 #if IS_ENABLED(CONFIG_KVM) ··· 398 398 */ 399 399 .macro INT_HANDLER name,lc_old_psw,handler 400 400 SYM_CODE_START(\name) 401 - STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC 401 + STMG_LC %r8,%r15,__LC_SAVE_AREA 402 402 GET_LC %r13 403 403 stckf __LC_INT_CLOCK(%r13) 404 404 stpt __LC_SYS_ENTER_TIMER(%r13) ··· 414 414 BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 415 415 SIEEXIT 
__SF_SIE_CONTROL(%r15),%r13 416 416 #endif 417 - 0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13 417 + 0: CHECK_STACK __LC_SAVE_AREA,%r13 418 418 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 419 419 j 2f 420 420 1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) ··· 432 432 xgr %r7,%r7 433 433 xgr %r10,%r10 434 434 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 435 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13) 435 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) 436 436 MBEAR %r11,%r13 437 437 stmg %r8,%r9,__PT_PSW(%r11) 438 438 lgr %r2,%r11 # pass pointer to pt_regs ··· 598 598 brc 2,2b 599 599 3: j 3b 600 600 SYM_CODE_END(restart_int_handler) 601 + 602 + __INIT 603 + SYM_CODE_START(early_pgm_check_handler) 604 + STMG_LC %r8,%r15,__LC_SAVE_AREA 605 + GET_LC %r13 606 + aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) 607 + la %r11,STACK_FRAME_OVERHEAD(%r15) 608 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 609 + stmg %r0,%r7,__PT_R0(%r11) 610 + mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13) 611 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13) 612 + lgr %r2,%r11 613 + brasl %r14,__do_early_pgm_check 614 + mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 615 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 616 + LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE 617 + SYM_CODE_END(early_pgm_check_handler) 618 + __FINIT 601 619 602 620 .section .kprobes.text, "ax" 603 621
+60 -46
arch/s390/kernel/ftrace.c
··· 50 50 s32 disp; 51 51 } __packed; 52 52 53 - #ifdef CONFIG_MODULES 54 - static char *ftrace_plt; 55 - #endif /* CONFIG_MODULES */ 56 - 57 53 static const char *ftrace_shared_hotpatch_trampoline(const char **end) 58 54 { 59 55 const char *tstart, *tend; ··· 69 73 70 74 bool ftrace_need_init_nop(void) 71 75 { 72 - return true; 76 + return !MACHINE_HAS_SEQ_INSN; 73 77 } 74 78 75 79 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) 76 80 { 77 81 static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline = 78 82 __ftrace_hotpatch_trampolines_start; 79 - static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 }; 83 + static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 }; 80 84 static struct ftrace_hotpatch_trampoline *trampoline; 81 85 struct ftrace_hotpatch_trampoline **next_trampoline; 82 86 struct ftrace_hotpatch_trampoline *trampolines_end; 83 87 struct ftrace_hotpatch_trampoline tmp; 84 88 struct ftrace_insn *insn; 89 + struct ftrace_insn old; 85 90 const char *shared; 86 91 s32 disp; 87 92 ··· 96 99 if (mod) { 97 100 next_trampoline = &mod->arch.next_trampoline; 98 101 trampolines_end = mod->arch.trampolines_end; 99 - shared = ftrace_plt; 100 102 } 101 103 #endif 102 104 ··· 103 107 return -ENOMEM; 104 108 trampoline = (*next_trampoline)++; 105 109 110 + if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old))) 111 + return -EFAULT; 106 112 /* Check for the compiler-generated fentry nop (brcl 0, .). */ 107 - if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig)))) 113 + if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old)))) 108 114 return -EINVAL; 109 115 110 116 /* Generate the trampoline. 
*/ ··· 142 144 return trampoline; 143 145 } 144 146 145 - int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 146 - unsigned long addr) 147 + static inline struct ftrace_insn 148 + ftrace_generate_branch_insn(unsigned long ip, unsigned long target) 149 + { 150 + /* brasl r0,target or brcl 0,0 */ 151 + return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004, 152 + .disp = target ? (target - ip) / 2 : 0 }; 153 + } 154 + 155 + static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target, 156 + unsigned long target) 157 + { 158 + struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target); 159 + struct ftrace_insn new = ftrace_generate_branch_insn(ip, target); 160 + struct ftrace_insn old; 161 + 162 + if (!IS_ALIGNED(ip, 8)) 163 + return -EINVAL; 164 + if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old))) 165 + return -EFAULT; 166 + /* Verify that the to be replaced code matches what we expect. */ 167 + if (memcmp(&orig, &old, sizeof(old))) 168 + return -EINVAL; 169 + s390_kernel_write((void *)ip, &new, sizeof(new)); 170 + return 0; 171 + } 172 + 173 + static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec, 174 + unsigned long old_addr, 175 + unsigned long addr) 147 176 { 148 177 struct ftrace_hotpatch_trampoline *trampoline; 149 178 u64 old; ··· 184 159 return -EINVAL; 185 160 s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr)); 186 161 return 0; 162 + } 163 + 164 + int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 165 + unsigned long addr) 166 + { 167 + if (MACHINE_HAS_SEQ_INSN) 168 + return ftrace_patch_branch_insn(rec->ip, old_addr, addr); 169 + else 170 + return ftrace_modify_trampoline_call(rec, old_addr, addr); 187 171 } 188 172 189 173 static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable) ··· 213 179 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 214 180 unsigned long addr) 215 181 { 216 - /* Expect brcl 0xf,... 
*/ 217 - return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); 182 + /* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */ 183 + if (MACHINE_HAS_SEQ_INSN) 184 + return ftrace_patch_branch_insn(rec->ip, addr, 0); 185 + else 186 + return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); 218 187 } 219 188 220 - int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 189 + static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr) 221 190 { 222 191 struct ftrace_hotpatch_trampoline *trampoline; 223 192 ··· 230 193 s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr)); 231 194 /* Expect brcl 0x0,... */ 232 195 return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true); 196 + } 197 + 198 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 199 + { 200 + if (MACHINE_HAS_SEQ_INSN) 201 + return ftrace_patch_branch_insn(rec->ip, 0, addr); 202 + else 203 + return ftrace_make_trampoline_call(rec, addr); 233 204 } 234 205 235 206 int ftrace_update_ftrace_func(ftrace_func_t func) ··· 259 214 */ 260 215 text_poke_sync_lock(); 261 216 } 262 - 263 - #ifdef CONFIG_MODULES 264 - 265 - static int __init ftrace_plt_init(void) 266 - { 267 - const char *start, *end; 268 - 269 - ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE); 270 - if (!ftrace_plt) 271 - panic("cannot allocate ftrace plt\n"); 272 - 273 - start = ftrace_shared_hotpatch_trampoline(&end); 274 - memcpy(ftrace_plt, start, end - start); 275 - set_memory_rox((unsigned long)ftrace_plt, 1); 276 - return 0; 277 - } 278 - device_initcall(ftrace_plt_init); 279 - 280 - #endif /* CONFIG_MODULES */ 281 217 282 218 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 283 219 /* ··· 290 264 */ 291 265 int ftrace_enable_ftrace_graph_caller(void) 292 266 { 293 - int rc; 294 - 295 267 /* Expect brc 0xf,... 
*/ 296 - rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); 297 - if (rc) 298 - return rc; 299 - text_poke_sync_lock(); 300 - return 0; 268 + return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); 301 269 } 302 270 303 271 int ftrace_disable_ftrace_graph_caller(void) 304 272 { 305 - int rc; 306 - 307 273 /* Expect brc 0x0,... */ 308 - rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); 309 - if (rc) 310 - return rc; 311 - text_poke_sync_lock(); 312 - return 0; 274 + return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); 313 275 } 314 276 315 277 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-2
arch/s390/kernel/ftrace.h
··· 18 18 extern const char ftrace_shared_hotpatch_trampoline_br_end[]; 19 19 extern const char ftrace_shared_hotpatch_trampoline_exrl[]; 20 20 extern const char ftrace_shared_hotpatch_trampoline_exrl_end[]; 21 - extern const char ftrace_plt_template[]; 22 - extern const char ftrace_plt_template_end[]; 23 21 24 22 #endif /* _FTRACE_H */
+430
arch/s390/kernel/hiperdispatch.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright IBM Corp. 2024 4 + */ 5 + 6 + #define KMSG_COMPONENT "hd" 7 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + 9 + /* 10 + * Hiperdispatch: 11 + * Dynamically calculates the optimum number of high capacity COREs 12 + * by considering the state the system is in. When hiperdispatch decides 13 + * that a capacity update is necessary, it schedules a topology update. 14 + * During topology updates the CPU capacities are always re-adjusted. 15 + * 16 + * There is two places where CPU capacities are being accessed within 17 + * hiperdispatch. 18 + * -> hiperdispatch's reoccuring work function reads CPU capacities to 19 + * determine high capacity CPU count. 20 + * -> during a topology update hiperdispatch's adjustment function 21 + * updates CPU capacities. 22 + * These two can run on different CPUs in parallel which can cause 23 + * hiperdispatch to make wrong decisions. This can potentially cause 24 + * some overhead by leading to extra rebuild_sched_domains() calls 25 + * for correction. Access to capacities within hiperdispatch has to be 26 + * serialized to prevent the overhead. 27 + * 28 + * Hiperdispatch decision making revolves around steal time. 29 + * HD_STEAL_THRESHOLD value is taken as reference. Whenever steal time 30 + * crosses the threshold value hiperdispatch falls back to giving high 31 + * capacities to entitled CPUs. When steal time drops below the 32 + * threshold boundary, hiperdispatch utilizes all CPUs by giving all 33 + * of them high capacity. 34 + * 35 + * The theory behind HD_STEAL_THRESHOLD is related to the SMP thread 36 + * performance. Comparing the throughput of; 37 + * - single CORE, with N threads, running N tasks 38 + * - N separate COREs running N tasks, 39 + * using individual COREs for individual tasks yield better 40 + * performance. 
This performance difference is roughly ~30% (can change 41 + * between machine generations) 42 + * 43 + * Hiperdispatch tries to hint scheduler to use individual COREs for 44 + * each task, as long as steal time on those COREs are less than 30%, 45 + * therefore delaying the throughput loss caused by using SMP threads. 46 + */ 47 + 48 + #include <linux/cpumask.h> 49 + #include <linux/debugfs.h> 50 + #include <linux/device.h> 51 + #include <linux/kernel_stat.h> 52 + #include <linux/kstrtox.h> 53 + #include <linux/ktime.h> 54 + #include <linux/sysctl.h> 55 + #include <linux/types.h> 56 + #include <linux/workqueue.h> 57 + #include <asm/hiperdispatch.h> 58 + #include <asm/setup.h> 59 + #include <asm/smp.h> 60 + #include <asm/topology.h> 61 + 62 + #define CREATE_TRACE_POINTS 63 + #include <asm/trace/hiperdispatch.h> 64 + 65 + #define HD_DELAY_FACTOR (4) 66 + #define HD_DELAY_INTERVAL (HZ / 4) 67 + #define HD_STEAL_THRESHOLD 30 68 + #define HD_STEAL_AVG_WEIGHT 16 69 + 70 + static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */ 71 + static cpumask_t hd_vmvl_cpumask; /* Mask containing vertical medium and low CPUs */ 72 + static int hd_high_capacity_cores; /* Current CORE count with high capacity */ 73 + static int hd_entitled_cores; /* Total vertical high and medium CORE count */ 74 + static int hd_online_cores; /* Current online CORE count */ 75 + 76 + static unsigned long hd_previous_steal; /* Previous iteration's CPU steal timer total */ 77 + static unsigned long hd_high_time; /* Total time spent while all cpus have high capacity */ 78 + static unsigned long hd_low_time; /* Total time spent while vl cpus have low capacity */ 79 + static atomic64_t hd_adjustments; /* Total occurrence count of hiperdispatch adjustments */ 80 + 81 + static unsigned int hd_steal_threshold = HD_STEAL_THRESHOLD; 82 + static unsigned int hd_delay_factor = HD_DELAY_FACTOR; 83 + static int hd_enabled; 84 + 85 + static void hd_capacity_work_fn(struct work_struct *work); 86 
+ static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn); 87 + 88 + static int hd_set_hiperdispatch_mode(int enable) 89 + { 90 + if (!MACHINE_HAS_TOPOLOGY) 91 + enable = 0; 92 + if (hd_enabled == enable) 93 + return 0; 94 + hd_enabled = enable; 95 + return 1; 96 + } 97 + 98 + void hd_reset_state(void) 99 + { 100 + cpumask_clear(&hd_vl_coremask); 101 + cpumask_clear(&hd_vmvl_cpumask); 102 + hd_entitled_cores = 0; 103 + hd_online_cores = 0; 104 + } 105 + 106 + void hd_add_core(int cpu) 107 + { 108 + const struct cpumask *siblings; 109 + int polarization; 110 + 111 + hd_online_cores++; 112 + polarization = smp_cpu_get_polarization(cpu); 113 + siblings = topology_sibling_cpumask(cpu); 114 + switch (polarization) { 115 + case POLARIZATION_VH: 116 + hd_entitled_cores++; 117 + break; 118 + case POLARIZATION_VM: 119 + hd_entitled_cores++; 120 + cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); 121 + break; 122 + case POLARIZATION_VL: 123 + cpumask_set_cpu(cpu, &hd_vl_coremask); 124 + cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings); 125 + break; 126 + } 127 + } 128 + 129 + /* Serialize update and read operations of debug counters. */ 130 + static DEFINE_MUTEX(hd_counter_mutex); 131 + 132 + static void hd_update_times(void) 133 + { 134 + static ktime_t prev; 135 + ktime_t now; 136 + 137 + /* 138 + * Check if hiperdispatch is active, if not set the prev to 0. 139 + * This way it is possible to differentiate the first update iteration after 140 + * enabling hiperdispatch. 
141 + */ 142 + if (hd_entitled_cores == 0 || hd_enabled == 0) { 143 + prev = ktime_set(0, 0); 144 + return; 145 + } 146 + now = ktime_get(); 147 + if (ktime_after(prev, 0)) { 148 + if (hd_high_capacity_cores == hd_online_cores) 149 + hd_high_time += ktime_ms_delta(now, prev); 150 + else 151 + hd_low_time += ktime_ms_delta(now, prev); 152 + } 153 + prev = now; 154 + } 155 + 156 + static void hd_update_capacities(void) 157 + { 158 + int cpu, upscaling_cores; 159 + unsigned long capacity; 160 + 161 + upscaling_cores = hd_high_capacity_cores - hd_entitled_cores; 162 + capacity = upscaling_cores > 0 ? CPU_CAPACITY_HIGH : CPU_CAPACITY_LOW; 163 + hd_high_capacity_cores = hd_entitled_cores; 164 + for_each_cpu(cpu, &hd_vl_coremask) { 165 + smp_set_core_capacity(cpu, capacity); 166 + if (capacity != CPU_CAPACITY_HIGH) 167 + continue; 168 + hd_high_capacity_cores++; 169 + upscaling_cores--; 170 + if (upscaling_cores == 0) 171 + capacity = CPU_CAPACITY_LOW; 172 + } 173 + } 174 + 175 + void hd_disable_hiperdispatch(void) 176 + { 177 + cancel_delayed_work_sync(&hd_capacity_work); 178 + hd_high_capacity_cores = hd_online_cores; 179 + hd_previous_steal = 0; 180 + } 181 + 182 + int hd_enable_hiperdispatch(void) 183 + { 184 + mutex_lock(&hd_counter_mutex); 185 + hd_update_times(); 186 + mutex_unlock(&hd_counter_mutex); 187 + if (hd_enabled == 0) 188 + return 0; 189 + if (hd_entitled_cores == 0) 190 + return 0; 191 + if (hd_online_cores <= hd_entitled_cores) 192 + return 0; 193 + mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor); 194 + hd_update_capacities(); 195 + return 1; 196 + } 197 + 198 + static unsigned long hd_steal_avg(unsigned long new) 199 + { 200 + static unsigned long steal; 201 + 202 + steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT; 203 + return steal; 204 + } 205 + 206 + static unsigned long hd_calculate_steal_percentage(void) 207 + { 208 + unsigned long time_delta, steal_delta, steal, percentage; 209 + static 
ktime_t prev; 210 + int cpus, cpu; 211 + ktime_t now; 212 + 213 + cpus = 0; 214 + steal = 0; 215 + percentage = 0; 216 + for_each_cpu(cpu, &hd_vmvl_cpumask) { 217 + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; 218 + cpus++; 219 + } 220 + /* 221 + * If there is no vertical medium and low CPUs steal time 222 + * is 0 as vertical high CPUs shouldn't experience steal time. 223 + */ 224 + if (cpus == 0) 225 + return percentage; 226 + now = ktime_get(); 227 + time_delta = ktime_to_ns(ktime_sub(now, prev)); 228 + if (steal > hd_previous_steal && hd_previous_steal != 0) { 229 + steal_delta = (steal - hd_previous_steal) * 100 / time_delta; 230 + percentage = steal_delta / cpus; 231 + } 232 + hd_previous_steal = steal; 233 + prev = now; 234 + return percentage; 235 + } 236 + 237 + static void hd_capacity_work_fn(struct work_struct *work) 238 + { 239 + unsigned long steal_percentage, new_cores; 240 + 241 + mutex_lock(&smp_cpu_state_mutex); 242 + /* 243 + * If online cores are less or equal to entitled cores hiperdispatch 244 + * does not need to make any adjustments, call a topology update to 245 + * disable hiperdispatch. 246 + * Normally this check is handled on topology update, but during cpu 247 + * unhotplug, topology and cpu mask updates are done in reverse 248 + * order, causing hd_enable_hiperdispatch() to get stale data. 
249 + */ 250 + if (hd_online_cores <= hd_entitled_cores) { 251 + topology_schedule_update(); 252 + mutex_unlock(&smp_cpu_state_mutex); 253 + return; 254 + } 255 + steal_percentage = hd_steal_avg(hd_calculate_steal_percentage()); 256 + if (steal_percentage < hd_steal_threshold) 257 + new_cores = hd_online_cores; 258 + else 259 + new_cores = hd_entitled_cores; 260 + if (hd_high_capacity_cores != new_cores) { 261 + trace_s390_hd_rebuild_domains(hd_high_capacity_cores, new_cores); 262 + hd_high_capacity_cores = new_cores; 263 + atomic64_inc(&hd_adjustments); 264 + topology_schedule_update(); 265 + } 266 + trace_s390_hd_work_fn(steal_percentage, hd_entitled_cores, hd_high_capacity_cores); 267 + mutex_unlock(&smp_cpu_state_mutex); 268 + schedule_delayed_work(&hd_capacity_work, HD_DELAY_INTERVAL); 269 + } 270 + 271 + static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write, 272 + void *buffer, size_t *lenp, loff_t *ppos) 273 + { 274 + int hiperdispatch; 275 + int rc; 276 + struct ctl_table ctl_entry = { 277 + .procname = ctl->procname, 278 + .data = &hiperdispatch, 279 + .maxlen = sizeof(int), 280 + .extra1 = SYSCTL_ZERO, 281 + .extra2 = SYSCTL_ONE, 282 + }; 283 + 284 + hiperdispatch = hd_enabled; 285 + rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos); 286 + if (rc < 0 || !write) 287 + return rc; 288 + mutex_lock(&smp_cpu_state_mutex); 289 + if (hd_set_hiperdispatch_mode(hiperdispatch)) 290 + topology_schedule_update(); 291 + mutex_unlock(&smp_cpu_state_mutex); 292 + return 0; 293 + } 294 + 295 + static struct ctl_table hiperdispatch_ctl_table[] = { 296 + { 297 + .procname = "hiperdispatch", 298 + .mode = 0644, 299 + .proc_handler = hiperdispatch_ctl_handler, 300 + }, 301 + }; 302 + 303 + static ssize_t hd_steal_threshold_show(struct device *dev, 304 + struct device_attribute *attr, 305 + char *buf) 306 + { 307 + return sysfs_emit(buf, "%u\n", hd_steal_threshold); 308 + } 309 + 310 + static ssize_t hd_steal_threshold_store(struct device 
*dev, 311 + struct device_attribute *attr, 312 + const char *buf, 313 + size_t count) 314 + { 315 + unsigned int val; 316 + int rc; 317 + 318 + rc = kstrtouint(buf, 0, &val); 319 + if (rc) 320 + return rc; 321 + if (val > 100) 322 + return -ERANGE; 323 + hd_steal_threshold = val; 324 + return count; 325 + } 326 + 327 + static DEVICE_ATTR_RW(hd_steal_threshold); 328 + 329 + static ssize_t hd_delay_factor_show(struct device *dev, 330 + struct device_attribute *attr, 331 + char *buf) 332 + { 333 + return sysfs_emit(buf, "%u\n", hd_delay_factor); 334 + } 335 + 336 + static ssize_t hd_delay_factor_store(struct device *dev, 337 + struct device_attribute *attr, 338 + const char *buf, 339 + size_t count) 340 + { 341 + unsigned int val; 342 + int rc; 343 + 344 + rc = kstrtouint(buf, 0, &val); 345 + if (rc) 346 + return rc; 347 + if (!val) 348 + return -ERANGE; 349 + hd_delay_factor = val; 350 + return count; 351 + } 352 + 353 + static DEVICE_ATTR_RW(hd_delay_factor); 354 + 355 + static struct attribute *hd_attrs[] = { 356 + &dev_attr_hd_steal_threshold.attr, 357 + &dev_attr_hd_delay_factor.attr, 358 + NULL, 359 + }; 360 + 361 + static const struct attribute_group hd_attr_group = { 362 + .name = "hiperdispatch", 363 + .attrs = hd_attrs, 364 + }; 365 + 366 + static int hd_greedy_time_get(void *unused, u64 *val) 367 + { 368 + mutex_lock(&hd_counter_mutex); 369 + hd_update_times(); 370 + *val = hd_high_time; 371 + mutex_unlock(&hd_counter_mutex); 372 + return 0; 373 + } 374 + 375 + DEFINE_SIMPLE_ATTRIBUTE(hd_greedy_time_fops, hd_greedy_time_get, NULL, "%llu\n"); 376 + 377 + static int hd_conservative_time_get(void *unused, u64 *val) 378 + { 379 + mutex_lock(&hd_counter_mutex); 380 + hd_update_times(); 381 + *val = hd_low_time; 382 + mutex_unlock(&hd_counter_mutex); 383 + return 0; 384 + } 385 + 386 + DEFINE_SIMPLE_ATTRIBUTE(hd_conservative_time_fops, hd_conservative_time_get, NULL, "%llu\n"); 387 + 388 + static int hd_adjustment_count_get(void *unused, u64 *val) 389 + { 390 + 
*val = atomic64_read(&hd_adjustments); 391 + return 0; 392 + } 393 + 394 + DEFINE_SIMPLE_ATTRIBUTE(hd_adjustments_fops, hd_adjustment_count_get, NULL, "%llu\n"); 395 + 396 + static void __init hd_create_debugfs_counters(void) 397 + { 398 + struct dentry *dir; 399 + 400 + dir = debugfs_create_dir("hiperdispatch", arch_debugfs_dir); 401 + debugfs_create_file("conservative_time_ms", 0400, dir, NULL, &hd_conservative_time_fops); 402 + debugfs_create_file("greedy_time_ms", 0400, dir, NULL, &hd_greedy_time_fops); 403 + debugfs_create_file("adjustment_count", 0400, dir, NULL, &hd_adjustments_fops); 404 + } 405 + 406 + static void __init hd_create_attributes(void) 407 + { 408 + struct device *dev; 409 + 410 + dev = bus_get_dev_root(&cpu_subsys); 411 + if (!dev) 412 + return; 413 + if (sysfs_create_group(&dev->kobj, &hd_attr_group)) 414 + pr_warn("Unable to create hiperdispatch attribute group\n"); 415 + put_device(dev); 416 + } 417 + 418 + static int __init hd_init(void) 419 + { 420 + if (IS_ENABLED(CONFIG_HIPERDISPATCH_ON)) { 421 + hd_set_hiperdispatch_mode(1); 422 + topology_schedule_update(); 423 + } 424 + if (!register_sysctl("s390", hiperdispatch_ctl_table)) 425 + pr_warn("Failed to register s390.hiperdispatch sysctl attribute\n"); 426 + hd_create_debugfs_counters(); 427 + hd_create_attributes(); 428 + return 0; 429 + } 430 + late_initcall(hd_init);
+1
arch/s390/kernel/irq.c
··· 76 76 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, 77 77 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, 78 78 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"}, 79 + {.irq = IRQEXT_WTI, .name = "WTI", .desc = "[EXT] Warning Track"}, 79 80 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, 80 81 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"}, 81 82 {.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
+13 -2
arch/s390/kernel/kprobes.c
··· 21 21 #include <linux/hardirq.h> 22 22 #include <linux/ftrace.h> 23 23 #include <linux/execmem.h> 24 + #include <asm/text-patching.h> 24 25 #include <asm/set_memory.h> 25 26 #include <asm/sections.h> 26 27 #include <asm/dis.h> ··· 153 152 { 154 153 struct swap_insn_args args = {.p = p, .arm_kprobe = 1}; 155 154 156 - stop_machine_cpuslocked(swap_instruction, &args, NULL); 155 + if (MACHINE_HAS_SEQ_INSN) { 156 + swap_instruction(&args); 157 + text_poke_sync(); 158 + } else { 159 + stop_machine_cpuslocked(swap_instruction, &args, NULL); 160 + } 157 161 } 158 162 NOKPROBE_SYMBOL(arch_arm_kprobe); 159 163 ··· 166 160 { 167 161 struct swap_insn_args args = {.p = p, .arm_kprobe = 0}; 168 162 169 - stop_machine_cpuslocked(swap_instruction, &args, NULL); 163 + if (MACHINE_HAS_SEQ_INSN) { 164 + swap_instruction(&args); 165 + text_poke_sync(); 166 + } else { 167 + stop_machine_cpuslocked(swap_instruction, &args, NULL); 168 + } 170 169 } 171 170 NOKPROBE_SYMBOL(arch_disarm_kprobe); 172 171
+3 -2
arch/s390/kernel/mcount.S
··· 9 9 #include <asm/ftrace.h> 10 10 #include <asm/nospec-insn.h> 11 11 #include <asm/ptrace.h> 12 + #include <asm/march.h> 12 13 13 14 #define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE) 14 15 #define STACK_PTREGS (STACK_FRAME_OVERHEAD) ··· 89 88 SYM_CODE_END(ftrace_caller) 90 89 91 90 SYM_CODE_START(ftrace_common) 92 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 91 + #ifdef MARCH_HAS_Z196_FEATURES 93 92 aghik %r2,%r0,-MCOUNT_INSN_SIZE 94 93 lgrl %r4,function_trace_op 95 94 lgrl %r1,ftrace_func ··· 116 115 .Lftrace_graph_caller_end: 117 116 #endif 118 117 lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15) 119 - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 118 + #ifdef MARCH_HAS_Z196_FEATURES 120 119 ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15) 121 120 locgrz %r1,%r0 122 121 #else
+4
arch/s390/kernel/perf_cpum_cf.c
··· 22 22 #include <asm/hwctrset.h> 23 23 #include <asm/debug.h> 24 24 25 + /* Perf PMU definitions for the counter facility */ 26 + #define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ 27 + #define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ 28 + 25 29 enum cpumf_ctr_set { 26 30 CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ 27 31 CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
+80 -229
arch/s390/kernel/perf_cpum_sf.c
··· 24 24 #include <asm/timex.h> 25 25 #include <linux/io.h> 26 26 27 + /* Perf PMU definitions for the sampling facility */ 28 + #define PERF_CPUM_SF_MAX_CTR 2 29 + #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ 30 + #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ 31 + #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ 32 + #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ 33 + #define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ 34 + 35 + #define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) 36 + #define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) 37 + #define TEAR_REG(hwc) ((hwc)->last_tag) 38 + #define SAMPL_RATE(hwc) ((hwc)->event_base) 39 + #define SAMPL_FLAGS(hwc) ((hwc)->config_base) 40 + #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) 41 + #define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) 42 + 27 43 /* Minimum number of sample-data-block-tables: 28 44 * At least one table is required for the sampling buffer structure. 29 45 * A single table contains up to 511 pointers to sample-data-blocks. 
··· 129 113 return USEC_PER_SEC * qsi->cpu_speed / rate; 130 114 } 131 115 132 - /* Return TOD timestamp contained in an trailer entry */ 133 - static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) 134 - { 135 - /* TOD in STCKE format */ 136 - if (te->header.t) 137 - return *((unsigned long long *)&te->timestamp[1]); 138 - 139 - /* TOD in STCK format */ 140 - return *((unsigned long long *)&te->timestamp[0]); 141 - } 142 - 143 116 /* Return pointer to trailer entry of an sample data block */ 144 117 static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v) 145 118 { ··· 159 154 /* 160 155 * sf_disable() - Switch off sampling facility 161 156 */ 162 - static int sf_disable(void) 157 + static void sf_disable(void) 163 158 { 164 159 struct hws_lsctl_request_block sreq; 165 160 166 161 memset(&sreq, 0, sizeof(sreq)); 167 - return lsctl(&sreq); 162 + lsctl(&sreq); 168 163 } 169 164 170 165 /* ··· 213 208 } 214 209 } 215 210 216 - debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, 217 - (unsigned long)sfb->sdbt); 218 211 memset(sfb, 0, sizeof(*sfb)); 219 212 } 220 213 ··· 268 265 * the sampling buffer origin. 
269 266 */ 270 267 if (sfb->sdbt != get_next_sdbt(tail)) { 271 - debug_sprintf_event(sfdbg, 3, "%s: " 272 - "sampling buffer is not linked: origin %#lx" 273 - " tail %#lx\n", __func__, 274 - (unsigned long)sfb->sdbt, 268 + debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n", 269 + __func__, (unsigned long)sfb->sdbt, 275 270 (unsigned long)tail); 276 271 return -EINVAL; 277 272 } ··· 319 318 *tail = virt_to_phys(sfb->sdbt) + 1; 320 319 sfb->tail = tail; 321 320 322 - debug_sprintf_event(sfdbg, 4, "%s: new buffer" 323 - " settings: sdbt %lu sdb %lu\n", __func__, 324 - sfb->num_sdbt, sfb->num_sdb); 325 321 return rc; 326 322 } 327 323 ··· 355 357 356 358 /* Allocate requested number of sample-data-blocks */ 357 359 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); 358 - if (rc) { 360 + if (rc) 359 361 free_sampling_buffer(sfb); 360 - debug_sprintf_event(sfdbg, 4, "%s: " 361 - "realloc_sampling_buffer failed with rc %i\n", 362 - __func__, rc); 363 - } else 364 - debug_sprintf_event(sfdbg, 4, 365 - "%s: tear %#lx dear %#lx\n", __func__, 366 - (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); 367 362 return rc; 368 363 } 369 364 ··· 368 377 CPUM_SF_MAX_SDB = max; 369 378 370 379 memset(&si, 0, sizeof(si)); 371 - if (!qsi(&si)) 372 - CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); 380 + qsi(&si); 381 + CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); 373 382 } 374 383 375 384 static unsigned long sfb_max_limit(struct hw_perf_event *hwc) ··· 386 395 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) 387 396 return SFB_ALLOC_REG(hwc) - sfb->num_sdb; 388 397 return 0; 389 - } 390 - 391 - static int sfb_has_pending_allocs(struct sf_buffer *sfb, 392 - struct hw_perf_event *hwc) 393 - { 394 - return sfb_pending_allocs(sfb, hwc) > 0; 395 398 } 396 399 397 400 static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) ··· 411 426 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) 412 427 
{ 413 428 unsigned long n_sdb, freq; 414 - size_t sample_size; 415 429 416 430 /* Calculate sampling buffers using 4K pages 417 431 * ··· 441 457 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up 442 458 * to 511 SDBs). 443 459 */ 444 - sample_size = sizeof(struct hws_basic_entry); 445 460 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 446 461 n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000); 447 462 ··· 455 472 sfb_init_allocs(n_sdb, hwc); 456 473 if (sf_buffer_available(cpuhw)) 457 474 return 0; 458 - 459 - debug_sprintf_event(sfdbg, 3, 460 - "%s: rate %lu f %lu sdb %lu/%lu" 461 - " sample_size %lu cpuhw %p\n", __func__, 462 - SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), 463 - sample_size, cpuhw); 464 475 465 476 return alloc_sampling_buffer(&cpuhw->sfb, 466 477 sfb_pending_allocs(&cpuhw->sfb, hwc)); ··· 512 535 if (num) 513 536 sfb_account_allocs(num, hwc); 514 537 515 - debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", 516 - __func__, OVERFLOW_REG(hwc), ratio, num); 517 538 OVERFLOW_REG(hwc) = 0; 518 539 } 519 540 ··· 529 554 static void extend_sampling_buffer(struct sf_buffer *sfb, 530 555 struct hw_perf_event *hwc) 531 556 { 532 - unsigned long num, num_old; 533 - int rc; 557 + unsigned long num; 534 558 535 559 num = sfb_pending_allocs(sfb, hwc); 536 560 if (!num) 537 561 return; 538 - num_old = sfb->num_sdb; 539 562 540 563 /* Disable the sampling facility to reset any states and also 541 564 * clear pending measurement alerts. ··· 545 572 * called by perf. Because this is a reallocation, it is fine if the 546 573 * new SDB-request cannot be satisfied immediately. 
547 574 */ 548 - rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 549 - if (rc) 550 - debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", 551 - __func__, rc); 552 - 553 - if (sfb_has_pending_allocs(sfb, hwc)) 554 - debug_sprintf_event(sfdbg, 5, "%s: " 555 - "req %lu alloc %lu remaining %lu\n", 556 - __func__, num, sfb->num_sdb - num_old, 557 - sfb_pending_allocs(sfb, hwc)); 575 + realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 558 576 } 559 577 560 578 /* Number of perf events counting hardware events */ 561 - static atomic_t num_events; 579 + static refcount_t num_events; 562 580 /* Used to avoid races in calling reserve/release_cpumf_hardware */ 563 581 static DEFINE_MUTEX(pmc_reserve_mutex); 564 582 565 583 #define PMC_INIT 0 566 584 #define PMC_RELEASE 1 567 - #define PMC_FAILURE 2 568 585 static void setup_pmc_cpu(void *flags) 569 586 { 570 - struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); 571 - int err = 0; 587 + struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 572 588 573 589 switch (*((int *)flags)) { 574 590 case PMC_INIT: 575 - memset(cpusf, 0, sizeof(*cpusf)); 576 - err = qsi(&cpusf->qsi); 577 - if (err) 578 - break; 579 - cpusf->flags |= PMU_F_RESERVED; 580 - err = sf_disable(); 591 + memset(cpuhw, 0, sizeof(*cpuhw)); 592 + qsi(&cpuhw->qsi); 593 + cpuhw->flags |= PMU_F_RESERVED; 594 + sf_disable(); 581 595 break; 582 596 case PMC_RELEASE: 583 - cpusf->flags &= ~PMU_F_RESERVED; 584 - err = sf_disable(); 585 - if (!err) 586 - deallocate_buffers(cpusf); 597 + cpuhw->flags &= ~PMU_F_RESERVED; 598 + sf_disable(); 599 + deallocate_buffers(cpuhw); 587 600 break; 588 - } 589 - if (err) { 590 - *((int *)flags) |= PMC_FAILURE; 591 - pr_err("Switching off the sampling facility failed with rc %i\n", err); 592 601 } 593 602 } 594 603 ··· 582 627 on_each_cpu(setup_pmc_cpu, &flags, 1); 583 628 } 584 629 585 - static int reserve_pmc_hardware(void) 630 + static void reserve_pmc_hardware(void) 586 631 { 587 632 int flags = PMC_INIT; 588 633 589 634 
on_each_cpu(setup_pmc_cpu, &flags, 1); 590 - if (flags & PMC_FAILURE) { 591 - release_pmc_hardware(); 592 - return -ENODEV; 593 - } 594 635 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 595 - 596 - return 0; 597 636 } 598 637 599 638 static void hw_perf_event_destroy(struct perf_event *event) 600 639 { 601 640 /* Release PMC if this is the last perf event */ 602 - if (!atomic_add_unless(&num_events, -1, 1)) { 603 - mutex_lock(&pmc_reserve_mutex); 604 - if (atomic_dec_return(&num_events) == 0) 605 - release_pmc_hardware(); 641 + if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) { 642 + release_pmc_hardware(); 606 643 mutex_unlock(&pmc_reserve_mutex); 607 644 } 608 645 } ··· 698 751 */ 699 752 if (sample_rate_to_freq(si, rate) > 700 753 sysctl_perf_event_sample_rate) { 701 - debug_sprintf_event(sfdbg, 1, "%s: " 702 - "Sampling rate exceeds maximum " 703 - "perf sample rate\n", __func__); 704 754 rate = 0; 705 755 } 706 756 } ··· 742 798 attr->sample_period = rate; 743 799 SAMPL_RATE(hwc) = rate; 744 800 hw_init_period(hwc, SAMPL_RATE(hwc)); 745 - debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", 746 - __func__, event->cpu, event->attr.sample_period, 747 - event->attr.freq, SAMPLE_FREQ_MODE(hwc)); 748 801 return 0; 749 802 } 750 803 ··· 751 810 struct hws_qsi_info_block si; 752 811 struct perf_event_attr *attr = &event->attr; 753 812 struct hw_perf_event *hwc = &event->hw; 754 - int cpu, err; 813 + int cpu, err = 0; 755 814 756 815 /* Reserve CPU-measurement sampling facility */ 757 - err = 0; 758 - if (!atomic_inc_not_zero(&num_events)) { 759 - mutex_lock(&pmc_reserve_mutex); 760 - if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) 761 - err = -EBUSY; 762 - else 763 - atomic_inc(&num_events); 764 - mutex_unlock(&pmc_reserve_mutex); 816 + mutex_lock(&pmc_reserve_mutex); 817 + if (!refcount_inc_not_zero(&num_events)) { 818 + reserve_pmc_hardware(); 819 + refcount_set(&num_events, 1); 765 820 } 821 + 
mutex_unlock(&pmc_reserve_mutex); 766 822 event->destroy = hw_perf_event_destroy; 767 - 768 - if (err) 769 - goto out; 770 823 771 824 /* Access per-CPU sampling information (query sampling info) */ 772 825 /* ··· 773 838 */ 774 839 memset(&si, 0, sizeof(si)); 775 840 cpuhw = NULL; 776 - if (event->cpu == -1) 841 + if (event->cpu == -1) { 777 842 qsi(&si); 778 - else { 843 + } else { 779 844 /* Event is pinned to a particular CPU, retrieve the per-CPU 780 845 * sampling structure for accessing the CPU-specific QSI. 781 846 */ ··· 815 880 err = __hw_perf_event_init_rate(event, &si); 816 881 if (err) 817 882 goto out; 818 - 819 - /* Initialize sample data overflow accounting */ 820 - hwc->extra_reg.reg = REG_OVERFLOW; 821 - OVERFLOW_REG(hwc) = 0; 822 883 823 884 /* Use AUX buffer. No need to allocate it by ourself */ 824 885 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) ··· 938 1007 extend_sampling_buffer(&cpuhw->sfb, hwc); 939 1008 } 940 1009 /* Rate may be adjusted with ioctl() */ 941 - cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 1010 + cpuhw->lsctl.interval = SAMPL_RATE(hwc); 942 1011 } 943 1012 944 1013 /* (Re)enable the PMU and sampling facility */ ··· 954 1023 955 1024 /* Load current program parameter */ 956 1025 lpp(&get_lowcore()->lpp); 957 - 958 - debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 959 - "interval %#lx tear %#lx dear %#lx\n", __func__, 960 - cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 961 - cpuhw->lsctl.cd, cpuhw->lsctl.interval, 962 - cpuhw->lsctl.tear, cpuhw->lsctl.dear); 963 1026 } 964 1027 965 1028 static void cpumsf_pmu_disable(struct pmu *pmu) ··· 980 1055 return; 981 1056 } 982 1057 983 - /* Save state of TEAR and DEAR register contents */ 984 - err = qsi(&si); 985 - if (!err) { 986 - /* TEAR/DEAR values are valid only if the sampling facility is 987 - * enabled. 
Note that cpumsf_pmu_disable() might be called even 988 - * for a disabled sampling facility because cpumsf_pmu_enable() 989 - * controls the enable/disable state. 990 - */ 991 - if (si.es) { 992 - cpuhw->lsctl.tear = si.tear; 993 - cpuhw->lsctl.dear = si.dear; 994 - } 995 - } else 996 - debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", 997 - __func__, err); 1058 + /* 1059 + * Save state of TEAR and DEAR register contents. 1060 + * TEAR/DEAR values are valid only if the sampling facility is 1061 + * enabled. Note that cpumsf_pmu_disable() might be called even 1062 + * for a disabled sampling facility because cpumsf_pmu_enable() 1063 + * controls the enable/disable state. 1064 + */ 1065 + qsi(&si); 1066 + if (si.es) { 1067 + cpuhw->lsctl.tear = si.tear; 1068 + cpuhw->lsctl.dear = si.dear; 1069 + } 998 1070 999 1071 cpuhw->flags &= ~PMU_F_ENABLED; 1000 1072 } ··· 1157 1235 /* Count discarded samples */ 1158 1236 *overflow += 1; 1159 1237 } else { 1160 - debug_sprintf_event(sfdbg, 4, 1161 - "%s: Found unknown" 1162 - " sampling data entry: te->f %i" 1163 - " basic.def %#4x (%p)\n", __func__, 1164 - te->header.f, sample->def, sample); 1165 1238 /* Sample slot is not yet written or other record. 1166 1239 * 1167 1240 * This condition can occur if the buffer was reused ··· 1201 1284 * AUX buffer is used when in diagnostic sampling mode. 1202 1285 * No perf events/samples are created. 1203 1286 */ 1204 - if (SAMPL_DIAG_MODE(&event->hw)) 1287 + if (SAMPL_DIAG_MODE(hwc)) 1205 1288 return; 1206 1289 1207 1290 sdbt = (unsigned long *)TEAR_REG(hwc); ··· 1225 1308 * For details, see sfb_account_overflows(). 1226 1309 */ 1227 1310 sampl_overflow += te->header.overflow; 1228 - 1229 - /* Timestamps are valid for full sample-data-blocks only */ 1230 - debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx " 1231 - "overflow %llu timestamp %#llx\n", 1232 - __func__, sdb, (unsigned long)sdbt, 1233 - te->header.overflow, 1234 - (te->header.f) ? 
trailer_timestamp(te) : 0ULL); 1235 1311 1236 1312 /* Collect all samples from a single sample-data-block and 1237 1313 * flag if an (perf) event overflow happened. If so, the PMU ··· 1250 1340 sdbt = get_next_sdbt(sdbt); 1251 1341 1252 1342 /* Update event hardware registers */ 1253 - TEAR_REG(hwc) = (unsigned long) sdbt; 1343 + TEAR_REG(hwc) = (unsigned long)sdbt; 1254 1344 1255 1345 /* Stop processing sample-data if all samples of the current 1256 1346 * sample-data-block were flushed even if it was not full. ··· 1272 1362 * are dropped. 1273 1363 * Slightly increase the interval to avoid hitting this limit. 1274 1364 */ 1275 - if (event_overflow) { 1365 + if (event_overflow) 1276 1366 SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); 1277 - debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", 1278 - __func__, 1279 - DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); 1280 - } 1281 - 1282 - if (sampl_overflow || event_overflow) 1283 - debug_sprintf_event(sfdbg, 4, "%s: " 1284 - "overflows: sample %llu event %llu" 1285 - " total %llu num_sdb %llu\n", 1286 - __func__, sampl_overflow, event_overflow, 1287 - OVERFLOW_REG(hwc), num_sdb); 1288 1367 } 1289 1368 1290 1369 static inline unsigned long aux_sdb_index(struct aux_buffer *aux, ··· 1341 1442 /* Remove alert indicators in the buffer */ 1342 1443 te = aux_sdb_trailer(aux, aux->alert_mark); 1343 1444 te->header.a = 0; 1344 - 1345 - debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n", 1346 - __func__, i, range_scan, aux->head); 1347 1445 } 1348 1446 1349 1447 /* ··· 1359 1463 unsigned long range, i, range_scan, idx, head, base, offset; 1360 1464 struct hws_trailer_entry *te; 1361 1465 1362 - if (WARN_ON_ONCE(handle->head & ~PAGE_MASK)) 1466 + if (handle->head & ~PAGE_MASK) 1363 1467 return -EINVAL; 1364 1468 1365 1469 aux->head = handle->head >> PAGE_SHIFT; ··· 1371 1475 * SDBs between aux->head and aux->empty_mark are already ready 1372 1476 * for new data. range_scan is num of SDBs not within them. 
1373 1477 */ 1374 - debug_sprintf_event(sfdbg, 6, 1375 - "%s: range %ld head %ld alert %ld empty %ld\n", 1376 - __func__, range, aux->head, aux->alert_mark, 1377 - aux->empty_mark); 1378 1478 if (range > aux_sdb_num_empty(aux)) { 1379 1479 range_scan = range - aux_sdb_num_empty(aux); 1380 1480 idx = aux->empty_mark + 1; ··· 1395 1503 offset = head % CPUM_SF_SDB_PER_TABLE; 1396 1504 cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long); 1397 1505 cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]); 1398 - 1399 - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld " 1400 - "index %ld tear %#lx dear %#lx\n", __func__, 1401 - aux->head, aux->alert_mark, aux->empty_mark, 1402 - head / CPUM_SF_SDB_PER_TABLE, 1403 - cpuhw->lsctl.tear, cpuhw->lsctl.dear); 1404 1506 1405 1507 return 0; 1406 1508 } ··· 1457 1571 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, 1458 1572 unsigned long long *overflow) 1459 1573 { 1460 - unsigned long i, range_scan, idx, idx_old; 1461 1574 union hws_trailer_header old, prev, new; 1575 + unsigned long i, range_scan, idx; 1462 1576 unsigned long long orig_overflow; 1463 1577 struct hws_trailer_entry *te; 1464 1578 1465 - debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld " 1466 - "empty %ld\n", __func__, range, aux->head, 1467 - aux->alert_mark, aux->empty_mark); 1468 1579 if (range <= aux_sdb_num_empty(aux)) 1469 1580 /* 1470 1581 * No need to scan. All SDBs in range are marked as empty. ··· 1484 1601 * indicator fall into this range, set it. 
1485 1602 */ 1486 1603 range_scan = range - aux_sdb_num_empty(aux); 1487 - idx_old = idx = aux->empty_mark + 1; 1604 + idx = aux->empty_mark + 1; 1488 1605 for (i = 0; i < range_scan; i++, idx++) { 1489 1606 te = aux_sdb_trailer(aux, idx); 1490 1607 prev.val = READ_ONCE_ALIGNED_128(te->header.val); ··· 1506 1623 /* Update empty_mark to new position */ 1507 1624 aux->empty_mark = aux->head + range - 1; 1508 1625 1509 - debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld " 1510 - "empty %ld\n", __func__, range_scan, idx_old, 1511 - idx - 1, aux->empty_mark); 1512 1626 return true; 1513 1627 } 1514 1628 ··· 1522 1642 unsigned long num_sdb; 1523 1643 1524 1644 aux = perf_get_aux(handle); 1525 - if (WARN_ON_ONCE(!aux)) 1645 + if (!aux) 1526 1646 return; 1527 1647 1528 1648 /* Inform user space new data arrived */ 1529 1649 size = aux_sdb_num_alert(aux) << PAGE_SHIFT; 1530 - debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__, 1650 + debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__, 1531 1651 size >> PAGE_SHIFT); 1532 1652 perf_aux_output_end(handle, size); 1533 1653 ··· 1541 1661 num_sdb); 1542 1662 break; 1543 1663 } 1544 - if (WARN_ON_ONCE(!aux)) 1664 + if (!aux) 1545 1665 return; 1546 1666 1547 1667 /* Update head and alert_mark to new position */ ··· 1561 1681 perf_aux_output_end(&cpuhw->handle, size); 1562 1682 pr_err("Sample data caused the AUX buffer with %lu " 1563 1683 "pages to overflow\n", aux->sfb.num_sdb); 1564 - debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld " 1565 - "overflow %lld\n", __func__, 1566 - aux->head, range, overflow); 1567 1684 } else { 1568 1685 size = aux_sdb_num_alert(aux) << PAGE_SHIFT; 1569 1686 perf_aux_output_end(&cpuhw->handle, size); 1570 - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " 1571 - "already full, try another\n", 1572 - __func__, 1573 - aux->head, aux->alert_mark); 1574 1687 } 1575 1688 } 1576 - 1577 - if (done) 1578 - debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " 1579 - 
"empty %ld\n", __func__, aux->head, 1580 - aux->alert_mark, aux->empty_mark); 1581 1689 } 1582 1690 1583 1691 /* ··· 1587 1719 kfree(aux->sdbt_index); 1588 1720 kfree(aux->sdb_index); 1589 1721 kfree(aux); 1590 - 1591 - debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt); 1592 1722 } 1593 1723 1594 1724 static void aux_sdb_init(unsigned long sdb) ··· 1694 1828 */ 1695 1829 aux->empty_mark = sfb->num_sdb - 1; 1696 1830 1697 - debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__, 1698 - sfb->num_sdbt, sfb->num_sdb); 1699 - 1700 1831 return aux; 1701 1832 1702 1833 no_sdbt: ··· 1726 1863 1727 1864 memset(&si, 0, sizeof(si)); 1728 1865 if (event->cpu == -1) { 1729 - if (qsi(&si)) 1730 - return -ENODEV; 1866 + qsi(&si); 1731 1867 } else { 1732 1868 /* Event is pinned to a particular CPU, retrieve the per-CPU 1733 1869 * sampling structure for accessing the CPU-specific QSI. ··· 1736 1874 si = cpuhw->qsi; 1737 1875 } 1738 1876 1739 - do_freq = !!SAMPLE_FREQ_MODE(&event->hw); 1877 + do_freq = !!SAMPL_FREQ_MODE(&event->hw); 1740 1878 rate = getrate(do_freq, value, &si); 1741 1879 if (!rate) 1742 1880 return -EINVAL; ··· 1744 1882 event->attr.sample_period = rate; 1745 1883 SAMPL_RATE(&event->hw) = rate; 1746 1884 hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1747 - debug_sprintf_event(sfdbg, 4, "%s:" 1748 - " cpu %d value %#llx period %#llx freq %d\n", 1749 - __func__, event->cpu, value, 1750 - event->attr.sample_period, do_freq); 1751 1885 return 0; 1752 1886 } 1753 1887 ··· 1754 1896 { 1755 1897 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1756 1898 1757 - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) 1899 + if (!(event->hw.state & PERF_HES_STOPPED)) 1758 1900 return; 1759 - 1760 - if (flags & PERF_EF_RELOAD) 1761 - WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); 1762 - 1763 1901 perf_pmu_disable(event->pmu); 1764 1902 event->hw.state = 0; 1765 1903 cpuhw->lsctl.cs = 1; ··· 1790 1936 { 1791 1937 struct 
cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1792 1938 struct aux_buffer *aux; 1793 - int err; 1939 + int err = 0; 1794 1940 1795 1941 if (cpuhw->flags & PMU_F_IN_USE) 1796 1942 return -EAGAIN; ··· 1798 1944 if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) 1799 1945 return -EINVAL; 1800 1946 1801 - err = 0; 1802 1947 perf_pmu_disable(event->pmu); 1803 1948 1804 1949 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; ··· 1968 2115 1969 2116 /* Report measurement alerts only for non-PRA codes */ 1970 2117 if (alert != CPU_MF_INT_SF_PRA) 1971 - debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, 2118 + debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__, 1972 2119 alert); 1973 2120 1974 2121 /* Sampling authorization change request */ ··· 1996 2143 /* Ignore the notification if no events are scheduled on the PMU. 1997 2144 * This might be racy... 1998 2145 */ 1999 - if (!atomic_read(&num_events)) 2146 + if (!refcount_read(&num_events)) 2000 2147 return 0; 2001 2148 2002 2149 local_irq_disable(); ··· 2058 2205 .get = param_get_sfb_size, 2059 2206 }; 2060 2207 2061 - #define RS_INIT_FAILURE_QSI 0x0001 2062 - #define RS_INIT_FAILURE_BSDES 0x0002 2063 - #define RS_INIT_FAILURE_ALRT 0x0003 2064 - #define RS_INIT_FAILURE_PERF 0x0004 2208 + enum { 2209 + RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */ 2210 + RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */ 2211 + RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */ 2212 + }; 2213 + 2065 2214 static void __init pr_cpumsf_err(unsigned int reason) 2066 2215 { 2067 2216 pr_err("Sampling facility support for perf is not available: " ··· 2079 2224 return -ENODEV; 2080 2225 2081 2226 memset(&si, 0, sizeof(si)); 2082 - if (qsi(&si)) { 2083 - pr_cpumsf_err(RS_INIT_FAILURE_QSI); 2084 - return -ENODEV; 2085 - } 2086 - 2227 + qsi(&si); 2087 2228 if (!si.as && !si.ad) 2088 2229 return -ENODEV; 2089 2230
+16
arch/s390/kernel/perf_pai_crypto.c
··· 738 738 [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 739 739 [155] = "IBM_RESERVED_155", 740 740 [156] = "IBM_RESERVED_156", 741 + [157] = "KM_FULL_XTS_AES_128", 742 + [158] = "KM_FULL_XTS_AES_256", 743 + [159] = "KM_FULL_XTS_ENCRYPTED_AES_128", 744 + [160] = "KM_FULL_XTS_ENCRYPTED_AES_256", 745 + [161] = "KMAC_HMAC_SHA_224", 746 + [162] = "KMAC_HMAC_SHA_256", 747 + [163] = "KMAC_HMAC_SHA_384", 748 + [164] = "KMAC_HMAC_SHA_512", 749 + [165] = "KMAC_HMAC_ENCRYPTED_SHA_224", 750 + [166] = "KMAC_HMAC_ENCRYPTED_SHA_256", 751 + [167] = "KMAC_HMAC_ENCRYPTED_SHA_384", 752 + [168] = "KMAC_HMAC_ENCRYPTED_SHA_512", 753 + [169] = "PCKMO_ENCRYPT_HMAC_512_KEY", 754 + [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY", 755 + [171] = "PCKMO_ENCRYPT_AES_XTS_128", 756 + [172] = "PCKMO_ENCRYPT_AES_XTS_256", 741 757 }; 742 758 743 759 static void __init attr_event_free(struct attribute **attrs, int num)
+9
arch/s390/kernel/perf_pai_ext.c
··· 635 635 [25] = "NNPA_1MFRAME", 636 636 [26] = "NNPA_2GFRAME", 637 637 [27] = "NNPA_ACCESSEXCEPT", 638 + [28] = "NNPA_TRANSFORM", 639 + [29] = "NNPA_GELU", 640 + [30] = "NNPA_MOMENTS", 641 + [31] = "NNPA_LAYERNORM", 642 + [32] = "NNPA_MATMUL_OP_BCAST1", 643 + [33] = "NNPA_SQRT", 644 + [34] = "NNPA_INVSQRT", 645 + [35] = "NNPA_NORM", 646 + [36] = "NNPA_REDUCE", 638 647 }; 639 648 640 649 static void __init attr_event_free(struct attribute **attrs, int num)
+21
arch/s390/kernel/smp.c
··· 671 671 return per_cpu(pcpu_devices, cpu).polarization; 672 672 } 673 673 674 + void smp_cpu_set_capacity(int cpu, unsigned long val) 675 + { 676 + per_cpu(pcpu_devices, cpu).capacity = val; 677 + } 678 + 679 + unsigned long smp_cpu_get_capacity(int cpu) 680 + { 681 + return per_cpu(pcpu_devices, cpu).capacity; 682 + } 683 + 684 + void smp_set_core_capacity(int cpu, unsigned long val) 685 + { 686 + int i; 687 + 688 + cpu = smp_get_base_cpu(cpu); 689 + for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++) 690 + smp_cpu_set_capacity(i, val); 691 + } 692 + 674 693 int smp_cpu_get_cpu_address(int cpu) 675 694 { 676 695 return per_cpu(pcpu_devices, cpu).address; ··· 738 719 else 739 720 pcpu->state = CPU_STATE_STANDBY; 740 721 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 722 + smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); 741 723 set_cpu_present(cpu, true); 742 724 if (!early && arch_register_cpu(cpu)) 743 725 set_cpu_present(cpu, false); ··· 981 961 ipl_pcpu->state = CPU_STATE_CONFIGURED; 982 962 lc->pcpu = (unsigned long)ipl_pcpu; 983 963 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 964 + smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH); 984 965 } 985 966 986 967 void __init smp_setup_processor_id(void)
-19
arch/s390/kernel/stacktrace.c
··· 162 162 { 163 163 arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false); 164 164 } 165 - 166 - unsigned long return_address(unsigned int n) 167 - { 168 - struct unwind_state state; 169 - unsigned long addr; 170 - 171 - /* Increment to skip current stack entry */ 172 - n++; 173 - 174 - unwind_for_each_frame(&state, NULL, NULL, 0) { 175 - addr = unwind_get_return_address(&state); 176 - if (!addr) 177 - break; 178 - if (!n--) 179 - return addr; 180 - } 181 - return 0; 182 - } 183 - EXPORT_SYMBOL_GPL(return_address);
+60 -16
arch/s390/kernel/topology.c
··· 24 24 #include <linux/mm.h> 25 25 #include <linux/nodemask.h> 26 26 #include <linux/node.h> 27 + #include <asm/hiperdispatch.h> 27 28 #include <asm/sysinfo.h> 28 29 29 30 #define PTF_HORIZONTAL (0UL) ··· 48 47 static void set_topology_timer(void); 49 48 static void topology_work_fn(struct work_struct *work); 50 49 static struct sysinfo_15_1_x *tl_info; 50 + static int cpu_management; 51 51 52 52 static DECLARE_WORK(topology_work, topology_work_fn); 53 53 ··· 146 144 cpumask_set_cpu(cpu, &book->mask); 147 145 cpumask_set_cpu(cpu, &socket->mask); 148 146 smp_cpu_set_polarization(cpu, tl_core->pp); 147 + smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); 149 148 } 150 149 } 151 150 } ··· 273 270 topo->drawer_id = id; 274 271 } 275 272 } 273 + hd_reset_state(); 276 274 for_each_online_cpu(cpu) { 277 275 topo = &cpu_topology[cpu]; 278 276 pkg_first = cpumask_first(&topo->core_mask); ··· 282 278 for_each_cpu(sibling, &topo->core_mask) { 283 279 topo_sibling = &cpu_topology[sibling]; 284 280 smt_first = cpumask_first(&topo_sibling->thread_mask); 285 - if (sibling == smt_first) 281 + if (sibling == smt_first) { 286 282 topo_package->booted_cores++; 283 + hd_add_core(sibling); 284 + } 287 285 } 288 286 } else { 289 287 topo->booted_cores = topo_package->booted_cores; ··· 309 303 static int __arch_update_cpu_topology(void) 310 304 { 311 305 struct sysinfo_15_1_x *info = tl_info; 312 - int rc = 0; 306 + int rc, hd_status; 313 307 308 + hd_status = 0; 309 + rc = 0; 314 310 mutex_lock(&smp_cpu_state_mutex); 315 311 if (MACHINE_HAS_TOPOLOGY) { 316 312 rc = 1; ··· 322 314 update_cpu_masks(); 323 315 if (!MACHINE_HAS_TOPOLOGY) 324 316 topology_update_polarization_simple(); 317 + if (cpu_management == 1) 318 + hd_status = hd_enable_hiperdispatch(); 325 319 mutex_unlock(&smp_cpu_state_mutex); 320 + if (hd_status == 0) 321 + hd_disable_hiperdispatch(); 326 322 return rc; 327 323 } 328 324 ··· 386 374 set_topology_timer(); 387 375 } 388 376 389 - static int cpu_management; 377 + static 
int set_polarization(int polarization) 378 + { 379 + int rc = 0; 380 + 381 + cpus_read_lock(); 382 + mutex_lock(&smp_cpu_state_mutex); 383 + if (cpu_management == polarization) 384 + goto out; 385 + rc = topology_set_cpu_management(polarization); 386 + if (rc) 387 + goto out; 388 + cpu_management = polarization; 389 + topology_expect_change(); 390 + out: 391 + mutex_unlock(&smp_cpu_state_mutex); 392 + cpus_read_unlock(); 393 + return rc; 394 + } 390 395 391 396 static ssize_t dispatching_show(struct device *dev, 392 397 struct device_attribute *attr, ··· 429 400 return -EINVAL; 430 401 if (val != 0 && val != 1) 431 402 return -EINVAL; 432 - rc = 0; 433 - cpus_read_lock(); 434 - mutex_lock(&smp_cpu_state_mutex); 435 - if (cpu_management == val) 436 - goto out; 437 - rc = topology_set_cpu_management(val); 438 - if (rc) 439 - goto out; 440 - cpu_management = val; 441 - topology_expect_change(); 442 - out: 443 - mutex_unlock(&smp_cpu_state_mutex); 444 - cpus_read_unlock(); 403 + rc = set_polarization(val); 445 404 return rc ? 
rc : count; 446 405 } 447 406 static DEVICE_ATTR_RW(dispatching); ··· 641 624 return rc; 642 625 } 643 626 627 + static int polarization_ctl_handler(const struct ctl_table *ctl, int write, 628 + void *buffer, size_t *lenp, loff_t *ppos) 629 + { 630 + int polarization; 631 + int rc; 632 + struct ctl_table ctl_entry = { 633 + .procname = ctl->procname, 634 + .data = &polarization, 635 + .maxlen = sizeof(int), 636 + .extra1 = SYSCTL_ZERO, 637 + .extra2 = SYSCTL_ONE, 638 + }; 639 + 640 + polarization = cpu_management; 641 + rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos); 642 + if (rc < 0 || !write) 643 + return rc; 644 + return set_polarization(polarization); 645 + } 646 + 644 647 static struct ctl_table topology_ctl_table[] = { 645 648 { 646 649 .procname = "topology", 647 650 .mode = 0644, 648 651 .proc_handler = topology_ctl_handler, 652 + }, 653 + { 654 + .procname = "polarization", 655 + .mode = 0644, 656 + .proc_handler = polarization_ctl_handler, 649 657 }, 650 658 }; 651 659 ··· 684 642 set_topology_timer(); 685 643 else 686 644 topology_update_polarization_simple(); 645 + if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL)) 646 + set_polarization(1); 687 647 register_sysctl("s390", topology_ctl_table); 688 648 689 649 dev_root = bus_get_dev_root(&cpu_subsys);
+215
arch/s390/kernel/wti.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Support for warning track interruption 4 + * 5 + * Copyright IBM Corp. 2023 6 + */ 7 + 8 + #include <linux/cpu.h> 9 + #include <linux/debugfs.h> 10 + #include <linux/kallsyms.h> 11 + #include <linux/smpboot.h> 12 + #include <linux/irq.h> 13 + #include <uapi/linux/sched/types.h> 14 + #include <asm/debug.h> 15 + #include <asm/diag.h> 16 + #include <asm/sclp.h> 17 + 18 + #define WTI_DBF_LEN 64 19 + 20 + struct wti_debug { 21 + unsigned long missed; 22 + unsigned long addr; 23 + pid_t pid; 24 + }; 25 + 26 + struct wti_state { 27 + /* debug data for s390dbf */ 28 + struct wti_debug dbg; 29 + /* 30 + * Represents the real-time thread responsible to 31 + * acknowledge the warning-track interrupt and trigger 32 + * preliminary and postliminary precautions. 33 + */ 34 + struct task_struct *thread; 35 + /* 36 + * If pending is true, the real-time thread must be scheduled. 37 + * If not, a wake up of that thread will remain a noop. 38 + */ 39 + bool pending; 40 + }; 41 + 42 + static DEFINE_PER_CPU(struct wti_state, wti_state); 43 + 44 + static debug_info_t *wti_dbg; 45 + 46 + /* 47 + * During a warning-track grace period, interrupts are disabled 48 + * to prevent delays of the warning-track acknowledgment. 49 + * 50 + * Once the CPU is physically dispatched again, interrupts are 51 + * re-enabled. 
52 + */ 53 + 54 + static void wti_irq_disable(void) 55 + { 56 + unsigned long flags; 57 + struct ctlreg cr6; 58 + 59 + local_irq_save(flags); 60 + local_ctl_store(6, &cr6); 61 + /* disable all I/O interrupts */ 62 + cr6.val &= ~0xff000000UL; 63 + local_ctl_load(6, &cr6); 64 + local_irq_restore(flags); 65 + } 66 + 67 + static void wti_irq_enable(void) 68 + { 69 + unsigned long flags; 70 + struct ctlreg cr6; 71 + 72 + local_irq_save(flags); 73 + local_ctl_store(6, &cr6); 74 + /* enable all I/O interrupts */ 75 + cr6.val |= 0xff000000UL; 76 + local_ctl_load(6, &cr6); 77 + local_irq_restore(flags); 78 + } 79 + 80 + static void store_debug_data(struct wti_state *st) 81 + { 82 + struct pt_regs *regs = get_irq_regs(); 83 + 84 + st->dbg.pid = current->pid; 85 + st->dbg.addr = 0; 86 + if (!user_mode(regs)) 87 + st->dbg.addr = regs->psw.addr; 88 + } 89 + 90 + static void wti_interrupt(struct ext_code ext_code, 91 + unsigned int param32, unsigned long param64) 92 + { 93 + struct wti_state *st = this_cpu_ptr(&wti_state); 94 + 95 + inc_irq_stat(IRQEXT_WTI); 96 + wti_irq_disable(); 97 + store_debug_data(st); 98 + st->pending = true; 99 + wake_up_process(st->thread); 100 + } 101 + 102 + static int wti_pending(unsigned int cpu) 103 + { 104 + struct wti_state *st = per_cpu_ptr(&wti_state, cpu); 105 + 106 + return st->pending; 107 + } 108 + 109 + static void wti_dbf_grace_period(struct wti_state *st) 110 + { 111 + struct wti_debug *wdi = &st->dbg; 112 + char buf[WTI_DBF_LEN]; 113 + 114 + if (wdi->addr) 115 + snprintf(buf, sizeof(buf), "%d %pS", wdi->pid, (void *)wdi->addr); 116 + else 117 + snprintf(buf, sizeof(buf), "%d <user>", wdi->pid); 118 + debug_text_event(wti_dbg, 2, buf); 119 + wdi->missed++; 120 + } 121 + 122 + static int wti_show(struct seq_file *seq, void *v) 123 + { 124 + struct wti_state *st; 125 + int cpu; 126 + 127 + cpus_read_lock(); 128 + seq_puts(seq, " "); 129 + for_each_online_cpu(cpu) 130 + seq_printf(seq, "CPU%-8d", cpu); 131 + seq_putc(seq, '\n'); 132 + 
for_each_online_cpu(cpu) { 133 + st = per_cpu_ptr(&wti_state, cpu); 134 + seq_printf(seq, " %10lu", st->dbg.missed); 135 + } 136 + seq_putc(seq, '\n'); 137 + cpus_read_unlock(); 138 + return 0; 139 + } 140 + DEFINE_SHOW_ATTRIBUTE(wti); 141 + 142 + static void wti_thread_fn(unsigned int cpu) 143 + { 144 + struct wti_state *st = per_cpu_ptr(&wti_state, cpu); 145 + 146 + st->pending = false; 147 + /* 148 + * Yield CPU voluntarily to the hypervisor. Control 149 + * resumes when hypervisor decides to dispatch CPU 150 + * to this LPAR again. 151 + */ 152 + if (diag49c(DIAG49C_SUBC_ACK)) 153 + wti_dbf_grace_period(st); 154 + wti_irq_enable(); 155 + } 156 + 157 + static struct smp_hotplug_thread wti_threads = { 158 + .store = &wti_state.thread, 159 + .thread_should_run = wti_pending, 160 + .thread_fn = wti_thread_fn, 161 + .thread_comm = "cpuwti/%u", 162 + .selfparking = false, 163 + }; 164 + 165 + static int __init wti_init(void) 166 + { 167 + struct sched_param wti_sched_param = { .sched_priority = MAX_RT_PRIO - 1 }; 168 + struct dentry *wti_dir; 169 + struct wti_state *st; 170 + int cpu, rc; 171 + 172 + rc = -EOPNOTSUPP; 173 + if (!sclp.has_wti) 174 + goto out; 175 + rc = smpboot_register_percpu_thread(&wti_threads); 176 + if (WARN_ON(rc)) 177 + goto out; 178 + for_each_online_cpu(cpu) { 179 + st = per_cpu_ptr(&wti_state, cpu); 180 + sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param); 181 + } 182 + rc = register_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt); 183 + if (rc) { 184 + pr_warn("Couldn't request external interrupt 0x1007\n"); 185 + goto out_thread; 186 + } 187 + irq_subclass_register(IRQ_SUBCLASS_WARNING_TRACK); 188 + rc = diag49c(DIAG49C_SUBC_REG); 189 + if (rc) { 190 + pr_warn("Failed to register warning track interrupt through DIAG 49C\n"); 191 + rc = -EOPNOTSUPP; 192 + goto out_subclass; 193 + } 194 + wti_dir = debugfs_create_dir("wti", arch_debugfs_dir); 195 + debugfs_create_file("stat", 0400, wti_dir, NULL, &wti_fops); 196 + wti_dbg = 
debug_register("wti", 1, 1, WTI_DBF_LEN); 197 + if (!wti_dbg) { 198 + rc = -ENOMEM; 199 + goto out_debug_register; 200 + } 201 + rc = debug_register_view(wti_dbg, &debug_hex_ascii_view); 202 + if (rc) 203 + goto out_debug_register; 204 + goto out; 205 + out_debug_register: 206 + debug_unregister(wti_dbg); 207 + out_subclass: 208 + irq_subclass_unregister(IRQ_SUBCLASS_WARNING_TRACK); 209 + unregister_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt); 210 + out_thread: 211 + smpboot_unregister_percpu_thread(&wti_threads); 212 + out: 213 + return rc; 214 + } 215 + late_initcall(wti_init);
+17 -1
arch/s390/mm/cmm.c
··· 95 95 (*counter)++; 96 96 spin_unlock(&cmm_lock); 97 97 nr--; 98 + cond_resched(); 98 99 } 99 100 return nr; 100 101 } 101 102 102 - static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) 103 + static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) 103 104 { 104 105 struct cmm_page_array *pa; 105 106 unsigned long addr; ··· 122 121 } 123 122 spin_unlock(&cmm_lock); 124 123 return nr; 124 + } 125 + 126 + static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) 127 + { 128 + long inc = 0; 129 + 130 + while (nr) { 131 + inc = min(256L, nr); 132 + nr -= inc; 133 + inc = __cmm_free_pages(inc, counter, list); 134 + if (inc) 135 + break; 136 + cond_resched(); 137 + } 138 + return nr + inc; 125 139 } 126 140 127 141 static int cmm_oom_notify(struct notifier_block *self,
+77 -114
arch/s390/mm/dump_pagetables.c
··· 18 18 struct addr_marker { 19 19 int is_start; 20 20 unsigned long start_address; 21 + unsigned long size; 21 22 const char *name; 22 23 }; 23 24 24 - enum address_markers_idx { 25 - KVA_NR = 0, 26 - LOWCORE_START_NR, 27 - LOWCORE_END_NR, 28 - AMODE31_START_NR, 29 - AMODE31_END_NR, 30 - KERNEL_START_NR, 31 - KERNEL_END_NR, 32 - #ifdef CONFIG_KFENCE 33 - KFENCE_START_NR, 34 - KFENCE_END_NR, 35 - #endif 36 - IDENTITY_START_NR, 37 - IDENTITY_END_NR, 38 - VMEMMAP_NR, 39 - VMEMMAP_END_NR, 40 - VMALLOC_NR, 41 - VMALLOC_END_NR, 42 - #ifdef CONFIG_KMSAN 43 - KMSAN_VMALLOC_SHADOW_START_NR, 44 - KMSAN_VMALLOC_SHADOW_END_NR, 45 - KMSAN_VMALLOC_ORIGIN_START_NR, 46 - KMSAN_VMALLOC_ORIGIN_END_NR, 47 - KMSAN_MODULES_SHADOW_START_NR, 48 - KMSAN_MODULES_SHADOW_END_NR, 49 - KMSAN_MODULES_ORIGIN_START_NR, 50 - KMSAN_MODULES_ORIGIN_END_NR, 51 - #endif 52 - MODULES_NR, 53 - MODULES_END_NR, 54 - ABS_LOWCORE_NR, 55 - ABS_LOWCORE_END_NR, 56 - MEMCPY_REAL_NR, 57 - MEMCPY_REAL_END_NR, 58 - #ifdef CONFIG_KASAN 59 - KASAN_SHADOW_START_NR, 60 - KASAN_SHADOW_END_NR, 61 - #endif 62 - }; 63 - 64 - static struct addr_marker address_markers[] = { 65 - [KVA_NR] = {0, 0, "Kernel Virtual Address Space"}, 66 - [LOWCORE_START_NR] = {1, 0, "Lowcore Start"}, 67 - [LOWCORE_END_NR] = {0, 0, "Lowcore End"}, 68 - [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"}, 69 - [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"}, 70 - [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"}, 71 - [AMODE31_END_NR] = {0, 0, "Amode31 Area End"}, 72 - [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"}, 73 - [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"}, 74 - #ifdef CONFIG_KFENCE 75 - [KFENCE_START_NR] = {1, 0, "KFence Pool Start"}, 76 - [KFENCE_END_NR] = {0, 0, "KFence Pool End"}, 77 - #endif 78 - [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"}, 79 - [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"}, 80 - [VMALLOC_NR] = {1, 0, "vmalloc Area Start"}, 81 - [VMALLOC_END_NR] = {0, 0, "vmalloc Area 
End"}, 82 - #ifdef CONFIG_KMSAN 83 - [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"}, 84 - [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"}, 85 - [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"}, 86 - [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"}, 87 - [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"}, 88 - [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"}, 89 - [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"}, 90 - [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"}, 91 - #endif 92 - [MODULES_NR] = {1, 0, "Modules Area Start"}, 93 - [MODULES_END_NR] = {0, 0, "Modules Area End"}, 94 - [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"}, 95 - [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"}, 96 - [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"}, 97 - [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"}, 98 - #ifdef CONFIG_KASAN 99 - [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"}, 100 - [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"}, 101 - #endif 102 - {1, -1UL, NULL} 103 - }; 25 + static struct addr_marker *markers; 26 + static unsigned int markers_cnt; 104 27 105 28 struct pg_state { 106 29 struct ptdump_state ptdump; ··· 96 173 97 174 while (addr >= st->marker[1].start_address) { 98 175 st->marker++; 99 - pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); 176 + pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name, 177 + st->marker->is_start ? 
"Start" : "End"); 100 178 } 101 179 st->start_address = addr; 102 180 st->current_prot = prot; ··· 126 202 if (level == -1) 127 203 addr = max_addr; 128 204 if (st->level == -1) { 129 - pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); 205 + pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n"); 130 206 note_page_update_state(st, addr, prot, level); 131 207 } else if (prot != st->current_prot || level != st->level || 132 208 addr >= st->marker[1].start_address) { ··· 200 276 .check_wx = false, 201 277 .wx_pages = 0, 202 278 .start_address = 0, 203 - .marker = address_markers, 279 + .marker = markers, 204 280 }; 205 281 206 282 get_online_mems(); ··· 223 299 if (ama->start_address < amb->start_address) 224 300 return -1; 225 301 /* 226 - * If the start addresses of two markers are identical consider the 227 - * marker which defines the start of an area higher than the one which 228 - * defines the end of an area. This keeps pairs of markers sorted. 302 + * If the start addresses of two markers are identical sort markers in an 303 + * order that considers areas contained within other areas correctly. 
229 304 */ 305 + if (ama->is_start && amb->is_start) { 306 + if (ama->size > amb->size) 307 + return -1; 308 + if (ama->size < amb->size) 309 + return 1; 310 + return 0; 311 + } 312 + if (!ama->is_start && !amb->is_start) { 313 + if (ama->size > amb->size) 314 + return 1; 315 + if (ama->size < amb->size) 316 + return -1; 317 + return 0; 318 + } 230 319 if (ama->is_start) 231 320 return 1; 232 321 if (amb->is_start) 233 322 return -1; 234 323 return 0; 324 + } 325 + 326 + static int add_marker(unsigned long start, unsigned long end, const char *name) 327 + { 328 + size_t oldsize, newsize; 329 + 330 + oldsize = markers_cnt * sizeof(*markers); 331 + newsize = oldsize + 2 * sizeof(*markers); 332 + if (!oldsize) 333 + markers = kvmalloc(newsize, GFP_KERNEL); 334 + else 335 + markers = kvrealloc(markers, newsize, GFP_KERNEL); 336 + if (!markers) 337 + goto error; 338 + markers[markers_cnt].is_start = 1; 339 + markers[markers_cnt].start_address = start; 340 + markers[markers_cnt].size = end - start; 341 + markers[markers_cnt].name = name; 342 + markers_cnt++; 343 + markers[markers_cnt].is_start = 0; 344 + markers[markers_cnt].start_address = end; 345 + markers[markers_cnt].size = end - start; 346 + markers[markers_cnt].name = name; 347 + markers_cnt++; 348 + return 0; 349 + error: 350 + markers_cnt = 0; 351 + return -ENOMEM; 235 352 } 236 353 237 354 static int pt_dump_init(void) ··· 281 316 unsigned long kfence_start = (unsigned long)__kfence_pool; 282 317 #endif 283 318 unsigned long lowcore = (unsigned long)get_lowcore(); 319 + int rc; 284 320 285 321 /* 286 322 * Figure out the maximum virtual address being accessible with the ··· 290 324 */ 291 325 max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; 292 326 max_addr = 1UL << (max_addr * 11 + 31); 293 - address_markers[LOWCORE_START_NR].start_address = lowcore; 294 - address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore); 295 - 
address_markers[IDENTITY_START_NR].start_address = __identity_base; 296 - address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size; 297 - address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; 298 - address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; 299 - address_markers[MODULES_NR].start_address = MODULES_VADDR; 300 - address_markers[MODULES_END_NR].start_address = MODULES_END; 301 - address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore; 302 - address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE; 303 - address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area; 304 - address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE; 305 - address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; 306 - address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size; 307 - address_markers[VMALLOC_NR].start_address = VMALLOC_START; 308 - address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; 327 + /* start + end markers - must be added first */ 328 + rc = add_marker(0, -1UL, NULL); 329 + rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image"); 330 + rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore"); 331 + rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping"); 332 + rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area"); 333 + rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area"); 334 + rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area"); 335 + rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area"); 336 + rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area"); 337 + rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area"); 309 338 #ifdef 
CONFIG_KFENCE 310 - address_markers[KFENCE_START_NR].start_address = kfence_start; 311 - address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE; 339 + rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool"); 312 340 #endif 313 341 #ifdef CONFIG_KMSAN 314 - address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START; 315 - address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END; 316 - address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START; 317 - address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END; 318 - address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START; 319 - address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END; 320 - address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START; 321 - address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END; 342 + rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow"); 343 + rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins"); 344 + rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow"); 345 + rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins"); 322 346 #endif 323 - sort(address_markers, ARRAY_SIZE(address_markers) - 1, 324 - sizeof(address_markers[0]), ptdump_cmp, NULL); 347 + #ifdef CONFIG_KASAN 348 + rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow"); 349 + #endif 350 + if (rc) 351 + goto error; 352 + sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL); 325 353 #ifdef CONFIG_PTDUMP_DEBUGFS 326 354 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); 327 355 #endif /* CONFIG_PTDUMP_DEBUGFS */ 328 356 
return 0; 357 + error: 358 + kvfree(markers); 359 + return -ENOMEM; 329 360 } 330 361 device_initcall(pt_dump_init);
+38 -14
arch/s390/tools/opcodes.txt
··· 527 527 b939 dfltcc RRF_R0RR2 528 528 b93a kdsa RRE_RR 529 529 b93b nnpa RRE_00 530 - b93c ppno RRE_RR 531 - b93e kimd RRE_RR 532 - b93f klmd RRE_RR 530 + b93c prno RRE_RR 531 + b93e kimd RRF_U0RR 532 + b93f klmd RRF_U0RR 533 533 b941 cfdtr RRF_UURF 534 534 b942 clgdtr RRF_UURF 535 535 b943 clfdtr RRF_UURF ··· 549 549 b965 ocgrk RRF_R0RR2 550 550 b966 nogrk RRF_R0RR2 551 551 b967 nxgrk RRF_R0RR2 552 + b968 clzg RRE_RR 553 + b969 ctzg RRE_RR 554 + b96c bextg RRF_R0RR2 555 + b96d bdepg RRF_R0RR2 552 556 b972 crt RRF_U0RR 553 557 b973 clrt RRF_U0RR 554 558 b974 nnrk RRF_R0RR2 ··· 800 796 e35c mfy RXY_RRRD 801 797 e35e aly RXY_RRRD 802 798 e35f sly RXY_RRRD 799 + e360 lxab RXY_RRRD 800 + e361 llxab RXY_RRRD 801 + e362 lxah RXY_RRRD 802 + e363 llxah RXY_RRRD 803 + e364 lxaf RXY_RRRD 804 + e365 llxaf RXY_RRRD 805 + e366 lxag RXY_RRRD 806 + e367 llxag RXY_RRRD 807 + e368 lxaq RXY_RRRD 808 + e369 llxaq RXY_RRRD 803 809 e370 sthy RXY_RRRD 804 810 e371 lay RXY_RRRD 805 811 e372 stcy RXY_RRRD ··· 894 880 e63d vstrl VSI_URDV 895 881 e63f vstrlr VRS_RRDV 896 882 e649 vlip VRI_V0UU2 883 + e64a vcvdq VRI_VV0UU 884 + e64e vcvbq VRR_VV0U2 897 885 e650 vcvb VRR_RV0UU 898 886 e651 vclzdp VRR_VV0U2 899 887 e652 vcvbg VRR_RV0UU ··· 909 893 e65c vupkzl VRR_VV0U2 910 894 e65d vcfn VRR_VV0UU2 911 895 e65e vclfnl VRR_VV0UU2 912 - e65f vtp VRR_0V 896 + e65f vtp VRR_0V0U 913 897 e670 vpkzr VRI_VVV0UU2 914 898 e671 vap VRI_VVV0UU2 915 899 e672 vsrpr VRI_VVV0UU2 ··· 924 908 e67c vscshp VRR_VVV 925 909 e67d vcsph VRR_VVV0U0 926 910 e67e vsdp VRI_VVV0UU2 911 + e67f vtz VRR_0VVU 927 912 e700 vleb VRX_VRRDU 928 913 e701 vleh VRX_VRRDU 929 914 e702 vleg VRX_VRRDU ··· 965 948 e750 vpopct VRR_VV0U 966 949 e752 vctz VRR_VV0U 967 950 e753 vclz VRR_VV0U 951 + e754 vgem VRR_VV0U 968 952 e756 vlr VRX_VV 969 953 e75c vistr VRR_VV0U0U 970 954 e75f vseg VRR_VV0U ··· 1003 985 e785 vbperm VRR_VVV 1004 986 e786 vsld VRI_VVV0U 1005 987 e787 vsrd VRI_VVV0U 988 + e788 veval VRI_VVV0UV 989 + e789 vblend 
VRR_VVVU0V 1006 990 e78a vstrc VRR_VVVUU0V 1007 991 e78b vstrs VRR_VVVUU0V 1008 992 e78c vperm VRR_VVV0V ··· 1030 1010 e7ad vmalo VRR_VVVU0V 1031 1011 e7ae vmae VRR_VVVU0V 1032 1012 e7af vmao VRR_VVVU0V 1013 + e7b0 vdl VRR_VVV0UU 1014 + e7b1 vrl VRR_VVV0UU 1015 + e7b2 vd VRR_VVV0UU 1016 + e7b3 vr VRR_VVV0UU 1033 1017 e7b4 vgfm VRR_VVV0U 1034 1018 e7b8 vmsl VRR_VVVUU0V 1035 1019 e7b9 vaccc VRR_VVVU0V ··· 1041 1017 e7bc vgfma VRR_VVVU0V 1042 1018 e7bd vsbcbi VRR_VVVU0V 1043 1019 e7bf vsbi VRR_VVVU0V 1044 - e7c0 vclgd VRR_VV0UUU 1045 - e7c1 vcdlg VRR_VV0UUU 1046 - e7c2 vcgd VRR_VV0UUU 1047 - e7c3 vcdg VRR_VV0UUU 1048 - e7c4 vlde VRR_VV0UU2 1049 - e7c5 vled VRR_VV0UUU 1020 + e7c0 vclfp VRR_VV0UUU 1021 + e7c1 vcfpl VRR_VV0UUU 1022 + e7c2 vcsfp VRR_VV0UUU 1023 + e7c3 vcfps VRR_VV0UUU 1024 + e7c4 vfll VRR_VV0UU2 1025 + e7c5 vflr VRR_VV0UUU 1050 1026 e7c7 vfi VRR_VV0UUU 1051 1027 e7ca wfk VRR_VV0UU2 1052 1028 e7cb wfc VRR_VV0UU2 ··· 1118 1094 eb55 cliy SIY_URD 1119 1095 eb56 oiy SIY_URD 1120 1096 eb57 xiy SIY_URD 1121 - eb60 lric RSY_RDRU 1122 - eb61 stric RSY_RDRU 1123 - eb62 mric RSY_RDRU 1097 + eb60 lric RSY_RURD2 1098 + eb61 stric RSY_RURD2 1099 + eb62 mric RSY_RURD2 1124 1100 eb6a asi SIY_IRD 1125 1101 eb6e alsi SIY_IRD 1126 1102 eb71 lpswey SIY_RD ··· 1128 1104 eb7e algsi SIY_IRD 1129 1105 eb80 icmh RSY_RURD 1130 1106 eb81 icmy RSY_RURD 1131 - eb8a sqbs RSY_RDRU 1107 + eb8a sqbs RSY_RURD2 1132 1108 eb8e mvclu RSY_RRRD 1133 1109 eb8f clclu RSY_RRRD 1134 1110 eb90 stmy RSY_RRRD
+69 -8
drivers/crypto/Kconfig
··· 21 21 (so called VIA PadLock ACE, Advanced Cryptography Engine) 22 22 that provides instructions for very fast cryptographic 23 23 operations with supported algorithms. 24 - 24 + 25 25 The instructions are used only when the CPU supports them. 26 26 Otherwise software encryption is used. 27 27 ··· 78 78 config PKEY 79 79 tristate "Kernel API for protected key handling" 80 80 depends on S390 81 - depends on ZCRYPT 82 81 help 83 - With this option enabled the pkey kernel module provides an API 82 + With this option enabled the pkey kernel modules provide an API 84 83 for creation and handling of protected keys. Other parts of the 85 84 kernel or userspace applications may use these functions. 86 85 87 - Select this option if you want to enable the kernel and userspace 88 - API for proteced key handling. 86 + The protected key support is distributed into: 87 + - A pkey base and API kernel module (pkey.ko) which offers the 88 + infrastructure for the pkey handler kernel modules, the ioctl 89 + and the sysfs API and the in-kernel API to the crypto cipher 90 + implementations using protected key. 91 + - A pkey pckmo kernel module (pkey-pckmo.ko) which is automatically 92 + loaded when pckmo support (that is generation of protected keys 93 + from clear key values) is available. 94 + - A pkey CCA kernel module (pkey-cca.ko) which is automatically 95 + loaded when a CEX crypto card is available. 96 + - A pkey EP11 kernel module (pkey-ep11.ko) which is automatically 97 + loaded when a CEX crypto card is available. 89 98 90 - Please note that creation of protected keys from secure keys 91 - requires to have at least one CEX card in coprocessor mode 92 - available at runtime. 99 + Select this option if you want to enable the kernel and userspace 100 + API for protected key handling. 
101 + 102 + config PKEY_CCA 103 + tristate "PKEY CCA support handler" 104 + depends on PKEY 105 + depends on ZCRYPT 106 + help 107 + This is the CCA support handler for deriving protected keys 108 + from CCA (secure) keys. Also this handler provides an alternate 109 + way to make protected keys from clear key values. 110 + 111 + The PKEY CCA support handler needs a Crypto Express card (CEX) 112 + in CCA mode. 113 + 114 + If you have selected the PKEY option then you should also enable 115 + this option unless you are sure you never need to derive protected 116 + keys from CCA key material. 117 + 118 + config PKEY_EP11 119 + tristate "PKEY EP11 support handler" 120 + depends on PKEY 121 + depends on ZCRYPT 122 + help 123 + This is the EP11 support handler for deriving protected keys 124 + from EP11 (secure) keys. Also this handler provides an alternate 125 + way to make protected keys from clear key values. 126 + 127 + The PKEY EP11 support handler needs a Crypto Express card (CEX) 128 + in EP11 mode. 129 + 130 + If you have selected the PKEY option then you should also enable 131 + this option unless you are sure you never need to derive protected 132 + keys from EP11 key material. 133 + 134 + config PKEY_PCKMO 135 + tristate "PKEY PCKMO support handler" 136 + depends on PKEY 137 + help 138 + This is the PCKMO support handler for deriving protected keys 139 + from clear key values via invoking the PCKMO instruction. 140 + 141 + The PCKMO instruction can be enabled and disabled in the crypto 142 + settings at the LPAR profile. This handler checks for availability 143 + during initialization and if build as a kernel module unloads 144 + itself if PCKMO is disabled. 145 + 146 + The PCKMO way of deriving protected keys from clear key material 147 + is especially used during self test of protected key ciphers like 148 + PAES but the CCA and EP11 handler provide alternate ways to 149 + generate protected keys from clear key values. 
150 + 151 + If you have selected the PKEY option then you should also enable 152 + this option unless you are sure you never need to derive protected 153 + keys from clear key values directly via PCKMO. 93 154 94 155 config CRYPTO_PAES_S390 95 156 tristate "PAES cipher algorithms"
+1
drivers/s390/char/sclp_early.c
··· 44 44 sclp.has_ibs = !!(sccb->fac117 & 0x20); 45 45 sclp.has_gisaf = !!(sccb->fac118 & 0x08); 46 46 sclp.has_hvs = !!(sccb->fac119 & 0x80); 47 + sclp.has_wti = !!(sccb->fac119 & 0x40); 47 48 sclp.has_kss = !!(sccb->fac98 & 0x01); 48 49 sclp.has_aisii = !!(sccb->fac118 & 0x40); 49 50 sclp.has_aeni = !!(sccb->fac118 & 0x20);
+14 -2
drivers/s390/crypto/Makefile
··· 13 13 # adapter drivers depend on ap.o and zcrypt.o 14 14 obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o 15 15 16 - # pkey kernel module 17 - pkey-objs := pkey_api.o 16 + # pkey base and api module 17 + pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o 18 18 obj-$(CONFIG_PKEY) += pkey.o 19 + 20 + # pkey cca handler module 21 + pkey-cca-objs := pkey_cca.o 22 + obj-$(CONFIG_PKEY_CCA) += pkey-cca.o 23 + 24 + # pkey ep11 handler module 25 + pkey-ep11-objs := pkey_ep11.o 26 + obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o 27 + 28 + # pkey pckmo handler module 29 + pkey-pckmo-objs := pkey_pckmo.o 30 + obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o 19 31 20 32 # adjunct processor matrix 21 33 vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
+37 -22
drivers/s390/crypto/ap_bus.c
··· 107 107 static bool ap_scan_bus(void); 108 108 static bool ap_scan_bus_result; /* result of last ap_scan_bus() */ 109 109 static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */ 110 + static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */ 110 111 static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */ 111 112 static int ap_scan_bus_time = AP_CONFIG_TIME; 112 113 static struct timer_list ap_scan_bus_timer; ··· 734 733 if (!completion_done(&ap_apqn_bindings_complete)) { 735 734 complete_all(&ap_apqn_bindings_complete); 736 735 ap_send_bindings_complete_uevent(); 737 - pr_debug("%s all apqn bindings complete\n", __func__); 736 + pr_debug("all apqn bindings complete\n"); 738 737 } 739 738 } 740 739 } ··· 769 768 else if (l == 0 && timeout) 770 769 rc = -ETIME; 771 770 772 - pr_debug("%s rc=%d\n", __func__, rc); 771 + pr_debug("rc=%d\n", rc); 773 772 return rc; 774 773 } 775 774 EXPORT_SYMBOL(ap_wait_apqn_bindings_complete); ··· 796 795 drvres = to_ap_drv(dev->driver)->flags 797 796 & AP_DRIVER_FLAG_DEFAULT; 798 797 if (!!devres != !!drvres) { 799 - pr_debug("%s reprobing queue=%02x.%04x\n", 800 - __func__, card, queue); 798 + pr_debug("reprobing queue=%02x.%04x\n", card, queue); 801 799 rc = device_reprobe(dev); 802 800 if (rc) 803 801 AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", ··· 1000 1000 unsigned long scan_counter = atomic64_read(&ap_scan_bus_count); 1001 1001 bool rc = false; 1002 1002 1003 - pr_debug(">%s scan counter=%lu\n", __func__, scan_counter); 1003 + pr_debug("> scan counter=%lu\n", scan_counter); 1004 1004 1005 1005 /* Only trigger AP bus scans after the initial scan is done */ 1006 1006 if (scan_counter <= 0) 1007 1007 goto out; 1008 1008 1009 + /* 1010 + * There is one unlikely but nevertheless valid scenario where the 1011 + * thread holding the mutex may try to send some crypto load but 1012 + * all cards are offline so a rescan is triggered which causes 1013 + * a 
recursive call of ap_bus_force_rescan(). A simple return if 1014 + * the mutex is already locked by this thread solves this. 1015 + */ 1016 + if (mutex_is_locked(&ap_scan_bus_mutex)) { 1017 + if (ap_scan_bus_task == current) 1018 + goto out; 1019 + } 1020 + 1009 1021 /* Try to acquire the AP scan bus mutex */ 1010 1022 if (mutex_trylock(&ap_scan_bus_mutex)) { 1011 1023 /* mutex acquired, run the AP bus scan */ 1024 + ap_scan_bus_task = current; 1012 1025 ap_scan_bus_result = ap_scan_bus(); 1013 1026 rc = ap_scan_bus_result; 1027 + ap_scan_bus_task = NULL; 1014 1028 mutex_unlock(&ap_scan_bus_mutex); 1015 1029 goto out; 1016 1030 } ··· 1043 1029 mutex_unlock(&ap_scan_bus_mutex); 1044 1030 1045 1031 out: 1046 - pr_debug("%s rc=%d\n", __func__, rc); 1032 + pr_debug("rc=%d\n", rc); 1047 1033 return rc; 1048 1034 } 1049 1035 EXPORT_SYMBOL(ap_bus_force_rescan); ··· 1057 1043 if (action != CHSC_NOTIFY_AP_CFG) 1058 1044 return NOTIFY_DONE; 1059 1045 1060 - pr_debug("%s config change, forcing bus rescan\n", __func__); 1046 + pr_debug("config change, forcing bus rescan\n"); 1061 1047 1062 1048 ap_bus_force_rescan(); 1063 1049 ··· 1914 1900 aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED; 1915 1901 } 1916 1902 spin_unlock_bh(&aq->lock); 1917 - pr_debug("%s(%d,%d) queue dev checkstop on\n", 1918 - __func__, ac->id, dom); 1903 + pr_debug("(%d,%d) queue dev checkstop on\n", 1904 + ac->id, dom); 1919 1905 /* 'receive' pending messages with -EAGAIN */ 1920 1906 ap_flush_queue(aq); 1921 1907 goto put_dev_and_continue; ··· 1925 1911 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) 1926 1912 _ap_queue_init_state(aq); 1927 1913 spin_unlock_bh(&aq->lock); 1928 - pr_debug("%s(%d,%d) queue dev checkstop off\n", 1929 - __func__, ac->id, dom); 1914 + pr_debug("(%d,%d) queue dev checkstop off\n", 1915 + ac->id, dom); 1930 1916 goto put_dev_and_continue; 1931 1917 } 1932 1918 /* config state change */ ··· 1938 1924 aq->last_err_rc = AP_RESPONSE_DECONFIGURED; 1939 1925 } 1940 1926 
spin_unlock_bh(&aq->lock); 1941 - pr_debug("%s(%d,%d) queue dev config off\n", 1942 - __func__, ac->id, dom); 1927 + pr_debug("(%d,%d) queue dev config off\n", 1928 + ac->id, dom); 1943 1929 ap_send_config_uevent(&aq->ap_dev, aq->config); 1944 1930 /* 'receive' pending messages with -EAGAIN */ 1945 1931 ap_flush_queue(aq); ··· 1950 1936 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) 1951 1937 _ap_queue_init_state(aq); 1952 1938 spin_unlock_bh(&aq->lock); 1953 - pr_debug("%s(%d,%d) queue dev config on\n", 1954 - __func__, ac->id, dom); 1939 + pr_debug("(%d,%d) queue dev config on\n", 1940 + ac->id, dom); 1955 1941 ap_send_config_uevent(&aq->ap_dev, aq->config); 1956 1942 goto put_dev_and_continue; 1957 1943 } ··· 2023 2009 ap_scan_rm_card_dev_and_queue_devs(ac); 2024 2010 put_device(dev); 2025 2011 } else { 2026 - pr_debug("%s(%d) no type info (no APQN found), ignored\n", 2027 - __func__, ap); 2012 + pr_debug("(%d) no type info (no APQN found), ignored\n", 2013 + ap); 2028 2014 } 2029 2015 return; 2030 2016 } ··· 2036 2022 ap_scan_rm_card_dev_and_queue_devs(ac); 2037 2023 put_device(dev); 2038 2024 } else { 2039 - pr_debug("%s(%d) no valid type (0) info, ignored\n", 2040 - __func__, ap); 2025 + pr_debug("(%d) no valid type (0) info, ignored\n", ap); 2041 2026 } 2042 2027 return; 2043 2028 } ··· 2215 2202 bool config_changed; 2216 2203 int ap; 2217 2204 2218 - pr_debug(">%s\n", __func__); 2205 + pr_debug(">\n"); 2219 2206 2220 2207 /* (re-)fetch configuration via QCI */ 2221 2208 config_changed = ap_get_configuration(); ··· 2256 2243 } 2257 2244 2258 2245 if (atomic64_inc_return(&ap_scan_bus_count) == 1) { 2259 - pr_debug("%s init scan complete\n", __func__); 2246 + pr_debug("init scan complete\n"); 2260 2247 ap_send_init_scan_done_uevent(); 2261 2248 } 2262 2249 ··· 2264 2251 2265 2252 mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ); 2266 2253 2267 - pr_debug("<%s config_changed=%d\n", __func__, config_changed); 2254 + pr_debug("< config_changed=%d\n", 
config_changed); 2268 2255 2269 2256 return config_changed; 2270 2257 } ··· 2297 2284 * system_long_wq which invokes this function here again. 2298 2285 */ 2299 2286 if (mutex_trylock(&ap_scan_bus_mutex)) { 2287 + ap_scan_bus_task = current; 2300 2288 ap_scan_bus_result = ap_scan_bus(); 2289 + ap_scan_bus_task = NULL; 2301 2290 mutex_unlock(&ap_scan_bus_mutex); 2302 2291 } 2303 2292 }
+10 -10
drivers/s390/crypto/ap_queue.c
··· 171 171 aq->queue_count = 0; 172 172 list_splice_init(&aq->pendingq, &aq->requestq); 173 173 aq->requestq_count += aq->pendingq_count; 174 - pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", 175 - __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), 174 + pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n", 175 + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), 176 176 aq->pendingq_count, aq->requestq_count); 177 177 aq->pendingq_count = 0; 178 178 break; ··· 453 453 case AP_BS_Q_USABLE: 454 454 /* association is through */ 455 455 aq->sm_state = AP_SM_STATE_IDLE; 456 - pr_debug("%s queue 0x%02x.%04x associated with %u\n", 457 - __func__, AP_QID_CARD(aq->qid), 456 + pr_debug("queue 0x%02x.%04x associated with %u\n", 457 + AP_QID_CARD(aq->qid), 458 458 AP_QID_QUEUE(aq->qid), aq->assoc_idx); 459 459 return AP_SM_WAIT_NONE; 460 460 case AP_BS_Q_USABLE_NO_SECURE_KEY: ··· 697 697 698 698 status = ap_test_queue(aq->qid, 1, &hwinfo); 699 699 if (status.response_code > AP_RESPONSE_BUSY) { 700 - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", 701 - __func__, status.response_code, 700 + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", 701 + status.response_code, 702 702 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); 703 703 return -EIO; 704 704 } ··· 853 853 854 854 status = ap_test_queue(aq->qid, 1, &hwinfo); 855 855 if (status.response_code > AP_RESPONSE_BUSY) { 856 - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", 857 - __func__, status.response_code, 856 + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", 857 + status.response_code, 858 858 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); 859 859 return -EIO; 860 860 } ··· 981 981 982 982 status = ap_test_queue(aq->qid, 1, &hwinfo); 983 983 if (status.response_code > AP_RESPONSE_BUSY) { 984 - pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n", 985 - __func__, status.response_code, 984 + pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n", 985 + status.response_code, 986 986 AP_QID_CARD(aq->qid), 
AP_QID_QUEUE(aq->qid)); 987 987 return -EIO; 988 988 }
+685 -2222
drivers/s390/crypto/pkey_api.c
··· 10 10 #define KMSG_COMPONENT "pkey" 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 - #include <linux/fs.h> 14 13 #include <linux/init.h> 15 14 #include <linux/miscdevice.h> 16 - #include <linux/module.h> 17 15 #include <linux/slab.h> 18 - #include <linux/kallsyms.h> 19 - #include <linux/debugfs.h> 20 - #include <linux/random.h> 21 - #include <linux/cpufeature.h> 22 - #include <asm/zcrypt.h> 23 - #include <asm/cpacf.h> 24 - #include <asm/pkey.h> 25 - #include <crypto/aes.h> 26 16 27 17 #include "zcrypt_api.h" 28 18 #include "zcrypt_ccamisc.h" 29 - #include "zcrypt_ep11misc.h" 30 19 31 - MODULE_LICENSE("GPL"); 32 - MODULE_AUTHOR("IBM Corporation"); 33 - MODULE_DESCRIPTION("s390 protected key interface"); 34 - 35 - #define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ 36 - #define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) 37 - #define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ 38 - #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ 39 - #define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ 20 + #include "pkey_base.h" 40 21 41 22 /* 42 - * debug feature data and functions 23 + * Helper functions 43 24 */ 44 - 45 - static debug_info_t *pkey_dbf_info; 46 - 47 - #define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) 48 - #define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) 49 - #define PKEY_DBF_ERR(...) 
debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) 50 - 51 - static void __init pkey_debug_init(void) 25 + static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 26 + const u8 *key, size_t keylen, 27 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 52 28 { 53 - /* 5 arguments per dbf entry (including the format string ptr) */ 54 - pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); 55 - debug_register_view(pkey_dbf_info, &debug_sprintf_view); 56 - debug_set_level(pkey_dbf_info, 3); 57 - } 58 - 59 - static void __exit pkey_debug_exit(void) 60 - { 61 - debug_unregister(pkey_dbf_info); 62 - } 63 - 64 - /* inside view of a protected key token (only type 0x00 version 0x01) */ 65 - struct protaeskeytoken { 66 - u8 type; /* 0x00 for PAES specific key tokens */ 67 - u8 res0[3]; 68 - u8 version; /* should be 0x01 for protected AES key token */ 69 - u8 res1[3]; 70 - u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 71 - u32 len; /* bytes actually stored in protkey[] */ 72 - u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ 73 - } __packed; 74 - 75 - /* inside view of a clear key token (type 0x00 version 0x02) */ 76 - struct clearkeytoken { 77 - u8 type; /* 0x00 for PAES specific key tokens */ 78 - u8 res0[3]; 79 - u8 version; /* 0x02 for clear key token */ 80 - u8 res1[3]; 81 - u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ 82 - u32 len; /* bytes actually stored in clearkey[] */ 83 - u8 clearkey[]; /* clear key value */ 84 - } __packed; 85 - 86 - /* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ 87 - static inline u32 pkey_keytype_aes_to_size(u32 keytype) 88 - { 89 - switch (keytype) { 90 - case PKEY_KEYTYPE_AES_128: 91 - return 16; 92 - case PKEY_KEYTYPE_AES_192: 93 - return 24; 94 - case PKEY_KEYTYPE_AES_256: 95 - return 32; 96 - default: 97 - return 0; 98 - } 99 - } 100 - 101 - /* 102 - * Create a protected key from a clear key value via PCKMO instruction. 
103 - */ 104 - static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, 105 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 106 - { 107 - /* mask of available pckmo subfunctions */ 108 - static cpacf_mask_t pckmo_functions; 109 - 110 - u8 paramblock[112]; 111 - u32 pkeytype; 112 - int keysize; 113 - long fc; 114 - 115 - switch (keytype) { 116 - case PKEY_KEYTYPE_AES_128: 117 - /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ 118 - keysize = 16; 119 - pkeytype = keytype; 120 - fc = CPACF_PCKMO_ENC_AES_128_KEY; 121 - break; 122 - case PKEY_KEYTYPE_AES_192: 123 - /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ 124 - keysize = 24; 125 - pkeytype = keytype; 126 - fc = CPACF_PCKMO_ENC_AES_192_KEY; 127 - break; 128 - case PKEY_KEYTYPE_AES_256: 129 - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 130 - keysize = 32; 131 - pkeytype = keytype; 132 - fc = CPACF_PCKMO_ENC_AES_256_KEY; 133 - break; 134 - case PKEY_KEYTYPE_ECC_P256: 135 - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 136 - keysize = 32; 137 - pkeytype = PKEY_KEYTYPE_ECC; 138 - fc = CPACF_PCKMO_ENC_ECC_P256_KEY; 139 - break; 140 - case PKEY_KEYTYPE_ECC_P384: 141 - /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ 142 - keysize = 48; 143 - pkeytype = PKEY_KEYTYPE_ECC; 144 - fc = CPACF_PCKMO_ENC_ECC_P384_KEY; 145 - break; 146 - case PKEY_KEYTYPE_ECC_P521: 147 - /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ 148 - keysize = 80; 149 - pkeytype = PKEY_KEYTYPE_ECC; 150 - fc = CPACF_PCKMO_ENC_ECC_P521_KEY; 151 - break; 152 - case PKEY_KEYTYPE_ECC_ED25519: 153 - /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 154 - keysize = 32; 155 - pkeytype = PKEY_KEYTYPE_ECC; 156 - fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; 157 - break; 158 - case PKEY_KEYTYPE_ECC_ED448: 159 - /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ 160 - keysize = 64; 161 - pkeytype = PKEY_KEYTYPE_ECC; 162 - fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; 163 - break; 164 - default: 165 - PKEY_DBF_ERR("%s unknown/unsupported keytype 
%u\n", 166 - __func__, keytype); 167 - return -EINVAL; 168 - } 169 - 170 - if (*protkeylen < keysize + AES_WK_VP_SIZE) { 171 - PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", 172 - __func__, *protkeylen, keysize + AES_WK_VP_SIZE); 173 - return -EINVAL; 174 - } 175 - 176 - /* Did we already check for PCKMO ? */ 177 - if (!pckmo_functions.bytes[0]) { 178 - /* no, so check now */ 179 - if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) 180 - return -ENODEV; 181 - } 182 - /* check for the pckmo subfunction we need now */ 183 - if (!cpacf_test_func(&pckmo_functions, fc)) { 184 - PKEY_DBF_ERR("%s pckmo functions not available\n", __func__); 185 - return -ENODEV; 186 - } 187 - 188 - /* prepare param block */ 189 - memset(paramblock, 0, sizeof(paramblock)); 190 - memcpy(paramblock, clrkey, keysize); 191 - 192 - /* call the pckmo instruction */ 193 - cpacf_pckmo(fc, paramblock); 194 - 195 - /* copy created protected key to key buffer including the wkvp block */ 196 - *protkeylen = keysize + AES_WK_VP_SIZE; 197 - memcpy(protkey, paramblock, *protkeylen); 198 - *protkeytype = pkeytype; 199 - 200 - return 0; 201 - } 202 - 203 - /* 204 - * Find card and transform secure key into protected key. 205 - */ 206 - static int pkey_skey2pkey(const u8 *key, u8 *protkey, 207 - u32 *protkeylen, u32 *protkeytype) 208 - { 209 - struct keytoken_header *hdr = (struct keytoken_header *)key; 210 - u16 cardnr, domain; 211 - int rc, verify; 212 - 213 - zcrypt_wait_api_operational(); 214 - 215 - /* 216 - * The cca_xxx2protkey call may fail when a card has been 217 - * addressed where the master key was changed after last fetch 218 - * of the mkvp into the cache. Try 3 times: First without verify 219 - * then with verify and last round with verify and old master 220 - * key verification pattern match not ignored. 
221 - */ 222 - for (verify = 0; verify < 3; verify++) { 223 - rc = cca_findcard(key, &cardnr, &domain, verify); 224 - if (rc < 0) 225 - continue; 226 - if (rc > 0 && verify < 2) 227 - continue; 228 - switch (hdr->version) { 229 - case TOKVER_CCA_AES: 230 - rc = cca_sec2protkey(cardnr, domain, key, 231 - protkey, protkeylen, protkeytype); 232 - break; 233 - case TOKVER_CCA_VLSC: 234 - rc = cca_cipher2protkey(cardnr, domain, key, 235 - protkey, protkeylen, 236 - protkeytype); 237 - break; 238 - default: 239 - return -EINVAL; 240 - } 241 - if (rc == 0) 242 - break; 243 - } 244 - 245 - if (rc) 246 - pr_debug("%s failed rc=%d\n", __func__, rc); 247 - 248 - return rc; 249 - } 250 - 251 - /* 252 - * Construct EP11 key with given clear key value. 253 - */ 254 - static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, 255 - u8 *keybuf, size_t *keybuflen) 256 - { 257 - u32 nr_apqns, *apqns = NULL; 258 - u16 card, dom; 259 - int i, rc; 260 - 261 - zcrypt_wait_api_operational(); 262 - 263 - /* build a list of apqns suitable for ep11 keys with cpacf support */ 264 - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 265 - ZCRYPT_CEX7, 266 - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, 267 - NULL); 268 - if (rc) 269 - goto out; 270 - 271 - /* go through the list of apqns and try to bild an ep11 key */ 272 - for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { 273 - card = apqns[i] >> 16; 274 - dom = apqns[i] & 0xFFFF; 275 - rc = ep11_clr2keyblob(card, dom, clrkeylen * 8, 276 - 0, clrkey, keybuf, keybuflen, 277 - PKEY_TYPE_EP11); 278 - if (rc == 0) 279 - break; 280 - } 281 - 282 - out: 283 - kfree(apqns); 284 - if (rc) 285 - pr_debug("%s failed rc=%d\n", __func__, rc); 286 - return rc; 287 - } 288 - 289 - /* 290 - * Find card and transform EP11 secure key into protected key. 
291 - */ 292 - static int pkey_ep11key2pkey(const u8 *key, size_t keylen, 293 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 294 - { 295 - u32 nr_apqns, *apqns = NULL; 296 - int i, j, rc = -ENODEV; 297 - u16 card, dom; 298 - 299 - zcrypt_wait_api_operational(); 300 - 301 - /* try two times in case of failure */ 302 - for (i = 0; i < 2 && rc; i++) { 303 - 304 - /* build a list of apqns suitable for this key */ 305 - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 306 - ZCRYPT_CEX7, 307 - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, 308 - ep11_kb_wkvp(key, keylen)); 309 - if (rc) 310 - continue; /* retry findcard on failure */ 311 - 312 - /* go through the list of apqns and try to derive an pkey */ 313 - for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) { 314 - card = apqns[j] >> 16; 315 - dom = apqns[j] & 0xFFFF; 316 - rc = ep11_kblob2protkey(card, dom, key, keylen, 317 - protkey, protkeylen, protkeytype); 318 - } 319 - 320 - kfree(apqns); 321 - } 322 - 323 - if (rc) 324 - pr_debug("%s failed rc=%d\n", __func__, rc); 325 - 326 - return rc; 327 - } 328 - 329 - /* 330 - * Verify key and give back some info about the key. 
331 - */ 332 - static int pkey_verifykey(const struct pkey_seckey *seckey, 333 - u16 *pcardnr, u16 *pdomain, 334 - u16 *pkeysize, u32 *pattributes) 335 - { 336 - struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; 337 - u16 cardnr, domain; 338 29 int rc; 339 30 340 - /* check the secure key for valid AES secure key */ 341 - rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0); 342 - if (rc) 343 - goto out; 344 - if (pattributes) 345 - *pattributes = PKEY_VERIFY_ATTR_AES; 346 - if (pkeysize) 347 - *pkeysize = t->bitsize; 31 + /* try the direct way */ 32 + rc = pkey_handler_key_to_protkey(apqns, nr_apqns, 33 + key, keylen, 34 + protkey, protkeylen, 35 + protkeytype); 348 36 349 - /* try to find a card which can handle this key */ 350 - rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1); 351 - if (rc < 0) 352 - goto out; 353 - 354 - if (rc > 0) { 355 - /* key mkvp matches to old master key mkvp */ 356 - pr_debug("%s secure key has old mkvp\n", __func__); 357 - if (pattributes) 358 - *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; 359 - rc = 0; 37 + /* if this did not work, try the slowpath way */ 38 + if (rc == -ENODEV) { 39 + rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, 40 + key, keylen, 41 + protkey, protkeylen, 42 + protkeytype); 43 + if (rc) 44 + rc = -ENODEV; 360 45 } 361 46 362 - if (pcardnr) 363 - *pcardnr = cardnr; 364 - if (pdomain) 365 - *pdomain = domain; 366 - 367 - out: 368 - pr_debug("%s rc=%d\n", __func__, rc); 47 + pr_debug("rc=%d\n", rc); 369 48 return rc; 370 49 } 371 50 372 51 /* 373 - * Generate a random protected key 52 + * In-Kernel function: Transform a key blob (of any type) into a protected key 374 53 */ 375 - static int pkey_genprotkey(u32 keytype, u8 *protkey, 376 - u32 *protkeylen, u32 *protkeytype) 54 + int pkey_key2protkey(const u8 *key, u32 keylen, 55 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 377 56 { 378 - u8 clrkey[32]; 379 - int keysize; 380 57 int rc; 381 58 382 - keysize = 
pkey_keytype_aes_to_size(keytype); 383 - if (!keysize) { 384 - PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__, 385 - keytype); 386 - return -EINVAL; 59 + rc = key2protkey(NULL, 0, key, keylen, 60 + protkey, protkeylen, protkeytype); 61 + if (rc == -ENODEV) { 62 + pkey_handler_request_modules(); 63 + rc = key2protkey(NULL, 0, key, keylen, 64 + protkey, protkeylen, protkeytype); 387 65 } 388 66 389 - /* generate a dummy random clear key */ 390 - get_random_bytes(clrkey, keysize); 391 - 392 - /* convert it to a dummy protected key */ 393 - rc = pkey_clr2protkey(keytype, clrkey, 394 - protkey, protkeylen, protkeytype); 395 - if (rc) 396 - return rc; 397 - 398 - /* replace the key part of the protected key with random bytes */ 399 - get_random_bytes(protkey, keysize); 400 - 401 - return 0; 67 + return rc; 402 68 } 69 + EXPORT_SYMBOL(pkey_key2protkey); 403 70 404 71 /* 405 - * Verify if a protected key is still valid 406 - */ 407 - static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, 408 - u32 protkeytype) 409 - { 410 - struct { 411 - u8 iv[AES_BLOCK_SIZE]; 412 - u8 key[MAXPROTKEYSIZE]; 413 - } param; 414 - u8 null_msg[AES_BLOCK_SIZE]; 415 - u8 dest_buf[AES_BLOCK_SIZE]; 416 - unsigned int k, pkeylen; 417 - unsigned long fc; 418 - 419 - switch (protkeytype) { 420 - case PKEY_KEYTYPE_AES_128: 421 - pkeylen = 16 + AES_WK_VP_SIZE; 422 - fc = CPACF_KMC_PAES_128; 423 - break; 424 - case PKEY_KEYTYPE_AES_192: 425 - pkeylen = 24 + AES_WK_VP_SIZE; 426 - fc = CPACF_KMC_PAES_192; 427 - break; 428 - case PKEY_KEYTYPE_AES_256: 429 - pkeylen = 32 + AES_WK_VP_SIZE; 430 - fc = CPACF_KMC_PAES_256; 431 - break; 432 - default: 433 - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, 434 - protkeytype); 435 - return -EINVAL; 436 - } 437 - if (protkeylen != pkeylen) { 438 - PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n", 439 - __func__, protkeylen, protkeytype); 440 - return -EINVAL; 441 - } 442 - 443 - memset(null_msg, 0, 
sizeof(null_msg)); 444 - 445 - memset(param.iv, 0, sizeof(param.iv)); 446 - memcpy(param.key, protkey, protkeylen); 447 - 448 - k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, 449 - sizeof(null_msg)); 450 - if (k != sizeof(null_msg)) { 451 - PKEY_DBF_ERR("%s protected key is not valid\n", __func__); 452 - return -EKEYREJECTED; 453 - } 454 - 455 - return 0; 456 - } 457 - 458 - /* Helper for pkey_nonccatok2pkey, handles aes clear key token */ 459 - static int nonccatokaes2pkey(const struct clearkeytoken *t, 460 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 461 - { 462 - size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE); 463 - u8 *tmpbuf = NULL; 464 - u32 keysize; 465 - int rc; 466 - 467 - keysize = pkey_keytype_aes_to_size(t->keytype); 468 - if (!keysize) { 469 - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", 470 - __func__, t->keytype); 471 - return -EINVAL; 472 - } 473 - if (t->len != keysize) { 474 - PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n", 475 - __func__, t->len); 476 - return -EINVAL; 477 - } 478 - 479 - /* try direct way with the PCKMO instruction */ 480 - rc = pkey_clr2protkey(t->keytype, t->clearkey, 481 - protkey, protkeylen, protkeytype); 482 - if (!rc) 483 - goto out; 484 - 485 - /* PCKMO failed, so try the CCA secure key way */ 486 - tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); 487 - if (!tmpbuf) 488 - return -ENOMEM; 489 - zcrypt_wait_api_operational(); 490 - rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf); 491 - if (rc) 492 - goto try_via_ep11; 493 - rc = pkey_skey2pkey(tmpbuf, 494 - protkey, protkeylen, protkeytype); 495 - if (!rc) 496 - goto out; 497 - 498 - try_via_ep11: 499 - /* if the CCA way also failed, let's try via EP11 */ 500 - rc = pkey_clr2ep11key(t->clearkey, t->len, 501 - tmpbuf, &tmpbuflen); 502 - if (rc) 503 - goto failure; 504 - rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen, 505 - protkey, protkeylen, protkeytype); 506 - if (!rc) 507 - goto out; 508 - 509 - 
failure: 510 - PKEY_DBF_ERR("%s unable to build protected key from clear", __func__); 511 - 512 - out: 513 - kfree(tmpbuf); 514 - return rc; 515 - } 516 - 517 - /* Helper for pkey_nonccatok2pkey, handles ecc clear key token */ 518 - static int nonccatokecc2pkey(const struct clearkeytoken *t, 519 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 520 - { 521 - u32 keylen; 522 - int rc; 523 - 524 - switch (t->keytype) { 525 - case PKEY_KEYTYPE_ECC_P256: 526 - keylen = 32; 527 - break; 528 - case PKEY_KEYTYPE_ECC_P384: 529 - keylen = 48; 530 - break; 531 - case PKEY_KEYTYPE_ECC_P521: 532 - keylen = 80; 533 - break; 534 - case PKEY_KEYTYPE_ECC_ED25519: 535 - keylen = 32; 536 - break; 537 - case PKEY_KEYTYPE_ECC_ED448: 538 - keylen = 64; 539 - break; 540 - default: 541 - PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", 542 - __func__, t->keytype); 543 - return -EINVAL; 544 - } 545 - 546 - if (t->len != keylen) { 547 - PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n", 548 - __func__, t->len); 549 - return -EINVAL; 550 - } 551 - 552 - /* only one path possible: via PCKMO instruction */ 553 - rc = pkey_clr2protkey(t->keytype, t->clearkey, 554 - protkey, protkeylen, protkeytype); 555 - if (rc) { 556 - PKEY_DBF_ERR("%s unable to build protected key from clear", 557 - __func__); 558 - } 559 - 560 - return rc; 561 - } 562 - 563 - /* 564 - * Transform a non-CCA key token into a protected key 565 - */ 566 - static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, 567 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 568 - { 569 - struct keytoken_header *hdr = (struct keytoken_header *)key; 570 - int rc = -EINVAL; 571 - 572 - switch (hdr->version) { 573 - case TOKVER_PROTECTED_KEY: { 574 - struct protaeskeytoken *t; 575 - 576 - if (keylen != sizeof(struct protaeskeytoken)) 577 - goto out; 578 - t = (struct protaeskeytoken *)key; 579 - rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype); 580 - if (rc) 581 - goto out; 582 - memcpy(protkey, t->protkey, t->len); 
583 - *protkeylen = t->len; 584 - *protkeytype = t->keytype; 585 - break; 586 - } 587 - case TOKVER_CLEAR_KEY: { 588 - struct clearkeytoken *t = (struct clearkeytoken *)key; 589 - 590 - if (keylen < sizeof(struct clearkeytoken) || 591 - keylen != sizeof(*t) + t->len) 592 - goto out; 593 - switch (t->keytype) { 594 - case PKEY_KEYTYPE_AES_128: 595 - case PKEY_KEYTYPE_AES_192: 596 - case PKEY_KEYTYPE_AES_256: 597 - rc = nonccatokaes2pkey(t, protkey, 598 - protkeylen, protkeytype); 599 - break; 600 - case PKEY_KEYTYPE_ECC_P256: 601 - case PKEY_KEYTYPE_ECC_P384: 602 - case PKEY_KEYTYPE_ECC_P521: 603 - case PKEY_KEYTYPE_ECC_ED25519: 604 - case PKEY_KEYTYPE_ECC_ED448: 605 - rc = nonccatokecc2pkey(t, protkey, 606 - protkeylen, protkeytype); 607 - break; 608 - default: 609 - PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n", 610 - __func__, t->keytype); 611 - return -EINVAL; 612 - } 613 - break; 614 - } 615 - case TOKVER_EP11_AES: { 616 - /* check ep11 key for exportable as protected key */ 617 - rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); 618 - if (rc) 619 - goto out; 620 - rc = pkey_ep11key2pkey(key, keylen, 621 - protkey, protkeylen, protkeytype); 622 - break; 623 - } 624 - case TOKVER_EP11_AES_WITH_HEADER: 625 - /* check ep11 key with header for exportable as protected key */ 626 - rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, 627 - 3, key, keylen, 1); 628 - if (rc) 629 - goto out; 630 - rc = pkey_ep11key2pkey(key, keylen, 631 - protkey, protkeylen, protkeytype); 632 - break; 633 - default: 634 - PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n", 635 - __func__, hdr->version); 636 - } 637 - 638 - out: 639 - return rc; 640 - } 641 - 642 - /* 643 - * Transform a CCA internal key token into a protected key 644 - */ 645 - static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, 646 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 647 - { 648 - struct keytoken_header *hdr = (struct keytoken_header *)key; 649 - 650 - switch 
(hdr->version) { 651 - case TOKVER_CCA_AES: 652 - if (keylen != sizeof(struct secaeskeytoken)) 653 - return -EINVAL; 654 - break; 655 - case TOKVER_CCA_VLSC: 656 - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) 657 - return -EINVAL; 658 - break; 659 - default: 660 - PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n", 661 - __func__, hdr->version); 662 - return -EINVAL; 663 - } 664 - 665 - return pkey_skey2pkey(key, protkey, protkeylen, protkeytype); 666 - } 667 - 668 - /* 669 - * Transform a key blob (of any type) into a protected key 670 - */ 671 - int pkey_keyblob2pkey(const u8 *key, u32 keylen, 672 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 673 - { 674 - struct keytoken_header *hdr = (struct keytoken_header *)key; 675 - int rc; 676 - 677 - if (keylen < sizeof(struct keytoken_header)) { 678 - PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen); 679 - return -EINVAL; 680 - } 681 - 682 - switch (hdr->type) { 683 - case TOKTYPE_NON_CCA: 684 - rc = pkey_nonccatok2pkey(key, keylen, 685 - protkey, protkeylen, protkeytype); 686 - break; 687 - case TOKTYPE_CCA_INTERNAL: 688 - rc = pkey_ccainttok2pkey(key, keylen, 689 - protkey, protkeylen, protkeytype); 690 - break; 691 - default: 692 - PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", 693 - __func__, hdr->type); 694 - return -EINVAL; 695 - } 696 - 697 - pr_debug("%s rc=%d\n", __func__, rc); 698 - return rc; 699 - } 700 - EXPORT_SYMBOL(pkey_keyblob2pkey); 701 - 702 - static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns, 703 - enum pkey_key_type ktype, enum pkey_key_size ksize, 704 - u32 kflags, u8 *keybuf, size_t *keybufsize) 705 - { 706 - int i, card, dom, rc; 707 - 708 - /* check for at least one apqn given */ 709 - if (!apqns || !nr_apqns) 710 - return -EINVAL; 711 - 712 - /* check key type and size */ 713 - switch (ktype) { 714 - case PKEY_TYPE_CCA_DATA: 715 - case PKEY_TYPE_CCA_CIPHER: 716 - if (*keybufsize < SECKEYBLOBSIZE) 717 - return -EINVAL; 718 - 
break; 719 - case PKEY_TYPE_EP11: 720 - if (*keybufsize < MINEP11AESKEYBLOBSIZE) 721 - return -EINVAL; 722 - break; 723 - case PKEY_TYPE_EP11_AES: 724 - if (*keybufsize < (sizeof(struct ep11kblob_header) + 725 - MINEP11AESKEYBLOBSIZE)) 726 - return -EINVAL; 727 - break; 728 - default: 729 - return -EINVAL; 730 - } 731 - switch (ksize) { 732 - case PKEY_SIZE_AES_128: 733 - case PKEY_SIZE_AES_192: 734 - case PKEY_SIZE_AES_256: 735 - break; 736 - default: 737 - return -EINVAL; 738 - } 739 - 740 - /* simple try all apqns from the list */ 741 - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 742 - card = apqns[i].card; 743 - dom = apqns[i].domain; 744 - if (ktype == PKEY_TYPE_EP11 || 745 - ktype == PKEY_TYPE_EP11_AES) { 746 - rc = ep11_genaeskey(card, dom, ksize, kflags, 747 - keybuf, keybufsize, ktype); 748 - } else if (ktype == PKEY_TYPE_CCA_DATA) { 749 - rc = cca_genseckey(card, dom, ksize, keybuf); 750 - *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); 751 - } else { 752 - /* TOKVER_CCA_VLSC */ 753 - rc = cca_gencipherkey(card, dom, ksize, kflags, 754 - keybuf, keybufsize); 755 - } 756 - if (rc == 0) 757 - break; 758 - } 759 - 760 - return rc; 761 - } 762 - 763 - static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns, 764 - enum pkey_key_type ktype, enum pkey_key_size ksize, 765 - u32 kflags, const u8 *clrkey, 766 - u8 *keybuf, size_t *keybufsize) 767 - { 768 - int i, card, dom, rc; 769 - 770 - /* check for at least one apqn given */ 771 - if (!apqns || !nr_apqns) 772 - return -EINVAL; 773 - 774 - /* check key type and size */ 775 - switch (ktype) { 776 - case PKEY_TYPE_CCA_DATA: 777 - case PKEY_TYPE_CCA_CIPHER: 778 - if (*keybufsize < SECKEYBLOBSIZE) 779 - return -EINVAL; 780 - break; 781 - case PKEY_TYPE_EP11: 782 - if (*keybufsize < MINEP11AESKEYBLOBSIZE) 783 - return -EINVAL; 784 - break; 785 - case PKEY_TYPE_EP11_AES: 786 - if (*keybufsize < (sizeof(struct ep11kblob_header) + 787 - MINEP11AESKEYBLOBSIZE)) 788 - return -EINVAL; 789 - break; 790 - 
default: 791 - return -EINVAL; 792 - } 793 - switch (ksize) { 794 - case PKEY_SIZE_AES_128: 795 - case PKEY_SIZE_AES_192: 796 - case PKEY_SIZE_AES_256: 797 - break; 798 - default: 799 - return -EINVAL; 800 - } 801 - 802 - zcrypt_wait_api_operational(); 803 - 804 - /* simple try all apqns from the list */ 805 - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 806 - card = apqns[i].card; 807 - dom = apqns[i].domain; 808 - if (ktype == PKEY_TYPE_EP11 || 809 - ktype == PKEY_TYPE_EP11_AES) { 810 - rc = ep11_clr2keyblob(card, dom, ksize, kflags, 811 - clrkey, keybuf, keybufsize, 812 - ktype); 813 - } else if (ktype == PKEY_TYPE_CCA_DATA) { 814 - rc = cca_clr2seckey(card, dom, ksize, 815 - clrkey, keybuf); 816 - *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); 817 - } else { 818 - /* TOKVER_CCA_VLSC */ 819 - rc = cca_clr2cipherkey(card, dom, ksize, kflags, 820 - clrkey, keybuf, keybufsize); 821 - } 822 - if (rc == 0) 823 - break; 824 - } 825 - 826 - return rc; 827 - } 828 - 829 - static int pkey_verifykey2(const u8 *key, size_t keylen, 830 - u16 *cardnr, u16 *domain, 831 - enum pkey_key_type *ktype, 832 - enum pkey_key_size *ksize, u32 *flags) 833 - { 834 - struct keytoken_header *hdr = (struct keytoken_header *)key; 835 - u32 _nr_apqns, *_apqns = NULL; 836 - int rc; 837 - 838 - if (keylen < sizeof(struct keytoken_header)) 839 - return -EINVAL; 840 - 841 - if (hdr->type == TOKTYPE_CCA_INTERNAL && 842 - hdr->version == TOKVER_CCA_AES) { 843 - struct secaeskeytoken *t = (struct secaeskeytoken *)key; 844 - 845 - rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); 846 - if (rc) 847 - goto out; 848 - if (ktype) 849 - *ktype = PKEY_TYPE_CCA_DATA; 850 - if (ksize) 851 - *ksize = (enum pkey_key_size)t->bitsize; 852 - 853 - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 854 - ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); 855 - if (rc == 0 && flags) 856 - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 857 - if (rc == -ENODEV) { 858 - rc = cca_findcard2(&_apqns, &_nr_apqns, 859 - *cardnr, 
*domain, 860 - ZCRYPT_CEX3C, AES_MK_SET, 861 - 0, t->mkvp, 1); 862 - if (rc == 0 && flags) 863 - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 864 - } 865 - if (rc) 866 - goto out; 867 - 868 - *cardnr = ((struct pkey_apqn *)_apqns)->card; 869 - *domain = ((struct pkey_apqn *)_apqns)->domain; 870 - 871 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 872 - hdr->version == TOKVER_CCA_VLSC) { 873 - struct cipherkeytoken *t = (struct cipherkeytoken *)key; 874 - 875 - rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); 876 - if (rc) 877 - goto out; 878 - if (ktype) 879 - *ktype = PKEY_TYPE_CCA_CIPHER; 880 - if (ksize) { 881 - *ksize = PKEY_SIZE_UNKNOWN; 882 - if (!t->plfver && t->wpllen == 512) 883 - *ksize = PKEY_SIZE_AES_128; 884 - else if (!t->plfver && t->wpllen == 576) 885 - *ksize = PKEY_SIZE_AES_192; 886 - else if (!t->plfver && t->wpllen == 640) 887 - *ksize = PKEY_SIZE_AES_256; 888 - } 889 - 890 - rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 891 - ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1); 892 - if (rc == 0 && flags) 893 - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 894 - if (rc == -ENODEV) { 895 - rc = cca_findcard2(&_apqns, &_nr_apqns, 896 - *cardnr, *domain, 897 - ZCRYPT_CEX6, AES_MK_SET, 898 - 0, t->mkvp0, 1); 899 - if (rc == 0 && flags) 900 - *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 901 - } 902 - if (rc) 903 - goto out; 904 - 905 - *cardnr = ((struct pkey_apqn *)_apqns)->card; 906 - *domain = ((struct pkey_apqn *)_apqns)->domain; 907 - 908 - } else if (hdr->type == TOKTYPE_NON_CCA && 909 - hdr->version == TOKVER_EP11_AES) { 910 - struct ep11keyblob *kb = (struct ep11keyblob *)key; 911 - int api; 912 - 913 - rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); 914 - if (rc) 915 - goto out; 916 - if (ktype) 917 - *ktype = PKEY_TYPE_EP11; 918 - if (ksize) 919 - *ksize = kb->head.bitlen; 920 - 921 - api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 922 - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 923 - ZCRYPT_CEX7, api, 924 - ep11_kb_wkvp(key, keylen)); 925 - if (rc) 926 - goto out; 927 - 928 - if (flags) 929 - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 930 - 931 - *cardnr = ((struct pkey_apqn *)_apqns)->card; 932 - *domain = ((struct pkey_apqn *)_apqns)->domain; 933 - 934 - } else if (hdr->type == TOKTYPE_NON_CCA && 935 - hdr->version == TOKVER_EP11_AES_WITH_HEADER) { 936 - struct ep11kblob_header *kh = (struct ep11kblob_header *)key; 937 - int api; 938 - 939 - rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, 940 - 3, key, keylen, 1); 941 - if (rc) 942 - goto out; 943 - if (ktype) 944 - *ktype = PKEY_TYPE_EP11_AES; 945 - if (ksize) 946 - *ksize = kh->bitlen; 947 - 948 - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 949 - rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 950 - ZCRYPT_CEX7, api, 951 - ep11_kb_wkvp(key, keylen)); 952 - if (rc) 953 - goto out; 954 - 955 - if (flags) 956 - *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 957 - 958 - *cardnr = ((struct pkey_apqn *)_apqns)->card; 959 - *domain = ((struct pkey_apqn *)_apqns)->domain; 960 - } else { 961 - rc = -EINVAL; 962 - } 963 - 964 - out: 965 - kfree(_apqns); 966 - return rc; 967 - } 968 - 969 - static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, 970 - const u8 *key, size_t keylen, 971 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 972 - { 973 - struct keytoken_header *hdr = (struct keytoken_header *)key; 974 - int i, card, dom, rc; 975 - 976 - /* check for at least one apqn given */ 977 - if (!apqns || !nr_apqns) 978 - return -EINVAL; 979 - 980 - if (keylen < sizeof(struct keytoken_header)) 981 - return -EINVAL; 982 - 983 - if (hdr->type == TOKTYPE_CCA_INTERNAL) { 984 - if (hdr->version == TOKVER_CCA_AES) { 985 - if (keylen != sizeof(struct secaeskeytoken)) 986 - return -EINVAL; 987 - if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) 988 - return -EINVAL; 989 - } else if 
(hdr->version == TOKVER_CCA_VLSC) { 990 - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) 991 - return -EINVAL; 992 - if (cca_check_secaescipherkey(pkey_dbf_info, 993 - 3, key, 0, 1)) 994 - return -EINVAL; 995 - } else { 996 - PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", 997 - __func__, hdr->version); 998 - return -EINVAL; 999 - } 1000 - } else if (hdr->type == TOKTYPE_NON_CCA) { 1001 - if (hdr->version == TOKVER_EP11_AES) { 1002 - if (ep11_check_aes_key(pkey_dbf_info, 1003 - 3, key, keylen, 1)) 1004 - return -EINVAL; 1005 - } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) { 1006 - if (ep11_check_aes_key_with_hdr(pkey_dbf_info, 1007 - 3, key, keylen, 1)) 1008 - return -EINVAL; 1009 - } else { 1010 - return pkey_nonccatok2pkey(key, keylen, 1011 - protkey, protkeylen, 1012 - protkeytype); 1013 - } 1014 - } else { 1015 - PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", 1016 - __func__, hdr->type); 1017 - return -EINVAL; 1018 - } 1019 - 1020 - zcrypt_wait_api_operational(); 1021 - 1022 - /* simple try all apqns from the list */ 1023 - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 1024 - card = apqns[i].card; 1025 - dom = apqns[i].domain; 1026 - if (hdr->type == TOKTYPE_CCA_INTERNAL && 1027 - hdr->version == TOKVER_CCA_AES) { 1028 - rc = cca_sec2protkey(card, dom, key, 1029 - protkey, protkeylen, protkeytype); 1030 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 1031 - hdr->version == TOKVER_CCA_VLSC) { 1032 - rc = cca_cipher2protkey(card, dom, key, 1033 - protkey, protkeylen, 1034 - protkeytype); 1035 - } else { 1036 - rc = ep11_kblob2protkey(card, dom, key, keylen, 1037 - protkey, protkeylen, 1038 - protkeytype); 1039 - } 1040 - if (rc == 0) 1041 - break; 1042 - } 1043 - 1044 - return rc; 1045 - } 1046 - 1047 - static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, 1048 - struct pkey_apqn *apqns, size_t *nr_apqns) 1049 - { 1050 - struct keytoken_header *hdr = (struct keytoken_header *)key; 1051 - u32 _nr_apqns, *_apqns = 
NULL; 1052 - int rc; 1053 - 1054 - if (keylen < sizeof(struct keytoken_header) || flags == 0) 1055 - return -EINVAL; 1056 - 1057 - zcrypt_wait_api_operational(); 1058 - 1059 - if (hdr->type == TOKTYPE_NON_CCA && 1060 - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1061 - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1062 - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1063 - struct ep11keyblob *kb = (struct ep11keyblob *) 1064 - (key + sizeof(struct ep11kblob_header)); 1065 - int minhwtype = 0, api = 0; 1066 - 1067 - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 1068 - return -EINVAL; 1069 - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { 1070 - minhwtype = ZCRYPT_CEX7; 1071 - api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 1072 - } 1073 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1074 - minhwtype, api, kb->wkvp); 1075 - if (rc) 1076 - goto out; 1077 - } else if (hdr->type == TOKTYPE_NON_CCA && 1078 - hdr->version == TOKVER_EP11_AES && 1079 - is_ep11_keyblob(key)) { 1080 - struct ep11keyblob *kb = (struct ep11keyblob *)key; 1081 - int minhwtype = 0, api = 0; 1082 - 1083 - if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 1084 - return -EINVAL; 1085 - if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { 1086 - minhwtype = ZCRYPT_CEX7; 1087 - api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 1088 - } 1089 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1090 - minhwtype, api, kb->wkvp); 1091 - if (rc) 1092 - goto out; 1093 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { 1094 - u64 cur_mkvp = 0, old_mkvp = 0; 1095 - int minhwtype = ZCRYPT_CEX3C; 1096 - 1097 - if (hdr->version == TOKVER_CCA_AES) { 1098 - struct secaeskeytoken *t = (struct secaeskeytoken *)key; 1099 - 1100 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1101 - cur_mkvp = t->mkvp; 1102 - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1103 - old_mkvp = t->mkvp; 1104 - } else if (hdr->version == TOKVER_CCA_VLSC) { 1105 - struct cipherkeytoken *t = (struct cipherkeytoken *)key; 1106 - 1107 - minhwtype = ZCRYPT_CEX6; 1108 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1109 - cur_mkvp = t->mkvp0; 1110 - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1111 - old_mkvp = t->mkvp0; 1112 - } else { 1113 - /* unknown cca internal token type */ 1114 - return -EINVAL; 1115 - } 1116 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1117 - minhwtype, AES_MK_SET, 1118 - cur_mkvp, old_mkvp, 1); 1119 - if (rc) 1120 - goto out; 1121 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 1122 - struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; 1123 - u64 cur_mkvp = 0, old_mkvp = 0; 1124 - 1125 - if (t->secid == 0x20) { 1126 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1127 - cur_mkvp = t->mkvp; 1128 - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1129 - old_mkvp = t->mkvp; 1130 - } else { 1131 - /* unknown cca internal 2 token type */ 1132 - return -EINVAL; 1133 - } 1134 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1135 - ZCRYPT_CEX7, APKA_MK_SET, 1136 - cur_mkvp, old_mkvp, 1); 1137 - if (rc) 1138 - goto out; 1139 - } else { 1140 - return -EINVAL; 1141 - } 1142 - 1143 - if (apqns) { 1144 - if (*nr_apqns < _nr_apqns) 1145 - rc = -ENOSPC; 1146 - else 1147 - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 1148 - } 1149 - *nr_apqns = _nr_apqns; 1150 - 1151 - out: 1152 - kfree(_apqns); 
1153 - return rc; 1154 - } 1155 - 1156 - static int pkey_apqns4keytype(enum pkey_key_type ktype, 1157 - u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 1158 - struct pkey_apqn *apqns, size_t *nr_apqns) 1159 - { 1160 - u32 _nr_apqns, *_apqns = NULL; 1161 - int rc; 1162 - 1163 - zcrypt_wait_api_operational(); 1164 - 1165 - if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { 1166 - u64 cur_mkvp = 0, old_mkvp = 0; 1167 - int minhwtype = ZCRYPT_CEX3C; 1168 - 1169 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1170 - cur_mkvp = *((u64 *)cur_mkvp); 1171 - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1172 - old_mkvp = *((u64 *)alt_mkvp); 1173 - if (ktype == PKEY_TYPE_CCA_CIPHER) 1174 - minhwtype = ZCRYPT_CEX6; 1175 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1176 - minhwtype, AES_MK_SET, 1177 - cur_mkvp, old_mkvp, 1); 1178 - if (rc) 1179 - goto out; 1180 - } else if (ktype == PKEY_TYPE_CCA_ECC) { 1181 - u64 cur_mkvp = 0, old_mkvp = 0; 1182 - 1183 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1184 - cur_mkvp = *((u64 *)cur_mkvp); 1185 - if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1186 - old_mkvp = *((u64 *)alt_mkvp); 1187 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1188 - ZCRYPT_CEX7, APKA_MK_SET, 1189 - cur_mkvp, old_mkvp, 1); 1190 - if (rc) 1191 - goto out; 1192 - 1193 - } else if (ktype == PKEY_TYPE_EP11 || 1194 - ktype == PKEY_TYPE_EP11_AES || 1195 - ktype == PKEY_TYPE_EP11_ECC) { 1196 - u8 *wkvp = NULL; 1197 - int api; 1198 - 1199 - if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 1200 - wkvp = cur_mkvp; 1201 - api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 1202 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 1203 - ZCRYPT_CEX7, api, wkvp); 1204 - if (rc) 1205 - goto out; 1206 - 1207 - } else { 1208 - return -EINVAL; 1209 - } 1210 - 1211 - if (apqns) { 1212 - if (*nr_apqns < _nr_apqns) 1213 - rc = -ENOSPC; 1214 - else 1215 - memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 1216 - } 1217 - *nr_apqns = _nr_apqns; 1218 - 1219 - out: 1220 - kfree(_apqns); 1221 - return rc; 1222 - } 1223 - 1224 - static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, 1225 - const u8 *key, size_t keylen, 1226 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1227 - { 1228 - struct keytoken_header *hdr = (struct keytoken_header *)key; 1229 - int i, card, dom, rc; 1230 - 1231 - /* check for at least one apqn given */ 1232 - if (!apqns || !nr_apqns) 1233 - return -EINVAL; 1234 - 1235 - if (keylen < sizeof(struct keytoken_header)) 1236 - return -EINVAL; 1237 - 1238 - if (hdr->type == TOKTYPE_NON_CCA && 1239 - hdr->version == TOKVER_EP11_AES_WITH_HEADER && 1240 - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1241 - /* EP11 AES key blob with header */ 1242 - if (ep11_check_aes_key_with_hdr(pkey_dbf_info, 1243 - 3, key, keylen, 1)) 1244 - return -EINVAL; 1245 - } else if (hdr->type == TOKTYPE_NON_CCA && 1246 - hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 1247 - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1248 - /* EP11 ECC key blob with header */ 1249 - if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, 1250 - 3, key, keylen, 1)) 1251 - return -EINVAL; 1252 - } else if (hdr->type == TOKTYPE_NON_CCA && 1253 - hdr->version == TOKVER_EP11_AES && 1254 - is_ep11_keyblob(key)) { 1255 - /* EP11 AES key blob with header in session field */ 1256 - if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) 1257 - return -EINVAL; 1258 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { 1259 - if (hdr->version == TOKVER_CCA_AES) { 1260 - /* CCA AES data key */ 1261 - if (keylen 
!= sizeof(struct secaeskeytoken)) 1262 - return -EINVAL; 1263 - if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) 1264 - return -EINVAL; 1265 - } else if (hdr->version == TOKVER_CCA_VLSC) { 1266 - /* CCA AES cipher key */ 1267 - if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) 1268 - return -EINVAL; 1269 - if (cca_check_secaescipherkey(pkey_dbf_info, 1270 - 3, key, 0, 1)) 1271 - return -EINVAL; 1272 - } else { 1273 - PKEY_DBF_ERR("%s unknown CCA internal token version %d\n", 1274 - __func__, hdr->version); 1275 - return -EINVAL; 1276 - } 1277 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 1278 - /* CCA ECC (private) key */ 1279 - if (keylen < sizeof(struct eccprivkeytoken)) 1280 - return -EINVAL; 1281 - if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) 1282 - return -EINVAL; 1283 - } else if (hdr->type == TOKTYPE_NON_CCA) { 1284 - return pkey_nonccatok2pkey(key, keylen, 1285 - protkey, protkeylen, protkeytype); 1286 - } else { 1287 - PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n", 1288 - __func__, hdr->type); 1289 - return -EINVAL; 1290 - } 1291 - 1292 - /* simple try all apqns from the list */ 1293 - for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 1294 - card = apqns[i].card; 1295 - dom = apqns[i].domain; 1296 - if (hdr->type == TOKTYPE_NON_CCA && 1297 - (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1298 - hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1299 - is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) 1300 - rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1301 - protkey, protkeylen, 1302 - protkeytype); 1303 - else if (hdr->type == TOKTYPE_NON_CCA && 1304 - hdr->version == TOKVER_EP11_AES && 1305 - is_ep11_keyblob(key)) 1306 - rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1307 - protkey, protkeylen, 1308 - protkeytype); 1309 - else if (hdr->type == TOKTYPE_CCA_INTERNAL && 1310 - hdr->version == TOKVER_CCA_AES) 1311 - rc = cca_sec2protkey(card, dom, key, protkey, 1312 - protkeylen, protkeytype); 1313 
- else if (hdr->type == TOKTYPE_CCA_INTERNAL && 1314 - hdr->version == TOKVER_CCA_VLSC) 1315 - rc = cca_cipher2protkey(card, dom, key, protkey, 1316 - protkeylen, protkeytype); 1317 - else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) 1318 - rc = cca_ecc2protkey(card, dom, key, protkey, 1319 - protkeylen, protkeytype); 1320 - else 1321 - return -EINVAL; 1322 - } 1323 - 1324 - return rc; 1325 - } 1326 - 1327 - /* 1328 - * File io functions 72 + * Ioctl functions 1329 73 */ 1330 74 1331 75 static void *_copy_key_from_user(void __user *ukey, size_t keylen) ··· 88 1344 return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); 89 1345 } 90 1346 1347 + static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) 1348 + { 1349 + struct pkey_genseck kgs; 1350 + struct pkey_apqn apqn; 1351 + u32 keybuflen; 1352 + int rc; 1353 + 1354 + if (copy_from_user(&kgs, ugs, sizeof(kgs))) 1355 + return -EFAULT; 1356 + 1357 + apqn.card = kgs.cardnr; 1358 + apqn.domain = kgs.domain; 1359 + keybuflen = sizeof(kgs.seckey.seckey); 1360 + rc = pkey_handler_gen_key(&apqn, 1, 1361 + kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, 1362 + kgs.seckey.seckey, &keybuflen, NULL); 1363 + pr_debug("gen_key()=%d\n", rc); 1364 + if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) 1365 + rc = -EFAULT; 1366 + memzero_explicit(&kgs, sizeof(kgs)); 1367 + 1368 + return rc; 1369 + } 1370 + 1371 + static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) 1372 + { 1373 + struct pkey_clr2seck kcs; 1374 + struct pkey_apqn apqn; 1375 + u32 keybuflen; 1376 + int rc; 1377 + 1378 + if (copy_from_user(&kcs, ucs, sizeof(kcs))) 1379 + return -EFAULT; 1380 + 1381 + apqn.card = kcs.cardnr; 1382 + apqn.domain = kcs.domain; 1383 + keybuflen = sizeof(kcs.seckey.seckey); 1384 + rc = pkey_handler_clr_to_key(&apqn, 1, 1385 + kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, 1386 + kcs.clrkey.clrkey, 1387 + pkey_keytype_aes_to_size(kcs.keytype), 1388 + kcs.seckey.seckey, &keybuflen, NULL); 1389 + pr_debug("clr_to_key()=%d\n", rc); 1390 + 
if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) 1391 + rc = -EFAULT; 1392 + memzero_explicit(&kcs, sizeof(kcs)); 1393 + 1394 + return rc; 1395 + } 1396 + 1397 + static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) 1398 + { 1399 + struct pkey_sec2protk ksp; 1400 + struct pkey_apqn apqn; 1401 + int rc; 1402 + 1403 + if (copy_from_user(&ksp, usp, sizeof(ksp))) 1404 + return -EFAULT; 1405 + 1406 + apqn.card = ksp.cardnr; 1407 + apqn.domain = ksp.domain; 1408 + ksp.protkey.len = sizeof(ksp.protkey.protkey); 1409 + rc = pkey_handler_key_to_protkey(&apqn, 1, 1410 + ksp.seckey.seckey, 1411 + sizeof(ksp.seckey.seckey), 1412 + ksp.protkey.protkey, 1413 + &ksp.protkey.len, &ksp.protkey.type); 1414 + pr_debug("key_to_protkey()=%d\n", rc); 1415 + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 1416 + rc = -EFAULT; 1417 + memzero_explicit(&ksp, sizeof(ksp)); 1418 + 1419 + return rc; 1420 + } 1421 + 1422 + static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) 1423 + { 1424 + struct pkey_clr2protk kcp; 1425 + struct clearkeytoken *t; 1426 + u32 keylen; 1427 + u8 *tmpbuf; 1428 + int rc; 1429 + 1430 + if (copy_from_user(&kcp, ucp, sizeof(kcp))) 1431 + return -EFAULT; 1432 + 1433 + /* build a 'clear key token' from the clear key value */ 1434 + keylen = pkey_keytype_aes_to_size(kcp.keytype); 1435 + if (!keylen) { 1436 + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", 1437 + __func__, kcp.keytype); 1438 + memzero_explicit(&kcp, sizeof(kcp)); 1439 + return -EINVAL; 1440 + } 1441 + tmpbuf = kzalloc(sizeof(*t) + keylen, GFP_KERNEL); 1442 + if (!tmpbuf) { 1443 + memzero_explicit(&kcp, sizeof(kcp)); 1444 + return -ENOMEM; 1445 + } 1446 + t = (struct clearkeytoken *)tmpbuf; 1447 + t->type = TOKTYPE_NON_CCA; 1448 + t->version = TOKVER_CLEAR_KEY; 1449 + t->keytype = (keylen - 8) >> 3; 1450 + t->len = keylen; 1451 + memcpy(t->clearkey, kcp.clrkey.clrkey, keylen); 1452 + kcp.protkey.len = sizeof(kcp.protkey.protkey); 1453 + 1454 + rc = key2protkey(NULL, 0, 1455 
+ tmpbuf, sizeof(*t) + keylen, 1456 + kcp.protkey.protkey, 1457 + &kcp.protkey.len, &kcp.protkey.type); 1458 + pr_debug("key2protkey()=%d\n", rc); 1459 + 1460 + kfree_sensitive(tmpbuf); 1461 + 1462 + if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp))) 1463 + rc = -EFAULT; 1464 + memzero_explicit(&kcp, sizeof(kcp)); 1465 + 1466 + return rc; 1467 + } 1468 + 1469 + static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) 1470 + { 1471 + struct pkey_findcard kfc; 1472 + struct pkey_apqn *apqns; 1473 + size_t nr_apqns; 1474 + int rc; 1475 + 1476 + if (copy_from_user(&kfc, ufc, sizeof(kfc))) 1477 + return -EFAULT; 1478 + 1479 + nr_apqns = MAXAPQNSINLIST; 1480 + apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL); 1481 + if (!apqns) 1482 + return -ENOMEM; 1483 + 1484 + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, 1485 + sizeof(kfc.seckey.seckey), 1486 + PKEY_FLAGS_MATCH_CUR_MKVP, 1487 + apqns, &nr_apqns); 1488 + if (rc == -ENODEV) 1489 + rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, 1490 + sizeof(kfc.seckey.seckey), 1491 + PKEY_FLAGS_MATCH_ALT_MKVP, 1492 + apqns, &nr_apqns); 1493 + pr_debug("apqns_for_key()=%d\n", rc); 1494 + if (rc) { 1495 + kfree(apqns); 1496 + return rc; 1497 + } 1498 + kfc.cardnr = apqns[0].card; 1499 + kfc.domain = apqns[0].domain; 1500 + kfree(apqns); 1501 + if (copy_to_user(ufc, &kfc, sizeof(kfc))) 1502 + return -EFAULT; 1503 + 1504 + return 0; 1505 + } 1506 + 1507 + static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) 1508 + { 1509 + struct pkey_skey2pkey ksp; 1510 + int rc; 1511 + 1512 + if (copy_from_user(&ksp, usp, sizeof(ksp))) 1513 + return -EFAULT; 1514 + 1515 + ksp.protkey.len = sizeof(ksp.protkey.protkey); 1516 + rc = pkey_handler_key_to_protkey(NULL, 0, 1517 + ksp.seckey.seckey, 1518 + sizeof(ksp.seckey.seckey), 1519 + ksp.protkey.protkey, 1520 + &ksp.protkey.len, 1521 + &ksp.protkey.type); 1522 + pr_debug("key_to_protkey()=%d\n", rc); 1523 + if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 
1524 + rc = -EFAULT; 1525 + memzero_explicit(&ksp, sizeof(ksp)); 1526 + 1527 + return rc; 1528 + } 1529 + 1530 + static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) 1531 + { 1532 + u32 keytype, keybitsize, flags; 1533 + struct pkey_verifykey kvk; 1534 + int rc; 1535 + 1536 + if (copy_from_user(&kvk, uvk, sizeof(kvk))) 1537 + return -EFAULT; 1538 + 1539 + kvk.cardnr = 0xFFFF; 1540 + kvk.domain = 0xFFFF; 1541 + rc = pkey_handler_verify_key(kvk.seckey.seckey, 1542 + sizeof(kvk.seckey.seckey), 1543 + &kvk.cardnr, &kvk.domain, 1544 + &keytype, &keybitsize, &flags); 1545 + pr_debug("verify_key()=%d\n", rc); 1546 + if (!rc && keytype != PKEY_TYPE_CCA_DATA) 1547 + rc = -EINVAL; 1548 + kvk.attributes = PKEY_VERIFY_ATTR_AES; 1549 + kvk.keysize = (u16)keybitsize; 1550 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 1551 + kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP; 1552 + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) 1553 + rc = -EFAULT; 1554 + memzero_explicit(&kvk, sizeof(kvk)); 1555 + 1556 + return rc; 1557 + } 1558 + 1559 + static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp) 1560 + { 1561 + struct pkey_genprotk kgp; 1562 + int rc; 1563 + 1564 + if (copy_from_user(&kgp, ugp, sizeof(kgp))) 1565 + return -EFAULT; 1566 + 1567 + kgp.protkey.len = sizeof(kgp.protkey.protkey); 1568 + rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, 1569 + PKEY_TYPE_PROTKEY, 0, 0, 1570 + kgp.protkey.protkey, &kgp.protkey.len, 1571 + &kgp.protkey.type); 1572 + pr_debug("gen_key()=%d\n", rc); 1573 + if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) 1574 + rc = -EFAULT; 1575 + memzero_explicit(&kgp, sizeof(kgp)); 1576 + 1577 + return rc; 1578 + } 1579 + 1580 + static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) 1581 + { 1582 + struct pkey_verifyprotk kvp; 1583 + struct protaeskeytoken *t; 1584 + u32 keytype; 1585 + u8 *tmpbuf; 1586 + int rc; 1587 + 1588 + if (copy_from_user(&kvp, uvp, sizeof(kvp))) 1589 + return -EFAULT; 1590 + 1591 + keytype = 
pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len); 1592 + if (!keytype) { 1593 + PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n", 1594 + __func__, kvp.protkey.len); 1595 + memzero_explicit(&kvp, sizeof(kvp)); 1596 + return -EINVAL; 1597 + } 1598 + 1599 + /* build a 'protected key token' from the raw protected key */ 1600 + tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL); 1601 + if (!tmpbuf) { 1602 + memzero_explicit(&kvp, sizeof(kvp)); 1603 + return -ENOMEM; 1604 + } 1605 + t = (struct protaeskeytoken *)tmpbuf; 1606 + t->type = TOKTYPE_NON_CCA; 1607 + t->version = TOKVER_PROTECTED_KEY; 1608 + t->keytype = keytype; 1609 + t->len = kvp.protkey.len; 1610 + memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); 1611 + 1612 + rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), 1613 + NULL, NULL, NULL, NULL, NULL); 1614 + pr_debug("verify_key()=%d\n", rc); 1615 + 1616 + kfree_sensitive(tmpbuf); 1617 + memzero_explicit(&kvp, sizeof(kvp)); 1618 + 1619 + return rc; 1620 + } 1621 + 1622 + static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) 1623 + { 1624 + struct pkey_kblob2pkey ktp; 1625 + u8 *kkey; 1626 + int rc; 1627 + 1628 + if (copy_from_user(&ktp, utp, sizeof(ktp))) 1629 + return -EFAULT; 1630 + kkey = _copy_key_from_user(ktp.key, ktp.keylen); 1631 + if (IS_ERR(kkey)) 1632 + return PTR_ERR(kkey); 1633 + ktp.protkey.len = sizeof(ktp.protkey.protkey); 1634 + rc = key2protkey(NULL, 0, kkey, ktp.keylen, 1635 + ktp.protkey.protkey, &ktp.protkey.len, 1636 + &ktp.protkey.type); 1637 + pr_debug("key2protkey()=%d\n", rc); 1638 + kfree_sensitive(kkey); 1639 + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) 1640 + rc = -EFAULT; 1641 + memzero_explicit(&ktp, sizeof(ktp)); 1642 + 1643 + return rc; 1644 + } 1645 + 1646 + static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) 1647 + { 1648 + u32 klen = KEYBLOBBUFSIZE; 1649 + struct pkey_genseck2 kgs; 1650 + struct pkey_apqn *apqns; 1651 + u8 *kkey; 1652 + int rc; 1653 + u32 u; 1654 + 1655 + if 
(copy_from_user(&kgs, ugs, sizeof(kgs))) 1656 + return -EFAULT; 1657 + u = pkey_aes_bitsize_to_keytype(kgs.size); 1658 + if (!u) { 1659 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 1660 + __func__, kgs.size); 1661 + return -EINVAL; 1662 + } 1663 + apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); 1664 + if (IS_ERR(apqns)) 1665 + return PTR_ERR(apqns); 1666 + kkey = kzalloc(klen, GFP_KERNEL); 1667 + if (!kkey) { 1668 + kfree(apqns); 1669 + return -ENOMEM; 1670 + } 1671 + rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, 1672 + u, kgs.type, kgs.size, kgs.keygenflags, 1673 + kkey, &klen, NULL); 1674 + pr_debug("gen_key()=%d\n", rc); 1675 + kfree(apqns); 1676 + if (rc) { 1677 + kfree_sensitive(kkey); 1678 + return rc; 1679 + } 1680 + if (kgs.key) { 1681 + if (kgs.keylen < klen) { 1682 + kfree_sensitive(kkey); 1683 + return -EINVAL; 1684 + } 1685 + if (copy_to_user(kgs.key, kkey, klen)) { 1686 + kfree_sensitive(kkey); 1687 + return -EFAULT; 1688 + } 1689 + } 1690 + kgs.keylen = klen; 1691 + if (copy_to_user(ugs, &kgs, sizeof(kgs))) 1692 + rc = -EFAULT; 1693 + kfree_sensitive(kkey); 1694 + 1695 + return rc; 1696 + } 1697 + 1698 + static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) 1699 + { 1700 + u32 klen = KEYBLOBBUFSIZE; 1701 + struct pkey_clr2seck2 kcs; 1702 + struct pkey_apqn *apqns; 1703 + u8 *kkey; 1704 + int rc; 1705 + u32 u; 1706 + 1707 + if (copy_from_user(&kcs, ucs, sizeof(kcs))) 1708 + return -EFAULT; 1709 + u = pkey_aes_bitsize_to_keytype(kcs.size); 1710 + if (!u) { 1711 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 1712 + __func__, kcs.size); 1713 + memzero_explicit(&kcs, sizeof(kcs)); 1714 + return -EINVAL; 1715 + } 1716 + apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); 1717 + if (IS_ERR(apqns)) { 1718 + memzero_explicit(&kcs, sizeof(kcs)); 1719 + return PTR_ERR(apqns); 1720 + } 1721 + kkey = kzalloc(klen, GFP_KERNEL); 1722 + if (!kkey) { 1723 + kfree(apqns); 1724 + memzero_explicit(&kcs, 
sizeof(kcs)); 1725 + return -ENOMEM; 1726 + } 1727 + rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, 1728 + u, kcs.type, kcs.size, kcs.keygenflags, 1729 + kcs.clrkey.clrkey, kcs.size / 8, 1730 + kkey, &klen, NULL); 1731 + pr_debug("clr_to_key()=%d\n", rc); 1732 + kfree(apqns); 1733 + if (rc) { 1734 + kfree_sensitive(kkey); 1735 + memzero_explicit(&kcs, sizeof(kcs)); 1736 + return rc; 1737 + } 1738 + if (kcs.key) { 1739 + if (kcs.keylen < klen) { 1740 + kfree_sensitive(kkey); 1741 + memzero_explicit(&kcs, sizeof(kcs)); 1742 + return -EINVAL; 1743 + } 1744 + if (copy_to_user(kcs.key, kkey, klen)) { 1745 + kfree_sensitive(kkey); 1746 + memzero_explicit(&kcs, sizeof(kcs)); 1747 + return -EFAULT; 1748 + } 1749 + } 1750 + kcs.keylen = klen; 1751 + if (copy_to_user(ucs, &kcs, sizeof(kcs))) 1752 + rc = -EFAULT; 1753 + memzero_explicit(&kcs, sizeof(kcs)); 1754 + kfree_sensitive(kkey); 1755 + 1756 + return rc; 1757 + } 1758 + 1759 + static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) 1760 + { 1761 + struct pkey_verifykey2 kvk; 1762 + u8 *kkey; 1763 + int rc; 1764 + 1765 + if (copy_from_user(&kvk, uvk, sizeof(kvk))) 1766 + return -EFAULT; 1767 + kkey = _copy_key_from_user(kvk.key, kvk.keylen); 1768 + if (IS_ERR(kkey)) 1769 + return PTR_ERR(kkey); 1770 + 1771 + rc = pkey_handler_verify_key(kkey, kvk.keylen, 1772 + &kvk.cardnr, &kvk.domain, 1773 + &kvk.type, &kvk.size, &kvk.flags); 1774 + pr_debug("verify_key()=%d\n", rc); 1775 + 1776 + kfree_sensitive(kkey); 1777 + if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) 1778 + return -EFAULT; 1779 + 1780 + return rc; 1781 + } 1782 + 1783 + static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) 1784 + { 1785 + struct pkey_apqn *apqns = NULL; 1786 + struct pkey_kblob2pkey2 ktp; 1787 + u8 *kkey; 1788 + int rc; 1789 + 1790 + if (copy_from_user(&ktp, utp, sizeof(ktp))) 1791 + return -EFAULT; 1792 + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); 1793 + if (IS_ERR(apqns)) 1794 + return 
PTR_ERR(apqns); 1795 + kkey = _copy_key_from_user(ktp.key, ktp.keylen); 1796 + if (IS_ERR(kkey)) { 1797 + kfree(apqns); 1798 + return PTR_ERR(kkey); 1799 + } 1800 + ktp.protkey.len = sizeof(ktp.protkey.protkey); 1801 + rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, 1802 + ktp.protkey.protkey, &ktp.protkey.len, 1803 + &ktp.protkey.type); 1804 + pr_debug("key2protkey()=%d\n", rc); 1805 + kfree(apqns); 1806 + kfree_sensitive(kkey); 1807 + if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) 1808 + rc = -EFAULT; 1809 + memzero_explicit(&ktp, sizeof(ktp)); 1810 + 1811 + return rc; 1812 + } 1813 + 1814 + static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) 1815 + { 1816 + struct pkey_apqn *apqns = NULL; 1817 + struct pkey_apqns4key kak; 1818 + size_t nr_apqns, len; 1819 + u8 *kkey; 1820 + int rc; 1821 + 1822 + if (copy_from_user(&kak, uak, sizeof(kak))) 1823 + return -EFAULT; 1824 + nr_apqns = kak.apqn_entries; 1825 + if (nr_apqns) { 1826 + apqns = kmalloc_array(nr_apqns, 1827 + sizeof(struct pkey_apqn), 1828 + GFP_KERNEL); 1829 + if (!apqns) 1830 + return -ENOMEM; 1831 + } 1832 + kkey = _copy_key_from_user(kak.key, kak.keylen); 1833 + if (IS_ERR(kkey)) { 1834 + kfree(apqns); 1835 + return PTR_ERR(kkey); 1836 + } 1837 + rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, 1838 + apqns, &nr_apqns); 1839 + pr_debug("apqns_for_key()=%d\n", rc); 1840 + kfree_sensitive(kkey); 1841 + if (rc && rc != -ENOSPC) { 1842 + kfree(apqns); 1843 + return rc; 1844 + } 1845 + if (!rc && kak.apqns) { 1846 + if (nr_apqns > kak.apqn_entries) { 1847 + kfree(apqns); 1848 + return -EINVAL; 1849 + } 1850 + len = nr_apqns * sizeof(struct pkey_apqn); 1851 + if (len) { 1852 + if (copy_to_user(kak.apqns, apqns, len)) { 1853 + kfree(apqns); 1854 + return -EFAULT; 1855 + } 1856 + } 1857 + } 1858 + kak.apqn_entries = nr_apqns; 1859 + if (copy_to_user(uak, &kak, sizeof(kak))) 1860 + rc = -EFAULT; 1861 + kfree(apqns); 1862 + 1863 + return rc; 1864 + } 1865 + 1866 + static int 
pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat) 1867 + { 1868 + struct pkey_apqn *apqns = NULL; 1869 + struct pkey_apqns4keytype kat; 1870 + size_t nr_apqns, len; 1871 + int rc; 1872 + 1873 + if (copy_from_user(&kat, uat, sizeof(kat))) 1874 + return -EFAULT; 1875 + nr_apqns = kat.apqn_entries; 1876 + if (nr_apqns) { 1877 + apqns = kmalloc_array(nr_apqns, 1878 + sizeof(struct pkey_apqn), 1879 + GFP_KERNEL); 1880 + if (!apqns) 1881 + return -ENOMEM; 1882 + } 1883 + rc = pkey_handler_apqns_for_keytype(kat.type, 1884 + kat.cur_mkvp, kat.alt_mkvp, 1885 + kat.flags, apqns, &nr_apqns); 1886 + pr_debug("apqns_for_keytype()=%d\n", rc); 1887 + if (rc && rc != -ENOSPC) { 1888 + kfree(apqns); 1889 + return rc; 1890 + } 1891 + if (!rc && kat.apqns) { 1892 + if (nr_apqns > kat.apqn_entries) { 1893 + kfree(apqns); 1894 + return -EINVAL; 1895 + } 1896 + len = nr_apqns * sizeof(struct pkey_apqn); 1897 + if (len) { 1898 + if (copy_to_user(kat.apqns, apqns, len)) { 1899 + kfree(apqns); 1900 + return -EFAULT; 1901 + } 1902 + } 1903 + } 1904 + kat.apqn_entries = nr_apqns; 1905 + if (copy_to_user(uat, &kat, sizeof(kat))) 1906 + rc = -EFAULT; 1907 + kfree(apqns); 1908 + 1909 + return rc; 1910 + } 1911 + 1912 + static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) 1913 + { 1914 + u32 protkeylen = PROTKEYBLOBBUFSIZE; 1915 + struct pkey_apqn *apqns = NULL; 1916 + struct pkey_kblob2pkey3 ktp; 1917 + u8 *kkey, *protkey; 1918 + int rc; 1919 + 1920 + if (copy_from_user(&ktp, utp, sizeof(ktp))) 1921 + return -EFAULT; 1922 + apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); 1923 + if (IS_ERR(apqns)) 1924 + return PTR_ERR(apqns); 1925 + kkey = _copy_key_from_user(ktp.key, ktp.keylen); 1926 + if (IS_ERR(kkey)) { 1927 + kfree(apqns); 1928 + return PTR_ERR(kkey); 1929 + } 1930 + protkey = kmalloc(protkeylen, GFP_KERNEL); 1931 + if (!protkey) { 1932 + kfree(apqns); 1933 + kfree_sensitive(kkey); 1934 + return -ENOMEM; 1935 + } 1936 + rc = key2protkey(apqns, 
ktp.apqn_entries, kkey, ktp.keylen, 1937 + protkey, &protkeylen, &ktp.pkeytype); 1938 + pr_debug("key2protkey()=%d\n", rc); 1939 + kfree(apqns); 1940 + kfree_sensitive(kkey); 1941 + if (rc) { 1942 + kfree_sensitive(protkey); 1943 + return rc; 1944 + } 1945 + if (ktp.pkey && ktp.pkeylen) { 1946 + if (protkeylen > ktp.pkeylen) { 1947 + kfree_sensitive(protkey); 1948 + return -EINVAL; 1949 + } 1950 + if (copy_to_user(ktp.pkey, protkey, protkeylen)) { 1951 + kfree_sensitive(protkey); 1952 + return -EFAULT; 1953 + } 1954 + } 1955 + kfree_sensitive(protkey); 1956 + ktp.pkeylen = protkeylen; 1957 + if (copy_to_user(utp, &ktp, sizeof(ktp))) 1958 + return -EFAULT; 1959 + 1960 + return 0; 1961 + } 1962 + 91 1963 static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, 92 1964 unsigned long arg) 93 1965 { 94 1966 int rc; 95 1967 96 1968 switch (cmd) { 97 - case PKEY_GENSECK: { 98 - struct pkey_genseck __user *ugs = (void __user *)arg; 99 - struct pkey_genseck kgs; 100 - 101 - if (copy_from_user(&kgs, ugs, sizeof(kgs))) 102 - return -EFAULT; 103 - rc = cca_genseckey(kgs.cardnr, kgs.domain, 104 - kgs.keytype, kgs.seckey.seckey); 105 - pr_debug("%s cca_genseckey()=%d\n", __func__, rc); 106 - if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) 107 - rc = -EFAULT; 108 - memzero_explicit(&kgs, sizeof(kgs)); 1969 + case PKEY_GENSECK: 1970 + rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg); 109 1971 break; 110 - } 111 - case PKEY_CLR2SECK: { 112 - struct pkey_clr2seck __user *ucs = (void __user *)arg; 113 - struct pkey_clr2seck kcs; 114 - 115 - if (copy_from_user(&kcs, ucs, sizeof(kcs))) 116 - return -EFAULT; 117 - rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, 118 - kcs.clrkey.clrkey, kcs.seckey.seckey); 119 - pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc); 120 - if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) 121 - rc = -EFAULT; 122 - memzero_explicit(&kcs, sizeof(kcs)); 1972 + case PKEY_CLR2SECK: 1973 + rc = pkey_ioctl_clr2seck((struct 
pkey_clr2seck __user *)arg); 123 1974 break; 124 - } 125 - case PKEY_SEC2PROTK: { 126 - struct pkey_sec2protk __user *usp = (void __user *)arg; 127 - struct pkey_sec2protk ksp; 128 - 129 - if (copy_from_user(&ksp, usp, sizeof(ksp))) 130 - return -EFAULT; 131 - ksp.protkey.len = sizeof(ksp.protkey.protkey); 132 - rc = cca_sec2protkey(ksp.cardnr, ksp.domain, 133 - ksp.seckey.seckey, ksp.protkey.protkey, 134 - &ksp.protkey.len, &ksp.protkey.type); 135 - pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc); 136 - if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 137 - rc = -EFAULT; 138 - memzero_explicit(&ksp, sizeof(ksp)); 1975 + case PKEY_SEC2PROTK: 1976 + rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg); 139 1977 break; 140 - } 141 - case PKEY_CLR2PROTK: { 142 - struct pkey_clr2protk __user *ucp = (void __user *)arg; 143 - struct pkey_clr2protk kcp; 144 - 145 - if (copy_from_user(&kcp, ucp, sizeof(kcp))) 146 - return -EFAULT; 147 - kcp.protkey.len = sizeof(kcp.protkey.protkey); 148 - rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, 149 - kcp.protkey.protkey, 150 - &kcp.protkey.len, &kcp.protkey.type); 151 - pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc); 152 - if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp))) 153 - rc = -EFAULT; 154 - memzero_explicit(&kcp, sizeof(kcp)); 1978 + case PKEY_CLR2PROTK: 1979 + rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg); 155 1980 break; 156 - } 157 - case PKEY_FINDCARD: { 158 - struct pkey_findcard __user *ufc = (void __user *)arg; 159 - struct pkey_findcard kfc; 160 - 161 - if (copy_from_user(&kfc, ufc, sizeof(kfc))) 162 - return -EFAULT; 163 - rc = cca_findcard(kfc.seckey.seckey, 164 - &kfc.cardnr, &kfc.domain, 1); 165 - pr_debug("%s cca_findcard()=%d\n", __func__, rc); 166 - if (rc < 0) 167 - break; 168 - if (copy_to_user(ufc, &kfc, sizeof(kfc))) 169 - return -EFAULT; 1981 + case PKEY_FINDCARD: 1982 + rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg); 170 1983 break; 171 - } 172 - 
case PKEY_SKEY2PKEY: { 173 - struct pkey_skey2pkey __user *usp = (void __user *)arg; 174 - struct pkey_skey2pkey ksp; 175 - 176 - if (copy_from_user(&ksp, usp, sizeof(ksp))) 177 - return -EFAULT; 178 - ksp.protkey.len = sizeof(ksp.protkey.protkey); 179 - rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, 180 - &ksp.protkey.len, &ksp.protkey.type); 181 - pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc); 182 - if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 183 - rc = -EFAULT; 184 - memzero_explicit(&ksp, sizeof(ksp)); 1984 + case PKEY_SKEY2PKEY: 1985 + rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg); 185 1986 break; 186 - } 187 - case PKEY_VERIFYKEY: { 188 - struct pkey_verifykey __user *uvk = (void __user *)arg; 189 - struct pkey_verifykey kvk; 190 - 191 - if (copy_from_user(&kvk, uvk, sizeof(kvk))) 192 - return -EFAULT; 193 - rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, 194 - &kvk.keysize, &kvk.attributes); 195 - pr_debug("%s pkey_verifykey()=%d\n", __func__, rc); 196 - if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk))) 197 - rc = -EFAULT; 198 - memzero_explicit(&kvk, sizeof(kvk)); 1987 + case PKEY_VERIFYKEY: 1988 + rc = pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg); 199 1989 break; 200 - } 201 - case PKEY_GENPROTK: { 202 - struct pkey_genprotk __user *ugp = (void __user *)arg; 203 - struct pkey_genprotk kgp; 204 - 205 - if (copy_from_user(&kgp, ugp, sizeof(kgp))) 206 - return -EFAULT; 207 - kgp.protkey.len = sizeof(kgp.protkey.protkey); 208 - rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, 209 - &kgp.protkey.len, &kgp.protkey.type); 210 - pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc); 211 - if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) 212 - rc = -EFAULT; 213 - memzero_explicit(&kgp, sizeof(kgp)); 1990 + case PKEY_GENPROTK: 1991 + rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg); 214 1992 break; 215 - } 216 - case PKEY_VERIFYPROTK: { 217 - struct pkey_verifyprotk __user *uvp = (void 
__user *)arg; 218 - struct pkey_verifyprotk kvp; 219 - 220 - if (copy_from_user(&kvp, uvp, sizeof(kvp))) 221 - return -EFAULT; 222 - rc = pkey_verifyprotkey(kvp.protkey.protkey, 223 - kvp.protkey.len, kvp.protkey.type); 224 - pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc); 225 - memzero_explicit(&kvp, sizeof(kvp)); 1993 + case PKEY_VERIFYPROTK: 1994 + rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg); 226 1995 break; 227 - } 228 - case PKEY_KBLOB2PROTK: { 229 - struct pkey_kblob2pkey __user *utp = (void __user *)arg; 230 - struct pkey_kblob2pkey ktp; 231 - u8 *kkey; 232 - 233 - if (copy_from_user(&ktp, utp, sizeof(ktp))) 234 - return -EFAULT; 235 - kkey = _copy_key_from_user(ktp.key, ktp.keylen); 236 - if (IS_ERR(kkey)) 237 - return PTR_ERR(kkey); 238 - ktp.protkey.len = sizeof(ktp.protkey.protkey); 239 - rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, 240 - &ktp.protkey.len, &ktp.protkey.type); 241 - pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc); 242 - kfree_sensitive(kkey); 243 - if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) 244 - rc = -EFAULT; 245 - memzero_explicit(&ktp, sizeof(ktp)); 1996 + case PKEY_KBLOB2PROTK: 1997 + rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg); 246 1998 break; 247 - } 248 - case PKEY_GENSECK2: { 249 - struct pkey_genseck2 __user *ugs = (void __user *)arg; 250 - size_t klen = KEYBLOBBUFSIZE; 251 - struct pkey_genseck2 kgs; 252 - struct pkey_apqn *apqns; 253 - u8 *kkey; 254 - 255 - if (copy_from_user(&kgs, ugs, sizeof(kgs))) 256 - return -EFAULT; 257 - apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries); 258 - if (IS_ERR(apqns)) 259 - return PTR_ERR(apqns); 260 - kkey = kzalloc(klen, GFP_KERNEL); 261 - if (!kkey) { 262 - kfree(apqns); 263 - return -ENOMEM; 264 - } 265 - rc = pkey_genseckey2(apqns, kgs.apqn_entries, 266 - kgs.type, kgs.size, kgs.keygenflags, 267 - kkey, &klen); 268 - pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc); 269 - kfree(apqns); 270 - if 
(rc) { 271 - kfree_sensitive(kkey); 272 - break; 273 - } 274 - if (kgs.key) { 275 - if (kgs.keylen < klen) { 276 - kfree_sensitive(kkey); 277 - return -EINVAL; 278 - } 279 - if (copy_to_user(kgs.key, kkey, klen)) { 280 - kfree_sensitive(kkey); 281 - return -EFAULT; 282 - } 283 - } 284 - kgs.keylen = klen; 285 - if (copy_to_user(ugs, &kgs, sizeof(kgs))) 286 - rc = -EFAULT; 287 - kfree_sensitive(kkey); 1999 + case PKEY_GENSECK2: 2000 + rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg); 288 2001 break; 289 - } 290 - case PKEY_CLR2SECK2: { 291 - struct pkey_clr2seck2 __user *ucs = (void __user *)arg; 292 - size_t klen = KEYBLOBBUFSIZE; 293 - struct pkey_clr2seck2 kcs; 294 - struct pkey_apqn *apqns; 295 - u8 *kkey; 296 - 297 - if (copy_from_user(&kcs, ucs, sizeof(kcs))) 298 - return -EFAULT; 299 - apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries); 300 - if (IS_ERR(apqns)) { 301 - memzero_explicit(&kcs, sizeof(kcs)); 302 - return PTR_ERR(apqns); 303 - } 304 - kkey = kzalloc(klen, GFP_KERNEL); 305 - if (!kkey) { 306 - kfree(apqns); 307 - memzero_explicit(&kcs, sizeof(kcs)); 308 - return -ENOMEM; 309 - } 310 - rc = pkey_clr2seckey2(apqns, kcs.apqn_entries, 311 - kcs.type, kcs.size, kcs.keygenflags, 312 - kcs.clrkey.clrkey, kkey, &klen); 313 - pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc); 314 - kfree(apqns); 315 - if (rc) { 316 - kfree_sensitive(kkey); 317 - memzero_explicit(&kcs, sizeof(kcs)); 318 - break; 319 - } 320 - if (kcs.key) { 321 - if (kcs.keylen < klen) { 322 - kfree_sensitive(kkey); 323 - memzero_explicit(&kcs, sizeof(kcs)); 324 - return -EINVAL; 325 - } 326 - if (copy_to_user(kcs.key, kkey, klen)) { 327 - kfree_sensitive(kkey); 328 - memzero_explicit(&kcs, sizeof(kcs)); 329 - return -EFAULT; 330 - } 331 - } 332 - kcs.keylen = klen; 333 - if (copy_to_user(ucs, &kcs, sizeof(kcs))) 334 - rc = -EFAULT; 335 - memzero_explicit(&kcs, sizeof(kcs)); 336 - kfree_sensitive(kkey); 2002 + case PKEY_CLR2SECK2: 2003 + rc = 
pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg); 337 2004 break; 338 - } 339 - case PKEY_VERIFYKEY2: { 340 - struct pkey_verifykey2 __user *uvk = (void __user *)arg; 341 - struct pkey_verifykey2 kvk; 342 - u8 *kkey; 343 - 344 - if (copy_from_user(&kvk, uvk, sizeof(kvk))) 345 - return -EFAULT; 346 - kkey = _copy_key_from_user(kvk.key, kvk.keylen); 347 - if (IS_ERR(kkey)) 348 - return PTR_ERR(kkey); 349 - rc = pkey_verifykey2(kkey, kvk.keylen, 350 - &kvk.cardnr, &kvk.domain, 351 - &kvk.type, &kvk.size, &kvk.flags); 352 - pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc); 353 - kfree_sensitive(kkey); 354 - if (rc) 355 - break; 356 - if (copy_to_user(uvk, &kvk, sizeof(kvk))) 357 - return -EFAULT; 2005 + case PKEY_VERIFYKEY2: 2006 + rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg); 358 2007 break; 359 - } 360 - case PKEY_KBLOB2PROTK2: { 361 - struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; 362 - struct pkey_apqn *apqns = NULL; 363 - struct pkey_kblob2pkey2 ktp; 364 - u8 *kkey; 365 - 366 - if (copy_from_user(&ktp, utp, sizeof(ktp))) 367 - return -EFAULT; 368 - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); 369 - if (IS_ERR(apqns)) 370 - return PTR_ERR(apqns); 371 - kkey = _copy_key_from_user(ktp.key, ktp.keylen); 372 - if (IS_ERR(kkey)) { 373 - kfree(apqns); 374 - return PTR_ERR(kkey); 375 - } 376 - ktp.protkey.len = sizeof(ktp.protkey.protkey); 377 - rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, 378 - kkey, ktp.keylen, 379 - ktp.protkey.protkey, &ktp.protkey.len, 380 - &ktp.protkey.type); 381 - pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); 382 - kfree(apqns); 383 - kfree_sensitive(kkey); 384 - if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) 385 - rc = -EFAULT; 386 - memzero_explicit(&ktp, sizeof(ktp)); 2008 + case PKEY_KBLOB2PROTK2: 2009 + rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg); 387 2010 break; 388 - } 389 - case PKEY_APQNS4K: { 390 - struct pkey_apqns4key __user *uak = (void 
__user *)arg; 391 - struct pkey_apqn *apqns = NULL; 392 - struct pkey_apqns4key kak; 393 - size_t nr_apqns, len; 394 - u8 *kkey; 395 - 396 - if (copy_from_user(&kak, uak, sizeof(kak))) 397 - return -EFAULT; 398 - nr_apqns = kak.apqn_entries; 399 - if (nr_apqns) { 400 - apqns = kmalloc_array(nr_apqns, 401 - sizeof(struct pkey_apqn), 402 - GFP_KERNEL); 403 - if (!apqns) 404 - return -ENOMEM; 405 - } 406 - kkey = _copy_key_from_user(kak.key, kak.keylen); 407 - if (IS_ERR(kkey)) { 408 - kfree(apqns); 409 - return PTR_ERR(kkey); 410 - } 411 - rc = pkey_apqns4key(kkey, kak.keylen, kak.flags, 412 - apqns, &nr_apqns); 413 - pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc); 414 - kfree_sensitive(kkey); 415 - if (rc && rc != -ENOSPC) { 416 - kfree(apqns); 417 - break; 418 - } 419 - if (!rc && kak.apqns) { 420 - if (nr_apqns > kak.apqn_entries) { 421 - kfree(apqns); 422 - return -EINVAL; 423 - } 424 - len = nr_apqns * sizeof(struct pkey_apqn); 425 - if (len) { 426 - if (copy_to_user(kak.apqns, apqns, len)) { 427 - kfree(apqns); 428 - return -EFAULT; 429 - } 430 - } 431 - } 432 - kak.apqn_entries = nr_apqns; 433 - if (copy_to_user(uak, &kak, sizeof(kak))) 434 - rc = -EFAULT; 435 - kfree(apqns); 2011 + case PKEY_APQNS4K: 2012 + rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg); 436 2013 break; 437 - } 438 - case PKEY_APQNS4KT: { 439 - struct pkey_apqns4keytype __user *uat = (void __user *)arg; 440 - struct pkey_apqn *apqns = NULL; 441 - struct pkey_apqns4keytype kat; 442 - size_t nr_apqns, len; 443 - 444 - if (copy_from_user(&kat, uat, sizeof(kat))) 445 - return -EFAULT; 446 - nr_apqns = kat.apqn_entries; 447 - if (nr_apqns) { 448 - apqns = kmalloc_array(nr_apqns, 449 - sizeof(struct pkey_apqn), 450 - GFP_KERNEL); 451 - if (!apqns) 452 - return -ENOMEM; 453 - } 454 - rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, 455 - kat.flags, apqns, &nr_apqns); 456 - pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc); 457 - if (rc && rc != -ENOSPC) { 458 - 
kfree(apqns); 459 - break; 460 - } 461 - if (!rc && kat.apqns) { 462 - if (nr_apqns > kat.apqn_entries) { 463 - kfree(apqns); 464 - return -EINVAL; 465 - } 466 - len = nr_apqns * sizeof(struct pkey_apqn); 467 - if (len) { 468 - if (copy_to_user(kat.apqns, apqns, len)) { 469 - kfree(apqns); 470 - return -EFAULT; 471 - } 472 - } 473 - } 474 - kat.apqn_entries = nr_apqns; 475 - if (copy_to_user(uat, &kat, sizeof(kat))) 476 - rc = -EFAULT; 477 - kfree(apqns); 2014 + case PKEY_APQNS4KT: 2015 + rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg); 478 2016 break; 479 - } 480 - case PKEY_KBLOB2PROTK3: { 481 - struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; 482 - u32 protkeylen = PROTKEYBLOBBUFSIZE; 483 - struct pkey_apqn *apqns = NULL; 484 - struct pkey_kblob2pkey3 ktp; 485 - u8 *kkey, *protkey; 486 - 487 - if (copy_from_user(&ktp, utp, sizeof(ktp))) 488 - return -EFAULT; 489 - apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries); 490 - if (IS_ERR(apqns)) 491 - return PTR_ERR(apqns); 492 - kkey = _copy_key_from_user(ktp.key, ktp.keylen); 493 - if (IS_ERR(kkey)) { 494 - kfree(apqns); 495 - return PTR_ERR(kkey); 496 - } 497 - protkey = kmalloc(protkeylen, GFP_KERNEL); 498 - if (!protkey) { 499 - kfree(apqns); 500 - kfree_sensitive(kkey); 501 - return -ENOMEM; 502 - } 503 - rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, 504 - kkey, ktp.keylen, 505 - protkey, &protkeylen, &ktp.pkeytype); 506 - pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); 507 - kfree(apqns); 508 - kfree_sensitive(kkey); 509 - if (rc) { 510 - kfree_sensitive(protkey); 511 - break; 512 - } 513 - if (ktp.pkey && ktp.pkeylen) { 514 - if (protkeylen > ktp.pkeylen) { 515 - kfree_sensitive(protkey); 516 - return -EINVAL; 517 - } 518 - if (copy_to_user(ktp.pkey, protkey, protkeylen)) { 519 - kfree_sensitive(protkey); 520 - return -EFAULT; 521 - } 522 - } 523 - kfree_sensitive(protkey); 524 - ktp.pkeylen = protkeylen; 525 - if (copy_to_user(utp, &ktp, sizeof(ktp))) 526 - return 
-EFAULT; 2017 + case PKEY_KBLOB2PROTK3: 2018 + rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg); 527 2019 break; 528 - } 529 2020 default: 530 2021 /* unknown/unsupported ioctl cmd */ 531 2022 return -ENOTTY; ··· 770 1791 } 771 1792 772 1793 /* 773 - * Sysfs and file io operations 1794 + * File io operations 774 1795 */ 775 - 776 - /* 777 - * Sysfs attribute read function for all protected key binary attributes. 778 - * The implementation can not deal with partial reads, because a new random 779 - * protected key blob is generated with each read. In case of partial reads 780 - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 781 - */ 782 - static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, 783 - loff_t off, size_t count) 784 - { 785 - struct protaeskeytoken protkeytoken; 786 - struct pkey_protkey protkey; 787 - int rc; 788 - 789 - if (off != 0 || count < sizeof(protkeytoken)) 790 - return -EINVAL; 791 - if (is_xts) 792 - if (count < 2 * sizeof(protkeytoken)) 793 - return -EINVAL; 794 - 795 - memset(&protkeytoken, 0, sizeof(protkeytoken)); 796 - protkeytoken.type = TOKTYPE_NON_CCA; 797 - protkeytoken.version = TOKVER_PROTECTED_KEY; 798 - protkeytoken.keytype = keytype; 799 - 800 - protkey.len = sizeof(protkey.protkey); 801 - rc = pkey_genprotkey(protkeytoken.keytype, 802 - protkey.protkey, &protkey.len, &protkey.type); 803 - if (rc) 804 - return rc; 805 - 806 - protkeytoken.len = protkey.len; 807 - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); 808 - 809 - memcpy(buf, &protkeytoken, sizeof(protkeytoken)); 810 - 811 - if (is_xts) { 812 - /* xts needs a second protected key, reuse protkey struct */ 813 - protkey.len = sizeof(protkey.protkey); 814 - rc = pkey_genprotkey(protkeytoken.keytype, 815 - protkey.protkey, &protkey.len, &protkey.type); 816 - if (rc) 817 - return rc; 818 - 819 - protkeytoken.len = protkey.len; 820 - memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); 821 - 822 
- memcpy(buf + sizeof(protkeytoken), &protkeytoken, 823 - sizeof(protkeytoken)); 824 - 825 - return 2 * sizeof(protkeytoken); 826 - } 827 - 828 - return sizeof(protkeytoken); 829 - } 830 - 831 - static ssize_t protkey_aes_128_read(struct file *filp, 832 - struct kobject *kobj, 833 - struct bin_attribute *attr, 834 - char *buf, loff_t off, 835 - size_t count) 836 - { 837 - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, 838 - off, count); 839 - } 840 - 841 - static ssize_t protkey_aes_192_read(struct file *filp, 842 - struct kobject *kobj, 843 - struct bin_attribute *attr, 844 - char *buf, loff_t off, 845 - size_t count) 846 - { 847 - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, 848 - off, count); 849 - } 850 - 851 - static ssize_t protkey_aes_256_read(struct file *filp, 852 - struct kobject *kobj, 853 - struct bin_attribute *attr, 854 - char *buf, loff_t off, 855 - size_t count) 856 - { 857 - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, 858 - off, count); 859 - } 860 - 861 - static ssize_t protkey_aes_128_xts_read(struct file *filp, 862 - struct kobject *kobj, 863 - struct bin_attribute *attr, 864 - char *buf, loff_t off, 865 - size_t count) 866 - { 867 - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, 868 - off, count); 869 - } 870 - 871 - static ssize_t protkey_aes_256_xts_read(struct file *filp, 872 - struct kobject *kobj, 873 - struct bin_attribute *attr, 874 - char *buf, loff_t off, 875 - size_t count) 876 - { 877 - return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, 878 - off, count); 879 - } 880 - 881 - static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); 882 - static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); 883 - static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); 884 - static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); 885 - static BIN_ATTR_RO(protkey_aes_256_xts, 2 * 
sizeof(struct protaeskeytoken)); 886 - 887 - static struct bin_attribute *protkey_attrs[] = { 888 - &bin_attr_protkey_aes_128, 889 - &bin_attr_protkey_aes_192, 890 - &bin_attr_protkey_aes_256, 891 - &bin_attr_protkey_aes_128_xts, 892 - &bin_attr_protkey_aes_256_xts, 893 - NULL 894 - }; 895 - 896 - static struct attribute_group protkey_attr_group = { 897 - .name = "protkey", 898 - .bin_attrs = protkey_attrs, 899 - }; 900 - 901 - /* 902 - * Sysfs attribute read function for all secure key ccadata binary attributes. 903 - * The implementation can not deal with partial reads, because a new random 904 - * protected key blob is generated with each read. In case of partial reads 905 - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 906 - */ 907 - static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, 908 - loff_t off, size_t count) 909 - { 910 - struct pkey_seckey *seckey = (struct pkey_seckey *)buf; 911 - int rc; 912 - 913 - if (off != 0 || count < sizeof(struct secaeskeytoken)) 914 - return -EINVAL; 915 - if (is_xts) 916 - if (count < 2 * sizeof(struct secaeskeytoken)) 917 - return -EINVAL; 918 - 919 - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); 920 - if (rc) 921 - return rc; 922 - 923 - if (is_xts) { 924 - seckey++; 925 - rc = cca_genseckey(-1, -1, keytype, seckey->seckey); 926 - if (rc) 927 - return rc; 928 - 929 - return 2 * sizeof(struct secaeskeytoken); 930 - } 931 - 932 - return sizeof(struct secaeskeytoken); 933 - } 934 - 935 - static ssize_t ccadata_aes_128_read(struct file *filp, 936 - struct kobject *kobj, 937 - struct bin_attribute *attr, 938 - char *buf, loff_t off, 939 - size_t count) 940 - { 941 - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, 942 - off, count); 943 - } 944 - 945 - static ssize_t ccadata_aes_192_read(struct file *filp, 946 - struct kobject *kobj, 947 - struct bin_attribute *attr, 948 - char *buf, loff_t off, 949 - size_t count) 950 - { 951 - return 
pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, 952 - off, count); 953 - } 954 - 955 - static ssize_t ccadata_aes_256_read(struct file *filp, 956 - struct kobject *kobj, 957 - struct bin_attribute *attr, 958 - char *buf, loff_t off, 959 - size_t count) 960 - { 961 - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, 962 - off, count); 963 - } 964 - 965 - static ssize_t ccadata_aes_128_xts_read(struct file *filp, 966 - struct kobject *kobj, 967 - struct bin_attribute *attr, 968 - char *buf, loff_t off, 969 - size_t count) 970 - { 971 - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, 972 - off, count); 973 - } 974 - 975 - static ssize_t ccadata_aes_256_xts_read(struct file *filp, 976 - struct kobject *kobj, 977 - struct bin_attribute *attr, 978 - char *buf, loff_t off, 979 - size_t count) 980 - { 981 - return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, 982 - off, count); 983 - } 984 - 985 - static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); 986 - static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); 987 - static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); 988 - static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); 989 - static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); 990 - 991 - static struct bin_attribute *ccadata_attrs[] = { 992 - &bin_attr_ccadata_aes_128, 993 - &bin_attr_ccadata_aes_192, 994 - &bin_attr_ccadata_aes_256, 995 - &bin_attr_ccadata_aes_128_xts, 996 - &bin_attr_ccadata_aes_256_xts, 997 - NULL 998 - }; 999 - 1000 - static struct attribute_group ccadata_attr_group = { 1001 - .name = "ccadata", 1002 - .bin_attrs = ccadata_attrs, 1003 - }; 1004 - 1005 - #define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) 1006 - 1007 - /* 1008 - * Sysfs attribute read function for all secure key ccacipher binary attributes. 
1009 - * The implementation can not deal with partial reads, because a new random 1010 - * secure key blob is generated with each read. In case of partial reads 1011 - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 1012 - */ 1013 - static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, 1014 - bool is_xts, char *buf, loff_t off, 1015 - size_t count) 1016 - { 1017 - size_t keysize = CCACIPHERTOKENSIZE; 1018 - u32 nr_apqns, *apqns = NULL; 1019 - int i, rc, card, dom; 1020 - 1021 - if (off != 0 || count < CCACIPHERTOKENSIZE) 1022 - return -EINVAL; 1023 - if (is_xts) 1024 - if (count < 2 * CCACIPHERTOKENSIZE) 1025 - return -EINVAL; 1026 - 1027 - /* build a list of apqns able to generate an cipher key */ 1028 - rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 1029 - ZCRYPT_CEX6, 0, 0, 0, 0); 1030 - if (rc) 1031 - return rc; 1032 - 1033 - memset(buf, 0, is_xts ? 2 * keysize : keysize); 1034 - 1035 - /* simple try all apqns from the list */ 1036 - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 1037 - card = apqns[i] >> 16; 1038 - dom = apqns[i] & 0xFFFF; 1039 - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); 1040 - if (rc == 0) 1041 - break; 1042 - } 1043 - if (rc) 1044 - return rc; 1045 - 1046 - if (is_xts) { 1047 - keysize = CCACIPHERTOKENSIZE; 1048 - buf += CCACIPHERTOKENSIZE; 1049 - rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize); 1050 - if (rc == 0) 1051 - return 2 * CCACIPHERTOKENSIZE; 1052 - } 1053 - 1054 - return CCACIPHERTOKENSIZE; 1055 - } 1056 - 1057 - static ssize_t ccacipher_aes_128_read(struct file *filp, 1058 - struct kobject *kobj, 1059 - struct bin_attribute *attr, 1060 - char *buf, loff_t off, 1061 - size_t count) 1062 - { 1063 - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, 1064 - off, count); 1065 - } 1066 - 1067 - static ssize_t ccacipher_aes_192_read(struct file *filp, 1068 - struct kobject *kobj, 1069 - struct bin_attribute *attr, 1070 - char *buf, loff_t off, 1071 - 
size_t count) 1072 - { 1073 - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, 1074 - off, count); 1075 - } 1076 - 1077 - static ssize_t ccacipher_aes_256_read(struct file *filp, 1078 - struct kobject *kobj, 1079 - struct bin_attribute *attr, 1080 - char *buf, loff_t off, 1081 - size_t count) 1082 - { 1083 - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, 1084 - off, count); 1085 - } 1086 - 1087 - static ssize_t ccacipher_aes_128_xts_read(struct file *filp, 1088 - struct kobject *kobj, 1089 - struct bin_attribute *attr, 1090 - char *buf, loff_t off, 1091 - size_t count) 1092 - { 1093 - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, 1094 - off, count); 1095 - } 1096 - 1097 - static ssize_t ccacipher_aes_256_xts_read(struct file *filp, 1098 - struct kobject *kobj, 1099 - struct bin_attribute *attr, 1100 - char *buf, loff_t off, 1101 - size_t count) 1102 - { 1103 - return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, 1104 - off, count); 1105 - } 1106 - 1107 - static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); 1108 - static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); 1109 - static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); 1110 - static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); 1111 - static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); 1112 - 1113 - static struct bin_attribute *ccacipher_attrs[] = { 1114 - &bin_attr_ccacipher_aes_128, 1115 - &bin_attr_ccacipher_aes_192, 1116 - &bin_attr_ccacipher_aes_256, 1117 - &bin_attr_ccacipher_aes_128_xts, 1118 - &bin_attr_ccacipher_aes_256_xts, 1119 - NULL 1120 - }; 1121 - 1122 - static struct attribute_group ccacipher_attr_group = { 1123 - .name = "ccacipher", 1124 - .bin_attrs = ccacipher_attrs, 1125 - }; 1126 - 1127 - /* 1128 - * Sysfs attribute read function for all ep11 aes key binary attributes. 
1129 - * The implementation can not deal with partial reads, because a new random 1130 - * secure key blob is generated with each read. In case of partial reads 1131 - * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 1132 - * This function and the sysfs attributes using it provide EP11 key blobs 1133 - * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently 1134 - * 336 bytes. 1135 - */ 1136 - static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits, 1137 - bool is_xts, char *buf, loff_t off, 1138 - size_t count) 1139 - { 1140 - size_t keysize = MAXEP11AESKEYBLOBSIZE; 1141 - u32 nr_apqns, *apqns = NULL; 1142 - int i, rc, card, dom; 1143 - 1144 - if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) 1145 - return -EINVAL; 1146 - if (is_xts) 1147 - if (count < 2 * MAXEP11AESKEYBLOBSIZE) 1148 - return -EINVAL; 1149 - 1150 - /* build a list of apqns able to generate an cipher key */ 1151 - rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 1152 - ZCRYPT_CEX7, 1153 - ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4, 1154 - NULL); 1155 - if (rc) 1156 - return rc; 1157 - 1158 - memset(buf, 0, is_xts ? 
2 * keysize : keysize); 1159 - 1160 - /* simple try all apqns from the list */ 1161 - for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 1162 - card = apqns[i] >> 16; 1163 - dom = apqns[i] & 0xFFFF; 1164 - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, 1165 - PKEY_TYPE_EP11_AES); 1166 - if (rc == 0) 1167 - break; 1168 - } 1169 - if (rc) 1170 - return rc; 1171 - 1172 - if (is_xts) { 1173 - keysize = MAXEP11AESKEYBLOBSIZE; 1174 - buf += MAXEP11AESKEYBLOBSIZE; 1175 - rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize, 1176 - PKEY_TYPE_EP11_AES); 1177 - if (rc == 0) 1178 - return 2 * MAXEP11AESKEYBLOBSIZE; 1179 - } 1180 - 1181 - return MAXEP11AESKEYBLOBSIZE; 1182 - } 1183 - 1184 - static ssize_t ep11_aes_128_read(struct file *filp, 1185 - struct kobject *kobj, 1186 - struct bin_attribute *attr, 1187 - char *buf, loff_t off, 1188 - size_t count) 1189 - { 1190 - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf, 1191 - off, count); 1192 - } 1193 - 1194 - static ssize_t ep11_aes_192_read(struct file *filp, 1195 - struct kobject *kobj, 1196 - struct bin_attribute *attr, 1197 - char *buf, loff_t off, 1198 - size_t count) 1199 - { 1200 - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf, 1201 - off, count); 1202 - } 1203 - 1204 - static ssize_t ep11_aes_256_read(struct file *filp, 1205 - struct kobject *kobj, 1206 - struct bin_attribute *attr, 1207 - char *buf, loff_t off, 1208 - size_t count) 1209 - { 1210 - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf, 1211 - off, count); 1212 - } 1213 - 1214 - static ssize_t ep11_aes_128_xts_read(struct file *filp, 1215 - struct kobject *kobj, 1216 - struct bin_attribute *attr, 1217 - char *buf, loff_t off, 1218 - size_t count) 1219 - { 1220 - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf, 1221 - off, count); 1222 - } 1223 - 1224 - static ssize_t ep11_aes_256_xts_read(struct file *filp, 1225 - struct kobject *kobj, 1226 - struct bin_attribute *attr, 1227 - char *buf, loff_t 
off, 1228 - size_t count) 1229 - { 1230 - return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf, 1231 - off, count); 1232 - } 1233 - 1234 - static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE); 1235 - static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE); 1236 - static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE); 1237 - static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE); 1238 - static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE); 1239 - 1240 - static struct bin_attribute *ep11_attrs[] = { 1241 - &bin_attr_ep11_aes_128, 1242 - &bin_attr_ep11_aes_192, 1243 - &bin_attr_ep11_aes_256, 1244 - &bin_attr_ep11_aes_128_xts, 1245 - &bin_attr_ep11_aes_256_xts, 1246 - NULL 1247 - }; 1248 - 1249 - static struct attribute_group ep11_attr_group = { 1250 - .name = "ep11", 1251 - .bin_attrs = ep11_attrs, 1252 - }; 1253 - 1254 - static const struct attribute_group *pkey_attr_groups[] = { 1255 - &protkey_attr_group, 1256 - &ccadata_attr_group, 1257 - &ccacipher_attr_group, 1258 - &ep11_attr_group, 1259 - NULL, 1260 - }; 1261 1796 1262 1797 static const struct file_operations pkey_fops = { 1263 1798 .owner = THIS_MODULE, ··· 788 2295 .groups = pkey_attr_groups, 789 2296 }; 790 2297 791 - /* 792 - * Module init 793 - */ 794 - static int __init pkey_init(void) 2298 + int __init pkey_api_init(void) 795 2299 { 796 - cpacf_mask_t func_mask; 797 - 798 - /* 799 - * The pckmo instruction should be available - even if we don't 800 - * actually invoke it. This instruction comes with MSA 3 which 801 - * is also the minimum level for the kmc instructions which 802 - * are able to work with protected keys. 
803 - */ 804 - if (!cpacf_query(CPACF_PCKMO, &func_mask)) 805 - return -ENODEV; 806 - 807 - /* check for kmc instructions available */ 808 - if (!cpacf_query(CPACF_KMC, &func_mask)) 809 - return -ENODEV; 810 - if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) || 811 - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) || 812 - !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256)) 813 - return -ENODEV; 814 - 815 - pkey_debug_init(); 816 - 2300 + /* register as a misc device */ 817 2301 return misc_register(&pkey_dev); 818 2302 } 819 2303 820 - /* 821 - * Module exit 822 - */ 823 - static void __exit pkey_exit(void) 2304 + void __exit pkey_api_exit(void) 824 2305 { 825 2306 misc_deregister(&pkey_dev); 826 - pkey_debug_exit(); 827 2307 } 828 - 829 - module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); 830 - module_exit(pkey_exit);
+362
drivers/s390/crypto/pkey_base.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * pkey base: debug feature, pkey handler registry 4 + * 5 + * Copyright IBM Corp. 2024 6 + */ 7 + 8 + #define KMSG_COMPONENT "pkey" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/cpufeature.h> 12 + #include <linux/init.h> 13 + #include <linux/list.h> 14 + #include <linux/module.h> 15 + #include <linux/rculist.h> 16 + 17 + #include "pkey_base.h" 18 + 19 + MODULE_LICENSE("GPL"); 20 + MODULE_AUTHOR("IBM Corporation"); 21 + MODULE_DESCRIPTION("s390 protected key base and api"); 22 + 23 + /* 24 + * pkey debug feature 25 + */ 26 + debug_info_t *pkey_dbf_info; 27 + EXPORT_SYMBOL(pkey_dbf_info); 28 + 29 + /* 30 + * pkey handler registry 31 + */ 32 + 33 + static DEFINE_SPINLOCK(handler_list_write_lock); 34 + static LIST_HEAD(handler_list); 35 + 36 + int pkey_handler_register(struct pkey_handler *handler) 37 + { 38 + const struct pkey_handler *h; 39 + 40 + if (!handler || 41 + !handler->is_supported_key || 42 + !handler->is_supported_keytype) 43 + return -EINVAL; 44 + 45 + if (!try_module_get(handler->module)) 46 + return -ENXIO; 47 + 48 + spin_lock(&handler_list_write_lock); 49 + 50 + rcu_read_lock(); 51 + list_for_each_entry_rcu(h, &handler_list, list) { 52 + if (h == handler) { 53 + rcu_read_unlock(); 54 + spin_unlock(&handler_list_write_lock); 55 + module_put(handler->module); 56 + return -EEXIST; 57 + } 58 + } 59 + rcu_read_unlock(); 60 + 61 + list_add_rcu(&handler->list, &handler_list); 62 + spin_unlock(&handler_list_write_lock); 63 + synchronize_rcu(); 64 + 65 + module_put(handler->module); 66 + 67 + PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__, 68 + handler->name ?: "<no name>"); 69 + 70 + return 0; 71 + } 72 + EXPORT_SYMBOL(pkey_handler_register); 73 + 74 + int pkey_handler_unregister(struct pkey_handler *handler) 75 + { 76 + spin_lock(&handler_list_write_lock); 77 + list_del_rcu(&handler->list); 78 + INIT_LIST_HEAD_RCU(&handler->list); 79 + 
spin_unlock(&handler_list_write_lock); 80 + synchronize_rcu(); 81 + 82 + PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__, 83 + handler->name ?: "<no name>"); 84 + 85 + return 0; 86 + } 87 + EXPORT_SYMBOL(pkey_handler_unregister); 88 + 89 + /* 90 + * Handler invocation functions. 91 + */ 92 + 93 + const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen) 94 + { 95 + const struct pkey_handler *h; 96 + 97 + rcu_read_lock(); 98 + list_for_each_entry_rcu(h, &handler_list, list) { 99 + if (!try_module_get(h->module)) 100 + continue; 101 + if (h->is_supported_key(key, keylen)) { 102 + rcu_read_unlock(); 103 + return h; 104 + } 105 + module_put(h->module); 106 + } 107 + rcu_read_unlock(); 108 + 109 + return NULL; 110 + } 111 + EXPORT_SYMBOL(pkey_handler_get_keybased); 112 + 113 + const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt) 114 + { 115 + const struct pkey_handler *h; 116 + 117 + rcu_read_lock(); 118 + list_for_each_entry_rcu(h, &handler_list, list) { 119 + if (!try_module_get(h->module)) 120 + continue; 121 + if (h->is_supported_keytype(kt)) { 122 + rcu_read_unlock(); 123 + return h; 124 + } 125 + module_put(h->module); 126 + } 127 + rcu_read_unlock(); 128 + 129 + return NULL; 130 + } 131 + EXPORT_SYMBOL(pkey_handler_get_keytypebased); 132 + 133 + void pkey_handler_put(const struct pkey_handler *handler) 134 + { 135 + const struct pkey_handler *h; 136 + 137 + if (!handler) 138 + return; 139 + 140 + rcu_read_lock(); 141 + list_for_each_entry_rcu(h, &handler_list, list) { 142 + if (h == handler) { 143 + module_put(h->module); 144 + break; 145 + } 146 + } 147 + rcu_read_unlock(); 148 + } 149 + EXPORT_SYMBOL(pkey_handler_put); 150 + 151 + int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 152 + const u8 *key, u32 keylen, 153 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 154 + { 155 + const struct pkey_handler *h; 156 + int rc = -ENODEV; 157 + 158 + h = 
pkey_handler_get_keybased(key, keylen); 159 + if (h && h->key_to_protkey) { 160 + rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, 161 + protkey, protkeylen, 162 + protkeytype); 163 + } 164 + pkey_handler_put(h); 165 + 166 + return rc; 167 + } 168 + EXPORT_SYMBOL(pkey_handler_key_to_protkey); 169 + 170 + /* 171 + * This handler invocation is special as there may be more than 172 + * one handler providing support for the very same key (type). 173 + * And the handler may not respond true on is_supported_key(), 174 + * so simple try and check return value here. 175 + */ 176 + int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, 177 + size_t nr_apqns, 178 + const u8 *key, u32 keylen, 179 + u8 *protkey, u32 *protkeylen, 180 + u32 *protkeytype) 181 + { 182 + const struct pkey_handler *h, *htmp[10]; 183 + int i, n = 0, rc = -ENODEV; 184 + 185 + rcu_read_lock(); 186 + list_for_each_entry_rcu(h, &handler_list, list) { 187 + if (!try_module_get(h->module)) 188 + continue; 189 + if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp)) 190 + htmp[n++] = h; 191 + else 192 + module_put(h->module); 193 + } 194 + rcu_read_unlock(); 195 + 196 + for (i = 0; i < n; i++) { 197 + h = htmp[i]; 198 + if (rc) 199 + rc = h->slowpath_key_to_protkey(apqns, nr_apqns, 200 + key, keylen, 201 + protkey, protkeylen, 202 + protkeytype); 203 + module_put(h->module); 204 + } 205 + 206 + return rc; 207 + } 208 + EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); 209 + 210 + int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 211 + u32 keytype, u32 keysubtype, 212 + u32 keybitsize, u32 flags, 213 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 214 + { 215 + const struct pkey_handler *h; 216 + int rc = -ENODEV; 217 + 218 + h = pkey_handler_get_keytypebased(keysubtype); 219 + if (h && h->gen_key) { 220 + rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, 221 + keybitsize, flags, 222 + keybuf, keybuflen, keyinfo); 223 + } 224 + pkey_handler_put(h); 225 + 226 + 
return rc; 227 + } 228 + EXPORT_SYMBOL(pkey_handler_gen_key); 229 + 230 + int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, 231 + u32 keytype, u32 keysubtype, 232 + u32 keybitsize, u32 flags, 233 + const u8 *clrkey, u32 clrkeylen, 234 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 235 + { 236 + const struct pkey_handler *h; 237 + int rc = -ENODEV; 238 + 239 + h = pkey_handler_get_keytypebased(keysubtype); 240 + if (h && h->clr_to_key) { 241 + rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, 242 + keybitsize, flags, clrkey, clrkeylen, 243 + keybuf, keybuflen, keyinfo); 244 + } 245 + pkey_handler_put(h); 246 + 247 + return rc; 248 + } 249 + EXPORT_SYMBOL(pkey_handler_clr_to_key); 250 + 251 + int pkey_handler_verify_key(const u8 *key, u32 keylen, 252 + u16 *card, u16 *dom, 253 + u32 *keytype, u32 *keybitsize, u32 *flags) 254 + { 255 + const struct pkey_handler *h; 256 + int rc = -ENODEV; 257 + 258 + h = pkey_handler_get_keybased(key, keylen); 259 + if (h && h->verify_key) { 260 + rc = h->verify_key(key, keylen, card, dom, 261 + keytype, keybitsize, flags); 262 + } 263 + pkey_handler_put(h); 264 + 265 + return rc; 266 + } 267 + EXPORT_SYMBOL(pkey_handler_verify_key); 268 + 269 + int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, 270 + struct pkey_apqn *apqns, size_t *nr_apqns) 271 + { 272 + const struct pkey_handler *h; 273 + int rc = -ENODEV; 274 + 275 + h = pkey_handler_get_keybased(key, keylen); 276 + if (h && h->apqns_for_key) 277 + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); 278 + pkey_handler_put(h); 279 + 280 + return rc; 281 + } 282 + EXPORT_SYMBOL(pkey_handler_apqns_for_key); 283 + 284 + int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, 285 + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 286 + struct pkey_apqn *apqns, size_t *nr_apqns) 287 + { 288 + const struct pkey_handler *h; 289 + int rc = -ENODEV; 290 + 291 + h = pkey_handler_get_keytypebased(keysubtype); 292 + if (h && 
h->apqns_for_keytype) { 293 + rc = h->apqns_for_keytype(keysubtype, 294 + cur_mkvp, alt_mkvp, flags, 295 + apqns, nr_apqns); 296 + } 297 + pkey_handler_put(h); 298 + 299 + return rc; 300 + } 301 + EXPORT_SYMBOL(pkey_handler_apqns_for_keytype); 302 + 303 + void pkey_handler_request_modules(void) 304 + { 305 + #ifdef CONFIG_MODULES 306 + static const char * const pkey_handler_modules[] = { 307 + "pkey_cca", "pkey_ep11", "pkey_pckmo" }; 308 + int i; 309 + 310 + for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) { 311 + const struct pkey_handler *h; 312 + bool found = false; 313 + 314 + rcu_read_lock(); 315 + list_for_each_entry_rcu(h, &handler_list, list) { 316 + if (h->module && 317 + !strcmp(h->module->name, pkey_handler_modules[i])) { 318 + found = true; 319 + break; 320 + } 321 + } 322 + rcu_read_unlock(); 323 + if (!found) { 324 + pr_debug("request_module(%s)\n", pkey_handler_modules[i]); 325 + request_module(pkey_handler_modules[i]); 326 + } 327 + } 328 + #endif 329 + } 330 + EXPORT_SYMBOL(pkey_handler_request_modules); 331 + 332 + /* 333 + * Module init 334 + */ 335 + static int __init pkey_init(void) 336 + { 337 + int rc; 338 + 339 + /* init debug feature */ 340 + pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); 341 + debug_register_view(pkey_dbf_info, &debug_sprintf_view); 342 + debug_set_level(pkey_dbf_info, 4); 343 + 344 + /* the handler registry does not need any init */ 345 + 346 + rc = pkey_api_init(); 347 + if (rc) 348 + debug_unregister(pkey_dbf_info); 349 + 350 + return rc; 351 + } 352 + 353 + /* 354 + * Module exit 355 + */ 356 + static void __exit pkey_exit(void) 357 + { 358 + pkey_api_exit(); 359 + } 360 + 361 + module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init); 362 + module_exit(pkey_exit);
+195
drivers/s390/crypto/pkey_base.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * Copyright IBM Corp. 2024 4 + * 5 + * Pkey base: debug feature, defines and structs 6 + * common to all pkey code. 7 + */ 8 + 9 + #ifndef _PKEY_BASE_H_ 10 + #define _PKEY_BASE_H_ 11 + 12 + #include <linux/types.h> 13 + #include <asm/debug.h> 14 + #include <asm/pkey.h> 15 + 16 + /* 17 + * pkey debug feature 18 + */ 19 + 20 + extern debug_info_t *pkey_dbf_info; 21 + 22 + #define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__) 23 + #define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__) 24 + #define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__) 25 + 26 + /* 27 + * common defines and common structs 28 + */ 29 + 30 + #define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ 31 + #define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) 32 + #define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ 33 + #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ 34 + #define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ 35 + 36 + /* inside view of a generic protected key token */ 37 + struct protkeytoken { 38 + u8 type; /* 0x00 for PAES specific key tokens */ 39 + u8 res0[3]; 40 + u8 version; /* should be 0x01 for protected key token */ 41 + u8 res1[3]; 42 + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 43 + u32 len; /* bytes actually stored in protkey[] */ 44 + u8 protkey[]; /* the protected key blob */ 45 + } __packed; 46 + 47 + /* inside view of a protected AES key token */ 48 + struct protaeskeytoken { 49 + u8 type; /* 0x00 for PAES specific key tokens */ 50 + u8 res0[3]; 51 + u8 version; /* should be 0x01 for protected key token */ 52 + u8 res1[3]; 53 + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 54 + u32 len; /* bytes actually stored in protkey[] */ 55 + u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ 56 + } __packed; 57 + 58 + /* 
inside view of a clear key token (type 0x00 version 0x02) */ 59 + struct clearkeytoken { 60 + u8 type; /* 0x00 for PAES specific key tokens */ 61 + u8 res0[3]; 62 + u8 version; /* 0x02 for clear key token */ 63 + u8 res1[3]; 64 + u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ 65 + u32 len; /* bytes actually stored in clearkey[] */ 66 + u8 clearkey[]; /* clear key value */ 67 + } __packed; 68 + 69 + /* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ 70 + static inline u32 pkey_keytype_aes_to_size(u32 keytype) 71 + { 72 + switch (keytype) { 73 + case PKEY_KEYTYPE_AES_128: 74 + return 16; 75 + case PKEY_KEYTYPE_AES_192: 76 + return 24; 77 + case PKEY_KEYTYPE_AES_256: 78 + return 32; 79 + default: 80 + return 0; 81 + } 82 + } 83 + 84 + /* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */ 85 + static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize) 86 + { 87 + switch (keybitsize) { 88 + case 128: 89 + return PKEY_KEYTYPE_AES_128; 90 + case 192: 91 + return PKEY_KEYTYPE_AES_192; 92 + case 256: 93 + return PKEY_KEYTYPE_AES_256; 94 + default: 95 + return 0; 96 + } 97 + } 98 + 99 + /* 100 + * pkey_api.c: 101 + */ 102 + int __init pkey_api_init(void); 103 + void __exit pkey_api_exit(void); 104 + 105 + /* 106 + * pkey_sysfs.c: 107 + */ 108 + 109 + extern const struct attribute_group *pkey_attr_groups[]; 110 + 111 + /* 112 + * pkey handler registry 113 + */ 114 + 115 + struct pkey_handler { 116 + struct module *module; 117 + const char *name; 118 + /* 119 + * is_supported_key() and is_supported_keytype() are called 120 + * within an rcu_read_lock() scope and thus must not sleep! 
121 + */ 122 + bool (*is_supported_key)(const u8 *key, u32 keylen); 123 + bool (*is_supported_keytype)(enum pkey_key_type); 124 + int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, 125 + const u8 *key, u32 keylen, 126 + u8 *protkey, u32 *protkeylen, u32 *protkeytype); 127 + int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, 128 + size_t nr_apqns, 129 + const u8 *key, u32 keylen, 130 + u8 *protkey, u32 *protkeylen, 131 + u32 *protkeytype); 132 + int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, 133 + u32 keytype, u32 keysubtype, 134 + u32 keybitsize, u32 flags, 135 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 136 + int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, 137 + u32 keytype, u32 keysubtype, 138 + u32 keybitsize, u32 flags, 139 + const u8 *clrkey, u32 clrkeylen, 140 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 141 + int (*verify_key)(const u8 *key, u32 keylen, 142 + u16 *card, u16 *dom, 143 + u32 *keytype, u32 *keybitsize, u32 *flags); 144 + int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, 145 + struct pkey_apqn *apqns, size_t *nr_apqns); 146 + int (*apqns_for_keytype)(enum pkey_key_type ktype, 147 + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 148 + struct pkey_apqn *apqns, size_t *nr_apqns); 149 + /* used internal by pkey base */ 150 + struct list_head list; 151 + }; 152 + 153 + int pkey_handler_register(struct pkey_handler *handler); 154 + int pkey_handler_unregister(struct pkey_handler *handler); 155 + 156 + /* 157 + * invocation function for the registered pkey handlers 158 + */ 159 + 160 + const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen); 161 + const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt); 162 + void pkey_handler_put(const struct pkey_handler *handler); 163 + 164 + int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 165 + const u8 *key, u32 keylen, 166 + u8 *protkey, u32 *protkeylen, u32 
*protkeytype); 167 + int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, 168 + size_t nr_apqns, 169 + const u8 *key, u32 keylen, 170 + u8 *protkey, u32 *protkeylen, 171 + u32 *protkeytype); 172 + int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 173 + u32 keytype, u32 keysubtype, 174 + u32 keybitsize, u32 flags, 175 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 176 + int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, 177 + u32 keytype, u32 keysubtype, 178 + u32 keybitsize, u32 flags, 179 + const u8 *clrkey, u32 clrkeylen, 180 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 181 + int pkey_handler_verify_key(const u8 *key, u32 keylen, 182 + u16 *card, u16 *dom, 183 + u32 *keytype, u32 *keybitsize, u32 *flags); 184 + int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, 185 + struct pkey_apqn *apqns, size_t *nr_apqns); 186 + int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, 187 + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 188 + struct pkey_apqn *apqns, size_t *nr_apqns); 189 + 190 + /* 191 + * Unconditional try to load all handler modules 192 + */ 193 + void pkey_handler_request_modules(void); 194 + 195 + #endif /* _PKEY_BASE_H_ */
+629
drivers/s390/crypto/pkey_cca.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * pkey cca specific code 4 + * 5 + * Copyright IBM Corp. 2024 6 + */ 7 + 8 + #define KMSG_COMPONENT "pkey" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/module.h> 13 + #include <linux/cpufeature.h> 14 + 15 + #include "zcrypt_api.h" 16 + #include "zcrypt_ccamisc.h" 17 + #include "pkey_base.h" 18 + 19 + MODULE_LICENSE("GPL"); 20 + MODULE_AUTHOR("IBM Corporation"); 21 + MODULE_DESCRIPTION("s390 protected key CCA handler"); 22 + 23 + #if IS_MODULE(CONFIG_PKEY_CCA) 24 + static struct ap_device_id pkey_cca_card_ids[] = { 25 + { .dev_type = AP_DEVICE_TYPE_CEX4 }, 26 + { .dev_type = AP_DEVICE_TYPE_CEX5 }, 27 + { .dev_type = AP_DEVICE_TYPE_CEX6 }, 28 + { .dev_type = AP_DEVICE_TYPE_CEX7 }, 29 + { .dev_type = AP_DEVICE_TYPE_CEX8 }, 30 + { /* end of list */ }, 31 + }; 32 + MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids); 33 + #endif 34 + 35 + /* 36 + * Check key blob for known and supported CCA key. 
37 + */ 38 + static bool is_cca_key(const u8 *key, u32 keylen) 39 + { 40 + struct keytoken_header *hdr = (struct keytoken_header *)key; 41 + 42 + if (keylen < sizeof(*hdr)) 43 + return false; 44 + 45 + switch (hdr->type) { 46 + case TOKTYPE_CCA_INTERNAL: 47 + switch (hdr->version) { 48 + case TOKVER_CCA_AES: 49 + case TOKVER_CCA_VLSC: 50 + return true; 51 + default: 52 + return false; 53 + } 54 + case TOKTYPE_CCA_INTERNAL_PKA: 55 + return true; 56 + default: 57 + return false; 58 + } 59 + } 60 + 61 + static bool is_cca_keytype(enum pkey_key_type key_type) 62 + { 63 + switch (key_type) { 64 + case PKEY_TYPE_CCA_DATA: 65 + case PKEY_TYPE_CCA_CIPHER: 66 + case PKEY_TYPE_CCA_ECC: 67 + return true; 68 + default: 69 + return false; 70 + } 71 + } 72 + 73 + static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, 74 + struct pkey_apqn *apqns, size_t *nr_apqns) 75 + { 76 + struct keytoken_header *hdr = (struct keytoken_header *)key; 77 + u32 _nr_apqns, *_apqns = NULL; 78 + int rc; 79 + 80 + if (!flags) 81 + flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; 82 + 83 + if (keylen < sizeof(struct keytoken_header)) 84 + return -EINVAL; 85 + 86 + zcrypt_wait_api_operational(); 87 + 88 + if (hdr->type == TOKTYPE_CCA_INTERNAL) { 89 + u64 cur_mkvp = 0, old_mkvp = 0; 90 + int minhwtype = ZCRYPT_CEX3C; 91 + 92 + if (hdr->version == TOKVER_CCA_AES) { 93 + struct secaeskeytoken *t = (struct secaeskeytoken *)key; 94 + 95 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 96 + cur_mkvp = t->mkvp; 97 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 98 + old_mkvp = t->mkvp; 99 + } else if (hdr->version == TOKVER_CCA_VLSC) { 100 + struct cipherkeytoken *t = (struct cipherkeytoken *)key; 101 + 102 + minhwtype = ZCRYPT_CEX6; 103 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 104 + cur_mkvp = t->mkvp0; 105 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 106 + old_mkvp = t->mkvp0; 107 + } else { 108 + /* unknown CCA internal token type */ 109 + return -EINVAL; 110 + } 111 + rc = cca_findcard2(&_apqns, 
&_nr_apqns, 0xFFFF, 0xFFFF, 112 + minhwtype, AES_MK_SET, 113 + cur_mkvp, old_mkvp, 1); 114 + if (rc) 115 + goto out; 116 + 117 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 118 + struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; 119 + u64 cur_mkvp = 0, old_mkvp = 0; 120 + 121 + if (t->secid == 0x20) { 122 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 123 + cur_mkvp = t->mkvp; 124 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 125 + old_mkvp = t->mkvp; 126 + } else { 127 + /* unknown CCA internal 2 token type */ 128 + return -EINVAL; 129 + } 130 + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 131 + ZCRYPT_CEX7, APKA_MK_SET, 132 + cur_mkvp, old_mkvp, 1); 133 + if (rc) 134 + goto out; 135 + 136 + } else { 137 + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", 138 + __func__, hdr->type, hdr->version); 139 + return -EINVAL; 140 + } 141 + 142 + if (apqns) { 143 + if (*nr_apqns < _nr_apqns) 144 + rc = -ENOSPC; 145 + else 146 + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 147 + } 148 + *nr_apqns = _nr_apqns; 149 + 150 + out: 151 + kfree(_apqns); 152 + pr_debug("rc=%d\n", rc); 153 + return rc; 154 + } 155 + 156 + static int cca_apqns4type(enum pkey_key_type ktype, 157 + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 158 + struct pkey_apqn *apqns, size_t *nr_apqns) 159 + { 160 + u32 _nr_apqns, *_apqns = NULL; 161 + int rc; 162 + 163 + zcrypt_wait_api_operational(); 164 + 165 + if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { 166 + u64 cur_mkvp = 0, old_mkvp = 0; 167 + int minhwtype = ZCRYPT_CEX3C; 168 + 169 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 170 + cur_mkvp = *((u64 *)cur_mkvp); 171 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 172 + old_mkvp = *((u64 *)alt_mkvp); 173 + if (ktype == PKEY_TYPE_CCA_CIPHER) 174 + minhwtype = ZCRYPT_CEX6; 175 + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 176 + minhwtype, AES_MK_SET, 177 + cur_mkvp, old_mkvp, 1); 178 + if (rc) 179 + goto out; 180 + 181 + } else if (ktype == 
PKEY_TYPE_CCA_ECC) { 182 + u64 cur_mkvp = 0, old_mkvp = 0; 183 + 184 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 185 + cur_mkvp = *((u64 *)cur_mkvp); 186 + if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 187 + old_mkvp = *((u64 *)alt_mkvp); 188 + rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 189 + ZCRYPT_CEX7, APKA_MK_SET, 190 + cur_mkvp, old_mkvp, 1); 191 + if (rc) 192 + goto out; 193 + 194 + } else { 195 + PKEY_DBF_ERR("%s unknown/unsupported key type %d", 196 + __func__, (int)ktype); 197 + return -EINVAL; 198 + } 199 + 200 + if (apqns) { 201 + if (*nr_apqns < _nr_apqns) 202 + rc = -ENOSPC; 203 + else 204 + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 205 + } 206 + *nr_apqns = _nr_apqns; 207 + 208 + out: 209 + kfree(_apqns); 210 + pr_debug("rc=%d\n", rc); 211 + return rc; 212 + } 213 + 214 + static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 215 + const u8 *key, u32 keylen, 216 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 217 + { 218 + struct keytoken_header *hdr = (struct keytoken_header *)key; 219 + struct pkey_apqn *local_apqns = NULL; 220 + int i, rc; 221 + 222 + if (keylen < sizeof(*hdr)) 223 + return -EINVAL; 224 + 225 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 226 + hdr->version == TOKVER_CCA_AES) { 227 + /* CCA AES data key */ 228 + if (keylen != sizeof(struct secaeskeytoken)) 229 + return -EINVAL; 230 + if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0)) 231 + return -EINVAL; 232 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 233 + hdr->version == TOKVER_CCA_VLSC) { 234 + /* CCA AES cipher key */ 235 + if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE) 236 + return -EINVAL; 237 + if (cca_check_secaescipherkey(pkey_dbf_info, 238 + 3, key, 0, 1)) 239 + return -EINVAL; 240 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 241 + /* CCA ECC (private) key */ 242 + if (keylen < sizeof(struct eccprivkeytoken)) 243 + return -EINVAL; 244 + if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1)) 245 + return 
-EINVAL; 246 + } else { 247 + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", 248 + __func__, hdr->type, hdr->version); 249 + return -EINVAL; 250 + } 251 + 252 + zcrypt_wait_api_operational(); 253 + 254 + if (!apqns || (nr_apqns == 1 && 255 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 256 + nr_apqns = MAXAPQNSINLIST; 257 + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 258 + GFP_KERNEL); 259 + if (!local_apqns) 260 + return -ENOMEM; 261 + rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); 262 + if (rc) 263 + goto out; 264 + apqns = local_apqns; 265 + } 266 + 267 + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 268 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 269 + hdr->version == TOKVER_CCA_AES) { 270 + rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, 271 + key, protkey, 272 + protkeylen, protkeytype); 273 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 274 + hdr->version == TOKVER_CCA_VLSC) { 275 + rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, 276 + key, protkey, 277 + protkeylen, protkeytype); 278 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 279 + rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, 280 + key, protkey, 281 + protkeylen, protkeytype); 282 + } else { 283 + rc = -EINVAL; 284 + break; 285 + } 286 + } 287 + 288 + out: 289 + kfree(local_apqns); 290 + pr_debug("rc=%d\n", rc); 291 + return rc; 292 + } 293 + 294 + /* 295 + * Generate CCA secure key. 296 + * As of now only CCA AES Data or Cipher secure keys are 297 + * supported. 298 + * keytype is one of the PKEY_KEYTYPE_* constants, 299 + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, 300 + * keybitsize is the bit size of the key (may be 0 for 301 + * keytype PKEY_KEYTYPE_AES_*). 
302 + */ 303 + static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 304 + u32 keytype, u32 subtype, 305 + u32 keybitsize, u32 flags, 306 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 307 + { 308 + struct pkey_apqn *local_apqns = NULL; 309 + int i, len, rc; 310 + 311 + /* check keytype, subtype, keybitsize */ 312 + switch (keytype) { 313 + case PKEY_KEYTYPE_AES_128: 314 + case PKEY_KEYTYPE_AES_192: 315 + case PKEY_KEYTYPE_AES_256: 316 + len = pkey_keytype_aes_to_size(keytype); 317 + if (keybitsize && keybitsize != 8 * len) { 318 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 319 + __func__, keybitsize); 320 + return -EINVAL; 321 + } 322 + keybitsize = 8 * len; 323 + switch (subtype) { 324 + case PKEY_TYPE_CCA_DATA: 325 + case PKEY_TYPE_CCA_CIPHER: 326 + break; 327 + default: 328 + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", 329 + __func__, subtype); 330 + return -EINVAL; 331 + } 332 + break; 333 + default: 334 + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", 335 + __func__, keytype); 336 + return -EINVAL; 337 + } 338 + 339 + zcrypt_wait_api_operational(); 340 + 341 + if (!apqns || (nr_apqns == 1 && 342 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 343 + nr_apqns = MAXAPQNSINLIST; 344 + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 345 + GFP_KERNEL); 346 + if (!local_apqns) 347 + return -ENOMEM; 348 + rc = cca_apqns4type(subtype, NULL, NULL, 0, 349 + local_apqns, &nr_apqns); 350 + if (rc) 351 + goto out; 352 + apqns = local_apqns; 353 + } 354 + 355 + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 356 + if (subtype == PKEY_TYPE_CCA_CIPHER) { 357 + rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, 358 + keybitsize, flags, 359 + keybuf, keybuflen); 360 + } else { 361 + /* PKEY_TYPE_CCA_DATA */ 362 + rc = cca_genseckey(apqns[i].card, apqns[i].domain, 363 + keybitsize, keybuf); 364 + *keybuflen = (rc ? 
0 : SECKEYBLOBSIZE); 365 + } 366 + } 367 + 368 + out: 369 + kfree(local_apqns); 370 + pr_debug("rc=%d\n", rc); 371 + return rc; 372 + } 373 + 374 + /* 375 + * Generate CCA secure key with given clear key value. 376 + * As of now only CCA AES Data or Cipher secure keys are 377 + * supported. 378 + * keytype is one of the PKEY_KEYTYPE_* constants, 379 + * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER, 380 + * keybitsize is the bit size of the key (may be 0 for 381 + * keytype PKEY_KEYTYPE_AES_*). 382 + */ 383 + static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, 384 + u32 keytype, u32 subtype, 385 + u32 keybitsize, u32 flags, 386 + const u8 *clrkey, u32 clrkeylen, 387 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 388 + { 389 + struct pkey_apqn *local_apqns = NULL; 390 + int i, len, rc; 391 + 392 + /* check keytype, subtype, clrkeylen, keybitsize */ 393 + switch (keytype) { 394 + case PKEY_KEYTYPE_AES_128: 395 + case PKEY_KEYTYPE_AES_192: 396 + case PKEY_KEYTYPE_AES_256: 397 + len = pkey_keytype_aes_to_size(keytype); 398 + if (keybitsize && keybitsize != 8 * len) { 399 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 400 + __func__, keybitsize); 401 + return -EINVAL; 402 + } 403 + keybitsize = 8 * len; 404 + if (clrkeylen != len) { 405 + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", 406 + __func__, clrkeylen, len); 407 + return -EINVAL; 408 + } 409 + switch (subtype) { 410 + case PKEY_TYPE_CCA_DATA: 411 + case PKEY_TYPE_CCA_CIPHER: 412 + break; 413 + default: 414 + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", 415 + __func__, subtype); 416 + return -EINVAL; 417 + } 418 + break; 419 + default: 420 + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", 421 + __func__, keytype); 422 + return -EINVAL; 423 + } 424 + 425 + zcrypt_wait_api_operational(); 426 + 427 + if (!apqns || (nr_apqns == 1 && 428 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 429 + nr_apqns = MAXAPQNSINLIST; 430 + local_apqns = 
kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 431 + GFP_KERNEL); 432 + if (!local_apqns) 433 + return -ENOMEM; 434 + rc = cca_apqns4type(subtype, NULL, NULL, 0, 435 + local_apqns, &nr_apqns); 436 + if (rc) 437 + goto out; 438 + apqns = local_apqns; 439 + } 440 + 441 + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 442 + if (subtype == PKEY_TYPE_CCA_CIPHER) { 443 + rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, 444 + keybitsize, flags, clrkey, 445 + keybuf, keybuflen); 446 + } else { 447 + /* PKEY_TYPE_CCA_DATA */ 448 + rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, 449 + keybitsize, clrkey, keybuf); 450 + *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); 451 + } 452 + } 453 + 454 + out: 455 + kfree(local_apqns); 456 + pr_debug("rc=%d\n", rc); 457 + return rc; 458 + } 459 + 460 + static int cca_verifykey(const u8 *key, u32 keylen, 461 + u16 *card, u16 *dom, 462 + u32 *keytype, u32 *keybitsize, u32 *flags) 463 + { 464 + struct keytoken_header *hdr = (struct keytoken_header *)key; 465 + u32 nr_apqns, *apqns = NULL; 466 + int rc; 467 + 468 + if (keylen < sizeof(*hdr)) 469 + return -EINVAL; 470 + 471 + zcrypt_wait_api_operational(); 472 + 473 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 474 + hdr->version == TOKVER_CCA_AES) { 475 + struct secaeskeytoken *t = (struct secaeskeytoken *)key; 476 + 477 + rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0); 478 + if (rc) 479 + goto out; 480 + *keytype = PKEY_TYPE_CCA_DATA; 481 + *keybitsize = t->bitsize; 482 + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 483 + ZCRYPT_CEX3C, AES_MK_SET, 484 + t->mkvp, 0, 1); 485 + if (!rc) 486 + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 487 + if (rc == -ENODEV) { 488 + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 489 + ZCRYPT_CEX3C, AES_MK_SET, 490 + 0, t->mkvp, 1); 491 + if (!rc) 492 + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 493 + } 494 + if (rc) 495 + goto out; 496 + 497 + *card = ((struct pkey_apqn *)apqns)->card; 498 + *dom = ((struct pkey_apqn *)apqns)->domain; 499 + 500 
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 501 + hdr->version == TOKVER_CCA_VLSC) { 502 + struct cipherkeytoken *t = (struct cipherkeytoken *)key; 503 + 504 + rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1); 505 + if (rc) 506 + goto out; 507 + *keytype = PKEY_TYPE_CCA_CIPHER; 508 + *keybitsize = PKEY_SIZE_UNKNOWN; 509 + if (!t->plfver && t->wpllen == 512) 510 + *keybitsize = PKEY_SIZE_AES_128; 511 + else if (!t->plfver && t->wpllen == 576) 512 + *keybitsize = PKEY_SIZE_AES_192; 513 + else if (!t->plfver && t->wpllen == 640) 514 + *keybitsize = PKEY_SIZE_AES_256; 515 + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 516 + ZCRYPT_CEX6, AES_MK_SET, 517 + t->mkvp0, 0, 1); 518 + if (!rc) 519 + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 520 + if (rc == -ENODEV) { 521 + rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 522 + ZCRYPT_CEX6, AES_MK_SET, 523 + 0, t->mkvp0, 1); 524 + if (!rc) 525 + *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 526 + } 527 + if (rc) 528 + goto out; 529 + 530 + *card = ((struct pkey_apqn *)apqns)->card; 531 + *dom = ((struct pkey_apqn *)apqns)->domain; 532 + 533 + } else { 534 + /* unknown/unsupported key blob */ 535 + rc = -EINVAL; 536 + } 537 + 538 + out: 539 + kfree(apqns); 540 + pr_debug("rc=%d\n", rc); 541 + return rc; 542 + } 543 + 544 + /* 545 + * This function provides an alternate but usually slow way 546 + * to convert a 'clear key token' with AES key material into 547 + * a protected key. This is done via an intermediate step 548 + * which creates a CCA AES DATA secure key first and then 549 + * derives the protected key from this secure key. 
550 + */ 551 + static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, 552 + size_t nr_apqns, 553 + const u8 *key, u32 keylen, 554 + u8 *protkey, u32 *protkeylen, 555 + u32 *protkeytype) 556 + { 557 + const struct keytoken_header *hdr = (const struct keytoken_header *)key; 558 + const struct clearkeytoken *t = (const struct clearkeytoken *)key; 559 + u32 tmplen, keysize = 0; 560 + u8 *tmpbuf; 561 + int i, rc; 562 + 563 + if (keylen < sizeof(*hdr)) 564 + return -EINVAL; 565 + 566 + if (hdr->type == TOKTYPE_NON_CCA && 567 + hdr->version == TOKVER_CLEAR_KEY) 568 + keysize = pkey_keytype_aes_to_size(t->keytype); 569 + if (!keysize || t->len != keysize) 570 + return -EINVAL; 571 + 572 + /* alloc tmp key buffer */ 573 + tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); 574 + if (!tmpbuf) 575 + return -ENOMEM; 576 + 577 + /* try two times in case of failure */ 578 + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { 579 + tmplen = SECKEYBLOBSIZE; 580 + rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, 581 + 8 * keysize, 0, t->clearkey, t->len, 582 + tmpbuf, &tmplen, NULL); 583 + pr_debug("cca_clr2key()=%d\n", rc); 584 + if (rc) 585 + continue; 586 + rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, 587 + protkey, protkeylen, protkeytype); 588 + pr_debug("cca_key2protkey()=%d\n", rc); 589 + } 590 + 591 + kfree(tmpbuf); 592 + pr_debug("rc=%d\n", rc); 593 + return rc; 594 + } 595 + 596 + static struct pkey_handler cca_handler = { 597 + .module = THIS_MODULE, 598 + .name = "PKEY CCA handler", 599 + .is_supported_key = is_cca_key, 600 + .is_supported_keytype = is_cca_keytype, 601 + .key_to_protkey = cca_key2protkey, 602 + .slowpath_key_to_protkey = cca_slowpath_key2protkey, 603 + .gen_key = cca_gen_key, 604 + .clr_to_key = cca_clr2key, 605 + .verify_key = cca_verifykey, 606 + .apqns_for_key = cca_apqns4key, 607 + .apqns_for_keytype = cca_apqns4type, 608 + }; 609 + 610 + /* 611 + * Module init 612 + */ 613 + static int __init pkey_cca_init(void) 614 + { 615 + /* register 
this module as pkey handler for all the cca stuff */ 616 + return pkey_handler_register(&cca_handler); 617 + } 618 + 619 + /* 620 + * Module exit 621 + */ 622 + static void __exit pkey_cca_exit(void) 623 + { 624 + /* unregister this module as pkey handler */ 625 + pkey_handler_unregister(&cca_handler); 626 + } 627 + 628 + module_init(pkey_cca_init); 629 + module_exit(pkey_cca_exit);
+578
drivers/s390/crypto/pkey_ep11.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * pkey ep11 specific code 4 + * 5 + * Copyright IBM Corp. 2024 6 + */ 7 + 8 + #define KMSG_COMPONENT "pkey" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/module.h> 13 + #include <linux/cpufeature.h> 14 + 15 + #include "zcrypt_api.h" 16 + #include "zcrypt_ccamisc.h" 17 + #include "zcrypt_ep11misc.h" 18 + #include "pkey_base.h" 19 + 20 + MODULE_LICENSE("GPL"); 21 + MODULE_AUTHOR("IBM Corporation"); 22 + MODULE_DESCRIPTION("s390 protected key EP11 handler"); 23 + 24 + #if IS_MODULE(CONFIG_PKEY_EP11) 25 + static struct ap_device_id pkey_ep11_card_ids[] = { 26 + { .dev_type = AP_DEVICE_TYPE_CEX4 }, 27 + { .dev_type = AP_DEVICE_TYPE_CEX5 }, 28 + { .dev_type = AP_DEVICE_TYPE_CEX6 }, 29 + { .dev_type = AP_DEVICE_TYPE_CEX7 }, 30 + { .dev_type = AP_DEVICE_TYPE_CEX8 }, 31 + { /* end of list */ }, 32 + }; 33 + MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids); 34 + #endif 35 + 36 + /* 37 + * Check key blob for known and supported EP11 key. 
38 + */ 39 + static bool is_ep11_key(const u8 *key, u32 keylen) 40 + { 41 + struct keytoken_header *hdr = (struct keytoken_header *)key; 42 + 43 + if (keylen < sizeof(*hdr)) 44 + return false; 45 + 46 + switch (hdr->type) { 47 + case TOKTYPE_NON_CCA: 48 + switch (hdr->version) { 49 + case TOKVER_EP11_AES: 50 + case TOKVER_EP11_AES_WITH_HEADER: 51 + case TOKVER_EP11_ECC_WITH_HEADER: 52 + return true; 53 + default: 54 + return false; 55 + } 56 + default: 57 + return false; 58 + } 59 + } 60 + 61 + static bool is_ep11_keytype(enum pkey_key_type key_type) 62 + { 63 + switch (key_type) { 64 + case PKEY_TYPE_EP11: 65 + case PKEY_TYPE_EP11_AES: 66 + case PKEY_TYPE_EP11_ECC: 67 + return true; 68 + default: 69 + return false; 70 + } 71 + } 72 + 73 + static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, 74 + struct pkey_apqn *apqns, size_t *nr_apqns) 75 + { 76 + struct keytoken_header *hdr = (struct keytoken_header *)key; 77 + u32 _nr_apqns, *_apqns = NULL; 78 + int rc; 79 + 80 + if (!flags) 81 + flags = PKEY_FLAGS_MATCH_CUR_MKVP; 82 + 83 + if (keylen < sizeof(struct keytoken_header) || flags == 0) 84 + return -EINVAL; 85 + 86 + zcrypt_wait_api_operational(); 87 + 88 + if (hdr->type == TOKTYPE_NON_CCA && 89 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 90 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 91 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 92 + struct ep11keyblob *kb = (struct ep11keyblob *) 93 + (key + sizeof(struct ep11kblob_header)); 94 + int minhwtype = 0, api = 0; 95 + 96 + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 97 + return -EINVAL; 98 + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { 99 + minhwtype = ZCRYPT_CEX7; 100 + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 101 + } 102 + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 103 + minhwtype, api, kb->wkvp); 104 + if (rc) 105 + goto out; 106 + 107 + } else if (hdr->type == TOKTYPE_NON_CCA && 108 + hdr->version == TOKVER_EP11_AES && 109 + is_ep11_keyblob(key)) { 110 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 111 + int minhwtype = 0, api = 0; 112 + 113 + if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 114 + return -EINVAL; 115 + if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) { 116 + minhwtype = ZCRYPT_CEX7; 117 + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 118 + } 119 + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 120 + minhwtype, api, kb->wkvp); 121 + if (rc) 122 + goto out; 123 + 124 + } else { 125 + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", 126 + __func__, hdr->type, hdr->version); 127 + return -EINVAL; 128 + } 129 + 130 + if (apqns) { 131 + if (*nr_apqns < _nr_apqns) 132 + rc = -ENOSPC; 133 + else 134 + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 135 + } 136 + *nr_apqns = _nr_apqns; 137 + 138 + out: 139 + kfree(_apqns); 140 + pr_debug("rc=%d\n", rc); 141 + return rc; 142 + } 143 + 144 + static int ep11_apqns4type(enum pkey_key_type ktype, 145 + u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 146 + struct pkey_apqn *apqns, size_t *nr_apqns) 147 + { 148 + u32 _nr_apqns, *_apqns = NULL; 149 + int rc; 150 + 151 + zcrypt_wait_api_operational(); 152 + 153 + if (ktype == PKEY_TYPE_EP11 || 154 + ktype == PKEY_TYPE_EP11_AES || 155 + ktype == PKEY_TYPE_EP11_ECC) { 156 + u8 *wkvp = NULL; 157 + int api; 158 + 159 + if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 160 + wkvp = cur_mkvp; 161 + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 162 + rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 163 + ZCRYPT_CEX7, api, wkvp); 164 + if (rc) 165 + goto out; 166 + 167 + } else { 168 + PKEY_DBF_ERR("%s unknown/unsupported key type %d\n", 169 + __func__, (int)ktype); 170 + return -EINVAL; 171 + } 172 + 173 + if (apqns) { 174 + if (*nr_apqns < _nr_apqns) 175 + rc = -ENOSPC; 176 + else 177 + memcpy(apqns, _apqns, _nr_apqns * sizeof(u32)); 178 + } 179 + *nr_apqns = _nr_apqns; 180 + 181 + out: 182 + kfree(_apqns); 183 + pr_debug("rc=%d\n", rc); 184 + return rc; 185 + } 186 + 187 + static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 188 + const u8 *key, u32 keylen, 189 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 190 + { 191 + struct keytoken_header *hdr = (struct keytoken_header *)key; 192 + struct pkey_apqn *local_apqns = NULL; 193 + int i, rc; 194 + 195 + if (keylen < sizeof(*hdr)) 196 + return -EINVAL; 197 + 198 + if (hdr->type == TOKTYPE_NON_CCA && 199 + hdr->version == TOKVER_EP11_AES_WITH_HEADER && 200 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 201 + /* EP11 AES key blob with header */ 202 + if (ep11_check_aes_key_with_hdr(pkey_dbf_info, 203 + 3, key, keylen, 1)) 204 + return -EINVAL; 205 + } else if (hdr->type == TOKTYPE_NON_CCA && 206 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 207 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 208 + /* EP11 ECC key blob with header */ 209 + if (ep11_check_ecc_key_with_hdr(pkey_dbf_info, 210 + 3, key, keylen, 1)) 211 + return -EINVAL; 212 + } else if (hdr->type == TOKTYPE_NON_CCA && 213 + hdr->version == TOKVER_EP11_AES && 214 + is_ep11_keyblob(key)) { 215 + /* EP11 AES key blob with header in session field */ 216 + if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1)) 217 + return -EINVAL; 218 + } else { 219 + PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n", 220 + __func__, hdr->type, hdr->version); 221 + return -EINVAL; 222 + } 223 + 224 + 
zcrypt_wait_api_operational(); 225 + 226 + if (!apqns || (nr_apqns == 1 && 227 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 228 + nr_apqns = MAXAPQNSINLIST; 229 + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 230 + GFP_KERNEL); 231 + if (!local_apqns) 232 + return -ENOMEM; 233 + rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); 234 + if (rc) 235 + goto out; 236 + apqns = local_apqns; 237 + } 238 + 239 + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 240 + if (hdr->type == TOKTYPE_NON_CCA && 241 + hdr->version == TOKVER_EP11_AES_WITH_HEADER && 242 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 243 + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 244 + key, hdr->len, protkey, 245 + protkeylen, protkeytype); 246 + } else if (hdr->type == TOKTYPE_NON_CCA && 247 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 248 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 249 + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 250 + key, hdr->len, protkey, 251 + protkeylen, protkeytype); 252 + } else if (hdr->type == TOKTYPE_NON_CCA && 253 + hdr->version == TOKVER_EP11_AES && 254 + is_ep11_keyblob(key)) { 255 + rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 256 + key, hdr->len, protkey, 257 + protkeylen, protkeytype); 258 + } else { 259 + rc = -EINVAL; 260 + break; 261 + } 262 + } 263 + 264 + out: 265 + kfree(local_apqns); 266 + pr_debug("rc=%d\n", rc); 267 + return rc; 268 + } 269 + 270 + /* 271 + * Generate EP11 secure key. 272 + * As of now only EP11 AES secure keys are supported. 273 + * keytype is one of the PKEY_KEYTYPE_* constants, 274 + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES 275 + * or 0 (results in subtype PKEY_TYPE_EP11_AES), 276 + * keybitsize is the bit size of the key (may be 0 for 277 + * keytype PKEY_KEYTYPE_AES_*). 
278 + */ 279 + static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 280 + u32 keytype, u32 subtype, 281 + u32 keybitsize, u32 flags, 282 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 283 + { 284 + struct pkey_apqn *local_apqns = NULL; 285 + int i, len, rc; 286 + 287 + /* check keytype, subtype, keybitsize */ 288 + switch (keytype) { 289 + case PKEY_KEYTYPE_AES_128: 290 + case PKEY_KEYTYPE_AES_192: 291 + case PKEY_KEYTYPE_AES_256: 292 + len = pkey_keytype_aes_to_size(keytype); 293 + if (keybitsize && keybitsize != 8 * len) { 294 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 295 + __func__, keybitsize); 296 + return -EINVAL; 297 + } 298 + keybitsize = 8 * len; 299 + switch (subtype) { 300 + case PKEY_TYPE_EP11: 301 + case PKEY_TYPE_EP11_AES: 302 + break; 303 + default: 304 + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", 305 + __func__, subtype); 306 + return -EINVAL; 307 + } 308 + break; 309 + default: 310 + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", 311 + __func__, keytype); 312 + return -EINVAL; 313 + } 314 + 315 + zcrypt_wait_api_operational(); 316 + 317 + if (!apqns || (nr_apqns == 1 && 318 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 319 + nr_apqns = MAXAPQNSINLIST; 320 + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 321 + GFP_KERNEL); 322 + if (!local_apqns) 323 + return -ENOMEM; 324 + rc = ep11_apqns4type(subtype, NULL, NULL, 0, 325 + local_apqns, &nr_apqns); 326 + if (rc) 327 + goto out; 328 + apqns = local_apqns; 329 + } 330 + 331 + for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 332 + rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, 333 + keybitsize, flags, 334 + keybuf, keybuflen, subtype); 335 + } 336 + 337 + out: 338 + kfree(local_apqns); 339 + pr_debug("rc=%d\n", rc); 340 + return rc; 341 + } 342 + 343 + /* 344 + * Generate EP11 secure key with given clear key value. 345 + * As of now only EP11 AES secure keys are supported. 
346 + * keytype is one of the PKEY_KEYTYPE_* constants, 347 + * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES 348 + * or 0 (assumes PKEY_TYPE_EP11_AES then). 349 + * keybitsize is the bit size of the key (may be 0 for 350 + * keytype PKEY_KEYTYPE_AES_*). 351 + */ 352 + static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, 353 + u32 keytype, u32 subtype, 354 + u32 keybitsize, u32 flags, 355 + const u8 *clrkey, u32 clrkeylen, 356 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 357 + { 358 + struct pkey_apqn *local_apqns = NULL; 359 + int i, len, rc; 360 + 361 + /* check keytype, subtype, clrkeylen, keybitsize */ 362 + switch (keytype) { 363 + case PKEY_KEYTYPE_AES_128: 364 + case PKEY_KEYTYPE_AES_192: 365 + case PKEY_KEYTYPE_AES_256: 366 + len = pkey_keytype_aes_to_size(keytype); 367 + if (keybitsize && keybitsize != 8 * len) { 368 + PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n", 369 + __func__, keybitsize); 370 + return -EINVAL; 371 + } 372 + keybitsize = 8 * len; 373 + if (clrkeylen != len) { 374 + PKEY_DBF_ERR("%s invalid clear key len %d != %d\n", 375 + __func__, clrkeylen, len); 376 + return -EINVAL; 377 + } 378 + switch (subtype) { 379 + case PKEY_TYPE_EP11: 380 + case PKEY_TYPE_EP11_AES: 381 + break; 382 + default: 383 + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", 384 + __func__, subtype); 385 + return -EINVAL; 386 + } 387 + break; 388 + default: 389 + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", 390 + __func__, keytype); 391 + return -EINVAL; 392 + } 393 + 394 + zcrypt_wait_api_operational(); 395 + 396 + if (!apqns || (nr_apqns == 1 && 397 + apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 398 + nr_apqns = MAXAPQNSINLIST; 399 + local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 400 + GFP_KERNEL); 401 + if (!local_apqns) 402 + return -ENOMEM; 403 + rc = ep11_apqns4type(subtype, NULL, NULL, 0, 404 + local_apqns, &nr_apqns); 405 + if (rc) 406 + goto out; 407 + apqns = local_apqns; 408 + } 409 + 410 
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 411 + rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, 412 + keybitsize, flags, clrkey, 413 + keybuf, keybuflen, subtype); 414 + } 415 + 416 + out: 417 + kfree(local_apqns); 418 + pr_debug("rc=%d\n", rc); 419 + return rc; 420 + } 421 + 422 + static int ep11_verifykey(const u8 *key, u32 keylen, 423 + u16 *card, u16 *dom, 424 + u32 *keytype, u32 *keybitsize, u32 *flags) 425 + { 426 + struct keytoken_header *hdr = (struct keytoken_header *)key; 427 + u32 nr_apqns, *apqns = NULL; 428 + int rc; 429 + 430 + if (keylen < sizeof(*hdr)) 431 + return -EINVAL; 432 + 433 + zcrypt_wait_api_operational(); 434 + 435 + if (hdr->type == TOKTYPE_NON_CCA && 436 + hdr->version == TOKVER_EP11_AES) { 437 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 438 + int api; 439 + 440 + rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1); 441 + if (rc) 442 + goto out; 443 + *keytype = PKEY_TYPE_EP11; 444 + *keybitsize = kb->head.bitlen; 445 + 446 + api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 447 + rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, 448 + ZCRYPT_CEX7, api, 449 + ep11_kb_wkvp(key, keylen)); 450 + if (rc) 451 + goto out; 452 + 453 + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 454 + 455 + *card = ((struct pkey_apqn *)apqns)->card; 456 + *dom = ((struct pkey_apqn *)apqns)->domain; 457 + 458 + } else if (hdr->type == TOKTYPE_NON_CCA && 459 + hdr->version == TOKVER_EP11_AES_WITH_HEADER) { 460 + struct ep11kblob_header *kh = (struct ep11kblob_header *)key; 461 + int api; 462 + 463 + rc = ep11_check_aes_key_with_hdr(pkey_dbf_info, 464 + 3, key, keylen, 1); 465 + if (rc) 466 + goto out; 467 + *keytype = PKEY_TYPE_EP11_AES; 468 + *keybitsize = kh->bitlen; 469 + 470 + api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 471 + rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, 472 + ZCRYPT_CEX7, api, 473 + ep11_kb_wkvp(key, keylen)); 474 + if (rc) 475 + goto out; 476 + 477 + *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 478 + 479 + *card = ((struct pkey_apqn *)apqns)->card; 480 + *dom = ((struct pkey_apqn *)apqns)->domain; 481 + 482 + } else { 483 + /* unknown/unsupported key blob */ 484 + rc = -EINVAL; 485 + } 486 + 487 + out: 488 + kfree(apqns); 489 + pr_debug("rc=%d\n", rc); 490 + return rc; 491 + } 492 + 493 + /* 494 + * This function provides an alternate but usually slow way 495 + * to convert a 'clear key token' with AES key material into 496 + * a protected key. That is done via an intermediate step 497 + * which creates an EP11 AES secure key first and then derives 498 + * the protected key from this secure key. 499 + */ 500 + static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, 501 + size_t nr_apqns, 502 + const u8 *key, u32 keylen, 503 + u8 *protkey, u32 *protkeylen, 504 + u32 *protkeytype) 505 + { 506 + const struct keytoken_header *hdr = (const struct keytoken_header *)key; 507 + const struct clearkeytoken *t = (const struct clearkeytoken *)key; 508 + u32 tmplen, keysize = 0; 509 + u8 *tmpbuf; 510 + int i, rc; 511 + 512 + if (keylen < sizeof(*hdr)) 513 + return -EINVAL; 514 + 515 + if (hdr->type == TOKTYPE_NON_CCA && 516 + hdr->version == TOKVER_CLEAR_KEY) 517 + keysize = pkey_keytype_aes_to_size(t->keytype); 518 + if (!keysize || t->len != keysize) 519 + return -EINVAL; 520 + 521 + /* alloc tmp key buffer */ 522 + tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); 523 + if (!tmpbuf) 524 + return -ENOMEM; 525 + 526 + /* try two times in case of failure */ 527 + for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { 528 + tmplen = MAXEP11AESKEYBLOBSIZE; 529 + rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, 530 + 8 * keysize, 0, t->clearkey, t->len, 531 + tmpbuf, &tmplen, NULL); 532 + pr_debug("ep11_clr2key()=%d\n", rc); 533 + if (rc) 534 
+ continue; 535 + rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, 536 + protkey, protkeylen, protkeytype); 537 + pr_debug("ep11_key2protkey()=%d\n", rc); 538 + } 539 + 540 + kfree(tmpbuf); 541 + pr_debug("rc=%d\n", rc); 542 + return rc; 543 + } 544 + 545 + static struct pkey_handler ep11_handler = { 546 + .module = THIS_MODULE, 547 + .name = "PKEY EP11 handler", 548 + .is_supported_key = is_ep11_key, 549 + .is_supported_keytype = is_ep11_keytype, 550 + .key_to_protkey = ep11_key2protkey, 551 + .slowpath_key_to_protkey = ep11_slowpath_key2protkey, 552 + .gen_key = ep11_gen_key, 553 + .clr_to_key = ep11_clr2key, 554 + .verify_key = ep11_verifykey, 555 + .apqns_for_key = ep11_apqns4key, 556 + .apqns_for_keytype = ep11_apqns4type, 557 + }; 558 + 559 + /* 560 + * Module init 561 + */ 562 + static int __init pkey_ep11_init(void) 563 + { 564 + /* register this module as pkey handler for all the ep11 stuff */ 565 + return pkey_handler_register(&ep11_handler); 566 + } 567 + 568 + /* 569 + * Module exit 570 + */ 571 + static void __exit pkey_ep11_exit(void) 572 + { 573 + /* unregister this module as pkey handler */ 574 + pkey_handler_unregister(&ep11_handler); 575 + } 576 + 577 + module_init(pkey_ep11_init); 578 + module_exit(pkey_ep11_exit);
+557
drivers/s390/crypto/pkey_pckmo.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * pkey pckmo specific code 4 + * 5 + * Copyright IBM Corp. 2024 6 + */ 7 + 8 + #define KMSG_COMPONENT "pkey" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/module.h> 13 + #include <linux/cpufeature.h> 14 + #include <asm/cpacf.h> 15 + #include <crypto/aes.h> 16 + #include <linux/random.h> 17 + 18 + #include "zcrypt_api.h" 19 + #include "zcrypt_ccamisc.h" 20 + #include "pkey_base.h" 21 + 22 + MODULE_LICENSE("GPL"); 23 + MODULE_AUTHOR("IBM Corporation"); 24 + MODULE_DESCRIPTION("s390 protected key PCKMO handler"); 25 + 26 + /* 27 + * Check key blob for known and supported here. 28 + */ 29 + static bool is_pckmo_key(const u8 *key, u32 keylen) 30 + { 31 + struct keytoken_header *hdr = (struct keytoken_header *)key; 32 + struct clearkeytoken *t = (struct clearkeytoken *)key; 33 + 34 + if (keylen < sizeof(*hdr)) 35 + return false; 36 + 37 + switch (hdr->type) { 38 + case TOKTYPE_NON_CCA: 39 + switch (hdr->version) { 40 + case TOKVER_CLEAR_KEY: 41 + switch (t->keytype) { 42 + case PKEY_KEYTYPE_AES_128: 43 + case PKEY_KEYTYPE_AES_192: 44 + case PKEY_KEYTYPE_AES_256: 45 + case PKEY_KEYTYPE_ECC_P256: 46 + case PKEY_KEYTYPE_ECC_P384: 47 + case PKEY_KEYTYPE_ECC_P521: 48 + case PKEY_KEYTYPE_ECC_ED25519: 49 + case PKEY_KEYTYPE_ECC_ED448: 50 + case PKEY_KEYTYPE_AES_XTS_128: 51 + case PKEY_KEYTYPE_AES_XTS_256: 52 + case PKEY_KEYTYPE_HMAC_512: 53 + case PKEY_KEYTYPE_HMAC_1024: 54 + return true; 55 + default: 56 + return false; 57 + } 58 + case TOKVER_PROTECTED_KEY: 59 + return true; 60 + default: 61 + return false; 62 + } 63 + default: 64 + return false; 65 + } 66 + } 67 + 68 + static bool is_pckmo_keytype(enum pkey_key_type keytype) 69 + { 70 + switch (keytype) { 71 + case PKEY_TYPE_PROTKEY: 72 + return true; 73 + default: 74 + return false; 75 + } 76 + } 77 + 78 + /* 79 + * Create a protected key from a clear key value via PCKMO instruction. 
80 + */ 81 + static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen, 82 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 83 + { 84 + /* mask of available pckmo subfunctions */ 85 + static cpacf_mask_t pckmo_functions; 86 + 87 + int keysize, rc = -EINVAL; 88 + u8 paramblock[160]; 89 + u32 pkeytype; 90 + long fc; 91 + 92 + switch (keytype) { 93 + case PKEY_KEYTYPE_AES_128: 94 + /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ 95 + keysize = 16; 96 + pkeytype = keytype; 97 + fc = CPACF_PCKMO_ENC_AES_128_KEY; 98 + break; 99 + case PKEY_KEYTYPE_AES_192: 100 + /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ 101 + keysize = 24; 102 + pkeytype = keytype; 103 + fc = CPACF_PCKMO_ENC_AES_192_KEY; 104 + break; 105 + case PKEY_KEYTYPE_AES_256: 106 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 107 + keysize = 32; 108 + pkeytype = keytype; 109 + fc = CPACF_PCKMO_ENC_AES_256_KEY; 110 + break; 111 + case PKEY_KEYTYPE_ECC_P256: 112 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 113 + keysize = 32; 114 + pkeytype = PKEY_KEYTYPE_ECC; 115 + fc = CPACF_PCKMO_ENC_ECC_P256_KEY; 116 + break; 117 + case PKEY_KEYTYPE_ECC_P384: 118 + /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ 119 + keysize = 48; 120 + pkeytype = PKEY_KEYTYPE_ECC; 121 + fc = CPACF_PCKMO_ENC_ECC_P384_KEY; 122 + break; 123 + case PKEY_KEYTYPE_ECC_P521: 124 + /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ 125 + keysize = 80; 126 + pkeytype = PKEY_KEYTYPE_ECC; 127 + fc = CPACF_PCKMO_ENC_ECC_P521_KEY; 128 + break; 129 + case PKEY_KEYTYPE_ECC_ED25519: 130 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 131 + keysize = 32; 132 + pkeytype = PKEY_KEYTYPE_ECC; 133 + fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; 134 + break; 135 + case PKEY_KEYTYPE_ECC_ED448: 136 + /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ 137 + keysize = 64; 138 + pkeytype = PKEY_KEYTYPE_ECC; 139 + fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; 140 + break; 141 + case PKEY_KEYTYPE_AES_XTS_128: 142 + /* 2x16 byte 
keys, 32 byte aes wkvp, total 64 bytes */ 143 + keysize = 32; 144 + pkeytype = PKEY_KEYTYPE_AES_XTS_128; 145 + fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY; 146 + break; 147 + case PKEY_KEYTYPE_AES_XTS_256: 148 + /* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */ 149 + keysize = 64; 150 + pkeytype = PKEY_KEYTYPE_AES_XTS_256; 151 + fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY; 152 + break; 153 + case PKEY_KEYTYPE_HMAC_512: 154 + /* 64 byte key, 32 byte aes wkvp, total 96 bytes */ 155 + keysize = 64; 156 + pkeytype = PKEY_KEYTYPE_HMAC_512; 157 + fc = CPACF_PCKMO_ENC_HMAC_512_KEY; 158 + break; 159 + case PKEY_KEYTYPE_HMAC_1024: 160 + /* 128 byte key, 32 byte aes wkvp, total 160 bytes */ 161 + keysize = 128; 162 + pkeytype = PKEY_KEYTYPE_HMAC_1024; 163 + fc = CPACF_PCKMO_ENC_HMAC_1024_KEY; 164 + break; 165 + default: 166 + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", 167 + __func__, keytype); 168 + goto out; 169 + } 170 + 171 + if (clrkeylen && clrkeylen < keysize) { 172 + PKEY_DBF_ERR("%s clear key size too small: %u < %d\n", 173 + __func__, clrkeylen, keysize); 174 + goto out; 175 + } 176 + if (*protkeylen < keysize + AES_WK_VP_SIZE) { 177 + PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n", 178 + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); 179 + goto out; 180 + } 181 + 182 + /* Did we already check for PCKMO ? 
*/ 183 + if (!pckmo_functions.bytes[0]) { 184 + /* no, so check now */ 185 + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) { 186 + PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__); 187 + rc = -ENODEV; 188 + goto out; 189 + } 190 + } 191 + /* check for the pckmo subfunction we need now */ 192 + if (!cpacf_test_func(&pckmo_functions, fc)) { 193 + PKEY_DBF_ERR("%s pckmo functions not available\n", __func__); 194 + rc = -ENODEV; 195 + goto out; 196 + } 197 + 198 + /* prepare param block */ 199 + memset(paramblock, 0, sizeof(paramblock)); 200 + memcpy(paramblock, clrkey, keysize); 201 + 202 + /* call the pckmo instruction */ 203 + cpacf_pckmo(fc, paramblock); 204 + 205 + /* copy created protected key to key buffer including the wkvp block */ 206 + *protkeylen = keysize + AES_WK_VP_SIZE; 207 + memcpy(protkey, paramblock, *protkeylen); 208 + *protkeytype = pkeytype; 209 + 210 + rc = 0; 211 + 212 + out: 213 + pr_debug("rc=%d\n", rc); 214 + return rc; 215 + } 216 + 217 + /* 218 + * Verify a raw protected key blob. 219 + * Currently only AES protected keys are supported. 
220 + */ 221 + static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen, 222 + u32 protkeytype) 223 + { 224 + struct { 225 + u8 iv[AES_BLOCK_SIZE]; 226 + u8 key[MAXPROTKEYSIZE]; 227 + } param; 228 + u8 null_msg[AES_BLOCK_SIZE]; 229 + u8 dest_buf[AES_BLOCK_SIZE]; 230 + unsigned int k, pkeylen; 231 + unsigned long fc; 232 + int rc = -EINVAL; 233 + 234 + switch (protkeytype) { 235 + case PKEY_KEYTYPE_AES_128: 236 + pkeylen = 16 + AES_WK_VP_SIZE; 237 + fc = CPACF_KMC_PAES_128; 238 + break; 239 + case PKEY_KEYTYPE_AES_192: 240 + pkeylen = 24 + AES_WK_VP_SIZE; 241 + fc = CPACF_KMC_PAES_192; 242 + break; 243 + case PKEY_KEYTYPE_AES_256: 244 + pkeylen = 32 + AES_WK_VP_SIZE; 245 + fc = CPACF_KMC_PAES_256; 246 + break; 247 + default: 248 + PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__, 249 + protkeytype); 250 + goto out; 251 + } 252 + if (protkeylen != pkeylen) { 253 + PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n", 254 + __func__, protkeylen, protkeytype); 255 + goto out; 256 + } 257 + 258 + memset(null_msg, 0, sizeof(null_msg)); 259 + 260 + memset(param.iv, 0, sizeof(param.iv)); 261 + memcpy(param.key, protkey, protkeylen); 262 + 263 + k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, 264 + sizeof(null_msg)); 265 + if (k != sizeof(null_msg)) { 266 + PKEY_DBF_ERR("%s protected key is not valid\n", __func__); 267 + rc = -EKEYREJECTED; 268 + goto out; 269 + } 270 + 271 + rc = 0; 272 + 273 + out: 274 + pr_debug("rc=%d\n", rc); 275 + return rc; 276 + } 277 + 278 + static int pckmo_key2protkey(const u8 *key, u32 keylen, 279 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 280 + { 281 + struct keytoken_header *hdr = (struct keytoken_header *)key; 282 + int rc = -EINVAL; 283 + 284 + if (keylen < sizeof(*hdr)) 285 + return -EINVAL; 286 + if (hdr->type != TOKTYPE_NON_CCA) 287 + return -EINVAL; 288 + 289 + switch (hdr->version) { 290 + case TOKVER_PROTECTED_KEY: { 291 + struct protkeytoken *t = (struct protkeytoken *)key; 292 + 
293 + if (keylen < sizeof(*t)) 294 + goto out; 295 + switch (t->keytype) { 296 + case PKEY_KEYTYPE_AES_128: 297 + case PKEY_KEYTYPE_AES_192: 298 + case PKEY_KEYTYPE_AES_256: 299 + if (keylen != sizeof(struct protaeskeytoken)) 300 + goto out; 301 + rc = pckmo_verify_protkey(t->protkey, t->len, 302 + t->keytype); 303 + if (rc) 304 + goto out; 305 + break; 306 + case PKEY_KEYTYPE_AES_XTS_128: 307 + if (t->len != 64 || keylen != sizeof(*t) + t->len) 308 + goto out; 309 + break; 310 + case PKEY_KEYTYPE_AES_XTS_256: 311 + case PKEY_KEYTYPE_HMAC_512: 312 + if (t->len != 96 || keylen != sizeof(*t) + t->len) 313 + goto out; 314 + break; 315 + case PKEY_KEYTYPE_HMAC_1024: 316 + if (t->len != 160 || keylen != sizeof(*t) + t->len) 317 + goto out; 318 + break; 319 + default: 320 + PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n", 321 + __func__, t->keytype); 322 + goto out; 323 + } 324 + memcpy(protkey, t->protkey, t->len); 325 + *protkeylen = t->len; 326 + *protkeytype = t->keytype; 327 + break; 328 + } 329 + case TOKVER_CLEAR_KEY: { 330 + struct clearkeytoken *t = (struct clearkeytoken *)key; 331 + u32 keysize = 0; 332 + 333 + if (keylen < sizeof(struct clearkeytoken) || 334 + keylen != sizeof(*t) + t->len) 335 + goto out; 336 + switch (t->keytype) { 337 + case PKEY_KEYTYPE_AES_128: 338 + case PKEY_KEYTYPE_AES_192: 339 + case PKEY_KEYTYPE_AES_256: 340 + keysize = pkey_keytype_aes_to_size(t->keytype); 341 + break; 342 + case PKEY_KEYTYPE_ECC_P256: 343 + keysize = 32; 344 + break; 345 + case PKEY_KEYTYPE_ECC_P384: 346 + keysize = 48; 347 + break; 348 + case PKEY_KEYTYPE_ECC_P521: 349 + keysize = 80; 350 + break; 351 + case PKEY_KEYTYPE_ECC_ED25519: 352 + keysize = 32; 353 + break; 354 + case PKEY_KEYTYPE_ECC_ED448: 355 + keysize = 64; 356 + break; 357 + case PKEY_KEYTYPE_AES_XTS_128: 358 + keysize = 32; 359 + break; 360 + case PKEY_KEYTYPE_AES_XTS_256: 361 + keysize = 64; 362 + break; 363 + case PKEY_KEYTYPE_HMAC_512: 364 + keysize = 64; 365 + break; 366 + case 
PKEY_KEYTYPE_HMAC_1024: 367 + keysize = 128; 368 + break; 369 + default: 370 + break; 371 + } 372 + if (!keysize) { 373 + PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n", 374 + __func__, t->keytype); 375 + goto out; 376 + } 377 + if (t->len != keysize) { 378 + PKEY_DBF_ERR("%s clear key token: invalid key len %u\n", 379 + __func__, t->len); 380 + goto out; 381 + } 382 + rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len, 383 + protkey, protkeylen, protkeytype); 384 + break; 385 + } 386 + default: 387 + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", 388 + __func__, hdr->version); 389 + break; 390 + } 391 + 392 + out: 393 + pr_debug("rc=%d\n", rc); 394 + return rc; 395 + } 396 + 397 + /* 398 + * Generate a random protected key. 399 + * Currently only the generation of AES protected keys 400 + * is supported. 401 + */ 402 + static int pckmo_gen_protkey(u32 keytype, u32 subtype, 403 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 404 + { 405 + u8 clrkey[128]; 406 + int keysize; 407 + int rc; 408 + 409 + switch (keytype) { 410 + case PKEY_KEYTYPE_AES_128: 411 + case PKEY_KEYTYPE_AES_192: 412 + case PKEY_KEYTYPE_AES_256: 413 + keysize = pkey_keytype_aes_to_size(keytype); 414 + break; 415 + case PKEY_KEYTYPE_AES_XTS_128: 416 + keysize = 32; 417 + break; 418 + case PKEY_KEYTYPE_AES_XTS_256: 419 + case PKEY_KEYTYPE_HMAC_512: 420 + keysize = 64; 421 + break; 422 + case PKEY_KEYTYPE_HMAC_1024: 423 + keysize = 128; 424 + break; 425 + default: 426 + PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", 427 + __func__, keytype); 428 + return -EINVAL; 429 + } 430 + if (subtype != PKEY_TYPE_PROTKEY) { 431 + PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n", 432 + __func__, subtype); 433 + return -EINVAL; 434 + } 435 + 436 + /* generate a dummy random clear key */ 437 + get_random_bytes(clrkey, keysize); 438 + 439 + /* convert it to a dummy protected key */ 440 + rc = pckmo_clr2protkey(keytype, clrkey, keysize, 441 + protkey, protkeylen, protkeytype); 442 + if 
(rc) 443 + goto out; 444 + 445 + /* replace the key part of the protected key with random bytes */ 446 + get_random_bytes(protkey, keysize); 447 + 448 + out: 449 + pr_debug("rc=%d\n", rc); 450 + return rc; 451 + } 452 + 453 + /* 454 + * Verify a protected key token blob. 455 + * Currently only AES protected keys are supported. 456 + */ 457 + static int pckmo_verify_key(const u8 *key, u32 keylen) 458 + { 459 + struct keytoken_header *hdr = (struct keytoken_header *)key; 460 + int rc = -EINVAL; 461 + 462 + if (keylen < sizeof(*hdr)) 463 + return -EINVAL; 464 + if (hdr->type != TOKTYPE_NON_CCA) 465 + return -EINVAL; 466 + 467 + switch (hdr->version) { 468 + case TOKVER_PROTECTED_KEY: { 469 + struct protaeskeytoken *t; 470 + 471 + if (keylen != sizeof(struct protaeskeytoken)) 472 + goto out; 473 + t = (struct protaeskeytoken *)key; 474 + rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype); 475 + break; 476 + } 477 + default: 478 + PKEY_DBF_ERR("%s unknown non-CCA token version %d\n", 479 + __func__, hdr->version); 480 + break; 481 + } 482 + 483 + out: 484 + pr_debug("rc=%d\n", rc); 485 + return rc; 486 + } 487 + 488 + /* 489 + * Wrapper functions used for the pkey handler struct 490 + */ 491 + 492 + static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, 493 + size_t _nr_apqns, 494 + const u8 *key, u32 keylen, 495 + u8 *protkey, u32 *protkeylen, u32 *keyinfo) 496 + { 497 + return pckmo_key2protkey(key, keylen, 498 + protkey, protkeylen, keyinfo); 499 + } 500 + 501 + static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, 502 + u32 keytype, u32 keysubtype, 503 + u32 _keybitsize, u32 _flags, 504 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 505 + { 506 + return pckmo_gen_protkey(keytype, keysubtype, 507 + keybuf, keybuflen, keyinfo); 508 + } 509 + 510 + static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, 511 + u16 *_card, u16 *_dom, 512 + u32 *_keytype, u32 *_keybitsize, u32 *_flags) 513 + { 514 + return pckmo_verify_key(key, 
keylen); 515 + } 516 + 517 + static struct pkey_handler pckmo_handler = { 518 + .module = THIS_MODULE, 519 + .name = "PKEY PCKMO handler", 520 + .is_supported_key = is_pckmo_key, 521 + .is_supported_keytype = is_pckmo_keytype, 522 + .key_to_protkey = pkey_pckmo_key2protkey, 523 + .gen_key = pkey_pckmo_gen_key, 524 + .verify_key = pkey_pckmo_verifykey, 525 + }; 526 + 527 + /* 528 + * Module init 529 + */ 530 + static int __init pkey_pckmo_init(void) 531 + { 532 + cpacf_mask_t func_mask; 533 + 534 + /* 535 + * The pckmo instruction should be available - even if we don't 536 + * actually invoke it. This instruction comes with MSA 3 which 537 + * is also the minimum level for the kmc instructions which 538 + * are able to work with protected keys. 539 + */ 540 + if (!cpacf_query(CPACF_PCKMO, &func_mask)) 541 + return -ENODEV; 542 + 543 + /* register this module as pkey handler for all the pckmo stuff */ 544 + return pkey_handler_register(&pckmo_handler); 545 + } 546 + 547 + /* 548 + * Module exit 549 + */ 550 + static void __exit pkey_pckmo_exit(void) 551 + { 552 + /* unregister this module as pkey handler */ 553 + pkey_handler_unregister(&pckmo_handler); 554 + } 555 + 556 + module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init); 557 + module_exit(pkey_pckmo_exit);
+648
drivers/s390/crypto/pkey_sysfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * pkey module sysfs related functions
 *
 * Copyright IBM Corp. 2024
 */

#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/sysfs.h>

#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

#include "pkey_base.h"

/*
 * Wrapper around pkey_handler_gen_key() which deals with the
 * ENODEV return code and then tries to enforce a pkey handler
 * module load.
 */
static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
				    u32 keybitsize, u32 flags,
				    u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
{
	int rc;

	rc = pkey_handler_gen_key(NULL, 0,
				  keytype, keysubtype,
				  keybitsize, flags,
				  keybuf, keybuflen, keyinfo);
	if (rc == -ENODEV) {
		/* no handler felt responsible - request modules and retry */
		pkey_handler_request_modules();
		rc = pkey_handler_gen_key(NULL, 0,
					  keytype, keysubtype,
					  keybitsize, flags,
					  keybuf, keybuflen, keyinfo);
	}

	return rc;
}

/*
 * Sysfs attribute read function for all protected key binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * protected key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 *
 * On success the buffer is filled with one (or, for is_xts, two
 * independently generated) struct protaeskeytoken blob(s) and the
 * number of bytes written is returned.
 */
static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
					  loff_t off, size_t count)
{
	struct protaeskeytoken protkeytoken;
	struct pkey_protkey protkey;
	int rc;

	if (off != 0 || count < sizeof(protkeytoken))
		return -EINVAL;
	if (is_xts)
		if (count < 2 * sizeof(protkeytoken))
			return -EINVAL;

	/* build the fixed token header around the generated key */
	memset(&protkeytoken, 0, sizeof(protkeytoken));
	protkeytoken.type = TOKTYPE_NON_CCA;
	protkeytoken.version = TOKVER_PROTECTED_KEY;
	protkeytoken.keytype = keytype;

	protkey.len = sizeof(protkey.protkey);
	rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
				      protkey.protkey, &protkey.len,
				      &protkey.type);
	if (rc)
		return rc;

	protkeytoken.len = protkey.len;
	memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);

	memcpy(buf, &protkeytoken, sizeof(protkeytoken));

	if (is_xts) {
		/* xts needs a second protected key, reuse protkey struct */
		protkey.len = sizeof(protkey.protkey);
		rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
					      protkey.protkey, &protkey.len,
					      &protkey.type);
		if (rc)
			return rc;

		protkeytoken.len = protkey.len;
		memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);

		memcpy(buf + sizeof(protkeytoken), &protkeytoken,
		       sizeof(protkeytoken));

		return 2 * sizeof(protkeytoken);
	}

	return sizeof(protkeytoken);
}

/*
 * Sysfs attribute read function for the AES XTS prot key binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * protected key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf,
					      loff_t off, size_t count)
{
	/* token header is written directly into the sysfs buffer */
	struct protkeytoken *t = (struct protkeytoken *)buf;
	u32 protlen, prottype;
	int rc;

	switch (keytype) {
	case PKEY_KEYTYPE_AES_XTS_128:
		protlen = 64;
		break;
	case PKEY_KEYTYPE_AES_XTS_256:
		protlen = 96;
		break;
	default:
		return -EINVAL;
	}

	if (off != 0 || count < sizeof(*t) + protlen)
		return -EINVAL;

	memset(t, 0, sizeof(*t) + protlen);
	t->type = TOKTYPE_NON_CCA;
	t->version = TOKVER_PROTECTED_KEY;
	t->keytype = keytype;

	/* the key material is generated directly behind the header */
	rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
				      t->protkey, &protlen, &prottype);
	if (rc)
		return rc;

	t->len = protlen;

	return sizeof(*t) + protlen;
}

/*
 * Sysfs attribute read function for the HMAC prot key binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * protected key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
					   loff_t off, size_t count)
{
	/* token header is written directly into the sysfs buffer */
	struct protkeytoken *t = (struct protkeytoken *)buf;
	u32 protlen, prottype;
	int rc;

	switch (keytype) {
	case PKEY_KEYTYPE_HMAC_512:
		protlen = 96;
		break;
	case PKEY_KEYTYPE_HMAC_1024:
		protlen = 160;
		break;
	default:
		return -EINVAL;
	}

	if (off != 0 || count < sizeof(*t) + protlen)
		return -EINVAL;

	memset(t, 0, sizeof(*t) + protlen);
	t->type = TOKTYPE_NON_CCA;
	t->version = TOKVER_PROTECTED_KEY;
	t->keytype = keytype;

	rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
				      t->protkey, &protlen, &prottype);
	if (rc)
		return rc;

	t->len = protlen;

	return sizeof(*t) + protlen;
}

/* Per-attribute sysfs read callbacks, one per exposed key flavor. */

static ssize_t protkey_aes_128_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
					  off, count);
}

static ssize_t protkey_aes_192_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
					  off, count);
}

static ssize_t protkey_aes_256_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
					  off, count);
}

static ssize_t protkey_aes_128_xts_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
					  off, count);
}

static ssize_t protkey_aes_256_xts_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
					  off, count);
}

static ssize_t protkey_aes_xts_128_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128,
					      buf, off, count);
}

static ssize_t protkey_aes_xts_256_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256,
					      buf, off, count);
}

static ssize_t protkey_hmac_512_read(struct file *filp,
				     struct kobject *kobj,
				     struct bin_attribute *attr,
				     char *buf, loff_t off,
				     size_t count)
{
	return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512,
					   buf, off, count);
}

static ssize_t protkey_hmac_1024_read(struct file *filp,
				      struct kobject *kobj,
				      struct bin_attribute *attr,
				      char *buf, loff_t off,
				      size_t count)
{
	return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024,
					   buf, off, count);
}

static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);

static struct bin_attribute *protkey_attrs[] = {
	&bin_attr_protkey_aes_128,
	&bin_attr_protkey_aes_192,
	&bin_attr_protkey_aes_256,
	&bin_attr_protkey_aes_128_xts,
	&bin_attr_protkey_aes_256_xts,
	&bin_attr_protkey_aes_xts_128,
	&bin_attr_protkey_aes_xts_256,
	&bin_attr_protkey_hmac_512,
	&bin_attr_protkey_hmac_1024,
	NULL
};

static struct attribute_group protkey_attr_group = {
	.name	   = "protkey",
	.bin_attrs = protkey_attrs,
};

/*
 * Sysfs attribute read function for all secure key ccadata binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * protected key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
					  loff_t off, size_t count)
{
	/* the CCA data key token is generated directly into the buffer */
	struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
	u32 buflen;
	int rc;

	if (off != 0 || count < sizeof(struct secaeskeytoken))
		return -EINVAL;
	if (is_xts)
		if (count < 2 * sizeof(struct secaeskeytoken))
			return -EINVAL;

	buflen = sizeof(seckey->seckey);
	rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
				      seckey->seckey, &buflen, NULL);
	if (rc)
		return rc;

	if (is_xts) {
		/* xts needs a second key blob right behind the first one */
		seckey++;
		buflen = sizeof(seckey->seckey);
		rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
					      seckey->seckey, &buflen, NULL);
		if (rc)
			return rc;

		return 2 * sizeof(struct secaeskeytoken);
	}

	return sizeof(struct secaeskeytoken);
}

static ssize_t ccadata_aes_128_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
					  off, count);
}

static ssize_t ccadata_aes_192_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
					  off, count);
}

static ssize_t ccadata_aes_256_read(struct file *filp,
				    struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t off,
				    size_t count)
{
	return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
					  off, count);
}

static ssize_t ccadata_aes_128_xts_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
					  off, count);
}

static ssize_t ccadata_aes_256_xts_read(struct file *filp,
					struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off,
					size_t count)
{
	return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
					  off, count);
}

static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));

static struct bin_attribute *ccadata_attrs[] = {
	&bin_attr_ccadata_aes_128,
	&bin_attr_ccadata_aes_192,
	&bin_attr_ccadata_aes_256,
	&bin_attr_ccadata_aes_128_xts,
	&bin_attr_ccadata_aes_256_xts,
	NULL
};

static struct attribute_group ccadata_attr_group = {
	.name	   = "ccadata",
	.bin_attrs = ccadata_attrs,
};

/* fixed-size upper bound for a CCA AES cipher key token blob */
#define CCACIPHERTOKENSIZE	(sizeof(struct cipherkeytoken) + 80)

/*
 * Sysfs attribute read function for all secure key ccacipher binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * secure key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 */
static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
					    bool is_xts, char *buf, loff_t off,
					    size_t count)
{
	u32 keysize = CCACIPHERTOKENSIZE;
	int rc;

	if (off != 0 || count < CCACIPHERTOKENSIZE)
		return -EINVAL;
	if (is_xts)
		if (count < 2 * CCACIPHERTOKENSIZE)
			return -EINVAL;

	/* pad the full (possibly larger) read area with zeroes */
	memset(buf, 0, is_xts ? 2 * keysize : keysize);

	rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
				      PKEY_TYPE_CCA_CIPHER, keybits, 0,
				      buf, &keysize, NULL);
	if (rc)
		return rc;

	if (is_xts) {
		keysize = CCACIPHERTOKENSIZE;
		buf += CCACIPHERTOKENSIZE;
		rc = sys_pkey_handler_gen_key(
			pkey_aes_bitsize_to_keytype(keybits),
			PKEY_TYPE_CCA_CIPHER, keybits, 0,
			buf, &keysize, NULL);
		if (rc)
			return rc;
		return 2 * CCACIPHERTOKENSIZE;
	}

	return CCACIPHERTOKENSIZE;
}

static ssize_t ccacipher_aes_128_read(struct file *filp,
				      struct kobject *kobj,
				      struct bin_attribute *attr,
				      char *buf, loff_t off,
				      size_t count)
{
	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
					    off, count);
}

static ssize_t ccacipher_aes_192_read(struct file *filp,
				      struct kobject *kobj,
				      struct bin_attribute *attr,
				      char *buf, loff_t off,
				      size_t count)
{
	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
					    off, count);
}

static ssize_t ccacipher_aes_256_read(struct file *filp,
				      struct kobject *kobj,
				      struct bin_attribute *attr,
				      char *buf, loff_t off,
				      size_t count)
{
	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
					    off, count);
}

static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
					  struct kobject *kobj,
					  struct bin_attribute *attr,
					  char *buf, loff_t off,
					  size_t count)
{
	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
					    off, count);
}

static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
					  struct kobject *kobj,
					  struct bin_attribute *attr,
					  char *buf, loff_t off,
					  size_t count)
{
	return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
					    off, count);
}

static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);

static struct bin_attribute *ccacipher_attrs[] = {
	&bin_attr_ccacipher_aes_128,
	&bin_attr_ccacipher_aes_192,
	&bin_attr_ccacipher_aes_256,
	&bin_attr_ccacipher_aes_128_xts,
	&bin_attr_ccacipher_aes_256_xts,
	NULL
};

static struct attribute_group ccacipher_attr_group = {
	.name	   = "ccacipher",
	.bin_attrs = ccacipher_attrs,
};

/*
 * Sysfs attribute read function for all ep11 aes key binary attributes.
 * The implementation can not deal with partial reads, because a new random
 * secure key blob is generated with each read. In case of partial reads
 * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
 * This function and the sysfs attributes using it provide EP11 key blobs
 * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
 * 336 bytes.
 */
static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
				       bool is_xts, char *buf, loff_t off,
				       size_t count)
{
	u32 keysize = MAXEP11AESKEYBLOBSIZE;
	int rc;

	if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
		return -EINVAL;
	if (is_xts)
		if (count < 2 * MAXEP11AESKEYBLOBSIZE)
			return -EINVAL;

	/* pad the full (possibly larger) read area with zeroes */
	memset(buf, 0, is_xts ? 2 * keysize : keysize);

	rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
				      PKEY_TYPE_EP11_AES, keybits, 0,
				      buf, &keysize, NULL);
	if (rc)
		return rc;

	if (is_xts) {
		keysize = MAXEP11AESKEYBLOBSIZE;
		buf += MAXEP11AESKEYBLOBSIZE;
		rc = sys_pkey_handler_gen_key(
			pkey_aes_bitsize_to_keytype(keybits),
			PKEY_TYPE_EP11_AES, keybits, 0,
			buf, &keysize, NULL);
		if (rc)
			return rc;
		return 2 * MAXEP11AESKEYBLOBSIZE;
	}

	return MAXEP11AESKEYBLOBSIZE;
}

static ssize_t ep11_aes_128_read(struct file *filp,
				 struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t off,
				 size_t count)
{
	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
				       off, count);
}

static ssize_t ep11_aes_192_read(struct file *filp,
				 struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t off,
				 size_t count)
{
	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
				       off, count);
}

static ssize_t ep11_aes_256_read(struct file *filp,
				 struct kobject *kobj,
				 struct bin_attribute *attr,
				 char *buf, loff_t off,
				 size_t count)
{
	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
				       off, count);
}

static ssize_t ep11_aes_128_xts_read(struct file *filp,
				     struct kobject *kobj,
				     struct bin_attribute *attr,
				     char *buf, loff_t off,
				     size_t count)
{
	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
				       off, count);
}

static ssize_t ep11_aes_256_xts_read(struct file *filp,
				     struct kobject *kobj,
				     struct bin_attribute *attr,
				     char *buf, loff_t off,
				     size_t count)
{
	return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
				       off, count);
}

static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);

static struct bin_attribute *ep11_attrs[] = {
	&bin_attr_ep11_aes_128,
	&bin_attr_ep11_aes_192,
	&bin_attr_ep11_aes_256,
	&bin_attr_ep11_aes_128_xts,
	&bin_attr_ep11_aes_256_xts,
	NULL
};

static struct attribute_group ep11_attr_group = {
	.name	   = "ep11",
	.bin_attrs = ep11_attrs,
};

/* All attribute groups exported by the pkey module. */
const struct attribute_group *pkey_attr_groups[] = {
	&protkey_attr_group,
	&ccadata_attr_group,
	&ccacipher_attr_group,
	&ep11_attr_group,
	NULL,
};
+13 -16
drivers/s390/crypto/zcrypt_api.c
··· 715 715 spin_unlock(&zcrypt_list_lock); 716 716 717 717 if (!pref_zq) { 718 - pr_debug("%s no matching queue found => ENODEV\n", __func__); 718 + pr_debug("no matching queue found => ENODEV\n"); 719 719 rc = -ENODEV; 720 720 goto out; 721 721 } ··· 819 819 spin_unlock(&zcrypt_list_lock); 820 820 821 821 if (!pref_zq) { 822 - pr_debug("%s no matching queue found => ENODEV\n", __func__); 822 + pr_debug("no matching queue found => ENODEV\n"); 823 823 rc = -ENODEV; 824 824 goto out; 825 825 } ··· 940 940 spin_unlock(&zcrypt_list_lock); 941 941 942 942 if (!pref_zq) { 943 - pr_debug("%s no match for address %02x.%04x => ENODEV\n", 944 - __func__, xcrb->user_defined, *domain); 943 + pr_debug("no match for address %02x.%04x => ENODEV\n", 944 + xcrb->user_defined, *domain); 945 945 rc = -ENODEV; 946 946 goto out; 947 947 } ··· 991 991 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 992 992 rc = -EIO; 993 993 if (rc) 994 - pr_debug("%s rc=%d\n", __func__, rc); 994 + pr_debug("rc=%d\n", rc); 995 995 996 996 return rc; 997 997 } ··· 1138 1138 1139 1139 if (!pref_zq) { 1140 1140 if (targets && target_num == 1) { 1141 - pr_debug("%s no match for address %02x.%04x => ENODEV\n", 1142 - __func__, (int)targets->ap_id, 1143 - (int)targets->dom_id); 1141 + pr_debug("no match for address %02x.%04x => ENODEV\n", 1142 + (int)targets->ap_id, (int)targets->dom_id); 1144 1143 } else if (targets) { 1145 - pr_debug("%s no match for %d target addrs => ENODEV\n", 1146 - __func__, (int)target_num); 1144 + pr_debug("no match for %d target addrs => ENODEV\n", 1145 + (int)target_num); 1147 1146 } else { 1148 - pr_debug("%s no match for address ff.ffff => ENODEV\n", 1149 - __func__); 1147 + pr_debug("no match for address ff.ffff => ENODEV\n"); 1150 1148 } 1151 1149 rc = -ENODEV; 1152 1150 goto out_free; ··· 1193 1195 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1194 1196 rc = -EIO; 1195 1197 if (rc) 1196 - pr_debug("%s rc=%d\n", __func__, rc); 1198 + pr_debug("rc=%d\n", 
rc); 1197 1199 1198 1200 return rc; 1199 1201 } ··· 1245 1247 spin_unlock(&zcrypt_list_lock); 1246 1248 1247 1249 if (!pref_zq) { 1248 - pr_debug("%s no matching queue found => ENODEV\n", __func__); 1250 + pr_debug("no matching queue found => ENODEV\n"); 1249 1251 rc = -ENODEV; 1250 1252 goto out; 1251 1253 } ··· 2035 2037 break; 2036 2038 default: 2037 2039 /* other failure */ 2038 - pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n", 2039 - __func__, rc); 2040 + pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc); 2040 2041 break; 2041 2042 } 2042 2043 break;
+4 -4
drivers/s390/crypto/zcrypt_ccamisc.c
··· 172 172 * key token. Returns 0 on success or errno value on failure. 173 173 */ 174 174 int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, 175 - const u8 *token, size_t keysize, 175 + const u8 *token, u32 keysize, 176 176 int checkcpacfexport) 177 177 { 178 178 struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; ··· 187 187 } 188 188 if (t->len > keysize) { 189 189 if (dbg) 190 - DBF("%s token check failed, len %d > keysize %zu\n", 190 + DBF("%s token check failed, len %d > keysize %u\n", 191 191 __func__, (int)t->len, keysize); 192 192 return -EINVAL; 193 193 } ··· 737 737 * Generate (random) CCA AES CIPHER secure key. 738 738 */ 739 739 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 740 - u8 *keybuf, size_t *keybufsize) 740 + u8 *keybuf, u32 *keybufsize) 741 741 { 742 742 int rc; 743 743 u8 *mem, *ptr; ··· 1085 1085 * Build CCA AES CIPHER secure key with a given clear key value. 1086 1086 */ 1087 1087 int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, 1088 - const u8 *clrkey, u8 *keybuf, size_t *keybufsize) 1088 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize) 1089 1089 { 1090 1090 int rc; 1091 1091 u8 *token;
+3 -3
drivers/s390/crypto/zcrypt_ccamisc.h
··· 153 153 * key token. Returns 0 on success or errno value on failure. 154 154 */ 155 155 int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, 156 - const u8 *token, size_t keysize, 156 + const u8 *token, u32 keysize, 157 157 int checkcpacfexport); 158 158 159 159 /* ··· 178 178 * Generate (random) CCA AES CIPHER secure key. 179 179 */ 180 180 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 181 - u8 *keybuf, size_t *keybufsize); 181 + u8 *keybuf, u32 *keybufsize); 182 182 183 183 /* 184 184 * Derive proteced key from CCA AES cipher secure key. ··· 190 190 * Build CCA AES CIPHER secure key with a given clear key value. 191 191 */ 192 192 int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 193 - const u8 *clrkey, u8 *keybuf, size_t *keybufsize); 193 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize); 194 194 195 195 /* 196 196 * Derive proteced key from CCA ECC secure private key.
+14 -14
drivers/s390/crypto/zcrypt_ep11misc.c
··· 203 203 * For valid ep11 keyblobs, returns a reference to the wrappingkey verification 204 204 * pattern. Otherwise NULL. 205 205 */ 206 - const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen) 206 + const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen) 207 207 { 208 208 struct ep11keyblob *kb; 209 209 ··· 217 217 * Simple check if the key blob is a valid EP11 AES key blob with header. 218 218 */ 219 219 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, 220 - const u8 *key, size_t keylen, int checkcpacfexp) 220 + const u8 *key, u32 keylen, int checkcpacfexp) 221 221 { 222 222 struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 223 223 struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); ··· 225 225 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 226 226 227 227 if (keylen < sizeof(*hdr) + sizeof(*kb)) { 228 - DBF("%s key check failed, keylen %zu < %zu\n", 228 + DBF("%s key check failed, keylen %u < %zu\n", 229 229 __func__, keylen, sizeof(*hdr) + sizeof(*kb)); 230 230 return -EINVAL; 231 231 } ··· 250 250 } 251 251 if (hdr->len > keylen) { 252 252 if (dbg) 253 - DBF("%s key check failed, header len %d keylen %zu mismatch\n", 253 + DBF("%s key check failed, header len %d keylen %u mismatch\n", 254 254 __func__, (int)hdr->len, keylen); 255 255 return -EINVAL; 256 256 } ··· 284 284 * Simple check if the key blob is a valid EP11 ECC key blob with header. 285 285 */ 286 286 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, 287 - const u8 *key, size_t keylen, int checkcpacfexp) 287 + const u8 *key, u32 keylen, int checkcpacfexp) 288 288 { 289 289 struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 290 290 struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); ··· 292 292 #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 293 293 294 294 if (keylen < sizeof(*hdr) + sizeof(*kb)) { 295 - DBF("%s key check failed, keylen %zu < %zu\n", 295 + DBF("%s key check failed, keylen %u < %zu\n", 296 296 __func__, keylen, sizeof(*hdr) + sizeof(*kb)); 297 297 return -EINVAL; 298 298 } ··· 317 317 } 318 318 if (hdr->len > keylen) { 319 319 if (dbg) 320 - DBF("%s key check failed, header len %d keylen %zu mismatch\n", 320 + DBF("%s key check failed, header len %d keylen %u mismatch\n", 321 321 __func__, (int)hdr->len, keylen); 322 322 return -EINVAL; 323 323 } ··· 352 352 * the header in the session field (old style EP11 AES key). 353 353 */ 354 354 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, 355 - const u8 *key, size_t keylen, int checkcpacfexp) 355 + const u8 *key, u32 keylen, int checkcpacfexp) 356 356 { 357 357 struct ep11keyblob *kb = (struct ep11keyblob *)key; 358 358 359 359 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 360 360 361 361 if (keylen < sizeof(*kb)) { 362 - DBF("%s key check failed, keylen %zu < %zu\n", 362 + DBF("%s key check failed, keylen %u < %zu\n", 363 363 __func__, keylen, sizeof(*kb)); 364 364 return -EINVAL; 365 365 } ··· 378 378 } 379 379 if (kb->head.len > keylen) { 380 380 if (dbg) 381 - DBF("%s key check failed, header len %d keylen %zu mismatch\n", 381 + DBF("%s key check failed, header len %d keylen %u mismatch\n", 382 382 __func__, (int)kb->head.len, keylen); 383 383 return -EINVAL; 384 384 } ··· 932 932 } 933 933 934 934 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 935 - u8 *keybuf, size_t *keybufsize, u32 keybufver) 935 + u8 *keybuf, u32 *keybufsize, u32 keybufver) 936 936 { 937 937 struct ep11kblob_header *hdr; 938 938 size_t hdr_size, pl_size; ··· 1256 1256 const u8 *enckey, size_t enckeysize, 1257 1257 u32 mech, const u8 *iv, 1258 1258 u32 keybitsize, u32 keygenflags, 1259 - u8 *keybuf, size_t *keybufsize, 1259 + u8 *keybuf, u32 *keybufsize, 1260 1260 u8 
keybufver) 1261 1261 { 1262 1262 struct ep11kblob_header *hdr; ··· 1412 1412 } 1413 1413 1414 1414 int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 1415 - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, 1415 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, 1416 1416 u32 keytype) 1417 1417 { 1418 1418 int rc; ··· 1471 1471 EXPORT_SYMBOL(ep11_clr2keyblob); 1472 1472 1473 1473 int ep11_kblob2protkey(u16 card, u16 dom, 1474 - const u8 *keyblob, size_t keybloblen, 1474 + const u8 *keyblob, u32 keybloblen, 1475 1475 u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1476 1476 { 1477 1477 struct ep11kblob_header *hdr;
+7 -7
drivers/s390/crypto/zcrypt_ep11misc.h
··· 54 54 * For valid ep11 keyblobs, returns a reference to the wrappingkey verification 55 55 * pattern. Otherwise NULL. 56 56 */ 57 - const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen); 57 + const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen); 58 58 59 59 /* 60 60 * Simple check if the key blob is a valid EP11 AES key blob with header. ··· 63 63 * Returns 0 on success or errno value on failure. 64 64 */ 65 65 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, 66 - const u8 *key, size_t keylen, int checkcpacfexp); 66 + const u8 *key, u32 keylen, int checkcpacfexp); 67 67 68 68 /* 69 69 * Simple check if the key blob is a valid EP11 ECC key blob with header. ··· 72 72 * Returns 0 on success or errno value on failure. 73 73 */ 74 74 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, 75 - const u8 *key, size_t keylen, int checkcpacfexp); 75 + const u8 *key, u32 keylen, int checkcpacfexp); 76 76 77 77 /* 78 78 * Simple check if the key blob is a valid EP11 AES key blob with ··· 82 82 * Returns 0 on success or errno value on failure. 83 83 */ 84 84 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, 85 - const u8 *key, size_t keylen, int checkcpacfexp); 85 + const u8 *key, u32 keylen, int checkcpacfexp); 86 86 87 87 /* EP11 card info struct */ 88 88 struct ep11_card_info { ··· 115 115 * Generate (random) EP11 AES secure key. 116 116 */ 117 117 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 118 - u8 *keybuf, size_t *keybufsize, u32 keybufver); 118 + u8 *keybuf, u32 *keybufsize, u32 keybufver); 119 119 120 120 /* 121 121 * Generate EP11 AES secure key with given clear key value. 122 122 */ 123 123 int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 124 - const u8 *clrkey, u8 *keybuf, size_t *keybufsize, 124 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, 125 125 u32 keytype); 126 126 127 127 /* ··· 149 149 /* 150 150 * Derive proteced key from EP11 key blob (AES and ECC keys). 
151 151 */ 152 - int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen, 152 + int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, 153 153 u8 *protkey, u32 *protkeylen, u32 *protkeytype); 154 154 155 155 void zcrypt_ep11misc_exit(void);
+5 -5
drivers/s390/crypto/zcrypt_msgtype50.c
··· 427 427 len = t80h->len; 428 428 if (len > reply->bufsize || len > msg->bufsize || 429 429 len != reply->len) { 430 - pr_debug("%s len mismatch => EMSGSIZE\n", __func__); 430 + pr_debug("len mismatch => EMSGSIZE\n"); 431 431 msg->rc = -EMSGSIZE; 432 432 goto out; 433 433 } ··· 487 487 out: 488 488 ap_msg->private = NULL; 489 489 if (rc) 490 - pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n", 491 - __func__, AP_QID_CARD(zq->queue->qid), 490 + pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", 491 + AP_QID_CARD(zq->queue->qid), 492 492 AP_QID_QUEUE(zq->queue->qid), rc); 493 493 return rc; 494 494 } ··· 537 537 out: 538 538 ap_msg->private = NULL; 539 539 if (rc) 540 - pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n", 541 - __func__, AP_QID_CARD(zq->queue->qid), 540 + pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", 541 + AP_QID_CARD(zq->queue->qid), 542 542 AP_QID_QUEUE(zq->queue->qid), rc); 543 543 return rc; 544 544 }
+15 -22
drivers/s390/crypto/zcrypt_msgtype6.c
··· 437 437 ap_msg->flags |= AP_MSG_FLAG_ADMIN; 438 438 break; 439 439 default: 440 - pr_debug("%s unknown CPRB minor version '%c%c'\n", 441 - __func__, msg->cprbx.func_id[0], 442 - msg->cprbx.func_id[1]); 440 + pr_debug("unknown CPRB minor version '%c%c'\n", 441 + msg->cprbx.func_id[0], msg->cprbx.func_id[1]); 443 442 } 444 443 445 444 /* copy data block */ ··· 628 629 629 630 /* Copy CPRB to user */ 630 631 if (xcrb->reply_control_blk_length < msg->fmt2.count1) { 631 - pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", 632 - __func__, xcrb->reply_control_blk_length, 633 - msg->fmt2.count1); 632 + pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n", 633 + xcrb->reply_control_blk_length, msg->fmt2.count1); 634 634 return -EMSGSIZE; 635 635 } 636 636 if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, ··· 640 642 /* Copy data buffer to user */ 641 643 if (msg->fmt2.count2) { 642 644 if (xcrb->reply_data_length < msg->fmt2.count2) { 643 - pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n", 644 - __func__, xcrb->reply_data_length, 645 - msg->fmt2.count2); 645 + pr_debug("reply_data_length %u < required %u => EMSGSIZE\n", 646 + xcrb->reply_data_length, msg->fmt2.count2); 646 647 return -EMSGSIZE; 647 648 } 648 649 if (z_copy_to_user(userspace, xcrb->reply_data_addr, ··· 670 673 char *data = reply->msg; 671 674 672 675 if (xcrb->resp_len < msg->fmt2.count1) { 673 - pr_debug("%s resp_len %u < required %u => EMSGSIZE\n", 674 - __func__, (unsigned int)xcrb->resp_len, 675 - msg->fmt2.count1); 676 + pr_debug("resp_len %u < required %u => EMSGSIZE\n", 677 + (unsigned int)xcrb->resp_len, msg->fmt2.count1); 676 678 return -EMSGSIZE; 677 679 } 678 680 ··· 871 875 len = sizeof(struct type86x_reply) + t86r->length; 872 876 if (len > reply->bufsize || len > msg->bufsize || 873 877 len != reply->len) { 874 - pr_debug("%s len mismatch => EMSGSIZE\n", 875 - __func__); 878 + pr_debug("len mismatch => EMSGSIZE\n"); 876 879 msg->rc = 
-EMSGSIZE; 877 880 goto out; 878 881 } ··· 885 890 len = t86r->fmt2.offset1 + t86r->fmt2.count1; 886 891 if (len > reply->bufsize || len > msg->bufsize || 887 892 len != reply->len) { 888 - pr_debug("%s len mismatch => EMSGSIZE\n", 889 - __func__); 893 + pr_debug("len mismatch => EMSGSIZE\n"); 890 894 msg->rc = -EMSGSIZE; 891 895 goto out; 892 896 } ··· 935 941 len = t86r->fmt2.offset1 + t86r->fmt2.count1; 936 942 if (len > reply->bufsize || len > msg->bufsize || 937 943 len != reply->len) { 938 - pr_debug("%s len mismatch => EMSGSIZE\n", 939 - __func__); 944 + pr_debug("len mismatch => EMSGSIZE\n"); 940 945 msg->rc = -EMSGSIZE; 941 946 goto out; 942 947 } ··· 1147 1154 1148 1155 out: 1149 1156 if (rc) 1150 - pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", 1151 - __func__, AP_QID_CARD(zq->queue->qid), 1157 + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", 1158 + AP_QID_CARD(zq->queue->qid), 1152 1159 AP_QID_QUEUE(zq->queue->qid), rc); 1153 1160 return rc; 1154 1161 } ··· 1270 1277 1271 1278 out: 1272 1279 if (rc) 1273 - pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n", 1274 - __func__, AP_QID_CARD(zq->queue->qid), 1280 + pr_debug("send cprb at dev=%02x.%04x rc=%d\n", 1281 + AP_QID_CARD(zq->queue->qid), 1275 1282 AP_QID_QUEUE(zq->queue->qid), rc); 1276 1283 return rc; 1277 1284 }