Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

- Fix the style of protected key API driver source: use reverse x-mas
tree ordering for all local variable declarations

- Rework protected key API driver to not use the struct pkey_protkey
and pkey_clrkey anymore. Both structures have a fixed size buffer,
but with the support of ECC protected key these buffers are not big
enough. Use dynamic buffers internally and transparently for
userspace

- Add support for a new 'non CCA clear key token' with ECC clear keys
supported: ECC P256, ECC P384, ECC P521, ECC ED25519 and ECC ED448.
This makes it possible to derive a protected key from the ECC clear
key input via PKEY_KBLOB2PROTK3 ioctl, while currently the only way
to derive is via PCKMO instruction

- The s390 PMU of PAI crypto and extension 1 NNPA counters use atomic_t
for reference counting. Replace this with the proper data type
refcount_t

- Select ARCH_SUPPORTS_INT128, but limit this to clang for now, since
gcc generates inefficient code, which may lead to stack overflows

- Replace one-element array with flexible-array member in struct
vfio_ccw_parent and refactor the rest of the code accordingly. Also,
prefer struct_size() over sizeof() open-coded versions

- Introduce OS_INFO_FLAGS_ENTRY pointing to a flags field and
OS_INFO_FLAG_REIPL_CLEAR flag that informs a dumper whether the
system memory should be cleared or not once dumped

- Fix a hang when a user attempts to remove a VFIO-AP mediated device
attached to a guest: add VFIO_DEVICE_GET_IRQ_INFO and
VFIO_DEVICE_SET_IRQS IOCTLs and wire up the VFIO bus driver callback
to request a release of the device

- Fix calculation for R_390_GOTENT relocations for modules

- Allow any user space process with CAP_PERFMON capability to read and
display the CPU Measurement facility counter sets

- Rework large statically-defined per-CPU cpu_cf_events data structure
and replace it with dynamically allocated structures created when a
perf_event_open() system call is invoked or /dev/hwctr device is
accessed

* tag 's390-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/cpum_cf: rework PER_CPU_DEFINE of struct cpu_cf_events
s390/cpum_cf: open access to hwctr device for CAP_PERFMON privileged process
s390/module: fix rela calculation for R_390_GOTENT
s390/vfio-ap: wire in the vfio_device_ops request callback
s390/vfio-ap: realize the VFIO_DEVICE_SET_IRQS ioctl
s390/vfio-ap: realize the VFIO_DEVICE_GET_IRQ_INFO ioctl
s390/pkey: add support for ecc clear key
s390/pkey: do not use struct pkey_protkey
s390/pkey: introduce reverse x-mas trees
s390/zcore: conditionally clear memory on reipl
s390/ipl: add REIPL_CLEAR flag to os_info
vfio/ccw: use struct_size() helper
vfio/ccw: replace one-element array with flexible-array member
s390: select ARCH_SUPPORTS_INT128
s390/pai_ext: replace atomic_t with refcount_t
s390/pai_crypto: replace atomic_t with refcount_t

+997 -328
+1
arch/s390/Kconfig
··· 117 117 select ARCH_SUPPORTS_ATOMIC_RMW 118 118 select ARCH_SUPPORTS_DEBUG_PAGEALLOC 119 119 select ARCH_SUPPORTS_HUGETLBFS 120 + select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG 120 121 select ARCH_SUPPORTS_NUMA_BALANCING 121 122 select ARCH_SUPPORTS_PER_VMA_LOCK 122 123 select ARCH_USE_BUILTIN_BSWAP
+7 -2
arch/s390/crypto/paes_s390.c
··· 5 5 * s390 implementation of the AES Cipher Algorithm with protected keys. 6 6 * 7 7 * s390 Version: 8 - * Copyright IBM Corp. 2017,2020 8 + * Copyright IBM Corp. 2017, 2023 9 9 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 10 10 * Harald Freudenberger <freude@de.ibm.com> 11 11 */ ··· 132 132 if (i > 0 && ret == -EAGAIN && in_task()) 133 133 if (msleep_interruptible(1000)) 134 134 return -EINTR; 135 - ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk); 135 + ret = pkey_keyblob2pkey(kb->key, kb->keylen, 136 + pk->protkey, &pk->len, &pk->type); 136 137 if (ret == 0) 137 138 break; 138 139 } ··· 146 145 int ret; 147 146 struct pkey_protkey pkey; 148 147 148 + pkey.len = sizeof(pkey.protkey); 149 149 ret = __paes_keyblob2pkey(&ctx->kb, &pkey); 150 150 if (ret) 151 151 return ret; ··· 415 413 static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx) 416 414 { 417 415 struct pkey_protkey pkey0, pkey1; 416 + 417 + pkey0.len = sizeof(pkey0.protkey); 418 + pkey1.len = sizeof(pkey1.protkey); 418 419 419 420 if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) || 420 421 __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+4
arch/s390/include/asm/asm-prototypes.h
··· 6 6 #include <asm/fpu/api.h> 7 7 #include <asm-generic/asm-prototypes.h> 8 8 9 + __int128_t __ashlti3(__int128_t a, int b); 10 + __int128_t __ashrti3(__int128_t a, int b); 11 + __int128_t __lshrti3(__int128_t a, int b); 12 + 9 13 #endif /* _ASM_S390_PROTOTYPES_H */
+6 -1
arch/s390/include/asm/cpacf.h
··· 2 2 /* 3 3 * CP Assist for Cryptographic Functions (CPACF) 4 4 * 5 - * Copyright IBM Corp. 2003, 2017 5 + * Copyright IBM Corp. 2003, 2023 6 6 * Author(s): Thomas Spatzier 7 7 * Jan Glauber 8 8 * Harald Freudenberger (freude@de.ibm.com) ··· 132 132 #define CPACF_PCKMO_ENC_AES_128_KEY 0x12 133 133 #define CPACF_PCKMO_ENC_AES_192_KEY 0x13 134 134 #define CPACF_PCKMO_ENC_AES_256_KEY 0x14 135 + #define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 136 + #define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 137 + #define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 138 + #define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 139 + #define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 135 140 136 141 /* 137 142 * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
+5 -2
arch/s390/include/asm/os_info.h
··· 16 16 17 17 #define OS_INFO_VMCOREINFO 0 18 18 #define OS_INFO_REIPL_BLOCK 1 19 + #define OS_INFO_FLAGS_ENTRY 2 20 + 21 + #define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0) 19 22 20 23 struct os_info_entry { 21 24 u64 addr; ··· 33 30 u16 version_minor; 34 31 u64 crashkernel_addr; 35 32 u64 crashkernel_size; 36 - struct os_info_entry entry[2]; 37 - u8 reserved[4024]; 33 + struct os_info_entry entry[3]; 34 + u8 reserved[4004]; 38 35 } __packed; 39 36 40 37 void os_info_init(void);
+2 -2
arch/s390/include/asm/pkey.h
··· 2 2 /* 3 3 * Kernelspace interface to the pkey device driver 4 4 * 5 - * Copyright IBM Corp. 2016,2019 5 + * Copyright IBM Corp. 2016, 2023 6 6 * 7 7 * Author: Harald Freudenberger <freude@de.ibm.com> 8 8 * ··· 23 23 * @return 0 on success, negative errno value on failure 24 24 */ 25 25 int pkey_keyblob2pkey(const u8 *key, u32 keylen, 26 - struct pkey_protkey *protkey); 26 + u8 *protkey, u32 *protkeylen, u32 *protkeytype); 27 27 28 28 #endif /* _KAPI_PKEY_H */
+10 -5
arch/s390/include/uapi/asm/pkey.h
··· 2 2 /* 3 3 * Userspace interface to the pkey device driver 4 4 * 5 - * Copyright IBM Corp. 2017, 2019 5 + * Copyright IBM Corp. 2017, 2023 6 6 * 7 7 * Author: Harald Freudenberger <freude@de.ibm.com> 8 8 * ··· 32 32 #define MINKEYBLOBSIZE SECKEYBLOBSIZE 33 33 34 34 /* defines for the type field within the pkey_protkey struct */ 35 - #define PKEY_KEYTYPE_AES_128 1 36 - #define PKEY_KEYTYPE_AES_192 2 37 - #define PKEY_KEYTYPE_AES_256 3 38 - #define PKEY_KEYTYPE_ECC 4 35 + #define PKEY_KEYTYPE_AES_128 1 36 + #define PKEY_KEYTYPE_AES_192 2 37 + #define PKEY_KEYTYPE_AES_256 3 38 + #define PKEY_KEYTYPE_ECC 4 39 + #define PKEY_KEYTYPE_ECC_P256 5 40 + #define PKEY_KEYTYPE_ECC_P384 6 41 + #define PKEY_KEYTYPE_ECC_P521 7 42 + #define PKEY_KEYTYPE_ECC_ED25519 8 43 + #define PKEY_KEYTYPE_ECC_ED448 9 39 44 40 45 /* the newer ioctls use a pkey_key_type enum for type information */ 41 46 enum pkey_key_type {
+16
arch/s390/kernel/ipl.c
··· 176 176 static bool reipl_ccw_clear; 177 177 static bool reipl_eckd_clear; 178 178 179 + static unsigned long os_info_flags; 180 + 179 181 static inline int __diag308(unsigned long subcode, unsigned long addr) 180 182 { 181 183 union register_pair r1; ··· 1940 1938 struct lowcore *abs_lc; 1941 1939 unsigned int csum; 1942 1940 1941 + /* 1942 + * Set REIPL_CLEAR flag in os_info flags entry indicating 1943 + * 'clear' sysfs attribute has been set on the panicked system 1944 + * for specified reipl type. 1945 + * Always set for IPL_TYPE_NSS and IPL_TYPE_UNKNOWN. 1946 + */ 1947 + if ((reipl_type == IPL_TYPE_CCW && reipl_ccw_clear) || 1948 + (reipl_type == IPL_TYPE_ECKD && reipl_eckd_clear) || 1949 + (reipl_type == IPL_TYPE_FCP && reipl_fcp_clear) || 1950 + (reipl_type == IPL_TYPE_NVME && reipl_nvme_clear) || 1951 + reipl_type == IPL_TYPE_NSS || 1952 + reipl_type == IPL_TYPE_UNKNOWN) 1953 + os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR; 1954 + os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags)); 1943 1955 csum = (__force unsigned int) 1944 1956 csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); 1945 1957 abs_lc = get_abs_lowcore();
+2 -1
arch/s390/kernel/module.c
··· 352 352 rc = apply_rela_bits(loc, val, 0, 64, 0, write); 353 353 else if (r_type == R_390_GOTENT || 354 354 r_type == R_390_GOTPLTENT) { 355 - val += (Elf_Addr) me->mem[MOD_TEXT].base - loc; 355 + val += (Elf_Addr)me->mem[MOD_TEXT].base + 356 + me->arch.got_offset - loc; 356 357 rc = apply_rela_bits(loc, val, 1, 32, 1, write); 357 358 } 358 359 break;
+334 -118
arch/s390/kernel/perf_cpum_cf.c
··· 76 76 } 77 77 78 78 struct cpu_cf_events { 79 + refcount_t refcnt; /* Reference count */ 79 80 atomic_t ctr_set[CPUMF_CTR_SET_MAX]; 80 81 u64 state; /* For perf_event_open SVC */ 81 82 u64 dev_state; /* For /dev/hwctr */ ··· 89 88 unsigned int sets; /* # Counter set saved in memory */ 90 89 }; 91 90 92 - /* Per-CPU event structure for the counter facility */ 93 - static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events); 94 - 95 91 static unsigned int cfdiag_cpu_speed; /* CPU speed for CF_DIAG trailer */ 96 92 static debug_info_t *cf_dbg; 97 93 ··· 100 102 * of counter sets. Extract this information at device driver initialization. 101 103 */ 102 104 static struct cpumf_ctr_info cpumf_ctr_info; 105 + 106 + struct cpu_cf_ptr { 107 + struct cpu_cf_events *cpucf; 108 + }; 109 + 110 + static struct cpu_cf_root { /* Anchor to per CPU data */ 111 + refcount_t refcnt; /* Overall active events */ 112 + struct cpu_cf_ptr __percpu *cfptr; 113 + } cpu_cf_root; 114 + 115 + /* 116 + * Serialize event initialization and event removal. Both are called from 117 + * user space in task context with perf_event_open() and close() 118 + * system calls. 119 + * 120 + * This mutex serializes functions cpum_cf_alloc_cpu() called at event 121 + * initialization via cpumf_pmu_event_init() and function cpum_cf_free_cpu() 122 + * called at event removal via call back function hw_perf_event_destroy() 123 + * when the event is deleted. They are serialized to enforce correct 124 + * bookkeeping of pointer and reference counts anchored by 125 + * struct cpu_cf_root and the access to cpu_cf_root::refcnt and the 126 + * per CPU pointers stored in cpu_cf_root::cfptr. 127 + */ 128 + static DEFINE_MUTEX(pmc_reserve_mutex); 129 + 130 + /* 131 + * Get pointer to per-cpu structure. 132 + * 133 + * Function get_cpu_cfhw() is called from 134 + * - cfset_copy_all(): This function is protected by cpus_read_lock(), so 135 + * CPU hot plug remove can not happen. 
Event removal requires a close() 136 + * first. 137 + * 138 + * Function this_cpu_cfhw() is called from perf common code functions: 139 + * - pmu_{en|dis}able(), pmu_{add|del}()and pmu_{start|stop}(): 140 + * All functions execute with interrupts disabled on that particular CPU. 141 + * - cfset_ioctl_{on|off}, cfset_cpu_read(): see comment cfset_copy_all(). 142 + * 143 + * Therefore it is safe to access the CPU specific pointer to the event. 144 + */ 145 + static struct cpu_cf_events *get_cpu_cfhw(int cpu) 146 + { 147 + struct cpu_cf_ptr __percpu *p = cpu_cf_root.cfptr; 148 + 149 + if (p) { 150 + struct cpu_cf_ptr *q = per_cpu_ptr(p, cpu); 151 + 152 + return q->cpucf; 153 + } 154 + return NULL; 155 + } 156 + 157 + static struct cpu_cf_events *this_cpu_cfhw(void) 158 + { 159 + return get_cpu_cfhw(smp_processor_id()); 160 + } 161 + 162 + /* Disable counter sets on dedicated CPU */ 163 + static void cpum_cf_reset_cpu(void *flags) 164 + { 165 + lcctl(0); 166 + } 167 + 168 + /* Free per CPU data when the last event is removed. */ 169 + static void cpum_cf_free_root(void) 170 + { 171 + if (!refcount_dec_and_test(&cpu_cf_root.refcnt)) 172 + return; 173 + free_percpu(cpu_cf_root.cfptr); 174 + cpu_cf_root.cfptr = NULL; 175 + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 176 + on_each_cpu(cpum_cf_reset_cpu, NULL, 1); 177 + debug_sprintf_event(cf_dbg, 4, "%s2 root.refcnt %u cfptr %px\n", 178 + __func__, refcount_read(&cpu_cf_root.refcnt), 179 + cpu_cf_root.cfptr); 180 + } 181 + 182 + /* 183 + * On initialization of first event also allocate per CPU data dynamically. 184 + * Start with an array of pointers, the array size is the maximum number of 185 + * CPUs possible, which might be larger than the number of CPUs currently 186 + * online. 187 + */ 188 + static int cpum_cf_alloc_root(void) 189 + { 190 + int rc = 0; 191 + 192 + if (refcount_inc_not_zero(&cpu_cf_root.refcnt)) 193 + return rc; 194 + 195 + /* The memory is already zeroed. 
*/ 196 + cpu_cf_root.cfptr = alloc_percpu(struct cpu_cf_ptr); 197 + if (cpu_cf_root.cfptr) { 198 + refcount_set(&cpu_cf_root.refcnt, 1); 199 + on_each_cpu(cpum_cf_reset_cpu, NULL, 1); 200 + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 201 + } else { 202 + rc = -ENOMEM; 203 + } 204 + 205 + return rc; 206 + } 207 + 208 + /* Free CPU counter data structure for a PMU */ 209 + static void cpum_cf_free_cpu(int cpu) 210 + { 211 + struct cpu_cf_events *cpuhw; 212 + struct cpu_cf_ptr *p; 213 + 214 + mutex_lock(&pmc_reserve_mutex); 215 + /* 216 + * When invoked via CPU hotplug handler, there might be no events 217 + * installed or that particular CPU might not have an 218 + * event installed. This anchor pointer can be NULL! 219 + */ 220 + if (!cpu_cf_root.cfptr) 221 + goto out; 222 + p = per_cpu_ptr(cpu_cf_root.cfptr, cpu); 223 + cpuhw = p->cpucf; 224 + /* 225 + * Might be zero when called from CPU hotplug handler and no event 226 + * installed on that CPU, but on different CPUs. 227 + */ 228 + if (!cpuhw) 229 + goto out; 230 + 231 + if (refcount_dec_and_test(&cpuhw->refcnt)) { 232 + kfree(cpuhw); 233 + p->cpucf = NULL; 234 + } 235 + cpum_cf_free_root(); 236 + out: 237 + mutex_unlock(&pmc_reserve_mutex); 238 + } 239 + 240 + /* Allocate CPU counter data structure for a PMU. Called under mutex lock. */ 241 + static int cpum_cf_alloc_cpu(int cpu) 242 + { 243 + struct cpu_cf_events *cpuhw; 244 + struct cpu_cf_ptr *p; 245 + int rc; 246 + 247 + mutex_lock(&pmc_reserve_mutex); 248 + rc = cpum_cf_alloc_root(); 249 + if (rc) 250 + goto unlock; 251 + p = per_cpu_ptr(cpu_cf_root.cfptr, cpu); 252 + cpuhw = p->cpucf; 253 + 254 + if (!cpuhw) { 255 + cpuhw = kzalloc(sizeof(*cpuhw), GFP_KERNEL); 256 + if (cpuhw) { 257 + p->cpucf = cpuhw; 258 + refcount_set(&cpuhw->refcnt, 1); 259 + } else { 260 + rc = -ENOMEM; 261 + } 262 + } else { 263 + refcount_inc(&cpuhw->refcnt); 264 + } 265 + if (rc) { 266 + /* 267 + * Error in allocation of event, decrement anchor. 
Since 268 + * cpu_cf_event in not created, its destroy() function is not 269 + * invoked. Adjust the reference counter for the anchor. 270 + */ 271 + cpum_cf_free_root(); 272 + } 273 + unlock: 274 + mutex_unlock(&pmc_reserve_mutex); 275 + return rc; 276 + } 277 + 278 + /* 279 + * Create/delete per CPU data structures for /dev/hwctr interface and events 280 + * created by perf_event_open(). 281 + * If cpu is -1, track task on all available CPUs. This requires 282 + * allocation of hardware data structures for all CPUs. This setup handles 283 + * perf_event_open() with task context and /dev/hwctr interface. 284 + * If cpu is non-zero install event on this CPU only. This setup handles 285 + * perf_event_open() with CPU context. 286 + */ 287 + static int cpum_cf_alloc(int cpu) 288 + { 289 + cpumask_var_t mask; 290 + int rc; 291 + 292 + if (cpu == -1) { 293 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 294 + return -ENOMEM; 295 + for_each_online_cpu(cpu) { 296 + rc = cpum_cf_alloc_cpu(cpu); 297 + if (rc) { 298 + for_each_cpu(cpu, mask) 299 + cpum_cf_free_cpu(cpu); 300 + break; 301 + } 302 + cpumask_set_cpu(cpu, mask); 303 + } 304 + free_cpumask_var(mask); 305 + } else { 306 + rc = cpum_cf_alloc_cpu(cpu); 307 + } 308 + return rc; 309 + } 310 + 311 + static void cpum_cf_free(int cpu) 312 + { 313 + if (cpu == -1) { 314 + for_each_online_cpu(cpu) 315 + cpum_cf_free_cpu(cpu); 316 + } else { 317 + cpum_cf_free_cpu(cpu); 318 + } 319 + } 103 320 104 321 #define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */ 105 322 /* interval in seconds */ ··· 664 451 */ 665 452 static void cpumf_pmu_enable(struct pmu *pmu) 666 453 { 667 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 454 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 668 455 int err; 669 456 670 - if (cpuhw->flags & PMU_F_ENABLED) 457 + if (!cpuhw || (cpuhw->flags & PMU_F_ENABLED)) 671 458 return; 672 459 673 460 err = lcctl(cpuhw->state | cpuhw->dev_state); ··· 684 471 */ 685 472 static void 
cpumf_pmu_disable(struct pmu *pmu) 686 473 { 687 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 688 - int err; 474 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 689 475 u64 inactive; 476 + int err; 690 477 691 - if (!(cpuhw->flags & PMU_F_ENABLED)) 478 + if (!cpuhw || !(cpuhw->flags & PMU_F_ENABLED)) 692 479 return; 693 480 694 481 inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); ··· 700 487 cpuhw->flags &= ~PMU_F_ENABLED; 701 488 } 702 489 703 - #define PMC_INIT 0UL 704 - #define PMC_RELEASE 1UL 705 - 706 - static void cpum_cf_setup_cpu(void *flags) 707 - { 708 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 709 - 710 - switch ((unsigned long)flags) { 711 - case PMC_INIT: 712 - cpuhw->flags |= PMU_F_RESERVED; 713 - break; 714 - 715 - case PMC_RELEASE: 716 - cpuhw->flags &= ~PMU_F_RESERVED; 717 - break; 718 - } 719 - 720 - /* Disable CPU counter sets */ 721 - lcctl(0); 722 - debug_sprintf_event(cf_dbg, 5, "%s flags %#x flags %#x state %#llx\n", 723 - __func__, *(int *)flags, cpuhw->flags, 724 - cpuhw->state); 725 - } 726 - 727 - /* Initialize the CPU-measurement counter facility */ 728 - static int __kernel_cpumcf_begin(void) 729 - { 730 - on_each_cpu(cpum_cf_setup_cpu, (void *)PMC_INIT, 1); 731 - irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 732 - 733 - return 0; 734 - } 735 - 736 - /* Release the CPU-measurement counter facility */ 737 - static void __kernel_cpumcf_end(void) 738 - { 739 - on_each_cpu(cpum_cf_setup_cpu, (void *)PMC_RELEASE, 1); 740 - irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 741 - } 742 - 743 - /* Number of perf events counting hardware events */ 744 - static atomic_t num_events = ATOMIC_INIT(0); 745 - /* Used to avoid races in calling reserve/release_cpumf_hardware */ 746 - static DEFINE_MUTEX(pmc_reserve_mutex); 747 - 748 490 /* Release the PMU if event is the last perf event */ 749 491 static void hw_perf_event_destroy(struct perf_event *event) 750 492 { 751 - 
mutex_lock(&pmc_reserve_mutex); 752 - if (atomic_dec_return(&num_events) == 0) 753 - __kernel_cpumcf_end(); 754 - mutex_unlock(&pmc_reserve_mutex); 493 + cpum_cf_free(event->cpu); 755 494 } 756 495 757 496 /* CPUMF <-> perf event mappings for kernel+userspace (basic set) */ ··· 726 561 [PERF_COUNT_HW_BRANCH_MISSES] = -1, 727 562 [PERF_COUNT_HW_BUS_CYCLES] = -1, 728 563 }; 729 - 730 - static void cpumf_hw_inuse(void) 731 - { 732 - mutex_lock(&pmc_reserve_mutex); 733 - if (atomic_inc_return(&num_events) == 1) 734 - __kernel_cpumcf_begin(); 735 - mutex_unlock(&pmc_reserve_mutex); 736 - } 737 564 738 565 static int is_userspace_event(u64 ev) 739 566 { ··· 810 653 } 811 654 812 655 /* Initialize for using the CPU-measurement counter facility */ 813 - cpumf_hw_inuse(); 656 + if (cpum_cf_alloc(event->cpu)) 657 + return -ENOMEM; 814 658 event->destroy = hw_perf_event_destroy; 815 659 816 660 /* ··· 914 756 915 757 static void cpumf_pmu_start(struct perf_event *event, int flags) 916 758 { 917 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 759 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 918 760 struct hw_perf_event *hwc = &event->hw; 919 761 int i; 920 762 ··· 988 830 989 831 static void cpumf_pmu_stop(struct perf_event *event, int flags) 990 832 { 991 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 833 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 992 834 struct hw_perf_event *hwc = &event->hw; 993 835 int i; 994 836 ··· 1015 857 false); 1016 858 if (cfdiag_diffctr(cpuhw, event->hw.config_base)) 1017 859 cfdiag_push_sample(event, cpuhw); 1018 - } else if (cpuhw->flags & PMU_F_RESERVED) { 1019 - /* Only update when PMU not hotplugged off */ 860 + } else { 1020 861 hw_perf_event_update(event); 1021 862 } 1022 863 hwc->state |= PERF_HES_UPTODATE; ··· 1024 867 1025 868 static int cpumf_pmu_add(struct perf_event *event, int flags) 1026 869 { 1027 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 870 + struct cpu_cf_events *cpuhw = 
this_cpu_cfhw(); 1028 871 1029 872 ctr_set_enable(&cpuhw->state, event->hw.config_base); 1030 873 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; ··· 1037 880 1038 881 static void cpumf_pmu_del(struct perf_event *event, int flags) 1039 882 { 1040 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 883 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1041 884 int i; 1042 885 1043 886 cpumf_pmu_stop(event, PERF_EF_UPDATE); ··· 1069 912 .read = cpumf_pmu_read, 1070 913 }; 1071 914 1072 - static int cpum_cf_setup(unsigned int cpu, unsigned long flags) 1073 - { 1074 - local_irq_disable(); 1075 - cpum_cf_setup_cpu((void *)flags); 1076 - local_irq_enable(); 1077 - return 0; 1078 - } 915 + static struct cfset_session { /* CPUs and counter set bit mask */ 916 + struct list_head head; /* Head of list of active processes */ 917 + } cfset_session = { 918 + .head = LIST_HEAD_INIT(cfset_session.head) 919 + }; 1079 920 921 + static refcount_t cfset_opencnt = REFCOUNT_INIT(0); /* Access count */ 922 + /* 923 + * Synchronize access to device /dev/hwc. This mutex protects against 924 + * concurrent access to functions cfset_open() and cfset_release(). 925 + * Same for CPU hotplug add and remove events triggering 926 + * cpum_cf_online_cpu() and cpum_cf_offline_cpu(). 927 + * It also serializes concurrent device ioctl access from multiple 928 + * processes accessing /dev/hwc. 929 + * 930 + * The mutex protects concurrent access to the /dev/hwctr session management 931 + * struct cfset_session and reference counting variable cfset_opencnt. 932 + */ 933 + static DEFINE_MUTEX(cfset_ctrset_mutex); 934 + 935 + /* 936 + * CPU hotplug handles only /dev/hwctr device. 937 + * For perf_event_open() the CPU hotplug handling is done on kernel common 938 + * code: 939 + * - CPU add: Nothing is done since a file descriptor can not be created 940 + * and returned to the user. 941 + * - CPU delete: Handled by common code via pmu_disable(), pmu_stop() and 942 + * pmu_delete(). 
The event itself is removed when the file descriptor is 943 + * closed. 944 + */ 1080 945 static int cfset_online_cpu(unsigned int cpu); 946 + 1081 947 static int cpum_cf_online_cpu(unsigned int cpu) 1082 948 { 1083 - debug_sprintf_event(cf_dbg, 4, "%s cpu %d in_irq %ld\n", __func__, 1084 - cpu, in_interrupt()); 1085 - cpum_cf_setup(cpu, PMC_INIT); 1086 - return cfset_online_cpu(cpu); 949 + int rc = 0; 950 + 951 + debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d " 952 + "opencnt %d\n", __func__, cpu, 953 + refcount_read(&cpu_cf_root.refcnt), 954 + refcount_read(&cfset_opencnt)); 955 + /* 956 + * Ignore notification for perf_event_open(). 957 + * Handle only /dev/hwctr device sessions. 958 + */ 959 + mutex_lock(&cfset_ctrset_mutex); 960 + if (refcount_read(&cfset_opencnt)) { 961 + rc = cpum_cf_alloc_cpu(cpu); 962 + if (!rc) 963 + cfset_online_cpu(cpu); 964 + } 965 + mutex_unlock(&cfset_ctrset_mutex); 966 + return rc; 1087 967 } 1088 968 1089 969 static int cfset_offline_cpu(unsigned int cpu); 970 + 1090 971 static int cpum_cf_offline_cpu(unsigned int cpu) 1091 972 { 1092 - debug_sprintf_event(cf_dbg, 4, "%s cpu %d\n", __func__, cpu); 1093 - cfset_offline_cpu(cpu); 1094 - return cpum_cf_setup(cpu, PMC_RELEASE); 973 + debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d opencnt %d\n", 974 + __func__, cpu, refcount_read(&cpu_cf_root.refcnt), 975 + refcount_read(&cfset_opencnt)); 976 + /* 977 + * During task exit processing of grouped perf events triggered by CPU 978 + * hotplug processing, pmu_disable() is called as part of perf context 979 + * removal process. Therefore do not trigger event removal now for 980 + * perf_event_open() created events. Perf common code triggers event 981 + * destruction when the event file descriptor is closed. 982 + * 983 + * Handle only /dev/hwctr device sessions. 
984 + */ 985 + mutex_lock(&cfset_ctrset_mutex); 986 + if (refcount_read(&cfset_opencnt)) { 987 + cfset_offline_cpu(cpu); 988 + cpum_cf_free_cpu(cpu); 989 + } 990 + mutex_unlock(&cfset_ctrset_mutex); 991 + return 0; 1095 992 } 1096 993 1097 994 /* Return true if store counter set multiple instruction is available */ ··· 1164 953 return; 1165 954 1166 955 inc_irq_stat(IRQEXT_CMC); 1167 - cpuhw = this_cpu_ptr(&cpu_cf_events); 1168 956 1169 957 /* 1170 958 * Measurement alerts are shared and might happen when the PMU 1171 959 * is not reserved. Ignore these alerts in this case. 1172 960 */ 1173 - if (!(cpuhw->flags & PMU_F_RESERVED)) 961 + cpuhw = this_cpu_cfhw(); 962 + if (!cpuhw) 1174 963 return; 1175 964 1176 965 /* counter authorization change alert */ ··· 1250 1039 * counter set via normal file operations. 1251 1040 */ 1252 1041 1253 - static atomic_t cfset_opencnt = ATOMIC_INIT(0); /* Access count */ 1254 - static DEFINE_MUTEX(cfset_ctrset_mutex);/* Synchronize access to hardware */ 1255 1042 struct cfset_call_on_cpu_parm { /* Parm struct for smp_call_on_cpu */ 1256 1043 unsigned int sets; /* Counter set bit mask */ 1257 1044 atomic_t cpus_ack; /* # CPUs successfully executed func */ 1258 - }; 1259 - 1260 - static struct cfset_session { /* CPUs and counter set bit mask */ 1261 - struct list_head head; /* Head of list of active processes */ 1262 - } cfset_session = { 1263 - .head = LIST_HEAD_INIT(cfset_session.head) 1264 1045 }; 1265 1046 1266 1047 struct cfset_request { /* CPUs and counter set bit mask */ ··· 1316 1113 /* Stop all counter sets via ioctl interface */ 1317 1114 static void cfset_ioctl_off(void *parm) 1318 1115 { 1319 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 1116 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1320 1117 struct cfset_call_on_cpu_parm *p = parm; 1321 1118 int rc; 1322 1119 1323 - /* Check if any counter set used by /dev/hwc */ 1120 + /* Check if any counter set used by /dev/hwctr */ 1324 1121 for (rc = 
CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc) 1325 1122 if ((p->sets & cpumf_ctr_ctl[rc])) { 1326 1123 if (!atomic_dec_return(&cpuhw->ctr_set[rc])) { ··· 1344 1141 /* Start counter sets on particular CPU */ 1345 1142 static void cfset_ioctl_on(void *parm) 1346 1143 { 1347 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 1144 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1348 1145 struct cfset_call_on_cpu_parm *p = parm; 1349 1146 int rc; 1350 1147 ··· 1366 1163 1367 1164 static void cfset_release_cpu(void *p) 1368 1165 { 1369 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 1166 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1370 1167 int rc; 1371 1168 1372 1169 debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n", ··· 1406 1203 kfree(file->private_data); 1407 1204 file->private_data = NULL; 1408 1205 } 1409 - if (!atomic_dec_return(&cfset_opencnt)) 1206 + if (refcount_dec_and_test(&cfset_opencnt)) { /* Last close */ 1410 1207 on_each_cpu(cfset_release_cpu, NULL, 1); 1208 + cpum_cf_free(-1); 1209 + } 1411 1210 mutex_unlock(&cfset_ctrset_mutex); 1412 - 1413 - hw_perf_event_destroy(NULL); 1414 1211 return 0; 1415 1212 } 1416 1213 1214 + /* 1215 + * Open via /dev/hwctr device. Allocate all per CPU resources on the first 1216 + * open of the device. The last close releases all per CPU resources. 1217 + * Parallel perf_event_open system calls also use per CPU resources. 1218 + * These invocations are handled via reference counting on the per CPU data 1219 + * structures. 
1220 + */ 1417 1221 static int cfset_open(struct inode *inode, struct file *file) 1418 1222 { 1419 - if (!capable(CAP_SYS_ADMIN)) 1223 + int rc = 0; 1224 + 1225 + if (!perfmon_capable()) 1420 1226 return -EPERM; 1227 + file->private_data = NULL; 1228 + 1421 1229 mutex_lock(&cfset_ctrset_mutex); 1422 - if (atomic_inc_return(&cfset_opencnt) == 1) 1423 - cfset_session_init(); 1230 + if (!refcount_inc_not_zero(&cfset_opencnt)) { /* First open */ 1231 + rc = cpum_cf_alloc(-1); 1232 + if (!rc) { 1233 + cfset_session_init(); 1234 + refcount_set(&cfset_opencnt, 1); 1235 + } 1236 + } 1424 1237 mutex_unlock(&cfset_ctrset_mutex); 1425 1238 1426 - cpumf_hw_inuse(); 1427 - file->private_data = NULL; 1428 1239 /* nonseekable_open() never fails */ 1429 - return nonseekable_open(inode, file); 1240 + return rc ?: nonseekable_open(inode, file); 1430 1241 } 1431 1242 1432 1243 static int cfset_all_start(struct cfset_request *req) ··· 1497 1280 ctrset_read = (struct s390_ctrset_read __user *)arg; 1498 1281 uptr = ctrset_read->data; 1499 1282 for_each_cpu(cpu, mask) { 1500 - struct cpu_cf_events *cpuhw = per_cpu_ptr(&cpu_cf_events, cpu); 1283 + struct cpu_cf_events *cpuhw = get_cpu_cfhw(cpu); 1501 1284 struct s390_ctrset_cpudata __user *ctrset_cpudata; 1502 1285 1503 1286 ctrset_cpudata = uptr; ··· 1541 1324 /* Read all counter sets. 
*/ 1542 1325 static void cfset_cpu_read(void *parm) 1543 1326 { 1544 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 1327 + struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1545 1328 struct cfset_call_on_cpu_parm *p = parm; 1546 1329 int set, set_size; 1547 1330 size_t space; ··· 1565 1348 cpuhw->used += space; 1566 1349 cpuhw->sets += 1; 1567 1350 } 1351 + debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__, 1352 + cpuhw->sets, cpuhw->used); 1568 1353 } 1569 - debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__, 1570 - cpuhw->sets, cpuhw->used); 1571 1354 } 1572 1355 1573 1356 static int cfset_all_read(unsigned long arg, struct cfset_request *req) ··· 1719 1502 .name = S390_HWCTR_DEVICE, 1720 1503 .minor = MISC_DYNAMIC_MINOR, 1721 1504 .fops = &cfset_fops, 1505 + .mode = 0666, 1722 1506 }; 1723 1507 1724 1508 /* Hotplug add of a CPU. Scan through all active processes and add ··· 1730 1512 struct cfset_call_on_cpu_parm p; 1731 1513 struct cfset_request *rp; 1732 1514 1733 - mutex_lock(&cfset_ctrset_mutex); 1734 1515 if (!list_empty(&cfset_session.head)) { 1735 1516 list_for_each_entry(rp, &cfset_session.head, node) { 1736 1517 p.sets = rp->ctrset; ··· 1737 1520 cpumask_set_cpu(cpu, &rp->mask); 1738 1521 } 1739 1522 } 1740 - mutex_unlock(&cfset_ctrset_mutex); 1741 1523 return 0; 1742 1524 } 1743 1525 1744 1526 /* Hotplug remove of a CPU. Scan through all active processes and clear 1745 1527 * that CPU from the list of CPUs supplied with ioctl(..., START, ...). 1528 + * Adjust reference counts. 
1746 1529 */ 1747 1530 static int cfset_offline_cpu(unsigned int cpu) 1748 1531 { 1749 1532 struct cfset_call_on_cpu_parm p; 1750 1533 struct cfset_request *rp; 1751 1534 1752 - mutex_lock(&cfset_ctrset_mutex); 1753 1535 if (!list_empty(&cfset_session.head)) { 1754 1536 list_for_each_entry(rp, &cfset_session.head, node) { 1755 1537 p.sets = rp->ctrset; ··· 1756 1540 cpumask_clear_cpu(cpu, &rp->mask); 1757 1541 } 1758 1542 } 1759 - mutex_unlock(&cfset_ctrset_mutex); 1760 1543 return 0; 1761 1544 } 1762 1545 ··· 1833 1618 } 1834 1619 1835 1620 /* Initialize for using the CPU-measurement counter facility */ 1836 - cpumf_hw_inuse(); 1621 + if (cpum_cf_alloc(event->cpu)) 1622 + return -ENOMEM; 1837 1623 event->destroy = hw_perf_event_destroy; 1838 1624 1839 1625 err = cfdiag_event_init2(event);
+11 -8
arch/s390/kernel/perf_pai_crypto.c
··· 36 36 unsigned long *page; /* Page for CPU to store counters */ 37 37 struct pai_userdata *save; /* Page to store no-zero counters */ 38 38 unsigned int active_events; /* # of PAI crypto users */ 39 - unsigned int refcnt; /* Reference count mapped buffers */ 39 + refcount_t refcnt; /* Reference count mapped buffers */ 40 40 enum paievt_mode mode; /* Type of event */ 41 41 struct perf_event *event; /* Perf event for sampling */ 42 42 }; ··· 57 57 static_branch_dec(&pai_key); 58 58 mutex_lock(&pai_reserve_mutex); 59 59 debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d" 60 - " mode %d refcnt %d\n", __func__, 60 + " mode %d refcnt %u\n", __func__, 61 61 event->attr.config, event->cpu, 62 - cpump->active_events, cpump->mode, cpump->refcnt); 63 - if (!--cpump->refcnt) { 62 + cpump->active_events, cpump->mode, 63 + refcount_read(&cpump->refcnt)); 64 + if (refcount_dec_and_test(&cpump->refcnt)) { 64 65 debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", 65 66 __func__, (unsigned long)cpump->page, 66 67 cpump->save); ··· 150 149 /* Allocate memory for counter page and counter extraction. 151 150 * Only the first counting event has to allocate a page. 152 151 */ 153 - if (cpump->page) 152 + if (cpump->page) { 153 + refcount_inc(&cpump->refcnt); 154 154 goto unlock; 155 + } 155 156 156 157 rc = -ENOMEM; 157 158 cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL); ··· 167 164 goto unlock; 168 165 } 169 166 rc = 0; 167 + refcount_set(&cpump->refcnt, 1); 170 168 171 169 unlock: 172 170 /* If rc is non-zero, do not set mode and reference count */ 173 171 if (!rc) { 174 - cpump->refcnt++; 175 172 cpump->mode = a->sample_period ? 
PAI_MODE_SAMPLING 176 173 : PAI_MODE_COUNTING; 177 174 } 178 175 debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d" 179 - " mode %d refcnt %d page %#lx save %p rc %d\n", 176 + " mode %d refcnt %u page %#lx save %p rc %d\n", 180 177 __func__, a->sample_period, cpump->active_events, 181 - cpump->mode, cpump->refcnt, 178 + cpump->mode, refcount_read(&cpump->refcnt), 182 179 (unsigned long)cpump->page, cpump->save, rc); 183 180 mutex_unlock(&pai_reserve_mutex); 184 181 return rc;
+13 -10
arch/s390/kernel/perf_pai_ext.c
··· 50 50 struct pai_userdata *save; /* Area to store non-zero counters */ 51 51 enum paievt_mode mode; /* Type of event */ 52 52 unsigned int active_events; /* # of PAI Extension users */ 53 - unsigned int refcnt; 53 + refcount_t refcnt; 54 54 struct perf_event *event; /* Perf event for sampling */ 55 55 struct paiext_cb *paiext_cb; /* PAI extension control block area */ 56 56 }; ··· 60 60 }; 61 61 62 62 static struct paiext_root { /* Anchor to per CPU data */ 63 - int refcnt; /* Overall active events */ 63 + refcount_t refcnt; /* Overall active events */ 64 64 struct paiext_mapptr __percpu *mapptr; 65 65 } paiext_root; 66 66 67 67 /* Free per CPU data when the last event is removed. */ 68 68 static void paiext_root_free(void) 69 69 { 70 - if (!--paiext_root.refcnt) { 70 + if (refcount_dec_and_test(&paiext_root.refcnt)) { 71 71 free_percpu(paiext_root.mapptr); 72 72 paiext_root.mapptr = NULL; 73 73 } ··· 80 80 */ 81 81 static int paiext_root_alloc(void) 82 82 { 83 - if (++paiext_root.refcnt == 1) { 83 + if (!refcount_inc_not_zero(&paiext_root.refcnt)) { 84 84 /* The memory is already zeroed. */ 85 85 paiext_root.mapptr = alloc_percpu(struct paiext_mapptr); 86 86 if (!paiext_root.mapptr) { ··· 91 91 */ 92 92 return -ENOMEM; 93 93 } 94 + refcount_set(&paiext_root.refcnt, 1); 94 95 } 95 96 return 0; 96 97 } ··· 123 122 124 123 mutex_lock(&paiext_reserve_mutex); 125 124 cpump->event = NULL; 126 - if (!--cpump->refcnt) /* Last reference gone */ 125 + if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */ 127 126 paiext_free(mp); 128 127 paiext_root_free(); 129 128 mutex_unlock(&paiext_reserve_mutex); ··· 164 163 rc = -ENOMEM; 165 164 cpump = kzalloc(sizeof(*cpump), GFP_KERNEL); 166 165 if (!cpump) 167 - goto unlock; 166 + goto undo; 168 167 169 168 /* Allocate memory for counter area and counter extraction. 
170 169 * These are ··· 184 183 GFP_KERNEL); 185 184 if (!cpump->save || !cpump->area || !cpump->paiext_cb) { 186 185 paiext_free(mp); 187 - goto unlock; 186 + goto undo; 188 187 } 188 + refcount_set(&cpump->refcnt, 1); 189 189 cpump->mode = a->sample_period ? PAI_MODE_SAMPLING 190 190 : PAI_MODE_COUNTING; 191 191 } else { ··· 197 195 if (cpump->mode == PAI_MODE_SAMPLING || 198 196 (cpump->mode == PAI_MODE_COUNTING && a->sample_period)) { 199 197 rc = -EBUSY; 200 - goto unlock; 198 + goto undo; 201 199 } 200 + refcount_inc(&cpump->refcnt); 202 201 } 203 202 204 203 rc = 0; 205 204 cpump->event = event; 206 - ++cpump->refcnt; 207 205 208 - unlock: 206 + undo: 209 207 if (rc) { 210 208 /* Error in allocation of event, decrement anchor. Since 211 209 * the event in not created, its destroy() function is never ··· 213 211 */ 214 212 paiext_root_free(); 215 213 } 214 + unlock: 216 215 mutex_unlock(&paiext_reserve_mutex); 217 216 /* If rc is non-zero, no increment of counter/sampler was done. */ 218 217 return rc;
+1 -1
arch/s390/lib/Makefile
··· 3 3 # Makefile for s390-specific library files.. 4 4 # 5 5 6 - lib-y += delay.o string.o uaccess.o find.o spinlock.o 6 + lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o 7 7 obj-y += mem.o xor.o 8 8 lib-$(CONFIG_KPROBES) += probes.o 9 9 lib-$(CONFIG_UPROBES) += probes.o
+63
arch/s390/lib/tishift.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <linux/linkage.h> 4 + #include <asm/nospec-insn.h> 5 + #include <asm/export.h> 6 + 7 + .section .noinstr.text, "ax" 8 + 9 + GEN_BR_THUNK %r14 10 + 11 + SYM_FUNC_START(__ashlti3) 12 + lmg %r0,%r1,0(%r3) 13 + cije %r4,0,1f 14 + lhi %r3,64 15 + sr %r3,%r4 16 + jnh 0f 17 + srlg %r3,%r1,0(%r3) 18 + sllg %r0,%r0,0(%r4) 19 + sllg %r1,%r1,0(%r4) 20 + ogr %r0,%r3 21 + j 1f 22 + 0: sllg %r0,%r1,-64(%r4) 23 + lghi %r1,0 24 + 1: stmg %r0,%r1,0(%r2) 25 + BR_EX %r14 26 + SYM_FUNC_END(__ashlti3) 27 + EXPORT_SYMBOL(__ashlti3) 28 + 29 + SYM_FUNC_START(__ashrti3) 30 + lmg %r0,%r1,0(%r3) 31 + cije %r4,0,1f 32 + lhi %r3,64 33 + sr %r3,%r4 34 + jnh 0f 35 + sllg %r3,%r0,0(%r3) 36 + srlg %r1,%r1,0(%r4) 37 + srag %r0,%r0,0(%r4) 38 + ogr %r1,%r3 39 + j 1f 40 + 0: srag %r1,%r0,-64(%r4) 41 + srag %r0,%r0,63 42 + 1: stmg %r0,%r1,0(%r2) 43 + BR_EX %r14 44 + SYM_FUNC_END(__ashrti3) 45 + EXPORT_SYMBOL(__ashrti3) 46 + 47 + SYM_FUNC_START(__lshrti3) 48 + lmg %r0,%r1,0(%r3) 49 + cije %r4,0,1f 50 + lhi %r3,64 51 + sr %r3,%r4 52 + jnh 0f 53 + sllg %r3,%r0,0(%r3) 54 + srlg %r1,%r1,0(%r4) 55 + srlg %r0,%r0,0(%r4) 56 + ogr %r1,%r3 57 + j 1f 58 + 0: srlg %r1,%r0,-64(%r4) 59 + lghi %r0,0 60 + 1: stmg %r0,%r1,0(%r2) 61 + BR_EX %r14 62 + SYM_FUNC_END(__lshrti3) 63 + EXPORT_SYMBOL(__lshrti3)
+40 -1
drivers/s390/char/zcore.c
··· 51 51 static struct dentry *zcore_reipl_file; 52 52 static struct dentry *zcore_hsa_file; 53 53 static struct ipl_parameter_block *zcore_ipl_block; 54 + static unsigned long os_info_flags; 54 55 55 56 static DEFINE_MUTEX(hsa_buf_mutex); 56 57 static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); ··· 140 139 { 141 140 if (zcore_ipl_block) { 142 141 diag308(DIAG308_SET, zcore_ipl_block); 143 - diag308(DIAG308_LOAD_CLEAR, NULL); 142 + if (os_info_flags & OS_INFO_FLAG_REIPL_CLEAR) 143 + diag308(DIAG308_LOAD_CLEAR, NULL); 144 + /* Use special diag308 subcode for CCW normal ipl */ 145 + if (zcore_ipl_block->pb0_hdr.pbt == IPL_PBT_CCW) 146 + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); 147 + else 148 + diag308(DIAG308_LOAD_NORMAL, NULL); 144 149 } 145 150 return count; 146 151 } ··· 219 212 */ 220 213 static int __init zcore_reipl_init(void) 221 214 { 215 + struct os_info_entry *entry; 222 216 struct ipib_info ipib_info; 217 + unsigned long os_info_addr; 218 + struct os_info *os_info; 223 219 int rc; 224 220 225 221 rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info)); ··· 244 234 free_page((unsigned long) zcore_ipl_block); 245 235 zcore_ipl_block = NULL; 246 236 } 237 + /* 238 + * Read the bit-flags field from os_info flags entry. 239 + * Return zero even for os_info read or entry checksum errors in order 240 + * to continue dump processing, considering that os_info could be 241 + * corrupted on the panicked system. 
242 + */ 243 + os_info = (void *)__get_free_page(GFP_KERNEL); 244 + if (!os_info) 245 + return -ENOMEM; 246 + rc = memcpy_hsa_kernel(&os_info_addr, __LC_OS_INFO, sizeof(os_info_addr)); 247 + if (rc) 248 + goto out; 249 + if (os_info_addr < sclp.hsa_size) 250 + rc = memcpy_hsa_kernel(os_info, os_info_addr, PAGE_SIZE); 251 + else 252 + rc = memcpy_real(os_info, os_info_addr, PAGE_SIZE); 253 + if (rc || os_info_csum(os_info) != os_info->csum) 254 + goto out; 255 + entry = &os_info->entry[OS_INFO_FLAGS_ENTRY]; 256 + if (entry->addr && entry->size) { 257 + if (entry->addr < sclp.hsa_size) 258 + rc = memcpy_hsa_kernel(&os_info_flags, entry->addr, sizeof(os_info_flags)); 259 + else 260 + rc = memcpy_real(&os_info_flags, entry->addr, sizeof(os_info_flags)); 261 + if (rc || (__force u32)csum_partial(&os_info_flags, entry->size, 0) != entry->csum) 262 + os_info_flags = 0; 263 + } 264 + out: 265 + free_page((unsigned long)os_info); 247 266 return 0; 248 267 } 249 268
+1 -1
drivers/s390/cio/vfio_ccw_drv.c
··· 171 171 return -ENODEV; 172 172 } 173 173 174 - parent = kzalloc(sizeof(*parent), GFP_KERNEL); 174 + parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL); 175 175 if (!parent) 176 176 return -ENOMEM; 177 177
+1 -1
drivers/s390/cio/vfio_ccw_private.h
··· 79 79 80 80 struct mdev_parent parent; 81 81 struct mdev_type mdev_type; 82 - struct mdev_type *mdev_types[1]; 82 + struct mdev_type *mdev_types[]; 83 83 }; 84 84 85 85 /**
+335 -174
drivers/s390/crypto/pkey_api.c
··· 2 2 /* 3 3 * pkey device driver 4 4 * 5 - * Copyright IBM Corp. 2017,2019 5 + * Copyright IBM Corp. 2017, 2023 6 + * 6 7 * Author(s): Harald Freudenberger 7 8 */ 8 9 ··· 33 32 MODULE_DESCRIPTION("s390 protected key interface"); 34 33 35 34 #define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */ 35 + #define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header)) 36 36 #define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ 37 37 #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ 38 + #define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */ 38 39 39 40 /* 40 41 * debug feature data and functions ··· 74 71 } __packed; 75 72 76 73 /* inside view of a clear key token (type 0x00 version 0x02) */ 77 - struct clearaeskeytoken { 78 - u8 type; /* 0x00 for PAES specific key tokens */ 74 + struct clearkeytoken { 75 + u8 type; /* 0x00 for PAES specific key tokens */ 79 76 u8 res0[3]; 80 - u8 version; /* 0x02 for clear AES key token */ 77 + u8 version; /* 0x02 for clear key token */ 81 78 u8 res1[3]; 82 - u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 83 - u32 len; /* bytes actually stored in clearkey[] */ 79 + u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */ 80 + u32 len; /* bytes actually stored in clearkey[] */ 84 81 u8 clearkey[]; /* clear key value */ 85 82 } __packed; 86 83 84 + /* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */ 85 + static inline u32 pkey_keytype_aes_to_size(u32 keytype) 86 + { 87 + switch (keytype) { 88 + case PKEY_KEYTYPE_AES_128: 89 + return 16; 90 + case PKEY_KEYTYPE_AES_192: 91 + return 24; 92 + case PKEY_KEYTYPE_AES_256: 93 + return 32; 94 + default: 95 + return 0; 96 + } 97 + } 98 + 87 99 /* 88 - * Create a protected key from a clear key value. 100 + * Create a protected key from a clear key value via PCKMO instruction. 
89 101 */ 90 - static int pkey_clr2protkey(u32 keytype, 91 - const struct pkey_clrkey *clrkey, 92 - struct pkey_protkey *protkey) 102 + static int pkey_clr2protkey(u32 keytype, const u8 *clrkey, 103 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 93 104 { 94 105 /* mask of available pckmo subfunctions */ 95 106 static cpacf_mask_t pckmo_functions; 96 107 97 - long fc; 108 + u8 paramblock[112]; 109 + u32 pkeytype; 98 110 int keysize; 99 - u8 paramblock[64]; 111 + long fc; 100 112 101 113 switch (keytype) { 102 114 case PKEY_KEYTYPE_AES_128: 115 + /* 16 byte key, 32 byte aes wkvp, total 48 bytes */ 103 116 keysize = 16; 117 + pkeytype = keytype; 104 118 fc = CPACF_PCKMO_ENC_AES_128_KEY; 105 119 break; 106 120 case PKEY_KEYTYPE_AES_192: 121 + /* 24 byte key, 32 byte aes wkvp, total 56 bytes */ 107 122 keysize = 24; 123 + pkeytype = keytype; 108 124 fc = CPACF_PCKMO_ENC_AES_192_KEY; 109 125 break; 110 126 case PKEY_KEYTYPE_AES_256: 127 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 111 128 keysize = 32; 129 + pkeytype = keytype; 112 130 fc = CPACF_PCKMO_ENC_AES_256_KEY; 113 131 break; 132 + case PKEY_KEYTYPE_ECC_P256: 133 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 134 + keysize = 32; 135 + pkeytype = PKEY_KEYTYPE_ECC; 136 + fc = CPACF_PCKMO_ENC_ECC_P256_KEY; 137 + break; 138 + case PKEY_KEYTYPE_ECC_P384: 139 + /* 48 byte key, 32 byte aes wkvp, total 80 bytes */ 140 + keysize = 48; 141 + pkeytype = PKEY_KEYTYPE_ECC; 142 + fc = CPACF_PCKMO_ENC_ECC_P384_KEY; 143 + break; 144 + case PKEY_KEYTYPE_ECC_P521: 145 + /* 80 byte key, 32 byte aes wkvp, total 112 bytes */ 146 + keysize = 80; 147 + pkeytype = PKEY_KEYTYPE_ECC; 148 + fc = CPACF_PCKMO_ENC_ECC_P521_KEY; 149 + break; 150 + case PKEY_KEYTYPE_ECC_ED25519: 151 + /* 32 byte key, 32 byte aes wkvp, total 64 bytes */ 152 + keysize = 32; 153 + pkeytype = PKEY_KEYTYPE_ECC; 154 + fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY; 155 + break; 156 + case PKEY_KEYTYPE_ECC_ED448: 157 + /* 64 byte key, 32 byte aes wkvp, total 
96 bytes */ 158 + keysize = 64; 159 + pkeytype = PKEY_KEYTYPE_ECC; 160 + fc = CPACF_PCKMO_ENC_ECC_ED448_KEY; 161 + break; 114 162 default: 115 - DEBUG_ERR("%s unknown/unsupported keytype %d\n", 163 + DEBUG_ERR("%s unknown/unsupported keytype %u\n", 116 164 __func__, keytype); 165 + return -EINVAL; 166 + } 167 + 168 + if (*protkeylen < keysize + AES_WK_VP_SIZE) { 169 + DEBUG_ERR("%s prot key buffer size too small: %u < %d\n", 170 + __func__, *protkeylen, keysize + AES_WK_VP_SIZE); 117 171 return -EINVAL; 118 172 } 119 173 ··· 188 128 189 129 /* prepare param block */ 190 130 memset(paramblock, 0, sizeof(paramblock)); 191 - memcpy(paramblock, clrkey->clrkey, keysize); 131 + memcpy(paramblock, clrkey, keysize); 192 132 193 133 /* call the pckmo instruction */ 194 134 cpacf_pckmo(fc, paramblock); 195 135 196 - /* copy created protected key */ 197 - protkey->type = keytype; 198 - protkey->len = keysize + 32; 199 - memcpy(protkey->protkey, paramblock, keysize + 32); 136 + /* copy created protected key to key buffer including the wkvp block */ 137 + *protkeylen = keysize + AES_WK_VP_SIZE; 138 + memcpy(protkey, paramblock, *protkeylen); 139 + *protkeytype = pkeytype; 200 140 201 141 return 0; 202 142 } ··· 204 144 /* 205 145 * Find card and transform secure key into protected key. 
206 146 */ 207 - static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey) 147 + static int pkey_skey2pkey(const u8 *key, u8 *protkey, 148 + u32 *protkeylen, u32 *protkeytype) 208 149 { 209 - int rc, verify; 210 - u16 cardnr, domain; 211 150 struct keytoken_header *hdr = (struct keytoken_header *)key; 151 + u16 cardnr, domain; 152 + int rc, verify; 212 153 213 154 zcrypt_wait_api_operational(); 214 155 ··· 228 167 continue; 229 168 switch (hdr->version) { 230 169 case TOKVER_CCA_AES: 231 - rc = cca_sec2protkey(cardnr, domain, 232 - key, pkey->protkey, 233 - &pkey->len, &pkey->type); 170 + rc = cca_sec2protkey(cardnr, domain, key, 171 + protkey, protkeylen, protkeytype); 234 172 break; 235 173 case TOKVER_CCA_VLSC: 236 - rc = cca_cipher2protkey(cardnr, domain, 237 - key, pkey->protkey, 238 - &pkey->len, &pkey->type); 174 + rc = cca_cipher2protkey(cardnr, domain, key, 175 + protkey, protkeylen, 176 + protkeytype); 239 177 break; 240 178 default: 241 179 return -EINVAL; ··· 255 195 static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen, 256 196 u8 *keybuf, size_t *keybuflen) 257 197 { 258 - int i, rc; 259 - u16 card, dom; 260 198 u32 nr_apqns, *apqns = NULL; 199 + u16 card, dom; 200 + int i, rc; 261 201 262 202 zcrypt_wait_api_operational(); 263 203 ··· 287 227 /* 288 228 * Find card and transform EP11 secure key into protected key. 
289 229 */ 290 - static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey) 230 + static int pkey_ep11key2pkey(const u8 *key, u8 *protkey, 231 + u32 *protkeylen, u32 *protkeytype) 291 232 { 292 - int i, rc; 293 - u16 card, dom; 294 - u32 nr_apqns, *apqns = NULL; 295 233 struct ep11keyblob *kb = (struct ep11keyblob *)key; 234 + u32 nr_apqns, *apqns = NULL; 235 + u16 card, dom; 236 + int i, rc; 296 237 297 238 zcrypt_wait_api_operational(); 298 239 ··· 307 246 for (rc = -ENODEV, i = 0; i < nr_apqns; i++) { 308 247 card = apqns[i] >> 16; 309 248 dom = apqns[i] & 0xFFFF; 310 - pkey->len = sizeof(pkey->protkey); 311 249 rc = ep11_kblob2protkey(card, dom, key, kb->head.len, 312 - pkey->protkey, &pkey->len, &pkey->type); 250 + protkey, protkeylen, protkeytype); 313 251 if (rc == 0) 314 252 break; 315 253 } ··· 366 306 /* 367 307 * Generate a random protected key 368 308 */ 369 - static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey) 309 + static int pkey_genprotkey(u32 keytype, u8 *protkey, 310 + u32 *protkeylen, u32 *protkeytype) 370 311 { 371 - struct pkey_clrkey clrkey; 312 + u8 clrkey[32]; 372 313 int keysize; 373 314 int rc; 374 315 375 - switch (keytype) { 376 - case PKEY_KEYTYPE_AES_128: 377 - keysize = 16; 378 - break; 379 - case PKEY_KEYTYPE_AES_192: 380 - keysize = 24; 381 - break; 382 - case PKEY_KEYTYPE_AES_256: 383 - keysize = 32; 384 - break; 385 - default: 316 + keysize = pkey_keytype_aes_to_size(keytype); 317 + if (!keysize) { 386 318 DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, 387 319 keytype); 388 320 return -EINVAL; 389 321 } 390 322 391 323 /* generate a dummy random clear key */ 392 - get_random_bytes(clrkey.clrkey, keysize); 324 + get_random_bytes(clrkey, keysize); 393 325 394 326 /* convert it to a dummy protected key */ 395 - rc = pkey_clr2protkey(keytype, &clrkey, protkey); 327 + rc = pkey_clr2protkey(keytype, clrkey, 328 + protkey, protkeylen, protkeytype); 396 329 if (rc) 397 330 return rc; 398 331 399 
332 /* replace the key part of the protected key with random bytes */ 400 - get_random_bytes(protkey->protkey, keysize); 333 + get_random_bytes(protkey, keysize); 401 334 402 335 return 0; 403 336 } ··· 398 345 /* 399 346 * Verify if a protected key is still valid 400 347 */ 401 - static int pkey_verifyprotkey(const struct pkey_protkey *protkey) 348 + static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen, 349 + u32 protkeytype) 402 350 { 403 - unsigned long fc; 404 351 struct { 405 352 u8 iv[AES_BLOCK_SIZE]; 406 353 u8 key[MAXPROTKEYSIZE]; 407 354 } param; 408 355 u8 null_msg[AES_BLOCK_SIZE]; 409 356 u8 dest_buf[AES_BLOCK_SIZE]; 410 - unsigned int k; 357 + unsigned int k, pkeylen; 358 + unsigned long fc; 411 359 412 - switch (protkey->type) { 360 + switch (protkeytype) { 413 361 case PKEY_KEYTYPE_AES_128: 362 + pkeylen = 16 + AES_WK_VP_SIZE; 414 363 fc = CPACF_KMC_PAES_128; 415 364 break; 416 365 case PKEY_KEYTYPE_AES_192: 366 + pkeylen = 24 + AES_WK_VP_SIZE; 417 367 fc = CPACF_KMC_PAES_192; 418 368 break; 419 369 case PKEY_KEYTYPE_AES_256: 370 + pkeylen = 32 + AES_WK_VP_SIZE; 420 371 fc = CPACF_KMC_PAES_256; 421 372 break; 422 373 default: 423 - DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, 424 - protkey->type); 374 + DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__, 375 + protkeytype); 376 + return -EINVAL; 377 + } 378 + if (protkeylen != pkeylen) { 379 + DEBUG_ERR("%s invalid protected key size %u for keytype %u\n", 380 + __func__, protkeylen, protkeytype); 425 381 return -EINVAL; 426 382 } 427 383 428 384 memset(null_msg, 0, sizeof(null_msg)); 429 385 430 386 memset(param.iv, 0, sizeof(param.iv)); 431 - memcpy(param.key, protkey->protkey, sizeof(param.key)); 387 + memcpy(param.key, protkey, protkeylen); 432 388 433 389 k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, 434 390 sizeof(null_msg)); ··· 449 387 return 0; 450 388 } 451 389 390 + /* Helper for pkey_nonccatok2pkey, handles aes clear key token */ 391 + static 
int nonccatokaes2pkey(const struct clearkeytoken *t, 392 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 393 + { 394 + size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE); 395 + u8 *tmpbuf = NULL; 396 + u32 keysize; 397 + int rc; 398 + 399 + keysize = pkey_keytype_aes_to_size(t->keytype); 400 + if (!keysize) { 401 + DEBUG_ERR("%s unknown/unsupported keytype %u\n", 402 + __func__, t->keytype); 403 + return -EINVAL; 404 + } 405 + if (t->len != keysize) { 406 + DEBUG_ERR("%s non clear key aes token: invalid key len %u\n", 407 + __func__, t->len); 408 + return -EINVAL; 409 + } 410 + 411 + /* try direct way with the PCKMO instruction */ 412 + rc = pkey_clr2protkey(t->keytype, t->clearkey, 413 + protkey, protkeylen, protkeytype); 414 + if (!rc) 415 + goto out; 416 + 417 + /* PCKMO failed, so try the CCA secure key way */ 418 + tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); 419 + if (!tmpbuf) 420 + return -ENOMEM; 421 + zcrypt_wait_api_operational(); 422 + rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf); 423 + if (rc) 424 + goto try_via_ep11; 425 + rc = pkey_skey2pkey(tmpbuf, 426 + protkey, protkeylen, protkeytype); 427 + if (!rc) 428 + goto out; 429 + 430 + try_via_ep11: 431 + /* if the CCA way also failed, let's try via EP11 */ 432 + rc = pkey_clr2ep11key(t->clearkey, t->len, 433 + tmpbuf, &tmpbuflen); 434 + if (rc) 435 + goto failure; 436 + rc = pkey_ep11key2pkey(tmpbuf, 437 + protkey, protkeylen, protkeytype); 438 + if (!rc) 439 + goto out; 440 + 441 + failure: 442 + DEBUG_ERR("%s unable to build protected key from clear", __func__); 443 + 444 + out: 445 + kfree(tmpbuf); 446 + return rc; 447 + } 448 + 449 + /* Helper for pkey_nonccatok2pkey, handles ecc clear key token */ 450 + static int nonccatokecc2pkey(const struct clearkeytoken *t, 451 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 452 + { 453 + u32 keylen; 454 + int rc; 455 + 456 + switch (t->keytype) { 457 + case PKEY_KEYTYPE_ECC_P256: 458 + keylen = 32; 459 + break; 460 
+ case PKEY_KEYTYPE_ECC_P384: 461 + keylen = 48; 462 + break; 463 + case PKEY_KEYTYPE_ECC_P521: 464 + keylen = 80; 465 + break; 466 + case PKEY_KEYTYPE_ECC_ED25519: 467 + keylen = 32; 468 + break; 469 + case PKEY_KEYTYPE_ECC_ED448: 470 + keylen = 64; 471 + break; 472 + default: 473 + DEBUG_ERR("%s unknown/unsupported keytype %u\n", 474 + __func__, t->keytype); 475 + return -EINVAL; 476 + } 477 + 478 + if (t->len != keylen) { 479 + DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n", 480 + __func__, t->len); 481 + return -EINVAL; 482 + } 483 + 484 + /* only one path possible: via PCKMO instruction */ 485 + rc = pkey_clr2protkey(t->keytype, t->clearkey, 486 + protkey, protkeylen, protkeytype); 487 + if (rc) { 488 + DEBUG_ERR("%s unable to build protected key from clear", 489 + __func__); 490 + } 491 + 492 + return rc; 493 + } 494 + 452 495 /* 453 496 * Transform a non-CCA key token into a protected key 454 497 */ 455 498 static int pkey_nonccatok2pkey(const u8 *key, u32 keylen, 456 - struct pkey_protkey *protkey) 499 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 457 500 { 458 - int rc = -EINVAL; 459 - u8 *tmpbuf = NULL; 460 501 struct keytoken_header *hdr = (struct keytoken_header *)key; 502 + int rc = -EINVAL; 461 503 462 504 switch (hdr->version) { 463 505 case TOKVER_PROTECTED_KEY: { ··· 570 404 if (keylen != sizeof(struct protaeskeytoken)) 571 405 goto out; 572 406 t = (struct protaeskeytoken *)key; 573 - protkey->len = t->len; 574 - protkey->type = t->keytype; 575 - memcpy(protkey->protkey, t->protkey, 576 - sizeof(protkey->protkey)); 577 - rc = pkey_verifyprotkey(protkey); 407 + rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype); 408 + if (rc) 409 + goto out; 410 + memcpy(protkey, t->protkey, t->len); 411 + *protkeylen = t->len; 412 + *protkeytype = t->keytype; 578 413 break; 579 414 } 580 415 case TOKVER_CLEAR_KEY: { 581 - struct clearaeskeytoken *t; 582 - struct pkey_clrkey ckey; 583 - union u_tmpbuf { 584 - u8 skey[SECKEYBLOBSIZE]; 585 - u8 
ep11key[MAXEP11AESKEYBLOBSIZE]; 586 - }; 587 - size_t tmpbuflen = sizeof(union u_tmpbuf); 416 + struct clearkeytoken *t = (struct clearkeytoken *)key; 588 417 589 - if (keylen < sizeof(struct clearaeskeytoken)) 418 + if (keylen < sizeof(struct clearkeytoken) || 419 + keylen != sizeof(*t) + t->len) 590 420 goto out; 591 - t = (struct clearaeskeytoken *)key; 592 - if (keylen != sizeof(*t) + t->len) 593 - goto out; 594 - if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) || 595 - (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) || 596 - (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) 597 - memcpy(ckey.clrkey, t->clearkey, t->len); 598 - else 599 - goto out; 600 - /* alloc temp key buffer space */ 601 - tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC); 602 - if (!tmpbuf) { 603 - rc = -ENOMEM; 604 - goto out; 421 + switch (t->keytype) { 422 + case PKEY_KEYTYPE_AES_128: 423 + case PKEY_KEYTYPE_AES_192: 424 + case PKEY_KEYTYPE_AES_256: 425 + rc = nonccatokaes2pkey(t, protkey, 426 + protkeylen, protkeytype); 427 + break; 428 + case PKEY_KEYTYPE_ECC_P256: 429 + case PKEY_KEYTYPE_ECC_P384: 430 + case PKEY_KEYTYPE_ECC_P521: 431 + case PKEY_KEYTYPE_ECC_ED25519: 432 + case PKEY_KEYTYPE_ECC_ED448: 433 + rc = nonccatokecc2pkey(t, protkey, 434 + protkeylen, protkeytype); 435 + break; 436 + default: 437 + DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n", 438 + __func__, t->keytype); 439 + return -EINVAL; 605 440 } 606 - /* try direct way with the PCKMO instruction */ 607 - rc = pkey_clr2protkey(t->keytype, &ckey, protkey); 608 - if (rc == 0) 609 - break; 610 - /* PCKMO failed, so try the CCA secure key way */ 611 - zcrypt_wait_api_operational(); 612 - rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, 613 - ckey.clrkey, tmpbuf); 614 - if (rc == 0) 615 - rc = pkey_skey2pkey(tmpbuf, protkey); 616 - if (rc == 0) 617 - break; 618 - /* if the CCA way also failed, let's try via EP11 */ 619 - rc = pkey_clr2ep11key(ckey.clrkey, t->len, 620 - tmpbuf, &tmpbuflen); 621 - if 
(rc == 0) 622 - rc = pkey_ep11key2pkey(tmpbuf, protkey); 623 - /* now we should really have an protected key */ 624 - DEBUG_ERR("%s unable to build protected key from clear", 625 - __func__); 626 441 break; 627 442 } 628 443 case TOKVER_EP11_AES: { ··· 611 464 rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); 612 465 if (rc) 613 466 goto out; 614 - rc = pkey_ep11key2pkey(key, protkey); 467 + rc = pkey_ep11key2pkey(key, 468 + protkey, protkeylen, protkeytype); 615 469 break; 616 470 } 617 471 case TOKVER_EP11_AES_WITH_HEADER: ··· 621 473 if (rc) 622 474 goto out; 623 475 rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header), 624 - protkey); 476 + protkey, protkeylen, protkeytype); 625 477 break; 626 478 default: 627 479 DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", 628 480 __func__, hdr->version); 629 - rc = -EINVAL; 630 481 } 631 482 632 483 out: 633 - kfree(tmpbuf); 634 484 return rc; 635 485 } 636 486 ··· 636 490 * Transform a CCA internal key token into a protected key 637 491 */ 638 492 static int pkey_ccainttok2pkey(const u8 *key, u32 keylen, 639 - struct pkey_protkey *protkey) 493 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 640 494 { 641 495 struct keytoken_header *hdr = (struct keytoken_header *)key; 642 496 ··· 655 509 return -EINVAL; 656 510 } 657 511 658 - return pkey_skey2pkey(key, protkey); 512 + return pkey_skey2pkey(key, protkey, protkeylen, protkeytype); 659 513 } 660 514 661 515 /* 662 516 * Transform a key blob (of any type) into a protected key 663 517 */ 664 518 int pkey_keyblob2pkey(const u8 *key, u32 keylen, 665 - struct pkey_protkey *protkey) 519 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 666 520 { 667 - int rc; 668 521 struct keytoken_header *hdr = (struct keytoken_header *)key; 522 + int rc; 669 523 670 524 if (keylen < sizeof(struct keytoken_header)) { 671 525 DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen); ··· 674 528 675 529 switch (hdr->type) { 676 530 case TOKTYPE_NON_CCA: 677 - rc = 
pkey_nonccatok2pkey(key, keylen, protkey); 531 + rc = pkey_nonccatok2pkey(key, keylen, 532 + protkey, protkeylen, protkeytype); 678 533 break; 679 534 case TOKTYPE_CCA_INTERNAL: 680 - rc = pkey_ccainttok2pkey(key, keylen, protkey); 535 + rc = pkey_ccainttok2pkey(key, keylen, 536 + protkey, protkeylen, protkeytype); 681 537 break; 682 538 default: 683 539 DEBUG_ERR("%s unknown/unsupported blob type %d\n", ··· 811 663 enum pkey_key_type *ktype, 812 664 enum pkey_key_size *ksize, u32 *flags) 813 665 { 814 - int rc; 815 - u32 _nr_apqns, *_apqns = NULL; 816 666 struct keytoken_header *hdr = (struct keytoken_header *)key; 667 + u32 _nr_apqns, *_apqns = NULL; 668 + int rc; 817 669 818 670 if (keylen < sizeof(struct keytoken_header)) 819 671 return -EINVAL; ··· 919 771 920 772 static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, 921 773 const u8 *key, size_t keylen, 922 - struct pkey_protkey *pkey) 774 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 923 775 { 924 - int i, card, dom, rc; 925 776 struct keytoken_header *hdr = (struct keytoken_header *)key; 777 + int i, card, dom, rc; 926 778 927 779 /* check for at least one apqn given */ 928 780 if (!apqns || !nr_apqns) ··· 954 806 if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) 955 807 return -EINVAL; 956 808 } else { 957 - return pkey_nonccatok2pkey(key, keylen, pkey); 809 + return pkey_nonccatok2pkey(key, keylen, 810 + protkey, protkeylen, 811 + protkeytype); 958 812 } 959 813 } else { 960 814 DEBUG_ERR("%s unknown/unsupported blob type %d\n", ··· 972 822 dom = apqns[i].domain; 973 823 if (hdr->type == TOKTYPE_CCA_INTERNAL && 974 824 hdr->version == TOKVER_CCA_AES) { 975 - rc = cca_sec2protkey(card, dom, key, pkey->protkey, 976 - &pkey->len, &pkey->type); 825 + rc = cca_sec2protkey(card, dom, key, 826 + protkey, protkeylen, protkeytype); 977 827 } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 978 828 hdr->version == TOKVER_CCA_VLSC) { 979 - rc = cca_cipher2protkey(card, dom, key, 
pkey->protkey, 980 - &pkey->len, &pkey->type); 829 + rc = cca_cipher2protkey(card, dom, key, 830 + protkey, protkeylen, 831 + protkeytype); 981 832 } else { 982 833 /* EP11 AES secure key blob */ 983 834 struct ep11keyblob *kb = (struct ep11keyblob *)key; 984 835 985 - pkey->len = sizeof(pkey->protkey); 986 836 rc = ep11_kblob2protkey(card, dom, key, kb->head.len, 987 - pkey->protkey, &pkey->len, 988 - &pkey->type); 837 + protkey, protkeylen, 838 + protkeytype); 989 839 } 990 840 if (rc == 0) 991 841 break; ··· 997 847 static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, 998 848 struct pkey_apqn *apqns, size_t *nr_apqns) 999 849 { 1000 - int rc; 1001 - u32 _nr_apqns, *_apqns = NULL; 1002 850 struct keytoken_header *hdr = (struct keytoken_header *)key; 851 + u32 _nr_apqns, *_apqns = NULL; 852 + int rc; 1003 853 1004 854 if (keylen < sizeof(struct keytoken_header) || flags == 0) 1005 855 return -EINVAL; ··· 1010 860 (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1011 861 hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1012 862 is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1013 - int minhwtype = 0, api = 0; 1014 863 struct ep11keyblob *kb = (struct ep11keyblob *) 1015 864 (key + sizeof(struct ep11kblob_header)); 865 + int minhwtype = 0, api = 0; 1016 866 1017 867 if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 1018 868 return -EINVAL; ··· 1027 877 } else if (hdr->type == TOKTYPE_NON_CCA && 1028 878 hdr->version == TOKVER_EP11_AES && 1029 879 is_ep11_keyblob(key)) { 1030 - int minhwtype = 0, api = 0; 1031 880 struct ep11keyblob *kb = (struct ep11keyblob *)key; 881 + int minhwtype = 0, api = 0; 1032 882 1033 883 if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 1034 884 return -EINVAL; ··· 1041 891 if (rc) 1042 892 goto out; 1043 893 } else if (hdr->type == TOKTYPE_CCA_INTERNAL) { 1044 - int minhwtype = ZCRYPT_CEX3C; 1045 894 u64 cur_mkvp = 0, old_mkvp = 0; 895 + int minhwtype = ZCRYPT_CEX3C; 1046 896 1047 897 if (hdr->version == TOKVER_CCA_AES) { 1048 898 
struct secaeskeytoken *t = (struct secaeskeytoken *)key; ··· 1069 919 if (rc) 1070 920 goto out; 1071 921 } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 1072 - u64 cur_mkvp = 0, old_mkvp = 0; 1073 922 struct eccprivkeytoken *t = (struct eccprivkeytoken *)key; 923 + u64 cur_mkvp = 0, old_mkvp = 0; 1074 924 1075 925 if (t->secid == 0x20) { 1076 926 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) ··· 1107 957 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 1108 958 struct pkey_apqn *apqns, size_t *nr_apqns) 1109 959 { 1110 - int rc; 1111 960 u32 _nr_apqns, *_apqns = NULL; 961 + int rc; 1112 962 1113 963 zcrypt_wait_api_operational(); 1114 964 ··· 1170 1020 } 1171 1021 1172 1022 static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns, 1173 - const u8 *key, size_t keylen, u32 *protkeytype, 1174 - u8 *protkey, u32 *protkeylen) 1023 + const u8 *key, size_t keylen, 1024 + u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1175 1025 { 1176 - int i, card, dom, rc; 1177 1026 struct keytoken_header *hdr = (struct keytoken_header *)key; 1027 + int i, card, dom, rc; 1178 1028 1179 1029 /* check for at least one apqn given */ 1180 1030 if (!apqns || !nr_apqns) ··· 1226 1076 if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1)) 1227 1077 return -EINVAL; 1228 1078 } else if (hdr->type == TOKTYPE_NON_CCA) { 1229 - struct pkey_protkey pkey; 1230 - 1231 - rc = pkey_nonccatok2pkey(key, keylen, &pkey); 1232 - if (rc) 1233 - return rc; 1234 - memcpy(protkey, pkey.protkey, pkey.len); 1235 - *protkeylen = pkey.len; 1236 - *protkeytype = pkey.type; 1237 - return 0; 1079 + return pkey_nonccatok2pkey(key, keylen, 1080 + protkey, protkeylen, protkeytype); 1238 1081 } else { 1239 1082 DEBUG_ERR("%s unknown/unsupported blob type %d\n", 1240 1083 __func__, hdr->type); ··· 1273 1130 1274 1131 static void *_copy_key_from_user(void __user *ukey, size_t keylen) 1275 1132 { 1276 - if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE) 1133 + if (!ukey || keylen < 
MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE) 1277 1134 return ERR_PTR(-EINVAL); 1278 1135 1279 1136 return memdup_user(ukey, keylen); ··· 1330 1187 1331 1188 if (copy_from_user(&ksp, usp, sizeof(ksp))) 1332 1189 return -EFAULT; 1190 + ksp.protkey.len = sizeof(ksp.protkey.protkey); 1333 1191 rc = cca_sec2protkey(ksp.cardnr, ksp.domain, 1334 1192 ksp.seckey.seckey, ksp.protkey.protkey, 1335 1193 &ksp.protkey.len, &ksp.protkey.type); ··· 1347 1203 1348 1204 if (copy_from_user(&kcp, ucp, sizeof(kcp))) 1349 1205 return -EFAULT; 1350 - rc = pkey_clr2protkey(kcp.keytype, 1351 - &kcp.clrkey, &kcp.protkey); 1206 + kcp.protkey.len = sizeof(kcp.protkey.protkey); 1207 + rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey, 1208 + kcp.protkey.protkey, 1209 + &kcp.protkey.len, &kcp.protkey.type); 1352 1210 DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); 1353 1211 if (rc) 1354 1212 break; ··· 1380 1234 1381 1235 if (copy_from_user(&ksp, usp, sizeof(ksp))) 1382 1236 return -EFAULT; 1383 - rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey); 1237 + ksp.protkey.len = sizeof(ksp.protkey.protkey); 1238 + rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey, 1239 + &ksp.protkey.len, &ksp.protkey.type); 1384 1240 DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); 1385 1241 if (rc) 1386 1242 break; ··· 1411 1263 1412 1264 if (copy_from_user(&kgp, ugp, sizeof(kgp))) 1413 1265 return -EFAULT; 1414 - rc = pkey_genprotkey(kgp.keytype, &kgp.protkey); 1266 + kgp.protkey.len = sizeof(kgp.protkey.protkey); 1267 + rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey, 1268 + &kgp.protkey.len, &kgp.protkey.type); 1415 1269 DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); 1416 1270 if (rc) 1417 1271 break; ··· 1427 1277 1428 1278 if (copy_from_user(&kvp, uvp, sizeof(kvp))) 1429 1279 return -EFAULT; 1430 - rc = pkey_verifyprotkey(&kvp.protkey); 1280 + rc = pkey_verifyprotkey(kvp.protkey.protkey, 1281 + kvp.protkey.len, kvp.protkey.type); 1431 1282 DEBUG_DBG("%s 
pkey_verifyprotkey()=%d\n", __func__, rc); 1432 1283 break; 1433 1284 } ··· 1442 1291 kkey = _copy_key_from_user(ktp.key, ktp.keylen); 1443 1292 if (IS_ERR(kkey)) 1444 1293 return PTR_ERR(kkey); 1445 - rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); 1294 + ktp.protkey.len = sizeof(ktp.protkey.protkey); 1295 + rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey, 1296 + &ktp.protkey.len, &ktp.protkey.type); 1446 1297 DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); 1447 1298 memzero_explicit(kkey, ktp.keylen); 1448 1299 kfree(kkey); ··· 1456 1303 } 1457 1304 case PKEY_GENSECK2: { 1458 1305 struct pkey_genseck2 __user *ugs = (void __user *)arg; 1306 + size_t klen = KEYBLOBBUFSIZE; 1459 1307 struct pkey_genseck2 kgs; 1460 1308 struct pkey_apqn *apqns; 1461 - size_t klen = KEYBLOBBUFSIZE; 1462 1309 u8 *kkey; 1463 1310 1464 1311 if (copy_from_user(&kgs, ugs, sizeof(kgs))) ··· 1498 1345 } 1499 1346 case PKEY_CLR2SECK2: { 1500 1347 struct pkey_clr2seck2 __user *ucs = (void __user *)arg; 1348 + size_t klen = KEYBLOBBUFSIZE; 1501 1349 struct pkey_clr2seck2 kcs; 1502 1350 struct pkey_apqn *apqns; 1503 - size_t klen = KEYBLOBBUFSIZE; 1504 1351 u8 *kkey; 1505 1352 1506 1353 if (copy_from_user(&kcs, ucs, sizeof(kcs))) ··· 1562 1409 } 1563 1410 case PKEY_KBLOB2PROTK2: { 1564 1411 struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; 1565 - struct pkey_kblob2pkey2 ktp; 1566 1412 struct pkey_apqn *apqns = NULL; 1413 + struct pkey_kblob2pkey2 ktp; 1567 1414 u8 *kkey; 1568 1415 1569 1416 if (copy_from_user(&ktp, utp, sizeof(ktp))) ··· 1576 1423 kfree(apqns); 1577 1424 return PTR_ERR(kkey); 1578 1425 } 1426 + ktp.protkey.len = sizeof(ktp.protkey.protkey); 1579 1427 rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries, 1580 - kkey, ktp.keylen, &ktp.protkey); 1428 + kkey, ktp.keylen, 1429 + ktp.protkey.protkey, &ktp.protkey.len, 1430 + &ktp.protkey.type); 1581 1431 DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); 1582 1432 kfree(apqns); 1583 1433 
memzero_explicit(kkey, ktp.keylen); ··· 1593 1437 } 1594 1438 case PKEY_APQNS4K: { 1595 1439 struct pkey_apqns4key __user *uak = (void __user *)arg; 1596 - struct pkey_apqns4key kak; 1597 1440 struct pkey_apqn *apqns = NULL; 1441 + struct pkey_apqns4key kak; 1598 1442 size_t nr_apqns, len; 1599 1443 u8 *kkey; 1600 1444 ··· 1642 1486 } 1643 1487 case PKEY_APQNS4KT: { 1644 1488 struct pkey_apqns4keytype __user *uat = (void __user *)arg; 1645 - struct pkey_apqns4keytype kat; 1646 1489 struct pkey_apqn *apqns = NULL; 1490 + struct pkey_apqns4keytype kat; 1647 1491 size_t nr_apqns, len; 1648 1492 1649 1493 if (copy_from_user(&kat, uat, sizeof(kat))) ··· 1684 1528 } 1685 1529 case PKEY_KBLOB2PROTK3: { 1686 1530 struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; 1687 - struct pkey_kblob2pkey3 ktp; 1688 - struct pkey_apqn *apqns = NULL; 1689 1531 u32 protkeylen = PROTKEYBLOBBUFSIZE; 1532 + struct pkey_apqn *apqns = NULL; 1533 + struct pkey_kblob2pkey3 ktp; 1690 1534 u8 *kkey, *protkey; 1691 1535 1692 1536 if (copy_from_user(&ktp, utp, sizeof(ktp))) ··· 1705 1549 kfree(kkey); 1706 1550 return -ENOMEM; 1707 1551 } 1708 - rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey, 1709 - ktp.keylen, &ktp.pkeytype, 1710 - protkey, &protkeylen); 1552 + rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, 1553 + kkey, ktp.keylen, 1554 + protkey, &protkeylen, &ktp.pkeytype); 1711 1555 DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); 1712 1556 kfree(apqns); 1713 1557 memzero_explicit(kkey, ktp.keylen); ··· 1768 1612 protkeytoken.version = TOKVER_PROTECTED_KEY; 1769 1613 protkeytoken.keytype = keytype; 1770 1614 1771 - rc = pkey_genprotkey(protkeytoken.keytype, &protkey); 1615 + protkey.len = sizeof(protkey.protkey); 1616 + rc = pkey_genprotkey(protkeytoken.keytype, 1617 + protkey.protkey, &protkey.len, &protkey.type); 1772 1618 if (rc) 1773 1619 return rc; 1774 1620 ··· 1780 1622 memcpy(buf, &protkeytoken, sizeof(protkeytoken)); 1781 1623 1782 1624 if (is_xts) { 1783 - rc = 
pkey_genprotkey(protkeytoken.keytype, &protkey); 1625 + /* xts needs a second protected key, reuse protkey struct */ 1626 + protkey.len = sizeof(protkey.protkey); 1627 + rc = pkey_genprotkey(protkeytoken.keytype, 1628 + protkey.protkey, &protkey.len, &protkey.type); 1784 1629 if (rc) 1785 1630 return rc; 1786 1631 ··· 1878 1717 static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, 1879 1718 loff_t off, size_t count) 1880 1719 { 1881 - int rc; 1882 1720 struct pkey_seckey *seckey = (struct pkey_seckey *)buf; 1721 + int rc; 1883 1722 1884 1723 if (off != 0 || count < sizeof(struct secaeskeytoken)) 1885 1724 return -EINVAL; ··· 1985 1824 bool is_xts, char *buf, loff_t off, 1986 1825 size_t count) 1987 1826 { 1988 - int i, rc, card, dom; 1989 - u32 nr_apqns, *apqns = NULL; 1990 1827 size_t keysize = CCACIPHERTOKENSIZE; 1828 + u32 nr_apqns, *apqns = NULL; 1829 + int i, rc, card, dom; 1991 1830 1992 1831 if (off != 0 || count < CCACIPHERTOKENSIZE) 1993 1832 return -EINVAL; ··· 2108 1947 bool is_xts, char *buf, loff_t off, 2109 1948 size_t count) 2110 1949 { 2111 - int i, rc, card, dom; 2112 - u32 nr_apqns, *apqns = NULL; 2113 1950 size_t keysize = MAXEP11AESKEYBLOBSIZE; 1951 + u32 nr_apqns, *apqns = NULL; 1952 + int i, rc, card, dom; 2114 1953 2115 1954 if (off != 0 || count < MAXEP11AESKEYBLOBSIZE) 2116 1955 return -EINVAL;
+133 -1
drivers/s390/crypto/vfio_ap_ops.c
··· 716 716 ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); 717 717 if (ret) 718 718 goto err_put_vdev; 719 + matrix_mdev->req_trigger = NULL; 719 720 dev_set_drvdata(&mdev->dev, matrix_mdev); 720 721 mutex_lock(&matrix_dev->mdevs_lock); 721 722 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); ··· 1736 1735 vfio_ap_mdev_unset_kvm(matrix_mdev); 1737 1736 } 1738 1737 1738 + static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count) 1739 + { 1740 + struct device *dev = vdev->dev; 1741 + struct ap_matrix_mdev *matrix_mdev; 1742 + 1743 + matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); 1744 + 1745 + if (matrix_mdev->req_trigger) { 1746 + if (!(count % 10)) 1747 + dev_notice_ratelimited(dev, 1748 + "Relaying device request to user (#%u)\n", 1749 + count); 1750 + 1751 + eventfd_signal(matrix_mdev->req_trigger, 1); 1752 + } else if (count == 0) { 1753 + dev_notice(dev, 1754 + "No device request registered, blocked until released by user\n"); 1755 + } 1756 + } 1757 + 1739 1758 static int vfio_ap_mdev_get_device_info(unsigned long arg) 1740 1759 { 1741 1760 unsigned long minsz; ··· 1771 1750 1772 1751 info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; 1773 1752 info.num_regions = 0; 1774 - info.num_irqs = 0; 1753 + info.num_irqs = VFIO_AP_NUM_IRQS; 1775 1754 1776 1755 return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; 1756 + } 1757 + 1758 + static ssize_t vfio_ap_get_irq_info(unsigned long arg) 1759 + { 1760 + unsigned long minsz; 1761 + struct vfio_irq_info info; 1762 + 1763 + minsz = offsetofend(struct vfio_irq_info, count); 1764 + 1765 + if (copy_from_user(&info, (void __user *)arg, minsz)) 1766 + return -EFAULT; 1767 + 1768 + if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS) 1769 + return -EINVAL; 1770 + 1771 + switch (info.index) { 1772 + case VFIO_AP_REQ_IRQ_INDEX: 1773 + info.count = 1; 1774 + info.flags = VFIO_IRQ_INFO_EVENTFD; 1775 + break; 1776 + default: 1777 + return -EINVAL; 1778 + } 1779 + 1780 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; 1781 + } 1782 + 1783 + static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg) 1784 + { 1785 + int ret; 1786 + size_t data_size; 1787 + unsigned long minsz; 1788 + 1789 + minsz = offsetofend(struct vfio_irq_set, count); 1790 + 1791 + if (copy_from_user(irq_set, (void __user *)arg, minsz)) 1792 + return -EFAULT; 1793 + 1794 + ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS, 1795 + &data_size); 1796 + if (ret) 1797 + return ret; 1798 + 1799 + if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER)) 1800 + return -EINVAL; 1801 + 1802 + return 0; 1803 + } 1804 + 1805 + static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, 1806 + unsigned long arg) 1807 + { 1808 + s32 fd; 1809 + void __user *data; 1810 + unsigned long minsz; 1811 + struct eventfd_ctx *req_trigger; 1812 + 1813 + minsz = offsetofend(struct vfio_irq_set, count); 1814 + data = (void __user *)(arg + minsz); 1815 + 1816 + if (get_user(fd, (s32 __user *)data)) 1817 + return -EFAULT; 1818 + 1819 + if (fd == -1) { 1820 + if (matrix_mdev->req_trigger) 1821 + eventfd_ctx_put(matrix_mdev->req_trigger); 1822 + matrix_mdev->req_trigger = NULL; 1823 + } else if (fd >= 0) { 1824 + req_trigger = eventfd_ctx_fdget(fd); 1825 + if (IS_ERR(req_trigger)) 1826 + return 
PTR_ERR(req_trigger); 1827 + 1828 + if (matrix_mdev->req_trigger) 1829 + eventfd_ctx_put(matrix_mdev->req_trigger); 1830 + 1831 + matrix_mdev->req_trigger = req_trigger; 1832 + } else { 1833 + return -EINVAL; 1834 + } 1835 + 1836 + return 0; 1837 + } 1838 + 1839 + static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, 1840 + unsigned long arg) 1841 + { 1842 + int ret; 1843 + struct vfio_irq_set irq_set; 1844 + 1845 + ret = vfio_ap_irq_set_init(&irq_set, arg); 1846 + if (ret) 1847 + return ret; 1848 + 1849 + switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { 1850 + case VFIO_IRQ_SET_DATA_EVENTFD: 1851 + switch (irq_set.index) { 1852 + case VFIO_AP_REQ_IRQ_INDEX: 1853 + return vfio_ap_set_request_irq(matrix_mdev, arg); 1854 + default: 1855 + return -EINVAL; 1856 + } 1857 + default: 1858 + return -EINVAL; 1859 + } 1777 1860 } 1778 1861 1779 1862 static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, ··· 1894 1769 break; 1895 1770 case VFIO_DEVICE_RESET: 1896 1771 ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable); 1772 + break; 1773 + case VFIO_DEVICE_GET_IRQ_INFO: 1774 + ret = vfio_ap_get_irq_info(arg); 1775 + break; 1776 + case VFIO_DEVICE_SET_IRQS: 1777 + ret = vfio_ap_set_irqs(matrix_mdev, arg); 1897 1778 break; 1898 1779 default: 1899 1780 ret = -EOPNOTSUPP; ··· 1975 1844 .bind_iommufd = vfio_iommufd_emulated_bind, 1976 1845 .unbind_iommufd = vfio_iommufd_emulated_unbind, 1977 1846 .attach_ioas = vfio_iommufd_emulated_attach_ioas, 1847 + .request = vfio_ap_mdev_request 1978 1848 }; 1979 1849 1980 1850 static struct mdev_driver vfio_ap_matrix_driver = {
+3
drivers/s390/crypto/vfio_ap_private.h
··· 15 15 #include <linux/types.h> 16 16 #include <linux/mdev.h> 17 17 #include <linux/delay.h> 18 + #include <linux/eventfd.h> 18 19 #include <linux/mutex.h> 19 20 #include <linux/kvm_host.h> 20 21 #include <linux/vfio.h> ··· 104 103 * PQAP(AQIC) instruction. 105 104 * @mdev: the mediated device 106 105 * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev 106 + * @req_trigger eventfd ctx for signaling userspace to return a device 107 107 * @apm_add: bitmap of APIDs added to the host's AP configuration 108 108 * @aqm_add: bitmap of APQIs added to the host's AP configuration 109 109 * @adm_add: bitmap of control domain numbers added to the host's AP ··· 119 117 crypto_hook pqap_hook; 120 118 struct mdev_device *mdev; 121 119 struct ap_queue_table qtable; 120 + struct eventfd_ctx *req_trigger; 122 121 DECLARE_BITMAP(apm_add, AP_DEVICES); 123 122 DECLARE_BITMAP(aqm_add, AP_DOMAINS); 124 123 DECLARE_BITMAP(adm_add, AP_DOMAINS);
+9
include/uapi/linux/vfio.h
··· 646 646 VFIO_CCW_NUM_IRQS 647 647 }; 648 648 649 + /* 650 + * The vfio-ap bus driver makes use of the following IRQ index mapping. 651 + * Unimplemented IRQ types return a count of zero. 652 + */ 653 + enum { 654 + VFIO_AP_REQ_IRQ_INDEX, 655 + VFIO_AP_NUM_IRQS 656 + }; 657 + 649 658 /** 650 659 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12, 651 660 * struct vfio_pci_hot_reset_info)