Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

- Large rework of the protected key crypto code to allow for
asynchronous handling without memory allocation

- Speed up system call entry/exit path by re-implementing lazy ASCE
handling

- Add module autoload support for the diag288_wdt watchdog device
driver

- Get rid of s390 specific strcpy() and strncpy() implementations, and
switch all remaining users to strscpy() when possible

- Various other small fixes and improvements

* tag 's390-6.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (51 commits)
s390/pci: Serialize device addition and removal
s390/pci: Allow re-add of a reserved but not yet removed device
s390/pci: Prevent self deletion in disable_slot()
s390/pci: Remove redundant bus removal and disable from zpci_release_device()
s390/crypto: Extend protected key conversion retry loop
s390/pci: Fix __pcilg_mio_inuser() inline assembly
s390/ptrace: Always inline regs_get_kernel_stack_nth() and regs_get_register()
s390/thread_info: Cleanup header includes
s390/extmem: Add workaround for DCSS unload diag
s390/crypto: Rework protected key AES for true asynch support
s390/cpacf: Rework cpacf_pcc() to return condition code
s390/mm: Fix potential use-after-free in __crst_table_upgrade()
s390/mm: Add mmap_assert_write_locked() check to crst_table_upgrade()
s390/string: Remove strcpy() implementation
s390/con3270: Use strscpy() instead of strcpy()
s390/boot: Use strscpy() instead of strcpy()
s390: Simple strcpy() to strscpy() conversions
s390/pkey/crypto: Introduce xflags param for pkey in-kernel API
s390/pkey: Provide and pass xflags within pkey and zcrypt layers
s390/uv: Remove uv_get_secret_metadata function
...

+2612 -1857
+1
arch/s390/Kconfig
··· 146 146 select ARCH_WANTS_NO_INSTR 147 147 select ARCH_WANT_DEFAULT_BPF_JIT 148 148 select ARCH_WANT_IPC_PARSE_VERSION 149 + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM 149 150 select ARCH_WANT_KERNEL_PMD_MKWRITE 150 151 select ARCH_WANT_LD_ORPHAN_WARN 151 152 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+4 -3
arch/s390/boot/ipl_parm.c
··· 179 179 if (has_ebcdic_char(parmarea.command_line)) 180 180 EBCASC(parmarea.command_line, COMMAND_LINE_SIZE); 181 181 /* copy arch command line */ 182 - strcpy(early_command_line, strim(parmarea.command_line)); 182 + strscpy(early_command_line, strim(parmarea.command_line)); 183 183 184 184 /* append IPL PARM data to the boot command line */ 185 185 if (!is_prot_virt_guest() && ipl_block_valid) ··· 253 253 int rc; 254 254 255 255 __kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE); 256 - args = strcpy(command_line_buf, early_command_line); 256 + strscpy(command_line_buf, early_command_line); 257 + args = command_line_buf; 257 258 while (*args) { 258 259 args = next_arg(args, &param, &val); 259 260 ··· 310 309 if (!strcmp(param, "bootdebug")) { 311 310 bootdebug = true; 312 311 if (val) 313 - strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1); 312 + strscpy(bootdebug_filter, val); 314 313 } 315 314 if (!strcmp(param, "quiet")) 316 315 boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET;
+4 -3
arch/s390/boot/printk.c
··· 29 29 /* store strings separated by '\0' */ 30 30 if (len + 1 > avail) 31 31 boot_rb_off = 0; 32 - strcpy(boot_rb + boot_rb_off, str); 32 + avail = sizeof(boot_rb) - boot_rb_off - 1; 33 + strscpy(boot_rb + boot_rb_off, str, avail); 33 34 boot_rb_off += len + 1; 34 35 } 35 36 ··· 159 158 160 159 p = findsym((unsigned long)ip, &off, &len); 161 160 if (p) { 162 - strncpy(buf, p, MAX_SYMLEN); 161 + strscpy(buf, p, MAX_SYMLEN); 163 162 /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ 164 163 p = buf + strnlen(buf, MAX_SYMLEN - 15); 165 - strcpy(p, "+0x"); 164 + strscpy(p, "+0x", MAX_SYMLEN - (p - buf)); 166 165 as_hex(p + 3, off, 0); 167 166 strcat(p, "/0x"); 168 167 as_hex(p + strlen(p), len, 0);
+17
arch/s390/boot/startup.c
··· 6 6 #include <asm/boot_data.h> 7 7 #include <asm/extmem.h> 8 8 #include <asm/sections.h> 9 + #include <asm/diag288.h> 9 10 #include <asm/maccess.h> 10 11 #include <asm/machine.h> 11 12 #include <asm/sysinfo.h> ··· 70 69 set_machine_feature(MFEATURE_KVM); 71 70 else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) 72 71 set_machine_feature(MFEATURE_VM); 72 + } 73 + 74 + static void detect_diag288(void) 75 + { 76 + /* "BEGIN" in EBCDIC character set */ 77 + static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5"; 78 + unsigned long action, len; 79 + 80 + action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART; 81 + len = machine_is_vm() ? sizeof(cmd) : 0; 82 + if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len)) 83 + return; 84 + __diag288(WDT_FUNC_CANCEL, 0, 0, 0); 85 + set_machine_feature(MFEATURE_DIAG288); 73 86 } 74 87 75 88 static void detect_diag9c(void) ··· 534 519 detect_facilities(); 535 520 detect_diag9c(); 536 521 detect_machine_type(); 522 + /* detect_diag288() needs machine type */ 523 + detect_diag288(); 537 524 cmma_init(); 538 525 sanitize_prot_virt_host(); 539 526 max_physmem_end = detect_max_physmem_end();
+12
arch/s390/boot/string.c
··· 29 29 return 0; 30 30 } 31 31 32 + ssize_t sized_strscpy(char *dst, const char *src, size_t count) 33 + { 34 + size_t len; 35 + 36 + if (count == 0) 37 + return -E2BIG; 38 + len = strnlen(src, count - 1); 39 + memcpy(dst, src, len); 40 + dst[len] = '\0'; 41 + return src[len] ? -E2BIG : len; 42 + } 43 + 32 44 void *memset64(uint64_t *s, uint64_t v, size_t count) 33 45 { 34 46 uint64_t *xs = s;
+1299 -568
arch/s390/crypto/paes_s390.c
··· 5 5 * s390 implementation of the AES Cipher Algorithm with protected keys. 6 6 * 7 7 * s390 Version: 8 - * Copyright IBM Corp. 2017, 2023 8 + * Copyright IBM Corp. 2017, 2025 9 9 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 10 10 * Harald Freudenberger <freude@de.ibm.com> 11 11 */ ··· 13 13 #define KMSG_COMPONENT "paes_s390" 14 14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 15 16 - #include <crypto/aes.h> 17 - #include <crypto/algapi.h> 18 - #include <linux/bug.h> 19 - #include <linux/err.h> 20 - #include <linux/module.h> 16 + #include <linux/atomic.h> 21 17 #include <linux/cpufeature.h> 18 + #include <linux/delay.h> 19 + #include <linux/err.h> 22 20 #include <linux/init.h> 21 + #include <linux/miscdevice.h> 22 + #include <linux/module.h> 23 23 #include <linux/mutex.h> 24 24 #include <linux/spinlock.h> 25 - #include <linux/delay.h> 25 + #include <crypto/aes.h> 26 + #include <crypto/algapi.h> 27 + #include <crypto/engine.h> 26 28 #include <crypto/internal/skcipher.h> 27 29 #include <crypto/xts.h> 28 30 #include <asm/cpacf.h> ··· 46 44 47 45 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; 48 46 47 + static struct crypto_engine *paes_crypto_engine; 48 + #define MAX_QLEN 10 49 + 50 + /* 51 + * protected key specific stuff 52 + */ 53 + 49 54 struct paes_protkey { 50 55 u32 type; 51 56 u32 len; 52 57 u8 protkey[PXTS_256_PROTKEY_SIZE]; 53 58 }; 54 59 55 - struct key_blob { 56 - /* 57 - * Small keys will be stored in the keybuf. Larger keys are 58 - * stored in extra allocated memory. In both cases does 59 - * key point to the memory where the key is stored. 60 - * The code distinguishes by checking keylen against 61 - * sizeof(keybuf). See the two following helper functions. 
62 - */ 63 - u8 *key; 64 - u8 keybuf[128]; 60 + #define PK_STATE_NO_KEY 0 61 + #define PK_STATE_CONVERT_IN_PROGRESS 1 62 + #define PK_STATE_VALID 2 63 + 64 + struct s390_paes_ctx { 65 + /* source key material used to derive a protected key from */ 66 + u8 keybuf[PAES_MAX_KEYSIZE]; 65 67 unsigned int keylen; 68 + 69 + /* cpacf function code to use with this protected key type */ 70 + long fc; 71 + 72 + /* nr of requests enqueued via crypto engine which use this tfm ctx */ 73 + atomic_t via_engine_ctr; 74 + 75 + /* spinlock to atomic read/update all the following fields */ 76 + spinlock_t pk_lock; 77 + 78 + /* see PK_STATE* defines above, < 0 holds convert failure rc */ 79 + int pk_state; 80 + /* if state is valid, pk holds the protected key */ 81 + struct paes_protkey pk; 82 + }; 83 + 84 + struct s390_pxts_ctx { 85 + /* source key material used to derive a protected key from */ 86 + u8 keybuf[2 * PAES_MAX_KEYSIZE]; 87 + unsigned int keylen; 88 + 89 + /* cpacf function code to use with this protected key type */ 90 + long fc; 91 + 92 + /* nr of requests enqueued via crypto engine which use this tfm ctx */ 93 + atomic_t via_engine_ctr; 94 + 95 + /* spinlock to atomic read/update all the following fields */ 96 + spinlock_t pk_lock; 97 + 98 + /* see PK_STATE* defines above, < 0 holds convert failure rc */ 99 + int pk_state; 100 + /* if state is valid, pk[] hold(s) the protected key(s) */ 101 + struct paes_protkey pk[2]; 66 102 }; 67 103 68 104 /* ··· 129 89 return sizeof(*token) + cklen; 130 90 } 131 91 132 - static inline int _key_to_kb(struct key_blob *kb, 133 - const u8 *key, 134 - unsigned int keylen) 92 + /* 93 + * paes_ctx_setkey() - Set key value into context, maybe construct 94 + * a clear key token digestible by pkey from a clear key value. 
95 + */ 96 + static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx, 97 + const u8 *key, unsigned int keylen) 135 98 { 99 + if (keylen > sizeof(ctx->keybuf)) 100 + return -EINVAL; 101 + 136 102 switch (keylen) { 137 103 case 16: 138 104 case 24: 139 105 case 32: 140 106 /* clear key value, prepare pkey clear key token in keybuf */ 141 - memset(kb->keybuf, 0, sizeof(kb->keybuf)); 142 - kb->keylen = make_clrkey_token(key, keylen, kb->keybuf); 143 - kb->key = kb->keybuf; 107 + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); 108 + ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf); 144 109 break; 145 110 default: 146 111 /* other key material, let pkey handle this */ 147 - if (keylen <= sizeof(kb->keybuf)) 148 - kb->key = kb->keybuf; 149 - else { 150 - kb->key = kmalloc(keylen, GFP_KERNEL); 151 - if (!kb->key) 152 - return -ENOMEM; 153 - } 154 - memcpy(kb->key, key, keylen); 155 - kb->keylen = keylen; 112 + memcpy(ctx->keybuf, key, keylen); 113 + ctx->keylen = keylen; 156 114 break; 157 115 } 158 116 159 117 return 0; 160 118 } 161 119 162 - static inline int _xts_key_to_kb(struct key_blob *kb, 163 - const u8 *key, 164 - unsigned int keylen) 120 + /* 121 + * pxts_ctx_setkey() - Set key value into context, maybe construct 122 + * a clear key token digestible by pkey from a clear key value. 
123 + */ 124 + static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx, 125 + const u8 *key, unsigned int keylen) 165 126 { 166 127 size_t cklen = keylen / 2; 167 128 168 - memset(kb->keybuf, 0, sizeof(kb->keybuf)); 129 + if (keylen > sizeof(ctx->keybuf)) 130 + return -EINVAL; 169 131 170 132 switch (keylen) { 171 133 case 32: 172 134 case 64: 173 135 /* clear key value, prepare pkey clear key tokens in keybuf */ 174 - kb->key = kb->keybuf; 175 - kb->keylen = make_clrkey_token(key, cklen, kb->key); 176 - kb->keylen += make_clrkey_token(key + cklen, cklen, 177 - kb->key + kb->keylen); 136 + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); 137 + ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf); 138 + ctx->keylen += make_clrkey_token(key + cklen, cklen, 139 + ctx->keybuf + ctx->keylen); 178 140 break; 179 141 default: 180 142 /* other key material, let pkey handle this */ 181 - if (keylen <= sizeof(kb->keybuf)) { 182 - kb->key = kb->keybuf; 183 - } else { 184 - kb->key = kmalloc(keylen, GFP_KERNEL); 185 - if (!kb->key) 186 - return -ENOMEM; 187 - } 188 - memcpy(kb->key, key, keylen); 189 - kb->keylen = keylen; 143 + memcpy(ctx->keybuf, key, keylen); 144 + ctx->keylen = keylen; 190 145 break; 191 146 } 192 147 193 148 return 0; 194 149 } 195 150 196 - static inline void _free_kb_keybuf(struct key_blob *kb) 151 + /* 152 + * Convert the raw key material into a protected key via PKEY api. 153 + * This function may sleep - don't call in non-sleeping context. 
154 + */ 155 + static inline int convert_key(const u8 *key, unsigned int keylen, 156 + struct paes_protkey *pk) 197 157 { 198 - if (kb->key && kb->key != kb->keybuf 199 - && kb->keylen > sizeof(kb->keybuf)) { 200 - kfree_sensitive(kb->key); 201 - kb->key = NULL; 202 - } 203 - memzero_explicit(kb->keybuf, sizeof(kb->keybuf)); 204 - } 158 + int rc, i; 205 159 206 - struct s390_paes_ctx { 207 - struct key_blob kb; 208 - struct paes_protkey pk; 209 - spinlock_t pk_lock; 210 - unsigned long fc; 211 - }; 160 + pk->len = sizeof(pk->protkey); 212 161 213 - struct s390_pxts_ctx { 214 - struct key_blob kb; 215 - struct paes_protkey pk[2]; 216 - spinlock_t pk_lock; 217 - unsigned long fc; 218 - }; 219 - 220 - static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen, 221 - struct paes_protkey *pk) 222 - { 223 - int i, rc = -EIO; 224 - 225 - /* try three times in case of busy card */ 226 - for (i = 0; rc && i < 3; i++) { 227 - if (rc == -EBUSY && in_task()) { 228 - if (msleep_interruptible(1000)) 229 - return -EINTR; 162 + /* 163 + * In case of a busy card retry with increasing delay 164 + * of 200, 400, 800 and 1600 ms - in total 3 s. 165 + */ 166 + for (rc = -EIO, i = 0; rc && i < 5; i++) { 167 + if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) { 168 + rc = -EINTR; 169 + goto out; 230 170 } 231 - rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len, 232 - &pk->type); 171 + rc = pkey_key2protkey(key, keylen, 172 + pk->protkey, &pk->len, &pk->type, 173 + PKEY_XFLAG_NOMEMALLOC); 233 174 } 234 175 176 + out: 177 + pr_debug("rc=%d\n", rc); 235 178 return rc; 236 179 } 237 180 238 - static inline int __paes_convert_key(struct s390_paes_ctx *ctx) 181 + /* 182 + * (Re-)Convert the raw key material from the ctx into a protected key 183 + * via convert_key() function. Update the pk_state, pk_type, pk_len 184 + * and the protected key in the tfm context. 185 + * Please note this function may be invoked concurrently with the very 186 + * same tfm context. 
The pk_lock spinlock in the context ensures an 187 + * atomic update of the pk and the pk state but does not guarantee any 188 + * order of update. So a fresh converted valid protected key may get 189 + * updated with an 'old' expired key value. As the cpacf instructions 190 + * detect this, refuse to operate with an invalid key and the calling 191 + * code triggers a (re-)conversion this does no harm. This may lead to 192 + * unnecessary additional conversion but never to invalid data on en- 193 + * or decrypt operations. 194 + */ 195 + static int paes_convert_key(struct s390_paes_ctx *ctx) 239 196 { 240 197 struct paes_protkey pk; 241 198 int rc; 242 199 243 - pk.len = sizeof(pk.protkey); 244 - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk); 245 - if (rc) 246 - return rc; 247 - 248 200 spin_lock_bh(&ctx->pk_lock); 249 - memcpy(&ctx->pk, &pk, sizeof(pk)); 201 + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; 250 202 spin_unlock_bh(&ctx->pk_lock); 251 203 252 - return 0; 204 + rc = convert_key(ctx->keybuf, ctx->keylen, &pk); 205 + 206 + /* update context */ 207 + spin_lock_bh(&ctx->pk_lock); 208 + if (rc) { 209 + ctx->pk_state = rc; 210 + } else { 211 + ctx->pk_state = PK_STATE_VALID; 212 + ctx->pk = pk; 213 + } 214 + spin_unlock_bh(&ctx->pk_lock); 215 + 216 + memzero_explicit(&pk, sizeof(pk)); 217 + pr_debug("rc=%d\n", rc); 218 + return rc; 253 219 } 254 220 255 - static int ecb_paes_init(struct crypto_skcipher *tfm) 221 + /* 222 + * (Re-)Convert the raw xts key material from the ctx into a 223 + * protected key via convert_key() function. Update the pk_state, 224 + * pk_type, pk_len and the protected key in the tfm context. 225 + * See also comments on function paes_convert_key. 
226 + */ 227 + static int pxts_convert_key(struct s390_pxts_ctx *ctx) 256 228 { 257 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 258 - 259 - ctx->kb.key = NULL; 260 - spin_lock_init(&ctx->pk_lock); 261 - 262 - return 0; 263 - } 264 - 265 - static void ecb_paes_exit(struct crypto_skcipher *tfm) 266 - { 267 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 268 - 269 - _free_kb_keybuf(&ctx->kb); 270 - } 271 - 272 - static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx) 273 - { 274 - unsigned long fc; 229 + struct paes_protkey pk0, pk1; 230 + size_t split_keylen; 275 231 int rc; 276 232 277 - rc = __paes_convert_key(ctx); 233 + spin_lock_bh(&ctx->pk_lock); 234 + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; 235 + spin_unlock_bh(&ctx->pk_lock); 236 + 237 + rc = convert_key(ctx->keybuf, ctx->keylen, &pk0); 278 238 if (rc) 279 - return rc; 239 + goto out; 240 + 241 + switch (pk0.type) { 242 + case PKEY_KEYTYPE_AES_128: 243 + case PKEY_KEYTYPE_AES_256: 244 + /* second keytoken required */ 245 + if (ctx->keylen % 2) { 246 + rc = -EINVAL; 247 + goto out; 248 + } 249 + split_keylen = ctx->keylen / 2; 250 + rc = convert_key(ctx->keybuf + split_keylen, 251 + split_keylen, &pk1); 252 + if (rc) 253 + goto out; 254 + if (pk0.type != pk1.type) { 255 + rc = -EINVAL; 256 + goto out; 257 + } 258 + break; 259 + case PKEY_KEYTYPE_AES_XTS_128: 260 + case PKEY_KEYTYPE_AES_XTS_256: 261 + /* single key */ 262 + pk1.type = 0; 263 + break; 264 + default: 265 + /* unsupported protected keytype */ 266 + rc = -EINVAL; 267 + goto out; 268 + } 269 + 270 + out: 271 + /* update context */ 272 + spin_lock_bh(&ctx->pk_lock); 273 + if (rc) { 274 + ctx->pk_state = rc; 275 + } else { 276 + ctx->pk_state = PK_STATE_VALID; 277 + ctx->pk[0] = pk0; 278 + ctx->pk[1] = pk1; 279 + } 280 + spin_unlock_bh(&ctx->pk_lock); 281 + 282 + memzero_explicit(&pk0, sizeof(pk0)); 283 + memzero_explicit(&pk1, sizeof(pk1)); 284 + pr_debug("rc=%d\n", rc); 285 + return rc; 286 + } 287 + 288 + /* 289 + 
* PAES ECB implementation 290 + */ 291 + 292 + struct ecb_param { 293 + u8 key[PAES_256_PROTKEY_SIZE]; 294 + } __packed; 295 + 296 + struct s390_pecb_req_ctx { 297 + unsigned long modifier; 298 + struct skcipher_walk walk; 299 + bool param_init_done; 300 + struct ecb_param param; 301 + }; 302 + 303 + static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 304 + unsigned int key_len) 305 + { 306 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 307 + long fc; 308 + int rc; 309 + 310 + /* set raw key into context */ 311 + rc = paes_ctx_setkey(ctx, in_key, key_len); 312 + if (rc) 313 + goto out; 314 + 315 + /* convert key into protected key */ 316 + rc = paes_convert_key(ctx); 317 + if (rc) 318 + goto out; 280 319 281 320 /* Pick the correct function code based on the protected key type */ 282 - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : 283 - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : 284 - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0; 285 - 286 - /* Check if the function code is available */ 321 + switch (ctx->pk.type) { 322 + case PKEY_KEYTYPE_AES_128: 323 + fc = CPACF_KM_PAES_128; 324 + break; 325 + case PKEY_KEYTYPE_AES_192: 326 + fc = CPACF_KM_PAES_192; 327 + break; 328 + case PKEY_KEYTYPE_AES_256: 329 + fc = CPACF_KM_PAES_256; 330 + break; 331 + default: 332 + fc = 0; 333 + break; 334 + } 287 335 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; 288 336 289 - return ctx->fc ? 0 : -EINVAL; 337 + rc = fc ? 
0 : -EINVAL; 338 + 339 + out: 340 + pr_debug("rc=%d\n", rc); 341 + return rc; 290 342 } 291 343 292 - static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 293 - unsigned int key_len) 344 + static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx, 345 + struct s390_pecb_req_ctx *req_ctx, 346 + bool maysleep) 294 347 { 295 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 296 - int rc; 348 + struct ecb_param *param = &req_ctx->param; 349 + struct skcipher_walk *walk = &req_ctx->walk; 350 + unsigned int nbytes, n, k; 351 + int pk_state, rc = 0; 297 352 298 - _free_kb_keybuf(&ctx->kb); 299 - rc = _key_to_kb(&ctx->kb, in_key, key_len); 353 + if (!req_ctx->param_init_done) { 354 + /* fetch and check protected key state */ 355 + spin_lock_bh(&ctx->pk_lock); 356 + pk_state = ctx->pk_state; 357 + switch (pk_state) { 358 + case PK_STATE_NO_KEY: 359 + rc = -ENOKEY; 360 + break; 361 + case PK_STATE_CONVERT_IN_PROGRESS: 362 + rc = -EKEYEXPIRED; 363 + break; 364 + case PK_STATE_VALID: 365 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 366 + req_ctx->param_init_done = true; 367 + break; 368 + default: 369 + rc = pk_state < 0 ? pk_state : -EIO; 370 + break; 371 + } 372 + spin_unlock_bh(&ctx->pk_lock); 373 + } 300 374 if (rc) 301 - return rc; 375 + goto out; 302 376 303 - return __ecb_paes_set_key(ctx); 377 + /* 378 + * Note that in case of partial processing or failure the walk 379 + * is NOT unmapped here. So a follow up task may reuse the walk 380 + * or in case of unrecoverable failure needs to unmap it. 
381 + */ 382 + while ((nbytes = walk->nbytes) != 0) { 383 + /* only use complete blocks */ 384 + n = nbytes & ~(AES_BLOCK_SIZE - 1); 385 + k = cpacf_km(ctx->fc | req_ctx->modifier, param, 386 + walk->dst.virt.addr, walk->src.virt.addr, n); 387 + if (k) 388 + rc = skcipher_walk_done(walk, nbytes - k); 389 + if (k < n) { 390 + if (!maysleep) { 391 + rc = -EKEYEXPIRED; 392 + goto out; 393 + } 394 + rc = paes_convert_key(ctx); 395 + if (rc) 396 + goto out; 397 + spin_lock_bh(&ctx->pk_lock); 398 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 399 + spin_unlock_bh(&ctx->pk_lock); 400 + } 401 + } 402 + 403 + out: 404 + pr_debug("rc=%d\n", rc); 405 + return rc; 304 406 } 305 407 306 408 static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) 307 409 { 410 + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); 308 411 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 309 412 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 310 - struct { 311 - u8 key[PAES_256_PROTKEY_SIZE]; 312 - } param; 313 - struct skcipher_walk walk; 314 - unsigned int nbytes, n, k; 413 + struct skcipher_walk *walk = &req_ctx->walk; 315 414 int rc; 316 415 317 - rc = skcipher_walk_virt(&walk, req, false); 416 + /* 417 + * Attempt synchronous encryption first. If it fails, schedule the request 418 + * asynchronously via the crypto engine. To preserve execution order, 419 + * once a request is queued to the engine, further requests using the same 420 + * tfm will also be routed through the engine. 
421 + */ 422 + 423 + rc = skcipher_walk_virt(walk, req, false); 318 424 if (rc) 319 - return rc; 425 + goto out; 320 426 321 - spin_lock_bh(&ctx->pk_lock); 322 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 323 - spin_unlock_bh(&ctx->pk_lock); 427 + req_ctx->modifier = modifier; 428 + req_ctx->param_init_done = false; 324 429 325 - while ((nbytes = walk.nbytes) != 0) { 326 - /* only use complete blocks */ 327 - n = nbytes & ~(AES_BLOCK_SIZE - 1); 328 - k = cpacf_km(ctx->fc | modifier, &param, 329 - walk.dst.virt.addr, walk.src.virt.addr, n); 330 - if (k) 331 - rc = skcipher_walk_done(&walk, nbytes - k); 332 - if (k < n) { 333 - if (__paes_convert_key(ctx)) 334 - return skcipher_walk_done(&walk, -EIO); 335 - spin_lock_bh(&ctx->pk_lock); 336 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 337 - spin_unlock_bh(&ctx->pk_lock); 338 - } 430 + /* Try synchronous operation if no active engine usage */ 431 + if (!atomic_read(&ctx->via_engine_ctr)) { 432 + rc = ecb_paes_do_crypt(ctx, req_ctx, false); 433 + if (rc == 0) 434 + goto out; 339 435 } 436 + 437 + /* 438 + * If sync operation failed or key expired or there are already 439 + * requests enqueued via engine, fallback to async. Mark tfm as 440 + * using engine to serialize requests. 
441 + */ 442 + if (rc == 0 || rc == -EKEYEXPIRED) { 443 + atomic_inc(&ctx->via_engine_ctr); 444 + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); 445 + if (rc != -EINPROGRESS) 446 + atomic_dec(&ctx->via_engine_ctr); 447 + } 448 + 449 + if (rc != -EINPROGRESS) 450 + skcipher_walk_done(walk, rc); 451 + 452 + out: 453 + if (rc != -EINPROGRESS) 454 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 455 + pr_debug("rc=%d\n", rc); 340 456 return rc; 341 457 } 342 458 ··· 506 310 return ecb_paes_crypt(req, CPACF_DECRYPT); 507 311 } 508 312 509 - static struct skcipher_alg ecb_paes_alg = { 510 - .base.cra_name = "ecb(paes)", 511 - .base.cra_driver_name = "ecb-paes-s390", 512 - .base.cra_priority = 401, /* combo: aes + ecb + 1 */ 513 - .base.cra_blocksize = AES_BLOCK_SIZE, 514 - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 515 - .base.cra_module = THIS_MODULE, 516 - .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list), 517 - .init = ecb_paes_init, 518 - .exit = ecb_paes_exit, 519 - .min_keysize = PAES_MIN_KEYSIZE, 520 - .max_keysize = PAES_MAX_KEYSIZE, 521 - .setkey = ecb_paes_set_key, 522 - .encrypt = ecb_paes_encrypt, 523 - .decrypt = ecb_paes_decrypt, 524 - }; 525 - 526 - static int cbc_paes_init(struct crypto_skcipher *tfm) 313 + static int ecb_paes_init(struct crypto_skcipher *tfm) 527 314 { 528 315 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 529 316 530 - ctx->kb.key = NULL; 317 + memset(ctx, 0, sizeof(*ctx)); 531 318 spin_lock_init(&ctx->pk_lock); 319 + 320 + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx)); 532 321 533 322 return 0; 534 323 } 535 324 536 - static void cbc_paes_exit(struct crypto_skcipher *tfm) 325 + static void ecb_paes_exit(struct crypto_skcipher *tfm) 537 326 { 538 327 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 539 328 540 - _free_kb_keybuf(&ctx->kb); 329 + memzero_explicit(ctx, sizeof(*ctx)); 541 330 } 542 331 543 - static inline int __cbc_paes_set_key(struct 
s390_paes_ctx *ctx) 332 + static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq) 544 333 { 545 - unsigned long fc; 334 + struct skcipher_request *req = skcipher_request_cast(areq); 335 + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); 336 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 337 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 338 + struct skcipher_walk *walk = &req_ctx->walk; 546 339 int rc; 547 340 548 - rc = __paes_convert_key(ctx); 341 + /* walk has already been prepared */ 342 + 343 + rc = ecb_paes_do_crypt(ctx, req_ctx, true); 344 + if (rc == -EKEYEXPIRED) { 345 + /* 346 + * Protected key expired, conversion is in process. 347 + * Trigger a re-schedule of this request by returning 348 + * -ENOSPC ("hardware queue is full") to the crypto engine. 349 + * To avoid immediately re-invocation of this callback, 350 + * tell the scheduler to voluntarily give up the CPU here. 351 + */ 352 + cond_resched(); 353 + pr_debug("rescheduling request\n"); 354 + return -ENOSPC; 355 + } else if (rc) { 356 + skcipher_walk_done(walk, rc); 357 + } 358 + 359 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 360 + pr_debug("request complete with rc=%d\n", rc); 361 + local_bh_disable(); 362 + atomic_dec(&ctx->via_engine_ctr); 363 + crypto_finalize_skcipher_request(engine, req, rc); 364 + local_bh_enable(); 365 + return rc; 366 + } 367 + 368 + static struct skcipher_engine_alg ecb_paes_alg = { 369 + .base = { 370 + .base.cra_name = "ecb(paes)", 371 + .base.cra_driver_name = "ecb-paes-s390", 372 + .base.cra_priority = 401, /* combo: aes + ecb + 1 */ 373 + .base.cra_blocksize = AES_BLOCK_SIZE, 374 + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 375 + .base.cra_module = THIS_MODULE, 376 + .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list), 377 + .init = ecb_paes_init, 378 + .exit = ecb_paes_exit, 379 + .min_keysize = PAES_MIN_KEYSIZE, 380 + .max_keysize = PAES_MAX_KEYSIZE, 381 + .setkey = 
ecb_paes_setkey, 382 + .encrypt = ecb_paes_encrypt, 383 + .decrypt = ecb_paes_decrypt, 384 + }, 385 + .op = { 386 + .do_one_request = ecb_paes_do_one_request, 387 + }, 388 + }; 389 + 390 + /* 391 + * PAES CBC implementation 392 + */ 393 + 394 + struct cbc_param { 395 + u8 iv[AES_BLOCK_SIZE]; 396 + u8 key[PAES_256_PROTKEY_SIZE]; 397 + } __packed; 398 + 399 + struct s390_pcbc_req_ctx { 400 + unsigned long modifier; 401 + struct skcipher_walk walk; 402 + bool param_init_done; 403 + struct cbc_param param; 404 + }; 405 + 406 + static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 407 + unsigned int key_len) 408 + { 409 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 410 + long fc; 411 + int rc; 412 + 413 + /* set raw key into context */ 414 + rc = paes_ctx_setkey(ctx, in_key, key_len); 549 415 if (rc) 550 - return rc; 416 + goto out; 417 + 418 + /* convert raw key into protected key */ 419 + rc = paes_convert_key(ctx); 420 + if (rc) 421 + goto out; 551 422 552 423 /* Pick the correct function code based on the protected key type */ 553 - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 : 554 - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 : 555 - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0; 556 - 557 - /* Check if the function code is available */ 424 + switch (ctx->pk.type) { 425 + case PKEY_KEYTYPE_AES_128: 426 + fc = CPACF_KMC_PAES_128; 427 + break; 428 + case PKEY_KEYTYPE_AES_192: 429 + fc = CPACF_KMC_PAES_192; 430 + break; 431 + case PKEY_KEYTYPE_AES_256: 432 + fc = CPACF_KMC_PAES_256; 433 + break; 434 + default: 435 + fc = 0; 436 + break; 437 + } 558 438 ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0; 559 439 560 - return ctx->fc ? 0 : -EINVAL; 440 + rc = fc ? 
0 : -EINVAL; 441 + 442 + out: 443 + pr_debug("rc=%d\n", rc); 444 + return rc; 561 445 } 562 446 563 - static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 564 - unsigned int key_len) 447 + static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx, 448 + struct s390_pcbc_req_ctx *req_ctx, 449 + bool maysleep) 565 450 { 566 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 567 - int rc; 451 + struct cbc_param *param = &req_ctx->param; 452 + struct skcipher_walk *walk = &req_ctx->walk; 453 + unsigned int nbytes, n, k; 454 + int pk_state, rc = 0; 568 455 569 - _free_kb_keybuf(&ctx->kb); 570 - rc = _key_to_kb(&ctx->kb, in_key, key_len); 456 + if (!req_ctx->param_init_done) { 457 + /* fetch and check protected key state */ 458 + spin_lock_bh(&ctx->pk_lock); 459 + pk_state = ctx->pk_state; 460 + switch (pk_state) { 461 + case PK_STATE_NO_KEY: 462 + rc = -ENOKEY; 463 + break; 464 + case PK_STATE_CONVERT_IN_PROGRESS: 465 + rc = -EKEYEXPIRED; 466 + break; 467 + case PK_STATE_VALID: 468 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 469 + req_ctx->param_init_done = true; 470 + break; 471 + default: 472 + rc = pk_state < 0 ? pk_state : -EIO; 473 + break; 474 + } 475 + spin_unlock_bh(&ctx->pk_lock); 476 + } 571 477 if (rc) 572 - return rc; 478 + goto out; 573 479 574 - return __cbc_paes_set_key(ctx); 480 + memcpy(param->iv, walk->iv, AES_BLOCK_SIZE); 481 + 482 + /* 483 + * Note that in case of partial processing or failure the walk 484 + * is NOT unmapped here. So a follow up task may reuse the walk 485 + * or in case of unrecoverable failure needs to unmap it. 
486 + */ 487 + while ((nbytes = walk->nbytes) != 0) { 488 + /* only use complete blocks */ 489 + n = nbytes & ~(AES_BLOCK_SIZE - 1); 490 + k = cpacf_kmc(ctx->fc | req_ctx->modifier, param, 491 + walk->dst.virt.addr, walk->src.virt.addr, n); 492 + if (k) { 493 + memcpy(walk->iv, param->iv, AES_BLOCK_SIZE); 494 + rc = skcipher_walk_done(walk, nbytes - k); 495 + } 496 + if (k < n) { 497 + if (!maysleep) { 498 + rc = -EKEYEXPIRED; 499 + goto out; 500 + } 501 + rc = paes_convert_key(ctx); 502 + if (rc) 503 + goto out; 504 + spin_lock_bh(&ctx->pk_lock); 505 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 506 + spin_unlock_bh(&ctx->pk_lock); 507 + } 508 + } 509 + 510 + out: 511 + pr_debug("rc=%d\n", rc); 512 + return rc; 575 513 } 576 514 577 515 static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) 578 516 { 517 + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); 579 518 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 580 519 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 581 - struct { 582 - u8 iv[AES_BLOCK_SIZE]; 583 - u8 key[PAES_256_PROTKEY_SIZE]; 584 - } param; 585 - struct skcipher_walk walk; 586 - unsigned int nbytes, n, k; 520 + struct skcipher_walk *walk = &req_ctx->walk; 587 521 int rc; 588 522 589 - rc = skcipher_walk_virt(&walk, req, false); 523 + /* 524 + * Attempt synchronous encryption first. If it fails, schedule the request 525 + * asynchronously via the crypto engine. To preserve execution order, 526 + * once a request is queued to the engine, further requests using the same 527 + * tfm will also be routed through the engine. 
528 + */ 529 + 530 + rc = skcipher_walk_virt(walk, req, false); 590 531 if (rc) 591 - return rc; 532 + goto out; 592 533 593 - memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); 594 - spin_lock_bh(&ctx->pk_lock); 595 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 596 - spin_unlock_bh(&ctx->pk_lock); 534 + req_ctx->modifier = modifier; 535 + req_ctx->param_init_done = false; 597 536 598 - while ((nbytes = walk.nbytes) != 0) { 599 - /* only use complete blocks */ 600 - n = nbytes & ~(AES_BLOCK_SIZE - 1); 601 - k = cpacf_kmc(ctx->fc | modifier, &param, 602 - walk.dst.virt.addr, walk.src.virt.addr, n); 603 - if (k) { 604 - memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); 605 - rc = skcipher_walk_done(&walk, nbytes - k); 606 - } 607 - if (k < n) { 608 - if (__paes_convert_key(ctx)) 609 - return skcipher_walk_done(&walk, -EIO); 610 - spin_lock_bh(&ctx->pk_lock); 611 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 612 - spin_unlock_bh(&ctx->pk_lock); 613 - } 537 + /* Try synchronous operation if no active engine usage */ 538 + if (!atomic_read(&ctx->via_engine_ctr)) { 539 + rc = cbc_paes_do_crypt(ctx, req_ctx, false); 540 + if (rc == 0) 541 + goto out; 614 542 } 543 + 544 + /* 545 + * If sync operation failed or key expired or there are already 546 + * requests enqueued via engine, fallback to async. Mark tfm as 547 + * using engine to serialize requests. 
548 + */ 549 + if (rc == 0 || rc == -EKEYEXPIRED) { 550 + atomic_inc(&ctx->via_engine_ctr); 551 + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); 552 + if (rc != -EINPROGRESS) 553 + atomic_dec(&ctx->via_engine_ctr); 554 + } 555 + 556 + if (rc != -EINPROGRESS) 557 + skcipher_walk_done(walk, rc); 558 + 559 + out: 560 + if (rc != -EINPROGRESS) 561 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 562 + pr_debug("rc=%d\n", rc); 615 563 return rc; 616 564 } 617 565 ··· 769 429 return cbc_paes_crypt(req, CPACF_DECRYPT); 770 430 } 771 431 772 - static struct skcipher_alg cbc_paes_alg = { 773 - .base.cra_name = "cbc(paes)", 774 - .base.cra_driver_name = "cbc-paes-s390", 775 - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ 776 - .base.cra_blocksize = AES_BLOCK_SIZE, 777 - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 778 - .base.cra_module = THIS_MODULE, 779 - .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list), 780 - .init = cbc_paes_init, 781 - .exit = cbc_paes_exit, 782 - .min_keysize = PAES_MIN_KEYSIZE, 783 - .max_keysize = PAES_MAX_KEYSIZE, 784 - .ivsize = AES_BLOCK_SIZE, 785 - .setkey = cbc_paes_set_key, 786 - .encrypt = cbc_paes_encrypt, 787 - .decrypt = cbc_paes_decrypt, 788 - }; 789 - 790 - static int xts_paes_init(struct crypto_skcipher *tfm) 432 + static int cbc_paes_init(struct crypto_skcipher *tfm) 791 433 { 792 - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 434 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 793 435 794 - ctx->kb.key = NULL; 436 + memset(ctx, 0, sizeof(*ctx)); 795 437 spin_lock_init(&ctx->pk_lock); 796 438 439 + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx)); 440 + 797 441 return 0; 798 442 } 799 443 800 - static void xts_paes_exit(struct crypto_skcipher *tfm) 444 + static void cbc_paes_exit(struct crypto_skcipher *tfm) 801 445 { 802 - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 446 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 803 447 
804 - _free_kb_keybuf(&ctx->kb); 448 + memzero_explicit(ctx, sizeof(*ctx)); 805 449 } 806 450 807 - static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx) 451 + static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq) 808 452 { 809 - struct paes_protkey pk0, pk1; 810 - size_t split_keylen; 453 + struct skcipher_request *req = skcipher_request_cast(areq); 454 + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); 455 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 456 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 457 + struct skcipher_walk *walk = &req_ctx->walk; 811 458 int rc; 812 459 813 - pk0.len = sizeof(pk0.protkey); 814 - pk1.len = sizeof(pk1.protkey); 460 + /* walk has already been prepared */ 815 461 816 - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0); 817 - if (rc) 818 - return rc; 819 - 820 - switch (pk0.type) { 821 - case PKEY_KEYTYPE_AES_128: 822 - case PKEY_KEYTYPE_AES_256: 823 - /* second keytoken required */ 824 - if (ctx->kb.keylen % 2) 825 - return -EINVAL; 826 - split_keylen = ctx->kb.keylen / 2; 827 - 828 - rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen, 829 - split_keylen, &pk1); 830 - if (rc) 831 - return rc; 832 - 833 - if (pk0.type != pk1.type) 834 - return -EINVAL; 835 - break; 836 - case PKEY_KEYTYPE_AES_XTS_128: 837 - case PKEY_KEYTYPE_AES_XTS_256: 838 - /* single key */ 839 - pk1.type = 0; 840 - break; 841 - default: 842 - /* unsupported protected keytype */ 843 - return -EINVAL; 462 + rc = cbc_paes_do_crypt(ctx, req_ctx, true); 463 + if (rc == -EKEYEXPIRED) { 464 + /* 465 + * Protected key expired, conversion is in process. 466 + * Trigger a re-schedule of this request by returning 467 + * -ENOSPC ("hardware queue is full") to the crypto engine. 468 + * To avoid immediately re-invocation of this callback, 469 + * tell the scheduler to voluntarily give up the CPU here. 
470 + */ 471 + cond_resched(); 472 + pr_debug("rescheduling request\n"); 473 + return -ENOSPC; 474 + } else if (rc) { 475 + skcipher_walk_done(walk, rc); 844 476 } 845 477 846 - spin_lock_bh(&ctx->pk_lock); 847 - ctx->pk[0] = pk0; 848 - ctx->pk[1] = pk1; 849 - spin_unlock_bh(&ctx->pk_lock); 478 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 479 + pr_debug("request complete with rc=%d\n", rc); 480 + local_bh_disable(); 481 + atomic_dec(&ctx->via_engine_ctr); 482 + crypto_finalize_skcipher_request(engine, req, rc); 483 + local_bh_enable(); 484 + return rc; 485 + } 486 + 487 + static struct skcipher_engine_alg cbc_paes_alg = { 488 + .base = { 489 + .base.cra_name = "cbc(paes)", 490 + .base.cra_driver_name = "cbc-paes-s390", 491 + .base.cra_priority = 402, /* cbc-paes-s390 + 1 */ 492 + .base.cra_blocksize = AES_BLOCK_SIZE, 493 + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 494 + .base.cra_module = THIS_MODULE, 495 + .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list), 496 + .init = cbc_paes_init, 497 + .exit = cbc_paes_exit, 498 + .min_keysize = PAES_MIN_KEYSIZE, 499 + .max_keysize = PAES_MAX_KEYSIZE, 500 + .ivsize = AES_BLOCK_SIZE, 501 + .setkey = cbc_paes_setkey, 502 + .encrypt = cbc_paes_encrypt, 503 + .decrypt = cbc_paes_decrypt, 504 + }, 505 + .op = { 506 + .do_one_request = cbc_paes_do_one_request, 507 + }, 508 + }; 509 + 510 + /* 511 + * PAES CTR implementation 512 + */ 513 + 514 + struct ctr_param { 515 + u8 key[PAES_256_PROTKEY_SIZE]; 516 + } __packed; 517 + 518 + struct s390_pctr_req_ctx { 519 + unsigned long modifier; 520 + struct skcipher_walk walk; 521 + bool param_init_done; 522 + struct ctr_param param; 523 + }; 524 + 525 + static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 526 + unsigned int key_len) 527 + { 528 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 529 + long fc; 530 + int rc; 531 + 532 + /* set raw key into context */ 533 + rc = paes_ctx_setkey(ctx, in_key, key_len); 534 + if 
(rc) 535 + goto out; 536 + 537 + /* convert raw key into protected key */ 538 + rc = paes_convert_key(ctx); 539 + if (rc) 540 + goto out; 541 + 542 + /* Pick the correct function code based on the protected key type */ 543 + switch (ctx->pk.type) { 544 + case PKEY_KEYTYPE_AES_128: 545 + fc = CPACF_KMCTR_PAES_128; 546 + break; 547 + case PKEY_KEYTYPE_AES_192: 548 + fc = CPACF_KMCTR_PAES_192; 549 + break; 550 + case PKEY_KEYTYPE_AES_256: 551 + fc = CPACF_KMCTR_PAES_256; 552 + break; 553 + default: 554 + fc = 0; 555 + break; 556 + } 557 + ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; 558 + 559 + rc = fc ? 0 : -EINVAL; 560 + 561 + out: 562 + pr_debug("rc=%d\n", rc); 563 + return rc; 564 + } 565 + 566 + static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) 567 + { 568 + unsigned int i, n; 569 + 570 + /* only use complete blocks, max. PAGE_SIZE */ 571 + memcpy(ctrptr, iv, AES_BLOCK_SIZE); 572 + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); 573 + for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { 574 + memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); 575 + crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); 576 + ctrptr += AES_BLOCK_SIZE; 577 + } 578 + return n; 579 + } 580 + 581 + static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx, 582 + struct s390_pctr_req_ctx *req_ctx, 583 + bool maysleep) 584 + { 585 + struct ctr_param *param = &req_ctx->param; 586 + struct skcipher_walk *walk = &req_ctx->walk; 587 + u8 buf[AES_BLOCK_SIZE], *ctrptr; 588 + unsigned int nbytes, n, k; 589 + int pk_state, locked, rc = 0; 590 + 591 + if (!req_ctx->param_init_done) { 592 + /* fetch and check protected key state */ 593 + spin_lock_bh(&ctx->pk_lock); 594 + pk_state = ctx->pk_state; 595 + switch (pk_state) { 596 + case PK_STATE_NO_KEY: 597 + rc = -ENOKEY; 598 + break; 599 + case PK_STATE_CONVERT_IN_PROGRESS: 600 + rc = -EKEYEXPIRED; 601 + break; 602 + case PK_STATE_VALID: 603 + memcpy(param->key, 
ctx->pk.protkey, sizeof(param->key)); 604 + req_ctx->param_init_done = true; 605 + break; 606 + default: 607 + rc = pk_state < 0 ? pk_state : -EIO; 608 + break; 609 + } 610 + spin_unlock_bh(&ctx->pk_lock); 611 + } 612 + if (rc) 613 + goto out; 614 + 615 + locked = mutex_trylock(&ctrblk_lock); 616 + 617 + /* 618 + * Note that in case of partial processing or failure the walk 619 + * is NOT unmapped here. So a follow up task may reuse the walk 620 + * or in case of unrecoverable failure needs to unmap it. 621 + */ 622 + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { 623 + n = AES_BLOCK_SIZE; 624 + if (nbytes >= 2 * AES_BLOCK_SIZE && locked) 625 + n = __ctrblk_init(ctrblk, walk->iv, nbytes); 626 + ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; 627 + k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr, 628 + walk->src.virt.addr, n, ctrptr); 629 + if (k) { 630 + if (ctrptr == ctrblk) 631 + memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE, 632 + AES_BLOCK_SIZE); 633 + crypto_inc(walk->iv, AES_BLOCK_SIZE); 634 + rc = skcipher_walk_done(walk, nbytes - k); 635 + } 636 + if (k < n) { 637 + if (!maysleep) { 638 + if (locked) 639 + mutex_unlock(&ctrblk_lock); 640 + rc = -EKEYEXPIRED; 641 + goto out; 642 + } 643 + rc = paes_convert_key(ctx); 644 + if (rc) { 645 + if (locked) 646 + mutex_unlock(&ctrblk_lock); 647 + goto out; 648 + } 649 + spin_lock_bh(&ctx->pk_lock); 650 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 651 + spin_unlock_bh(&ctx->pk_lock); 652 + } 653 + } 654 + if (locked) 655 + mutex_unlock(&ctrblk_lock); 656 + 657 + /* final block may be < AES_BLOCK_SIZE, copy only nbytes */ 658 + if (nbytes) { 659 + memset(buf, 0, AES_BLOCK_SIZE); 660 + memcpy(buf, walk->src.virt.addr, nbytes); 661 + while (1) { 662 + if (cpacf_kmctr(ctx->fc, param, buf, 663 + buf, AES_BLOCK_SIZE, 664 + walk->iv) == AES_BLOCK_SIZE) 665 + break; 666 + if (!maysleep) { 667 + rc = -EKEYEXPIRED; 668 + goto out; 669 + } 670 + rc = paes_convert_key(ctx); 671 + if (rc) 672 + goto out; 
673 + spin_lock_bh(&ctx->pk_lock); 674 + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); 675 + spin_unlock_bh(&ctx->pk_lock); 676 + } 677 + memcpy(walk->dst.virt.addr, buf, nbytes); 678 + crypto_inc(walk->iv, AES_BLOCK_SIZE); 679 + rc = skcipher_walk_done(walk, 0); 680 + } 681 + 682 + out: 683 + pr_debug("rc=%d\n", rc); 684 + return rc; 685 + } 686 + 687 + static int ctr_paes_crypt(struct skcipher_request *req) 688 + { 689 + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); 690 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 691 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 692 + struct skcipher_walk *walk = &req_ctx->walk; 693 + int rc; 694 + 695 + /* 696 + * Attempt synchronous encryption first. If it fails, schedule the request 697 + * asynchronously via the crypto engine. To preserve execution order, 698 + * once a request is queued to the engine, further requests using the same 699 + * tfm will also be routed through the engine. 700 + */ 701 + 702 + rc = skcipher_walk_virt(walk, req, false); 703 + if (rc) 704 + goto out; 705 + 706 + req_ctx->param_init_done = false; 707 + 708 + /* Try synchronous operation if no active engine usage */ 709 + if (!atomic_read(&ctx->via_engine_ctr)) { 710 + rc = ctr_paes_do_crypt(ctx, req_ctx, false); 711 + if (rc == 0) 712 + goto out; 713 + } 714 + 715 + /* 716 + * If sync operation failed or key expired or there are already 717 + * requests enqueued via engine, fallback to async. Mark tfm as 718 + * using engine to serialize requests. 
719 + */ 720 + if (rc == 0 || rc == -EKEYEXPIRED) { 721 + atomic_inc(&ctx->via_engine_ctr); 722 + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); 723 + if (rc != -EINPROGRESS) 724 + atomic_dec(&ctx->via_engine_ctr); 725 + } 726 + 727 + if (rc != -EINPROGRESS) 728 + skcipher_walk_done(walk, rc); 729 + 730 + out: 731 + if (rc != -EINPROGRESS) 732 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 733 + pr_debug("rc=%d\n", rc); 734 + return rc; 735 + } 736 + 737 + static int ctr_paes_init(struct crypto_skcipher *tfm) 738 + { 739 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 740 + 741 + memset(ctx, 0, sizeof(*ctx)); 742 + spin_lock_init(&ctx->pk_lock); 743 + 744 + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx)); 850 745 851 746 return 0; 852 747 } 853 748 854 - static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx) 749 + static void ctr_paes_exit(struct crypto_skcipher *tfm) 855 750 { 856 - unsigned long fc; 751 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 752 + 753 + memzero_explicit(ctx, sizeof(*ctx)); 754 + } 755 + 756 + static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq) 757 + { 758 + struct skcipher_request *req = skcipher_request_cast(areq); 759 + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); 760 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 761 + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 762 + struct skcipher_walk *walk = &req_ctx->walk; 857 763 int rc; 858 764 859 - rc = __xts_paes_convert_key(ctx); 765 + /* walk has already been prepared */ 766 + 767 + rc = ctr_paes_do_crypt(ctx, req_ctx, true); 768 + if (rc == -EKEYEXPIRED) { 769 + /* 770 + * Protected key expired, conversion is in process. 771 + * Trigger a re-schedule of this request by returning 772 + * -ENOSPC ("hardware queue is full") to the crypto engine. 
773 + * To avoid immediately re-invocation of this callback, 774 + * tell the scheduler to voluntarily give up the CPU here. 775 + */ 776 + cond_resched(); 777 + pr_debug("rescheduling request\n"); 778 + return -ENOSPC; 779 + } else if (rc) { 780 + skcipher_walk_done(walk, rc); 781 + } 782 + 783 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 784 + pr_debug("request complete with rc=%d\n", rc); 785 + local_bh_disable(); 786 + atomic_dec(&ctx->via_engine_ctr); 787 + crypto_finalize_skcipher_request(engine, req, rc); 788 + local_bh_enable(); 789 + return rc; 790 + } 791 + 792 + static struct skcipher_engine_alg ctr_paes_alg = { 793 + .base = { 794 + .base.cra_name = "ctr(paes)", 795 + .base.cra_driver_name = "ctr-paes-s390", 796 + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ 797 + .base.cra_blocksize = 1, 798 + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 799 + .base.cra_module = THIS_MODULE, 800 + .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list), 801 + .init = ctr_paes_init, 802 + .exit = ctr_paes_exit, 803 + .min_keysize = PAES_MIN_KEYSIZE, 804 + .max_keysize = PAES_MAX_KEYSIZE, 805 + .ivsize = AES_BLOCK_SIZE, 806 + .setkey = ctr_paes_setkey, 807 + .encrypt = ctr_paes_crypt, 808 + .decrypt = ctr_paes_crypt, 809 + .chunksize = AES_BLOCK_SIZE, 810 + }, 811 + .op = { 812 + .do_one_request = ctr_paes_do_one_request, 813 + }, 814 + }; 815 + 816 + /* 817 + * PAES XTS implementation 818 + */ 819 + 820 + struct xts_full_km_param { 821 + u8 key[64]; 822 + u8 tweak[16]; 823 + u8 nap[16]; 824 + u8 wkvp[32]; 825 + } __packed; 826 + 827 + struct xts_km_param { 828 + u8 key[PAES_256_PROTKEY_SIZE]; 829 + u8 init[16]; 830 + } __packed; 831 + 832 + struct xts_pcc_param { 833 + u8 key[PAES_256_PROTKEY_SIZE]; 834 + u8 tweak[16]; 835 + u8 block[16]; 836 + u8 bit[16]; 837 + u8 xts[16]; 838 + } __packed; 839 + 840 + struct s390_pxts_req_ctx { 841 + unsigned long modifier; 842 + struct skcipher_walk walk; 843 + bool param_init_done; 844 + union { 
845 + struct xts_full_km_param full_km_param; 846 + struct xts_km_param km_param; 847 + } param; 848 + }; 849 + 850 + static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 851 + unsigned int in_keylen) 852 + { 853 + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 854 + u8 ckey[2 * AES_MAX_KEY_SIZE]; 855 + unsigned int ckey_len; 856 + long fc; 857 + int rc; 858 + 859 + if ((in_keylen == 32 || in_keylen == 64) && 860 + xts_verify_key(tfm, in_key, in_keylen)) 861 + return -EINVAL; 862 + 863 + /* set raw key into context */ 864 + rc = pxts_ctx_setkey(ctx, in_key, in_keylen); 860 865 if (rc) 861 - return rc; 866 + goto out; 867 + 868 + /* convert raw key(s) into protected key(s) */ 869 + rc = pxts_convert_key(ctx); 870 + if (rc) 871 + goto out; 872 + 873 + /* 874 + * xts_verify_key verifies the key length is not odd and makes 875 + * sure that the two keys are not the same. This can be done 876 + * on the two protected keys as well - but not for full xts keys. 877 + */ 878 + if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 || 879 + ctx->pk[0].type == PKEY_KEYTYPE_AES_256) { 880 + ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 881 + AES_KEYSIZE_128 : AES_KEYSIZE_256; 882 + memcpy(ckey, ctx->pk[0].protkey, ckey_len); 883 + memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); 884 + rc = xts_verify_key(tfm, ckey, 2 * ckey_len); 885 + memzero_explicit(ckey, sizeof(ckey)); 886 + if (rc) 887 + goto out; 888 + } 862 889 863 890 /* Pick the correct function code based on the protected key type */ 864 891 switch (ctx->pk[0].type) { ··· 1245 538 fc = 0; 1246 539 break; 1247 540 } 1248 - 1249 - /* Check if the function code is available */ 1250 541 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; 1251 542 1252 - return ctx->fc ? 0 : -EINVAL; 543 + rc = fc ? 
0 : -EINVAL; 544 + 545 + out: 546 + pr_debug("rc=%d\n", rc); 547 + return rc; 1253 548 } 1254 549 1255 - static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 1256 - unsigned int in_keylen) 550 + static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx, 551 + struct s390_pxts_req_ctx *req_ctx, 552 + bool maysleep) 1257 553 { 1258 - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 1259 - u8 ckey[2 * AES_MAX_KEY_SIZE]; 1260 - unsigned int ckey_len; 1261 - int rc; 1262 - 1263 - if ((in_keylen == 32 || in_keylen == 64) && 1264 - xts_verify_key(tfm, in_key, in_keylen)) 1265 - return -EINVAL; 1266 - 1267 - _free_kb_keybuf(&ctx->kb); 1268 - rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen); 1269 - if (rc) 1270 - return rc; 1271 - 1272 - rc = __xts_paes_set_key(ctx); 1273 - if (rc) 1274 - return rc; 1275 - 1276 - /* 1277 - * It is not possible on a single protected key (e.g. full AES-XTS) to 1278 - * check, if k1 and k2 are the same. 1279 - */ 1280 - if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 || 1281 - ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256) 1282 - return 0; 1283 - /* 1284 - * xts_verify_key verifies the key length is not odd and makes 1285 - * sure that the two keys are not the same. This can be done 1286 - * on the two protected keys as well 1287 - */ 1288 - ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
1289 - AES_KEYSIZE_128 : AES_KEYSIZE_256; 1290 - memcpy(ckey, ctx->pk[0].protkey, ckey_len); 1291 - memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); 1292 - return xts_verify_key(tfm, ckey, 2*ckey_len); 1293 - } 1294 - 1295 - static int paes_xts_crypt_full(struct skcipher_request *req, 1296 - unsigned long modifier) 1297 - { 1298 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1299 - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 554 + struct xts_full_km_param *param = &req_ctx->param.full_km_param; 555 + struct skcipher_walk *walk = &req_ctx->walk; 1300 556 unsigned int keylen, offset, nbytes, n, k; 1301 - struct { 1302 - u8 key[64]; 1303 - u8 tweak[16]; 1304 - u8 nap[16]; 1305 - u8 wkvp[32]; 1306 - } fxts_param = { 1307 - .nap = {0}, 1308 - }; 1309 - struct skcipher_walk walk; 1310 - int rc; 557 + int rc = 0; 1311 558 1312 - rc = skcipher_walk_virt(&walk, req, false); 1313 - if (rc) 1314 - return rc; 559 + /* 560 + * The calling function xts_paes_do_crypt() ensures the 561 + * protected key state is always PK_STATE_VALID when this 562 + * function is invoked. 563 + */ 1315 564 1316 565 keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64; 1317 566 offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 
32 : 0; 1318 567 1319 - spin_lock_bh(&ctx->pk_lock); 1320 - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen); 1321 - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, 1322 - sizeof(fxts_param.wkvp)); 1323 - spin_unlock_bh(&ctx->pk_lock); 1324 - memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak)); 1325 - fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */ 568 + if (!req_ctx->param_init_done) { 569 + memset(param, 0, sizeof(*param)); 570 + spin_lock_bh(&ctx->pk_lock); 571 + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); 572 + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); 573 + spin_unlock_bh(&ctx->pk_lock); 574 + memcpy(param->tweak, walk->iv, sizeof(param->tweak)); 575 + param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */ 576 + req_ctx->param_init_done = true; 577 + } 1326 578 1327 - while ((nbytes = walk.nbytes) != 0) { 579 + /* 580 + * Note that in case of partial processing or failure the walk 581 + * is NOT unmapped here. So a follow up task may reuse the walk 582 + * or in case of unrecoverable failure needs to unmap it. 
583 + */ 584 + while ((nbytes = walk->nbytes) != 0) { 1328 585 /* only use complete blocks */ 1329 586 n = nbytes & ~(AES_BLOCK_SIZE - 1); 1330 - k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset, 1331 - walk.dst.virt.addr, walk.src.virt.addr, n); 587 + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, 588 + walk->dst.virt.addr, walk->src.virt.addr, n); 1332 589 if (k) 1333 - rc = skcipher_walk_done(&walk, nbytes - k); 590 + rc = skcipher_walk_done(walk, nbytes - k); 1334 591 if (k < n) { 1335 - if (__xts_paes_convert_key(ctx)) 1336 - return skcipher_walk_done(&walk, -EIO); 592 + if (!maysleep) { 593 + rc = -EKEYEXPIRED; 594 + goto out; 595 + } 596 + rc = pxts_convert_key(ctx); 597 + if (rc) 598 + goto out; 1337 599 spin_lock_bh(&ctx->pk_lock); 1338 - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, 1339 - keylen); 1340 - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, 1341 - sizeof(fxts_param.wkvp)); 600 + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); 601 + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); 1342 602 spin_unlock_bh(&ctx->pk_lock); 1343 603 } 1344 604 } 1345 605 606 + out: 607 + pr_debug("rc=%d\n", rc); 1346 608 return rc; 1347 609 } 1348 610 1349 - static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier) 611 + static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx, 612 + struct xts_km_param *param, 613 + struct skcipher_walk *walk, 614 + unsigned int keylen, 615 + unsigned int offset, bool maysleep) 1350 616 { 1351 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1352 - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 1353 - unsigned int keylen, offset, nbytes, n, k; 1354 - struct { 1355 - u8 key[PAES_256_PROTKEY_SIZE]; 1356 - u8 tweak[16]; 1357 - u8 block[16]; 1358 - u8 bit[16]; 1359 - u8 xts[16]; 1360 - } pcc_param; 1361 - struct { 1362 - u8 key[PAES_256_PROTKEY_SIZE]; 1363 - u8 init[16]; 1364 - } xts_param; 1365 - struct skcipher_walk 
walk; 1366 - int rc; 617 + struct xts_pcc_param pcc_param; 618 + unsigned long cc = 1; 619 + int rc = 0; 1367 620 1368 - rc = skcipher_walk_virt(&walk, req, false); 1369 - if (rc) 1370 - return rc; 621 + while (cc) { 622 + memset(&pcc_param, 0, sizeof(pcc_param)); 623 + memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); 624 + spin_lock_bh(&ctx->pk_lock); 625 + memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); 626 + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); 627 + spin_unlock_bh(&ctx->pk_lock); 628 + cc = cpacf_pcc(ctx->fc, pcc_param.key + offset); 629 + if (cc) { 630 + if (!maysleep) { 631 + rc = -EKEYEXPIRED; 632 + break; 633 + } 634 + rc = pxts_convert_key(ctx); 635 + if (rc) 636 + break; 637 + continue; 638 + } 639 + memcpy(param->init, pcc_param.xts, 16); 640 + } 641 + 642 + memzero_explicit(pcc_param.key, sizeof(pcc_param.key)); 643 + return rc; 644 + } 645 + 646 + static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx, 647 + struct s390_pxts_req_ctx *req_ctx, 648 + bool maysleep) 649 + { 650 + struct xts_km_param *param = &req_ctx->param.km_param; 651 + struct skcipher_walk *walk = &req_ctx->walk; 652 + unsigned int keylen, offset, nbytes, n, k; 653 + int rc = 0; 654 + 655 + /* 656 + * The calling function xts_paes_do_crypt() ensures the 657 + * protected key state is always PK_STATE_VALID when this 658 + * function is invoked. 659 + */ 1371 660 1372 661 keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; 1373 662 offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
16 : 0; 1374 663 1375 - memset(&pcc_param, 0, sizeof(pcc_param)); 1376 - memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); 1377 - spin_lock_bh(&ctx->pk_lock); 1378 - memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); 1379 - memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); 1380 - spin_unlock_bh(&ctx->pk_lock); 1381 - cpacf_pcc(ctx->fc, pcc_param.key + offset); 1382 - memcpy(xts_param.init, pcc_param.xts, 16); 664 + if (!req_ctx->param_init_done) { 665 + rc = __xts_2keys_prep_param(ctx, param, walk, 666 + keylen, offset, maysleep); 667 + if (rc) 668 + goto out; 669 + req_ctx->param_init_done = true; 670 + } 1383 671 1384 - while ((nbytes = walk.nbytes) != 0) { 672 + /* 673 + * Note that in case of partial processing or failure the walk 674 + * is NOT unmapped here. So a follow up task may reuse the walk 675 + * or in case of unrecoverable failure needs to unmap it. 676 + */ 677 + while ((nbytes = walk->nbytes) != 0) { 1385 678 /* only use complete blocks */ 1386 679 n = nbytes & ~(AES_BLOCK_SIZE - 1); 1387 - k = cpacf_km(ctx->fc | modifier, xts_param.key + offset, 1388 - walk.dst.virt.addr, walk.src.virt.addr, n); 680 + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, 681 + walk->dst.virt.addr, walk->src.virt.addr, n); 1389 682 if (k) 1390 - rc = skcipher_walk_done(&walk, nbytes - k); 683 + rc = skcipher_walk_done(walk, nbytes - k); 1391 684 if (k < n) { 1392 - if (__xts_paes_convert_key(ctx)) 1393 - return skcipher_walk_done(&walk, -EIO); 685 + if (!maysleep) { 686 + rc = -EKEYEXPIRED; 687 + goto out; 688 + } 689 + rc = pxts_convert_key(ctx); 690 + if (rc) 691 + goto out; 1394 692 spin_lock_bh(&ctx->pk_lock); 1395 - memcpy(xts_param.key + offset, 1396 - ctx->pk[0].protkey, keylen); 693 + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); 1397 694 spin_unlock_bh(&ctx->pk_lock); 1398 695 } 1399 696 } 1400 697 698 + out: 699 + pr_debug("rc=%d\n", rc); 700 + return rc; 701 + } 702 + 703 + static int 
xts_paes_do_crypt(struct s390_pxts_ctx *ctx, 704 + struct s390_pxts_req_ctx *req_ctx, 705 + bool maysleep) 706 + { 707 + int pk_state, rc = 0; 708 + 709 + /* fetch and check protected key state */ 710 + spin_lock_bh(&ctx->pk_lock); 711 + pk_state = ctx->pk_state; 712 + switch (pk_state) { 713 + case PK_STATE_NO_KEY: 714 + rc = -ENOKEY; 715 + break; 716 + case PK_STATE_CONVERT_IN_PROGRESS: 717 + rc = -EKEYEXPIRED; 718 + break; 719 + case PK_STATE_VALID: 720 + break; 721 + default: 722 + rc = pk_state < 0 ? pk_state : -EIO; 723 + break; 724 + } 725 + spin_unlock_bh(&ctx->pk_lock); 726 + if (rc) 727 + goto out; 728 + 729 + /* Call the 'real' crypt function based on the xts prot key type. */ 730 + switch (ctx->fc) { 731 + case CPACF_KM_PXTS_128: 732 + case CPACF_KM_PXTS_256: 733 + rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep); 734 + break; 735 + case CPACF_KM_PXTS_128_FULL: 736 + case CPACF_KM_PXTS_256_FULL: 737 + rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep); 738 + break; 739 + default: 740 + rc = -EINVAL; 741 + } 742 + 743 + out: 744 + pr_debug("rc=%d\n", rc); 1401 745 return rc; 1402 746 } 1403 747 1404 748 static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) 1405 749 { 750 + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); 1406 751 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1407 752 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 753 + struct skcipher_walk *walk = &req_ctx->walk; 754 + int rc; 1408 755 1409 - switch (ctx->fc) { 1410 - case CPACF_KM_PXTS_128: 1411 - case CPACF_KM_PXTS_256: 1412 - return paes_xts_crypt(req, modifier); 1413 - case CPACF_KM_PXTS_128_FULL: 1414 - case CPACF_KM_PXTS_256_FULL: 1415 - return paes_xts_crypt_full(req, modifier); 1416 - default: 1417 - return -EINVAL; 756 + /* 757 + * Attempt synchronous encryption first. If it fails, schedule the request 758 + * asynchronously via the crypto engine. 
To preserve execution order, 759 + * once a request is queued to the engine, further requests using the same 760 + * tfm will also be routed through the engine. 761 + */ 762 + 763 + rc = skcipher_walk_virt(walk, req, false); 764 + if (rc) 765 + goto out; 766 + 767 + req_ctx->modifier = modifier; 768 + req_ctx->param_init_done = false; 769 + 770 + /* Try synchronous operation if no active engine usage */ 771 + if (!atomic_read(&ctx->via_engine_ctr)) { 772 + rc = xts_paes_do_crypt(ctx, req_ctx, false); 773 + if (rc == 0) 774 + goto out; 1418 775 } 776 + 777 + /* 778 + * If sync operation failed or key expired or there are already 779 + * requests enqueued via engine, fallback to async. Mark tfm as 780 + * using engine to serialize requests. 781 + */ 782 + if (rc == 0 || rc == -EKEYEXPIRED) { 783 + atomic_inc(&ctx->via_engine_ctr); 784 + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); 785 + if (rc != -EINPROGRESS) 786 + atomic_dec(&ctx->via_engine_ctr); 787 + } 788 + 789 + if (rc != -EINPROGRESS) 790 + skcipher_walk_done(walk, rc); 791 + 792 + out: 793 + if (rc != -EINPROGRESS) 794 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 795 + pr_debug("rc=%d\n", rc); 796 + return rc; 1419 797 } 1420 798 1421 799 static int xts_paes_encrypt(struct skcipher_request *req) ··· 1513 721 return xts_paes_crypt(req, CPACF_DECRYPT); 1514 722 } 1515 723 1516 - static struct skcipher_alg xts_paes_alg = { 1517 - .base.cra_name = "xts(paes)", 1518 - .base.cra_driver_name = "xts-paes-s390", 1519 - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ 1520 - .base.cra_blocksize = AES_BLOCK_SIZE, 1521 - .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), 1522 - .base.cra_module = THIS_MODULE, 1523 - .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list), 1524 - .init = xts_paes_init, 1525 - .exit = xts_paes_exit, 1526 - .min_keysize = 2 * PAES_MIN_KEYSIZE, 1527 - .max_keysize = 2 * PAES_MAX_KEYSIZE, 1528 - .ivsize = AES_BLOCK_SIZE, 1529 - .setkey = 
xts_paes_set_key, 1530 - .encrypt = xts_paes_encrypt, 1531 - .decrypt = xts_paes_decrypt, 1532 - }; 1533 - 1534 - static int ctr_paes_init(struct crypto_skcipher *tfm) 724 + static int xts_paes_init(struct crypto_skcipher *tfm) 1535 725 { 1536 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 726 + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 1537 727 1538 - ctx->kb.key = NULL; 728 + memset(ctx, 0, sizeof(*ctx)); 1539 729 spin_lock_init(&ctx->pk_lock); 730 + 731 + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx)); 1540 732 1541 733 return 0; 1542 734 } 1543 735 1544 - static void ctr_paes_exit(struct crypto_skcipher *tfm) 736 + static void xts_paes_exit(struct crypto_skcipher *tfm) 1545 737 { 1546 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 738 + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 1547 739 1548 - _free_kb_keybuf(&ctx->kb); 740 + memzero_explicit(ctx, sizeof(*ctx)); 1549 741 } 1550 742 1551 - static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx) 743 + static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq) 1552 744 { 1553 - unsigned long fc; 1554 - int rc; 1555 - 1556 - rc = __paes_convert_key(ctx); 1557 - if (rc) 1558 - return rc; 1559 - 1560 - /* Pick the correct function code based on the protected key type */ 1561 - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 : 1562 - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 : 1563 - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? 1564 - CPACF_KMCTR_PAES_256 : 0; 1565 - 1566 - /* Check if the function code is available */ 1567 - ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; 1568 - 1569 - return ctx->fc ? 
0 : -EINVAL; 1570 - } 1571 - 1572 - static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 1573 - unsigned int key_len) 1574 - { 1575 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 1576 - int rc; 1577 - 1578 - _free_kb_keybuf(&ctx->kb); 1579 - rc = _key_to_kb(&ctx->kb, in_key, key_len); 1580 - if (rc) 1581 - return rc; 1582 - 1583 - return __ctr_paes_set_key(ctx); 1584 - } 1585 - 1586 - static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) 1587 - { 1588 - unsigned int i, n; 1589 - 1590 - /* only use complete blocks, max. PAGE_SIZE */ 1591 - memcpy(ctrptr, iv, AES_BLOCK_SIZE); 1592 - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); 1593 - for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { 1594 - memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); 1595 - crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); 1596 - ctrptr += AES_BLOCK_SIZE; 1597 - } 1598 - return n; 1599 - } 1600 - 1601 - static int ctr_paes_crypt(struct skcipher_request *req) 1602 - { 745 + struct skcipher_request *req = skcipher_request_cast(areq); 746 + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); 1603 747 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1604 - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); 1605 - u8 buf[AES_BLOCK_SIZE], *ctrptr; 1606 - struct { 1607 - u8 key[PAES_256_PROTKEY_SIZE]; 1608 - } param; 1609 - struct skcipher_walk walk; 1610 - unsigned int nbytes, n, k; 1611 - int rc, locked; 748 + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); 749 + struct skcipher_walk *walk = &req_ctx->walk; 750 + int rc; 1612 751 1613 - rc = skcipher_walk_virt(&walk, req, false); 1614 - if (rc) 1615 - return rc; 752 + /* walk has already been prepared */ 1616 753 1617 - spin_lock_bh(&ctx->pk_lock); 1618 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 1619 - spin_unlock_bh(&ctx->pk_lock); 1620 - 1621 - locked = mutex_trylock(&ctrblk_lock); 1622 - 1623 - while ((nbytes = 
walk.nbytes) >= AES_BLOCK_SIZE) { 1624 - n = AES_BLOCK_SIZE; 1625 - if (nbytes >= 2*AES_BLOCK_SIZE && locked) 1626 - n = __ctrblk_init(ctrblk, walk.iv, nbytes); 1627 - ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv; 1628 - k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr, 1629 - walk.src.virt.addr, n, ctrptr); 1630 - if (k) { 1631 - if (ctrptr == ctrblk) 1632 - memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE, 1633 - AES_BLOCK_SIZE); 1634 - crypto_inc(walk.iv, AES_BLOCK_SIZE); 1635 - rc = skcipher_walk_done(&walk, nbytes - k); 1636 - } 1637 - if (k < n) { 1638 - if (__paes_convert_key(ctx)) { 1639 - if (locked) 1640 - mutex_unlock(&ctrblk_lock); 1641 - return skcipher_walk_done(&walk, -EIO); 1642 - } 1643 - spin_lock_bh(&ctx->pk_lock); 1644 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 1645 - spin_unlock_bh(&ctx->pk_lock); 1646 - } 1647 - } 1648 - if (locked) 1649 - mutex_unlock(&ctrblk_lock); 1650 - /* 1651 - * final block may be < AES_BLOCK_SIZE, copy only nbytes 1652 - */ 1653 - if (nbytes) { 1654 - memset(buf, 0, AES_BLOCK_SIZE); 1655 - memcpy(buf, walk.src.virt.addr, nbytes); 1656 - while (1) { 1657 - if (cpacf_kmctr(ctx->fc, &param, buf, 1658 - buf, AES_BLOCK_SIZE, 1659 - walk.iv) == AES_BLOCK_SIZE) 1660 - break; 1661 - if (__paes_convert_key(ctx)) 1662 - return skcipher_walk_done(&walk, -EIO); 1663 - spin_lock_bh(&ctx->pk_lock); 1664 - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); 1665 - spin_unlock_bh(&ctx->pk_lock); 1666 - } 1667 - memcpy(walk.dst.virt.addr, buf, nbytes); 1668 - crypto_inc(walk.iv, AES_BLOCK_SIZE); 1669 - rc = skcipher_walk_done(&walk, nbytes); 754 + rc = xts_paes_do_crypt(ctx, req_ctx, true); 755 + if (rc == -EKEYEXPIRED) { 756 + /* 757 + * Protected key expired, conversion is in process. 758 + * Trigger a re-schedule of this request by returning 759 + * -ENOSPC ("hardware queue is full") to the crypto engine. 
760 + * To avoid immediate re-invocation of this callback, 761 + * tell the scheduler to voluntarily give up the CPU here. 762 + */ 763 + cond_resched(); 764 + pr_debug("rescheduling request\n"); 765 + return -ENOSPC; 766 + } else if (rc) { 767 + skcipher_walk_done(walk, rc); 1670 768 } 1671 769 770 + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); 771 + pr_debug("request complete with rc=%d\n", rc); 772 + local_bh_disable(); 773 + atomic_dec(&ctx->via_engine_ctr); 774 + crypto_finalize_skcipher_request(engine, req, rc); 775 + local_bh_enable(); 1672 776 return rc; 1673 777 } 1674 778 1675 - static struct skcipher_alg ctr_paes_alg = { 1676 - .base.cra_name = "ctr(paes)", 1677 - .base.cra_driver_name = "ctr-paes-s390", 1678 - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ 1679 - .base.cra_blocksize = 1, 1680 - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), 1681 - .base.cra_module = THIS_MODULE, 1682 - .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list), 1683 - .init = ctr_paes_init, 1684 - .exit = ctr_paes_exit, 1685 - .min_keysize = PAES_MIN_KEYSIZE, 1686 - .max_keysize = PAES_MAX_KEYSIZE, 1687 - .ivsize = AES_BLOCK_SIZE, 1688 - .setkey = ctr_paes_set_key, 1689 - .encrypt = ctr_paes_crypt, 1690 - .decrypt = ctr_paes_crypt, 1691 - .chunksize = AES_BLOCK_SIZE, 779 + static struct skcipher_engine_alg xts_paes_alg = { 780 + .base = { 781 + .base.cra_name = "xts(paes)", 782 + .base.cra_driver_name = "xts-paes-s390", 783 + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ 784 + .base.cra_blocksize = AES_BLOCK_SIZE, 785 + .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), 786 + .base.cra_module = THIS_MODULE, 787 + .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list), 788 + .init = xts_paes_init, 789 + .exit = xts_paes_exit, 790 + .min_keysize = 2 * PAES_MIN_KEYSIZE, 791 + .max_keysize = 2 * PAES_MAX_KEYSIZE, 792 + .ivsize = AES_BLOCK_SIZE, 793 + .setkey = xts_paes_setkey, 794 + .encrypt = xts_paes_encrypt, 795 + .decrypt = 
xts_paes_decrypt, 796 + }, 797 + .op = { 798 + .do_one_request = xts_paes_do_one_request, 799 + }, 1692 800 }; 1693 801 1694 - static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg) 802 + /* 803 + * alg register, unregister, module init, exit 804 + */ 805 + 806 + static struct miscdevice paes_dev = { 807 + .name = "paes", 808 + .minor = MISC_DYNAMIC_MINOR, 809 + }; 810 + 811 + static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg) 1695 812 { 1696 - if (!list_empty(&alg->base.cra_list)) 1697 - crypto_unregister_skcipher(alg); 813 + if (!list_empty(&alg->base.base.cra_list)) 814 + crypto_engine_unregister_skcipher(alg); 1698 815 } 1699 816 1700 817 static void paes_s390_fini(void) 1701 818 { 819 + if (paes_crypto_engine) { 820 + crypto_engine_stop(paes_crypto_engine); 821 + crypto_engine_exit(paes_crypto_engine); 822 + } 1702 823 __crypto_unregister_skcipher(&ctr_paes_alg); 1703 824 __crypto_unregister_skcipher(&xts_paes_alg); 1704 825 __crypto_unregister_skcipher(&cbc_paes_alg); 1705 826 __crypto_unregister_skcipher(&ecb_paes_alg); 1706 827 if (ctrblk) 1707 - free_page((unsigned long) ctrblk); 828 + free_page((unsigned long)ctrblk); 829 + misc_deregister(&paes_dev); 1708 830 } 1709 831 1710 832 static int __init paes_s390_init(void) 1711 833 { 1712 834 int rc; 835 + 836 + /* register a simple paes pseudo misc device */ 837 + rc = misc_register(&paes_dev); 838 + if (rc) 839 + return rc; 840 + 841 + /* with this pseudo device alloc and start a crypto engine */ 842 + paes_crypto_engine = 843 + crypto_engine_alloc_init_and_set(paes_dev.this_device, 844 + true, NULL, false, MAX_QLEN); 845 + if (!paes_crypto_engine) { 846 + rc = -ENOMEM; 847 + goto out_err; 848 + } 849 + rc = crypto_engine_start(paes_crypto_engine); 850 + if (rc) { 851 + crypto_engine_exit(paes_crypto_engine); 852 + paes_crypto_engine = NULL; 853 + goto out_err; 854 + } 1713 855 1714 856 /* Query available functions for KM, KMC and KMCTR */ 1715 857 
cpacf_query(CPACF_KM, &km_functions); ··· 1653 927 if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) || 1654 928 cpacf_test_func(&km_functions, CPACF_KM_PAES_192) || 1655 929 cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) { 1656 - rc = crypto_register_skcipher(&ecb_paes_alg); 930 + rc = crypto_engine_register_skcipher(&ecb_paes_alg); 1657 931 if (rc) 1658 932 goto out_err; 933 + pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name); 1659 934 } 1660 935 1661 936 if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || 1662 937 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || 1663 938 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) { 1664 - rc = crypto_register_skcipher(&cbc_paes_alg); 939 + rc = crypto_engine_register_skcipher(&cbc_paes_alg); 1665 940 if (rc) 1666 941 goto out_err; 942 + pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name); 1667 943 } 1668 944 1669 945 if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) || 1670 946 cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) { 1671 - rc = crypto_register_skcipher(&xts_paes_alg); 947 + rc = crypto_engine_register_skcipher(&xts_paes_alg); 1672 948 if (rc) 1673 949 goto out_err; 950 + pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name); 1674 951 } 1675 952 1676 953 if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || 1677 954 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) || 1678 955 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { 1679 - ctrblk = (u8 *) __get_free_page(GFP_KERNEL); 956 + ctrblk = (u8 *)__get_free_page(GFP_KERNEL); 1680 957 if (!ctrblk) { 1681 958 rc = -ENOMEM; 1682 959 goto out_err; 1683 960 } 1684 - rc = crypto_register_skcipher(&ctr_paes_alg); 961 + rc = crypto_engine_register_skcipher(&ctr_paes_alg); 1685 962 if (rc) 1686 963 goto out_err; 964 + pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name); 1687 965 } 1688 966 1689 967 return 0; 968 + 1690 969 out_err: 1691 970 
paes_s390_fini(); 1692 971 return rc;
+36
arch/s390/include/asm/asce.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_ASCE_H 4 + #define _ASM_S390_ASCE_H 5 + 6 + #include <linux/thread_info.h> 7 + #include <linux/irqflags.h> 8 + #include <asm/lowcore.h> 9 + #include <asm/ctlreg.h> 10 + 11 + static inline bool enable_sacf_uaccess(void) 12 + { 13 + unsigned long flags; 14 + 15 + if (test_thread_flag(TIF_ASCE_PRIMARY)) 16 + return true; 17 + local_irq_save(flags); 18 + local_ctl_load(1, &get_lowcore()->kernel_asce); 19 + set_thread_flag(TIF_ASCE_PRIMARY); 20 + local_irq_restore(flags); 21 + return false; 22 + } 23 + 24 + static inline void disable_sacf_uaccess(bool previous) 25 + { 26 + unsigned long flags; 27 + 28 + if (previous) 29 + return; 30 + local_irq_save(flags); 31 + local_ctl_load(1, &get_lowcore()->user_asce); 32 + clear_thread_flag(TIF_ASCE_PRIMARY); 33 + local_irq_restore(flags); 34 + } 35 + 36 + #endif /* _ASM_S390_ASCE_H */
+15 -3
arch/s390/include/asm/cpacf.h
··· 649 649 * instruction 650 650 * @func: the function code passed to PCC; see CPACF_KM_xxx defines 651 651 * @param: address of parameter block; see POP for details on each func 652 + * 653 + * Returns the condition code, this is 654 + * 0 - cc code 0 (normal completion) 655 + * 1 - cc code 1 (protected key wkvp mismatch or src operand out of range) 656 + * 2 - cc code 2 (something invalid, scalar multiply infinity, ...) 657 + * Condition code 3 (partial completion) is handled within the asm code 658 + * and never returned. 652 659 */ 653 - static inline void cpacf_pcc(unsigned long func, void *param) 660 + static inline int cpacf_pcc(unsigned long func, void *param) 654 661 { 662 + int cc; 663 + 655 664 asm volatile( 656 665 " lgr 0,%[fc]\n" 657 666 " lgr 1,%[pba]\n" 658 667 "0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */ 659 668 " brc 1,0b\n" /* handle partial completion */ 660 - : 669 + CC_IPM(cc) 670 + : CC_OUT(cc, cc) 661 671 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 662 672 [opc] "i" (CPACF_PCC) 663 - : "cc", "memory", "0", "1"); 673 + : CC_CLOBBER_LIST("memory", "0", "1")); 674 + 675 + return CC_TRANSFORM(cc); 664 676 } 665 677 666 678 /**
+1
arch/s390/include/asm/cpufeature.h
··· 15 15 S390_CPU_FEATURE_MSA, 16 16 S390_CPU_FEATURE_VXRS, 17 17 S390_CPU_FEATURE_UV, 18 + S390_CPU_FEATURE_D288, 18 19 MAX_CPU_FEATURES 19 20 }; 20 21
+41
arch/s390/include/asm/diag288.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_DIAG288_H 4 + #define _ASM_S390_DIAG288_H 5 + 6 + #include <asm/asm-extable.h> 7 + #include <asm/types.h> 8 + 9 + #define MIN_INTERVAL 15 /* Minimal time supported by diag288 */ 10 + #define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ 11 + 12 + #define WDT_DEFAULT_TIMEOUT 30 13 + 14 + /* Function codes - init, change, cancel */ 15 + #define WDT_FUNC_INIT 0 16 + #define WDT_FUNC_CHANGE 1 17 + #define WDT_FUNC_CANCEL 2 18 + #define WDT_FUNC_CONCEAL 0x80000000 19 + 20 + /* Action codes for LPAR watchdog */ 21 + #define LPARWDT_RESTART 0 22 + 23 + static inline int __diag288(unsigned int func, unsigned int timeout, 24 + unsigned long action, unsigned int len) 25 + { 26 + union register_pair r1 = { .even = func, .odd = timeout, }; 27 + union register_pair r3 = { .even = action, .odd = len, }; 28 + int rc = -EINVAL; 29 + 30 + asm volatile( 31 + " diag %[r1],%[r3],0x288\n" 32 + "0: lhi %[rc],0\n" 33 + "1:" 34 + EX_TABLE(0b, 1b) 35 + : [rc] "+d" (rc) 36 + : [r1] "d" (r1.pair), [r3] "d" (r3.pair) 37 + : "cc", "memory"); 38 + return rc; 39 + } 40 + 41 + #endif /* _ASM_S390_DIAG288_H */
+6
arch/s390/include/asm/futex.h
··· 13 13 static uaccess_kmsan_or_inline int \ 14 14 __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ 15 15 { \ 16 + bool sacf_flag; \ 16 17 int rc, new; \ 17 18 \ 18 19 instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \ 20 + sacf_flag = enable_sacf_uaccess(); \ 19 21 asm_inline volatile( \ 20 22 " sacf 256\n" \ 21 23 "0: l %[old],%[uaddr]\n" \ ··· 34 32 [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \ 35 33 : [oparg] "d" (oparg) \ 36 34 : "cc"); \ 35 + disable_sacf_uaccess(sacf_flag); \ 37 36 if (!rc) \ 38 37 instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \ 39 38 return rc; \ ··· 78 75 static uaccess_kmsan_or_inline 79 76 int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) 80 77 { 78 + bool sacf_flag; 81 79 int rc; 82 80 83 81 instrument_copy_from_user_before(uval, uaddr, sizeof(*uval)); 82 + sacf_flag = enable_sacf_uaccess(); 84 83 asm_inline volatile( 85 84 " sacf 256\n" 86 85 "0: cs %[old],%[new],%[uaddr]\n" ··· 93 88 : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr) 94 89 : [new] "d" (newval) 95 90 : "cc", "memory"); 91 + disable_sacf_uaccess(sacf_flag); 96 92 *uval = oldval; 97 93 instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0); 98 94 return rc;
+1
arch/s390/include/asm/machine.h
··· 18 18 #define MFEATURE_VM 7 19 19 #define MFEATURE_KVM 8 20 20 #define MFEATURE_LPAR 9 21 + #define MFEATURE_DIAG288 10 21 22 22 23 #ifndef __ASSEMBLY__ 23 24
+15 -2
arch/s390/include/asm/mmu_context.h
··· 13 13 #include <linux/mm_types.h> 14 14 #include <asm/tlbflush.h> 15 15 #include <asm/ctlreg.h> 16 + #include <asm/asce.h> 16 17 #include <asm-generic/mm_hooks.h> 17 18 18 19 #define init_new_context init_new_context ··· 78 77 else 79 78 get_lowcore()->user_asce.val = next->context.asce; 80 79 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 81 - /* Clear previous user-ASCE from CR7 */ 80 + /* Clear previous user-ASCE from CR1 and CR7 */ 81 + local_ctl_load(1, &s390_invalid_asce); 82 82 local_ctl_load(7, &s390_invalid_asce); 83 83 if (prev != next) 84 84 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); ··· 101 99 { 102 100 struct task_struct *tsk = current; 103 101 struct mm_struct *mm = tsk->mm; 102 + unsigned long flags; 104 103 105 104 if (mm) { 106 105 preempt_disable(); ··· 111 108 __tlb_flush_mm_lazy(mm); 112 109 preempt_enable(); 113 110 } 111 + local_irq_save(flags); 112 + if (test_thread_flag(TIF_ASCE_PRIMARY)) 113 + local_ctl_load(1, &get_lowcore()->kernel_asce); 114 + else 115 + local_ctl_load(1, &get_lowcore()->user_asce); 114 116 local_ctl_load(7, &get_lowcore()->user_asce); 117 + local_irq_restore(flags); 115 118 } 116 119 117 120 #define activate_mm activate_mm 118 121 static inline void activate_mm(struct mm_struct *prev, 119 122 struct mm_struct *next) 120 123 { 121 - switch_mm(prev, next, current); 124 + switch_mm_irqs_off(prev, next, current); 122 125 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); 126 + if (test_thread_flag(TIF_ASCE_PRIMARY)) 127 + local_ctl_load(1, &get_lowcore()->kernel_asce); 128 + else 129 + local_ctl_load(1, &get_lowcore()->user_asce); 123 130 local_ctl_load(7, &get_lowcore()->user_asce); 124 131 } 125 132
+14 -1
arch/s390/include/asm/pkey.h
··· 20 20 * @param key pointer to a buffer containing the key blob 21 21 * @param keylen size of the key blob in bytes 22 22 * @param protkey pointer to buffer receiving the protected key 23 + * @param xflags additional execution flags (see PKEY_XFLAG_* definitions below) 24 + * As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC. 23 25 * @return 0 on success, negative errno value on failure 24 26 */ 25 27 int pkey_key2protkey(const u8 *key, u32 keylen, 26 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 28 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 29 + u32 xflags); 30 + 31 + /* 32 + * If this flag is given in the xflags parameter, the pkey implementation 33 + * is not allowed to allocate memory but instead should fall back to use 34 + * preallocated memory or simple fail with -ENOMEM. 35 + * This flag is for protected key derive within a cipher or similar 36 + * which must not allocate memory which would cause io operations - see 37 + * also the CRYPTO_ALG_ALLOCATES_MEMORY flag in crypto.h. 38 + */ 39 + #define PKEY_XFLAG_NOMEMALLOC 0x0001 27 40 28 41 #endif /* _KAPI_PKEY_H */
+39 -8
arch/s390/include/asm/ptrace.h
··· 9 9 10 10 #include <linux/bits.h> 11 11 #include <uapi/asm/ptrace.h> 12 + #include <asm/thread_info.h> 12 13 #include <asm/tpi.h> 13 14 14 15 #define PIF_SYSCALL 0 /* inside a system call */ ··· 127 126 struct tpi_info tpi_info; 128 127 }; 129 128 unsigned long flags; 130 - unsigned long cr1; 131 129 unsigned long last_break; 132 130 }; 133 131 ··· 229 229 230 230 int regs_query_register_offset(const char *name); 231 231 const char *regs_query_register_name(unsigned int offset); 232 - unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); 233 - unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); 232 + 233 + static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs) 234 + { 235 + return regs->gprs[15]; 236 + } 237 + 238 + static __always_inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) 239 + { 240 + if (offset >= NUM_GPRS) 241 + return 0; 242 + return regs->gprs[offset]; 243 + } 244 + 245 + static __always_inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 246 + { 247 + unsigned long ksp = kernel_stack_pointer(regs); 248 + 249 + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); 250 + } 251 + 252 + /** 253 + * regs_get_kernel_stack_nth() - get Nth entry of the stack 254 + * @regs:pt_regs which contains kernel stack pointer. 255 + * @n:stack entry number. 256 + * 257 + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 258 + * is specified by @regs. If the @n th entry is NOT in the kernel stack, 259 + * this returns 0. 
260 + */ 261 + static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 262 + { 263 + unsigned long addr; 264 + 265 + addr = kernel_stack_pointer(regs) + n * sizeof(long); 266 + if (!regs_within_kernel_stack(regs, addr)) 267 + return 0; 268 + return READ_ONCE_NOCHECK(addr); 269 + } 234 270 235 271 /** 236 272 * regs_get_kernel_argument() - get Nth function argument in kernel ··· 285 249 return regs_get_register(regs, 2 + n); 286 250 n -= NR_REG_ARGUMENTS; 287 251 return regs_get_kernel_stack_nth(regs, argoffset + n); 288 - } 289 - 290 - static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) 291 - { 292 - return regs->gprs[15]; 293 252 } 294 253 295 254 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
-20
arch/s390/include/asm/string.h
··· 26 26 #define __HAVE_ARCH_MEMSCAN /* inline & arch function */ 27 27 #define __HAVE_ARCH_STRCAT /* inline & arch function */ 28 28 #define __HAVE_ARCH_STRCMP /* arch function */ 29 - #define __HAVE_ARCH_STRCPY /* inline & arch function */ 30 29 #define __HAVE_ARCH_STRLCAT /* arch function */ 31 30 #define __HAVE_ARCH_STRLEN /* inline & arch function */ 32 31 #define __HAVE_ARCH_STRNCAT /* arch function */ 33 - #define __HAVE_ARCH_STRNCPY /* arch function */ 34 32 #define __HAVE_ARCH_STRNLEN /* inline & arch function */ 35 33 #define __HAVE_ARCH_STRSTR /* arch function */ 36 34 #define __HAVE_ARCH_MEMSET16 /* arch function */ ··· 40 42 int strcmp(const char *s1, const char *s2); 41 43 size_t strlcat(char *dest, const char *src, size_t n); 42 44 char *strncat(char *dest, const char *src, size_t n); 43 - char *strncpy(char *dest, const char *src, size_t n); 44 45 char *strstr(const char *s1, const char *s2); 45 46 #endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */ 46 47 ··· 152 155 } 153 156 #endif 154 157 155 - #ifdef __HAVE_ARCH_STRCPY 156 - static inline char *strcpy(char *dst, const char *src) 157 - { 158 - char *ret = dst; 159 - 160 - asm volatile( 161 - " lghi 0,0\n" 162 - "0: mvst %[dst],%[src]\n" 163 - " jo 0b" 164 - : [dst] "+&a" (dst), [src] "+&a" (src) 165 - : 166 - : "cc", "memory", "0"); 167 - return ret; 168 - } 169 - #endif 170 - 171 158 #if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) 172 159 static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) 173 160 { ··· 189 208 void *memchr(const void * s, int c, size_t n); 190 209 void *memscan(void *s, int c, size_t n); 191 210 char *strcat(char *dst, const char *src); 192 - char *strcpy(char *dst, const char *src); 193 211 size_t strlen(const char *s); 194 212 size_t strnlen(const char * s, size_t n); 195 213 #endif /* !IN_ARCH_STRING_C */
+3 -2
arch/s390/include/asm/thread_info.h
··· 9 9 #define _ASM_THREAD_INFO_H 10 10 11 11 #include <linux/bits.h> 12 + #include <vdso/page.h> 12 13 13 14 /* 14 15 * General size of kernel stacks ··· 25 24 #define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) 26 25 27 26 #ifndef __ASSEMBLY__ 28 - #include <asm/lowcore.h> 29 - #include <asm/page.h> 30 27 31 28 /* 32 29 * low level task data that entry.S needs immediate access to ··· 63 64 #define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */ 64 65 #define TIF_UPROBE 4 /* breakpointed or single-stepping */ 65 66 #define TIF_PATCH_PENDING 5 /* pending live patching update */ 67 + #define TIF_ASCE_PRIMARY 6 /* primary asce is kernel asce */ 66 68 #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ 67 69 #define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */ 68 70 #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ ··· 85 85 #define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) 86 86 #define _TIF_UPROBE BIT(TIF_UPROBE) 87 87 #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) 88 + #define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY) 88 89 #define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) 89 90 #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) 90 91 #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
+12
arch/s390/include/asm/uaccess.h
··· 19 19 #include <asm/extable.h> 20 20 #include <asm/facility.h> 21 21 #include <asm-generic/access_ok.h> 22 + #include <asm/asce.h> 22 23 #include <linux/instrumented.h> 23 24 24 25 void debug_user_asce(int exit); ··· 479 478 __uint128_t old, __uint128_t new, 480 479 unsigned long key, int size) 481 480 { 481 + bool sacf_flag; 482 482 int rc = 0; 483 483 484 484 switch (size) { ··· 492 490 _old = ((unsigned int)old & 0xff) << shift; 493 491 _new = ((unsigned int)new & 0xff) << shift; 494 492 mask = ~(0xff << shift); 493 + sacf_flag = enable_sacf_uaccess(); 495 494 asm_inline volatile( 496 495 " spka 0(%[key])\n" 497 496 " sacf 256\n" ··· 527 524 [default_key] "J" (PAGE_DEFAULT_KEY), 528 525 [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) 529 526 : "memory", "cc"); 527 + disable_sacf_uaccess(sacf_flag); 530 528 *(unsigned char *)uval = prev >> shift; 531 529 if (!count) 532 530 rc = -EAGAIN; ··· 542 538 _old = ((unsigned int)old & 0xffff) << shift; 543 539 _new = ((unsigned int)new & 0xffff) << shift; 544 540 mask = ~(0xffff << shift); 541 + sacf_flag = enable_sacf_uaccess(); 545 542 asm_inline volatile( 546 543 " spka 0(%[key])\n" 547 544 " sacf 256\n" ··· 577 572 [default_key] "J" (PAGE_DEFAULT_KEY), 578 573 [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) 579 574 : "memory", "cc"); 575 + disable_sacf_uaccess(sacf_flag); 580 576 *(unsigned short *)uval = prev >> shift; 581 577 if (!count) 582 578 rc = -EAGAIN; ··· 586 580 case 4: { 587 581 unsigned int prev = old; 588 582 583 + sacf_flag = enable_sacf_uaccess(); 589 584 asm_inline volatile( 590 585 " spka 0(%[key])\n" 591 586 " sacf 256\n" ··· 602 595 [key] "a" (key << 4), 603 596 [default_key] "J" (PAGE_DEFAULT_KEY) 604 597 : "memory", "cc"); 598 + disable_sacf_uaccess(sacf_flag); 605 599 *(unsigned int *)uval = prev; 606 600 return rc; 607 601 } 608 602 case 8: { 609 603 unsigned long prev = old; 610 604 605 + sacf_flag = enable_sacf_uaccess(); 611 606 asm_inline volatile( 612 607 " spka 0(%[key])\n" 613 608 " sacf 
256\n" ··· 625 616 [key] "a" (key << 4), 626 617 [default_key] "J" (PAGE_DEFAULT_KEY) 627 618 : "memory", "cc"); 619 + disable_sacf_uaccess(sacf_flag); 628 620 *(unsigned long *)uval = prev; 629 621 return rc; 630 622 } 631 623 case 16: { 632 624 __uint128_t prev = old; 633 625 626 + sacf_flag = enable_sacf_uaccess(); 634 627 asm_inline volatile( 635 628 " spka 0(%[key])\n" 636 629 " sacf 256\n" ··· 648 637 [key] "a" (key << 4), 649 638 [default_key] "J" (PAGE_DEFAULT_KEY) 650 639 : "memory", "cc"); 640 + disable_sacf_uaccess(sacf_flag); 651 641 *(__uint128_t *)uval = prev; 652 642 return rc; 653 643 }
+3 -2
arch/s390/include/asm/uv.h
··· 616 616 return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); 617 617 } 618 618 619 - int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], 620 - struct uv_secret_list_item_hdr *secret); 619 + int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], 620 + struct uv_secret_list *list, 621 + struct uv_secret_list_item_hdr *secret); 621 622 int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size); 622 623 623 624 extern int prot_virt_host;
-1
arch/s390/kernel/asm-offsets.c
··· 50 50 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); 51 51 OFFSET(__PT_INT_CODE, pt_regs, int_code); 52 52 OFFSET(__PT_FLAGS, pt_regs, flags); 53 - OFFSET(__PT_CR1, pt_regs, cr1); 54 53 OFFSET(__PT_LAST_BREAK, pt_regs, last_break); 55 54 DEFINE(__PT_SIZE, sizeof(struct pt_regs)); 56 55 BLANK();
+1 -1
arch/s390/kernel/cert_store.c
··· 138 138 * First 64 bytes of the key description is key name in EBCDIC CP 500. 139 139 * Convert it to ASCII for displaying in /proc/keys. 140 140 */ 141 - strscpy(ascii, key->description, sizeof(ascii)); 141 + strscpy(ascii, key->description); 142 142 EBCASC_500(ascii, VC_NAME_LEN_BYTES); 143 143 seq_puts(m, ascii); 144 144
+5
arch/s390/kernel/cpufeature.c
··· 5 5 6 6 #include <linux/cpufeature.h> 7 7 #include <linux/bug.h> 8 + #include <asm/machine.h> 8 9 #include <asm/elf.h> 9 10 10 11 enum { 11 12 TYPE_HWCAP, 12 13 TYPE_FACILITY, 14 + TYPE_MACHINE, 13 15 }; 14 16 15 17 struct s390_cpu_feature { ··· 23 21 [S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA}, 24 22 [S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS}, 25 23 [S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158}, 24 + [S390_CPU_FEATURE_D288] = {.type = TYPE_MACHINE, .num = MFEATURE_DIAG288}, 26 25 }; 27 26 28 27 /* ··· 41 38 return !!(elf_hwcap & BIT(feature->num)); 42 39 case TYPE_FACILITY: 43 40 return test_facility(feature->num); 41 + case TYPE_MACHINE: 42 + return test_machine_feature(feature->num); 44 43 default: 45 44 WARN_ON_ONCE(1); 46 45 return 0;
+1 -1
arch/s390/kernel/crash_dump.c
··· 354 354 355 355 memset(&prpsinfo, 0, sizeof(prpsinfo)); 356 356 prpsinfo.pr_sname = 'R'; 357 - strcpy(prpsinfo.pr_fname, "vmlinux"); 357 + strscpy(prpsinfo.pr_fname, "vmlinux"); 358 358 return nt_init(ptr, PRPSINFO, prpsinfo); 359 359 } 360 360
+1 -1
arch/s390/kernel/debug.c
··· 251 251 rc->level = level; 252 252 rc->buf_size = buf_size; 253 253 rc->entry_size = sizeof(debug_entry_t) + buf_size; 254 - strscpy(rc->name, name, sizeof(rc->name)); 254 + strscpy(rc->name, name); 255 255 memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); 256 256 memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *)); 257 257 refcount_set(&(rc->ref_count), 0);
+4 -16
arch/s390/kernel/entry.S
··· 116 116 .macro SIEEXIT sie_control,lowcore 117 117 lg %r9,\sie_control # get control block pointer 118 118 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 119 - lctlg %c1,%c1,__LC_KERNEL_ASCE(\lowcore) # load primary asce 119 + lctlg %c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce 120 120 lg %r9,__LC_CURRENT(\lowcore) 121 121 mvi __TI_sie(%r9),0 122 122 larl %r9,sie_exit # skip forward to sie_exit ··· 208 208 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 209 209 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 210 210 GET_LC %r14 211 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r14) # load primary asce 211 + lctlg %c1,%c1,__LC_USER_ASCE(%r14) # load primary asce 212 212 lg %r14,__LC_CURRENT(%r14) 213 213 mvi __TI_sie(%r14),0 214 214 SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL) ··· 240 240 lghi %r14,0 241 241 .Lsysc_per: 242 242 STBEAR __LC_LAST_BREAK(%r13) 243 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) 244 243 lg %r15,__LC_KERNEL_STACK(%r13) 245 244 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 246 245 stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) ··· 260 261 lgr %r3,%r14 261 262 brasl %r14,__do_syscall 262 263 STACKLEAK_ERASE 263 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) 264 264 mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 265 265 BPON 266 266 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) ··· 276 278 brasl %r14,__ret_from_fork 277 279 STACKLEAK_ERASE 278 280 GET_LC %r13 279 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) 280 281 mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 281 282 BPON 282 283 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) ··· 296 299 lmg %r8,%r9,__LC_PGM_OLD_PSW(%r13) 297 300 xgr %r10,%r10 298 301 tmhh %r8,0x0001 # coming from user space? 
299 - jno .Lpgm_skip_asce 300 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) 301 - j 3f # -> fault in user space 302 - .Lpgm_skip_asce: 302 + jo 3f # -> fault in user space 303 303 #if IS_ENABLED(CONFIG_KVM) 304 304 lg %r11,__LC_CURRENT(%r13) 305 305 tm __TI_sie(%r11),0xff ··· 334 340 tmhh %r8,0x0001 # returning to user space? 335 341 jno .Lpgm_exit_kernel 336 342 STACKLEAK_ERASE 337 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) 338 343 BPON 339 344 stpt __LC_EXIT_TIMER(%r13) 340 345 .Lpgm_exit_kernel: ··· 377 384 #endif 378 385 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 379 386 j 2f 380 - 1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) 381 - lg %r15,__LC_KERNEL_STACK(%r13) 387 + 1: lg %r15,__LC_KERNEL_STACK(%r13) 382 388 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 383 389 la %r11,STACK_FRAME_OVERHEAD(%r15) 384 390 stmg %r0,%r7,__PT_R0(%r11) ··· 400 408 tmhh %r8,0x0001 # returning to user ? 401 409 jno 2f 402 410 STACKLEAK_ERASE 403 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) 404 411 BPON 405 412 stpt __LC_EXIT_TIMER(%r13) 406 413 2: LBEAR __PT_LAST_BREAK(%r11) ··· 467 476 .Lmcck_user: 468 477 lg %r15,__LC_MCCK_STACK(%r13) 469 478 la %r11,STACK_FRAME_OVERHEAD(%r15) 470 - stctg %c1,%c1,__PT_CR1(%r11) 471 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) 472 479 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 473 480 lay %r14,__LC_GPREGS_SAVE_AREA(%r13) 474 481 mvc __PT_R0(128,%r11),0(%r14) ··· 484 495 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 485 496 lgr %r2,%r11 # pass pointer to pt_regs 486 497 brasl %r14,s390_do_machine_check 487 - lctlg %c1,%c1,__PT_CR1(%r11) 488 498 lmg %r0,%r10,__PT_R0(%r11) 489 499 mvc __LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW 490 500 tm __LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
+13 -14
arch/s390/kernel/ipl.c
··· 270 270 { \ 271 271 if (len >= sizeof(_value)) \ 272 272 return -E2BIG; \ 273 - len = strscpy(_value, buf, sizeof(_value)); \ 273 + len = strscpy(_value, buf); \ 274 274 if ((ssize_t)len < 0) \ 275 275 return len; \ 276 276 strim(_value); \ ··· 2249 2249 2250 2250 __initcall(s390_ipl_init); 2251 2251 2252 - static void __init strncpy_skip_quote(char *dst, char *src, int n) 2252 + static void __init strscpy_skip_quote(char *dst, char *src, int n) 2253 2253 { 2254 2254 int sx, dx; 2255 2255 2256 - dx = 0; 2257 - for (sx = 0; src[sx] != 0; sx++) { 2256 + if (!n) 2257 + return; 2258 + for (sx = 0, dx = 0; src[sx]; sx++) { 2258 2259 if (src[sx] == '"') 2259 2260 continue; 2260 - dst[dx++] = src[sx]; 2261 - if (dx >= n) 2261 + dst[dx] = src[sx]; 2262 + if (dx + 1 == n) 2262 2263 break; 2264 + dx++; 2263 2265 } 2266 + dst[dx] = '\0'; 2264 2267 } 2265 2268 2266 2269 static int __init vmcmd_on_reboot_setup(char *str) 2267 2270 { 2268 2271 if (!machine_is_vm()) 2269 2272 return 1; 2270 - strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE); 2271 - vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0; 2273 + strscpy_skip_quote(vmcmd_on_reboot, str, sizeof(vmcmd_on_reboot)); 2272 2274 on_reboot_trigger.action = &vmcmd_action; 2273 2275 return 1; 2274 2276 } ··· 2280 2278 { 2281 2279 if (!machine_is_vm()) 2282 2280 return 1; 2283 - strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE); 2284 - vmcmd_on_panic[VMCMD_MAX_SIZE] = 0; 2281 + strscpy_skip_quote(vmcmd_on_panic, str, sizeof(vmcmd_on_panic)); 2285 2282 on_panic_trigger.action = &vmcmd_action; 2286 2283 return 1; 2287 2284 } ··· 2290 2289 { 2291 2290 if (!machine_is_vm()) 2292 2291 return 1; 2293 - strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE); 2294 - vmcmd_on_halt[VMCMD_MAX_SIZE] = 0; 2292 + strscpy_skip_quote(vmcmd_on_halt, str, sizeof(vmcmd_on_halt)); 2295 2293 on_halt_trigger.action = &vmcmd_action; 2296 2294 return 1; 2297 2295 } ··· 2300 2300 { 2301 2301 if (!machine_is_vm()) 2302 2302 return 1; 2303 - 
strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE); 2304 - vmcmd_on_poff[VMCMD_MAX_SIZE] = 0; 2303 + strscpy_skip_quote(vmcmd_on_poff, str, sizeof(vmcmd_on_poff)); 2305 2304 on_poff_trigger.action = &vmcmd_action; 2306 2305 return 1; 2307 2306 }
+2 -2
arch/s390/kernel/perf_cpum_cf_events.c
··· 290 290 CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); 291 291 CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); 292 292 CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); 293 - CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); 294 - CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); 293 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x0108); 294 + CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x0109); 295 295 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 296 296 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 297 297 CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080);
+8 -8
arch/s390/kernel/processor.c
··· 268 268 add_device_randomness(&cpu_id, sizeof(cpu_id)); 269 269 switch (cpu_id.machine) { 270 270 default: /* Use "z10" as default. */ 271 - strcpy(elf_platform, "z10"); 271 + strscpy(elf_platform, "z10"); 272 272 break; 273 273 case 0x2817: 274 274 case 0x2818: 275 - strcpy(elf_platform, "z196"); 275 + strscpy(elf_platform, "z196"); 276 276 break; 277 277 case 0x2827: 278 278 case 0x2828: 279 - strcpy(elf_platform, "zEC12"); 279 + strscpy(elf_platform, "zEC12"); 280 280 break; 281 281 case 0x2964: 282 282 case 0x2965: 283 - strcpy(elf_platform, "z13"); 283 + strscpy(elf_platform, "z13"); 284 284 break; 285 285 case 0x3906: 286 286 case 0x3907: 287 - strcpy(elf_platform, "z14"); 287 + strscpy(elf_platform, "z14"); 288 288 break; 289 289 case 0x8561: 290 290 case 0x8562: 291 - strcpy(elf_platform, "z15"); 291 + strscpy(elf_platform, "z15"); 292 292 break; 293 293 case 0x3931: 294 294 case 0x3932: 295 - strcpy(elf_platform, "z16"); 295 + strscpy(elf_platform, "z16"); 296 296 break; 297 297 case 0x9175: 298 298 case 0x9176: 299 - strcpy(elf_platform, "z17"); 299 + strscpy(elf_platform, "z17"); 300 300 break; 301 301 } 302 302 return 0;
-33
arch/s390/kernel/ptrace.c
··· 1524 1524 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 1525 1525 }; 1526 1526 1527 - unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) 1528 - { 1529 - if (offset >= NUM_GPRS) 1530 - return 0; 1531 - return regs->gprs[offset]; 1532 - } 1533 - 1534 1527 int regs_query_register_offset(const char *name) 1535 1528 { 1536 1529 unsigned long offset; ··· 1542 1549 if (offset >= NUM_GPRS) 1543 1550 return NULL; 1544 1551 return gpr_names[offset]; 1545 - } 1546 - 1547 - static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 1548 - { 1549 - unsigned long ksp = kernel_stack_pointer(regs); 1550 - 1551 - return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); 1552 - } 1553 - 1554 - /** 1555 - * regs_get_kernel_stack_nth() - get Nth entry of the stack 1556 - * @regs:pt_regs which contains kernel stack pointer. 1557 - * @n:stack entry number. 1558 - * 1559 - * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 1560 - * is specifined by @regs. If the @n th entry is NOT in the kernel stack, 1561 - * this returns 0. 1562 - */ 1563 - unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 1564 - { 1565 - unsigned long addr; 1566 - 1567 - addr = kernel_stack_pointer(regs) + n * sizeof(long); 1568 - if (!regs_within_kernel_stack(regs, addr)) 1569 - return 0; 1570 - return READ_ONCE_NOCHECK(addr); 1571 1552 }
+1 -1
arch/s390/kernel/smp.c
··· 263 263 abs_lc = get_abs_lowcore(); 264 264 memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area)); 265 265 put_abs_lowcore(abs_lc); 266 - lc->cregs_save_area[1] = lc->kernel_asce; 266 + lc->cregs_save_area[1] = lc->user_asce; 267 267 lc->cregs_save_area[7] = lc->user_asce; 268 268 save_access_regs((unsigned int *) lc->access_regs_save_area); 269 269 arch_spin_lock_setup(cpu);
+13 -34
arch/s390/kernel/uv.c
··· 782 782 device_initcall(uv_sysfs_init); 783 783 784 784 /* 785 - * Find the secret with the secret_id in the provided list. 785 + * Locate a secret in the list by its id. 786 + * @secret_id: search pattern. 787 + * @list: ephemeral buffer space 788 + * @secret: output data, containing the secret's metadata. 789 + * 790 + * Search for a secret with the given secret_id in the Ultravisor secret store. 786 791 * 787 792 * Context: might sleep. 788 793 */ ··· 808 803 809 804 /* 810 805 * Do the actual search for `uv_get_secret_metadata`. 806 + * @secret_id: search pattern. 807 + * @list: ephemeral buffer space 808 + * @secret: output data, containing the secret's metadata. 811 809 * 812 810 * Context: might sleep. 813 811 */ 814 - static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], 815 - struct uv_secret_list *list, 816 - struct uv_secret_list_item_hdr *secret) 812 + int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], 813 + struct uv_secret_list *list, 814 + struct uv_secret_list_item_hdr *secret) 817 815 { 818 816 u16 start_idx = 0; 819 817 u16 list_rc; ··· 838 830 839 831 return -ENOENT; 840 832 } 841 - 842 - /** 843 - * uv_get_secret_metadata() - get secret metadata for a given secret id. 844 - * @secret_id: search pattern. 845 - * @secret: output data, containing the secret's metadata. 846 - * 847 - * Search for a secret with the given secret_id in the Ultravisor secret store. 848 - * 849 - * Context: might sleep. 850 - * 851 - * Return: 852 - * * %0: - Found entry; secret->idx and secret->type are valid. 853 - * * %ENOENT - No entry found. 854 - * * %ENODEV: - Not supported: UV not available or command not available. 855 - * * %EIO: - Other unexpected UV error. 
856 - */ 857 - int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], 858 - struct uv_secret_list_item_hdr *secret) 859 - { 860 - struct uv_secret_list *buf; 861 - int rc; 862 - 863 - buf = kzalloc(sizeof(*buf), GFP_KERNEL); 864 - if (!buf) 865 - return -ENOMEM; 866 - rc = find_secret(secret_id, buf, secret); 867 - kfree(buf); 868 - return rc; 869 - } 870 - EXPORT_SYMBOL_GPL(uv_get_secret_metadata); 833 + EXPORT_SYMBOL_GPL(uv_find_secret); 871 834 872 835 /** 873 836 * uv_retrieve_secret() - get the secret value for the secret index.
-47
arch/s390/lib/string.c
··· 78 78 #endif 79 79 80 80 /** 81 - * strcpy - Copy a %NUL terminated string 82 - * @dest: Where to copy the string to 83 - * @src: Where to copy the string from 84 - * 85 - * returns a pointer to @dest 86 - */ 87 - #ifdef __HAVE_ARCH_STRCPY 88 - char *strcpy(char *dest, const char *src) 89 - { 90 - char *ret = dest; 91 - 92 - asm volatile( 93 - " lghi 0,0\n" 94 - "0: mvst %[dest],%[src]\n" 95 - " jo 0b\n" 96 - : [dest] "+&a" (dest), [src] "+&a" (src) 97 - : 98 - : "cc", "memory", "0"); 99 - return ret; 100 - } 101 - EXPORT_SYMBOL(strcpy); 102 - #endif 103 - 104 - /** 105 - * strncpy - Copy a length-limited, %NUL-terminated string 106 - * @dest: Where to copy the string to 107 - * @src: Where to copy the string from 108 - * @n: The maximum number of bytes to copy 109 - * 110 - * The result is not %NUL-terminated if the source exceeds 111 - * @n bytes. 112 - */ 113 - #ifdef __HAVE_ARCH_STRNCPY 114 - char *strncpy(char *dest, const char *src, size_t n) 115 - { 116 - size_t len = __strnend(src, n) - src; 117 - memset(dest + len, 0, n - len); 118 - memcpy(dest, src, len); 119 - return dest; 120 - } 121 - EXPORT_SYMBOL(strncpy); 122 - #endif 123 - 124 - /** 125 81 * strcat - Append one %NUL-terminated string to another 126 82 * @dest: The string to be appended to 127 83 * @src: The string to append to it ··· 137 181 * @n: The maximum numbers of bytes to copy 138 182 * 139 183 * returns a pointer to @dest 140 - * 141 - * Note that in contrast to strncpy, strncat ensures the result is 142 - * terminated. 143 184 */ 144 185 #ifdef __HAVE_ARCH_STRNCAT 145 186 char *strncat(char *dest, const char *src, size_t n)
+3 -2
arch/s390/lib/uaccess.c
··· 17 17 #ifdef CONFIG_DEBUG_ENTRY 18 18 void debug_user_asce(int exit) 19 19 { 20 + struct lowcore *lc = get_lowcore(); 20 21 struct ctlreg cr1, cr7; 21 22 22 23 local_ctl_store(1, &cr1); 23 24 local_ctl_store(7, &cr7); 24 - if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val) 25 + if (cr1.val == lc->user_asce.val && cr7.val == lc->user_asce.val) 25 26 return; 26 27 panic("incorrect ASCE on kernel %s\n" 27 28 "cr1: %016lx cr7: %016lx\n" 28 29 "kernel: %016lx user: %016lx\n", 29 30 exit ? "exit" : "entry", cr1.val, cr7.val, 30 - get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val); 31 + lc->kernel_asce.val, lc->user_asce.val); 31 32 } 32 33 #endif /*CONFIG_DEBUG_ENTRY */ 33 34
+16 -2
arch/s390/mm/extmem.c
··· 530 530 return rc; 531 531 } 532 532 533 + static void __dcss_diag_purge_on_cpu_0(void *data) 534 + { 535 + struct dcss_segment *seg = (struct dcss_segment *)data; 536 + unsigned long dummy; 537 + 538 + dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); 539 + } 540 + 533 541 /* 534 542 * Decrease the use count of a DCSS segment and remove 535 543 * it from the address space if nobody is using it ··· 546 538 void 547 539 segment_unload(char *name) 548 540 { 549 - unsigned long dummy; 550 541 struct dcss_segment *seg; 551 542 552 543 if (!machine_is_vm()) ··· 563 556 kfree(seg->res); 564 557 vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); 565 558 list_del(&seg->list); 566 - dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); 559 + /* 560 + * Workaround for z/VM issue, where calling the DCSS unload diag on 561 + * a non-IPL CPU would cause bogus sclp maximum memory detection on 562 + * next IPL. 563 + * IPL CPU 0 cannot be set offline, so the dcss_diag() call can 564 + * directly be scheduled to that CPU. 565 + */ 566 + smp_call_function_single(0, __dcss_diag_purge_on_cpu_0, seg, 1); 567 567 kfree(seg); 568 568 out_unlock: 569 569 mutex_unlock(&dcss_lock);
+8 -9
arch/s390/mm/pgalloc.c
··· 38 38 static void __crst_table_upgrade(void *arg) 39 39 { 40 40 struct mm_struct *mm = arg; 41 + struct ctlreg asce; 41 42 42 43 /* change all active ASCEs to avoid the creation of new TLBs */ 43 44 if (current->active_mm == mm) { 44 - get_lowcore()->user_asce.val = mm->context.asce; 45 - local_ctl_load(7, &get_lowcore()->user_asce); 45 + asce.val = mm->context.asce; 46 + get_lowcore()->user_asce = asce; 47 + local_ctl_load(7, &asce); 48 + if (!test_thread_flag(TIF_ASCE_PRIMARY)) 49 + local_ctl_load(1, &asce); 46 50 } 47 51 __tlb_flush_local(); 48 52 } ··· 55 51 { 56 52 unsigned long *pgd = NULL, *p4d = NULL, *__pgd; 57 53 unsigned long asce_limit = mm->context.asce_limit; 54 + 55 + mmap_assert_write_locked(mm); 58 56 59 57 /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ 60 58 VM_BUG_ON(asce_limit < _REGION2_SIZE); ··· 80 74 } 81 75 82 76 spin_lock_bh(&mm->page_table_lock); 83 - 84 - /* 85 - * This routine gets called with mmap_lock lock held and there is 86 - * no reason to optimize for the case of otherwise. However, if 87 - * that would ever change, the below check will let us know. 88 - */ 89 - VM_BUG_ON(asce_limit != mm->context.asce_limit); 90 77 91 78 if (p4d) { 92 79 __pgd = (unsigned long *) mm->pgd;
+29 -16
arch/s390/pci/pci.c
··· 45 45 /* list of all detected zpci devices */ 46 46 static LIST_HEAD(zpci_list); 47 47 static DEFINE_SPINLOCK(zpci_list_lock); 48 + static DEFINE_MUTEX(zpci_add_remove_lock); 48 49 49 50 static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE); 50 51 static DEFINE_SPINLOCK(zpci_domain_lock); ··· 70 69 EXPORT_SYMBOL_GPL(zpci_aipb); 71 70 struct airq_iv *zpci_aif_sbv; 72 71 EXPORT_SYMBOL_GPL(zpci_aif_sbv); 72 + 73 + void zpci_zdev_put(struct zpci_dev *zdev) 74 + { 75 + if (!zdev) 76 + return; 77 + mutex_lock(&zpci_add_remove_lock); 78 + kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock); 79 + mutex_unlock(&zpci_add_remove_lock); 80 + } 73 81 74 82 struct zpci_dev *get_zdev_by_fid(u32 fid) 75 83 { ··· 847 837 { 848 838 int rc; 849 839 840 + mutex_lock(&zpci_add_remove_lock); 850 841 zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state); 851 842 rc = zpci_init_iommu(zdev); 852 843 if (rc) ··· 861 850 spin_lock(&zpci_list_lock); 862 851 list_add_tail(&zdev->entry, &zpci_list); 863 852 spin_unlock(&zpci_list_lock); 853 + mutex_unlock(&zpci_add_remove_lock); 864 854 return 0; 865 855 866 856 error_destroy_iommu: 867 857 zpci_destroy_iommu(zdev); 868 858 error: 869 859 zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc); 860 + mutex_unlock(&zpci_add_remove_lock); 870 861 return rc; 871 862 } 872 863 ··· 938 925 * @zdev: the zpci_dev that was reserved 939 926 * 940 927 * Handle the case that a given zPCI function was reserved by another system. 941 - * After a call to this function the zpci_dev can not be found via 942 - * get_zdev_by_fid() anymore but may still be accessible via existing 943 - * references though it will not be functional anymore. 944 928 */ 945 929 void zpci_device_reserved(struct zpci_dev *zdev) 946 930 { 947 - /* 948 - * Remove device from zpci_list as it is going away. This also 949 - * makes sure we ignore subsequent zPCI events for this device. 
950 - */ 951 - spin_lock(&zpci_list_lock); 952 - list_del(&zdev->entry); 953 - spin_unlock(&zpci_list_lock); 931 + lockdep_assert_held(&zdev->state_lock); 932 + /* We may declare the device reserved multiple times */ 933 + if (zdev->state == ZPCI_FN_STATE_RESERVED) 934 + return; 954 935 zdev->state = ZPCI_FN_STATE_RESERVED; 955 936 zpci_dbg(3, "rsv fid:%x\n", zdev->fid); 937 + /* 938 + * The underlying device is gone. Allow the zdev to be freed 939 + * as soon as all other references are gone by accounting for 940 + * the removal as a dropped reference. 941 + */ 956 942 zpci_zdev_put(zdev); 957 943 } 958 944 ··· 959 947 { 960 948 struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); 961 949 950 + lockdep_assert_held(&zpci_add_remove_lock); 962 951 WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED); 963 - 964 - if (zdev->zbus->bus) 965 - zpci_bus_remove_device(zdev, false); 966 - 967 - if (zdev_enabled(zdev)) 968 - zpci_disable_device(zdev); 952 + /* 953 + * We already hold zpci_list_lock thanks to kref_put_lock(). 954 + * This makes sure no new reference can be taken from the list. 955 + */ 956 + list_del(&zdev->entry); 957 + spin_unlock(&zpci_list_lock); 969 958 970 959 if (zdev->has_hp_slot) 971 960 zpci_exit_slot(zdev);
+2 -5
arch/s390/pci/pci_bus.h
··· 21 21 void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); 22 22 23 23 void zpci_release_device(struct kref *kref); 24 - static inline void zpci_zdev_put(struct zpci_dev *zdev) 25 - { 26 - if (zdev) 27 - kref_put(&zdev->kref, zpci_release_device); 28 - } 24 + 25 + void zpci_zdev_put(struct zpci_dev *zdev); 29 26 30 27 static inline void zpci_zdev_get(struct zpci_dev *zdev) 31 28 {
+21 -1
arch/s390/pci/pci_event.c
··· 335 335 zdev->state = ZPCI_FN_STATE_STANDBY; 336 336 } 337 337 338 + static void zpci_event_reappear(struct zpci_dev *zdev) 339 + { 340 + lockdep_assert_held(&zdev->state_lock); 341 + /* 342 + * The zdev is in the reserved state. This means that it was presumed to 343 + * go away but there are still undropped references. Now, the platform 344 + * announced its availability again. Bring back the lingering zdev 345 + * to standby. This is safe because we hold a temporary reference 346 + * now so that it won't go away. Account for the re-appearance of the 347 + * underlying device by incrementing the reference count. 348 + */ 349 + zdev->state = ZPCI_FN_STATE_STANDBY; 350 + zpci_zdev_get(zdev); 351 + zpci_dbg(1, "rea fid:%x, fh:%x\n", zdev->fid, zdev->fh); 352 + } 353 + 338 354 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) 339 355 { 340 356 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); ··· 374 358 break; 375 359 } 376 360 } else { 361 + if (zdev->state == ZPCI_FN_STATE_RESERVED) 362 + zpci_event_reappear(zdev); 377 363 /* the configuration request may be stale */ 378 - if (zdev->state != ZPCI_FN_STATE_STANDBY) 364 + else if (zdev->state != ZPCI_FN_STATE_STANDBY) 379 365 break; 380 366 zdev->state = ZPCI_FN_STATE_CONFIGURED; 381 367 } ··· 393 375 break; 394 376 } 395 377 } else { 378 + if (zdev->state == ZPCI_FN_STATE_RESERVED) 379 + zpci_event_reappear(zdev); 396 380 zpci_update_fh(zdev, ccdf->fh); 397 381 } 398 382 break;
+10 -2
arch/s390/pci/pci_mmio.c
··· 32 32 u64 len, u8 *status) 33 33 { 34 34 int cc, exception; 35 + bool sacf_flag; 35 36 36 37 exception = 1; 38 + sacf_flag = enable_sacf_uaccess(); 37 39 asm_inline volatile ( 38 40 " sacf 256\n" 39 41 "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" ··· 46 44 : CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception) 47 45 : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src)) 48 46 : CC_CLOBBER_LIST("memory")); 47 + disable_sacf_uaccess(sacf_flag); 49 48 *status = len >> 24 & 0xff; 50 49 return exception ? -ENXIO : CC_TRANSFORM(cc); 51 50 } ··· 57 54 { 58 55 union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; 59 56 int cc, exception; 57 + bool sacf_flag; 60 58 u64 val = 0; 61 59 u64 cnt = ulen; 62 60 u8 tmp; ··· 68 64 * address space. pcistg then uses the user mappings. 69 65 */ 70 66 exception = 1; 67 + sacf_flag = enable_sacf_uaccess(); 71 68 asm_inline volatile ( 72 69 " sacf 256\n" 73 70 "0: llgc %[tmp],0(%[src])\n" ··· 86 81 CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair) 87 82 : 88 83 : CC_CLOBBER_LIST("memory")); 84 + disable_sacf_uaccess(sacf_flag); 89 85 *status = ioaddr_len.odd >> 24 & 0xff; 90 86 91 87 cc = exception ? 
-ENXIO : CC_TRANSFORM(cc); ··· 210 204 u64 ulen, u8 *status) 211 205 { 212 206 union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; 207 + bool sacf_flag; 213 208 u64 cnt = ulen; 214 209 int shift = ulen * 8; 215 210 int cc, exception; ··· 222 215 * user address @dst 223 216 */ 224 217 exception = 1; 218 + sacf_flag = enable_sacf_uaccess(); 225 219 asm_inline volatile ( 226 220 " sacf 256\n" 227 221 "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" ··· 244 236 : [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception), 245 237 CC_OUT(cc, cc), [val] "=d" (val), 246 238 [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), 247 - [shift] "+d" (shift) 239 + [shift] "+a" (shift) 248 240 : 249 241 : CC_CLOBBER_LIST("memory")); 250 - 242 + disable_sacf_uaccess(sacf_flag); 251 243 cc = exception ? -ENXIO : CC_TRANSFORM(cc); 252 244 /* did we write everything to the user space buffer? */ 253 245 if (!cc && cnt != 0)
+1 -1
drivers/pci/hotplug/s390_pci_hpc.c
··· 65 65 66 66 rc = zpci_deconfigure_device(zdev); 67 67 out: 68 - mutex_unlock(&zdev->state_lock); 69 68 if (pdev) 70 69 pci_dev_put(pdev); 70 + mutex_unlock(&zdev->state_lock); 71 71 return rc; 72 72 } 73 73
+2 -2
drivers/s390/block/dcssblk.c
··· 314 314 if (*seg_info == NULL) 315 315 return -ENOMEM; 316 316 317 - strcpy((*seg_info)->segment_name, name); 317 + strscpy((*seg_info)->segment_name, name); 318 318 319 319 /* load the segment */ 320 320 rc = segment_load(name, SEGMENT_SHARED, ··· 612 612 rc = -ENOMEM; 613 613 goto out; 614 614 } 615 - strcpy(dev_info->segment_name, local_buf); 615 + strscpy(dev_info->segment_name, local_buf); 616 616 dev_info->segment_type = seg_info->segment_type; 617 617 INIT_LIST_HEAD(&dev_info->seg_list); 618 618 }
+12 -5
drivers/s390/char/con3270.c
··· 102 102 103 103 /* Input stuff. */ 104 104 char *prompt; /* Output string for input area. */ 105 + size_t prompt_sz; /* Size of output string. */ 105 106 char *input; /* Input string for read request. */ 106 107 struct raw3270_request *read; /* Single read request. */ 107 108 struct raw3270_request *kreset; /* Single keyboard reset request. */ ··· 207 206 208 207 static void tty3270_update_prompt(struct tty3270 *tp, char *input) 209 208 { 210 - strcpy(tp->prompt, input); 209 + strscpy(tp->prompt, input, tp->prompt_sz); 211 210 tp->update_flags |= TTY_UPDATE_INPUT; 212 211 tty3270_set_timer(tp, 1); 213 212 } ··· 972 971 char *old_input, *new_input; 973 972 struct tty_struct *tty; 974 973 struct winsize ws; 974 + size_t prompt_sz; 975 975 int new_allocated, old_allocated = tp->allocated_lines; 976 976 977 977 if (old_model == new_model && ··· 984 982 return; 985 983 } 986 984 987 - new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA); 985 + prompt_sz = tty3270_input_size(new_cols); 986 + new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); 988 987 if (!new_input) 989 988 return; 990 - new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL); 989 + new_prompt = kzalloc(prompt_sz, GFP_KERNEL); 991 990 if (!new_prompt) 992 991 goto out_input; 993 992 screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated); ··· 1013 1010 old_rcl_lines = tp->rcl_lines; 1014 1011 tp->input = new_input; 1015 1012 tp->prompt = new_prompt; 1013 + tp->prompt_sz = prompt_sz; 1016 1014 tp->rcl_lines = new_rcl_lines; 1017 1015 tp->rcl_read_index = 0; 1018 1016 tp->rcl_write_index = 0; ··· 1100 1096 tty3270_create_view(int index, struct tty3270 **newtp) 1101 1097 { 1102 1098 struct tty3270 *tp; 1099 + size_t prompt_sz; 1103 1100 int rc; 1104 1101 1105 1102 if (tty3270_max_index < index + 1) ··· 1130 1125 goto out_free_screen; 1131 1126 } 1132 1127 1133 - tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA); 1128 + prompt_sz = 
tty3270_input_size(tp->view.cols); 1129 + tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); 1134 1130 if (!tp->input) { 1135 1131 rc = -ENOMEM; 1136 1132 goto out_free_converted_line; 1137 1133 } 1138 1134 1139 - tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL); 1135 + tp->prompt = kzalloc(prompt_sz, GFP_KERNEL); 1140 1136 if (!tp->prompt) { 1141 1137 rc = -ENOMEM; 1142 1138 goto out_free_input; 1143 1139 } 1140 + tp->prompt_sz = prompt_sz; 1144 1141 1145 1142 tp->rcl_lines = tty3270_alloc_recall(tp->view.cols); 1146 1143 if (!tp->rcl_lines) {
+1 -1
drivers/s390/char/diag_ftp.c
··· 159 159 goto out; 160 160 } 161 161 162 - len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident)); 162 + len = strscpy(ldfpl->fident, ftp->fname); 163 163 if (len < 0) { 164 164 len = -EINVAL; 165 165 goto out_free;
+74
drivers/s390/crypto/ap_bus.c
··· 41 41 #include <linux/module.h> 42 42 #include <asm/uv.h> 43 43 #include <asm/chsc.h> 44 + #include <linux/mempool.h> 44 45 45 46 #include "ap_bus.h" 46 47 #include "ap_debug.h" ··· 102 101 * AP bus related debug feature things. 103 102 */ 104 103 debug_info_t *ap_dbf_info; 104 + 105 + /* 106 + * There is a need for a do-not-allocate-memory path through the AP bus 107 + * layer. The pkey layer may be triggered via the in-kernel interface from 108 + * a protected key crypto algorithm (namely PAES) to convert a secure key 109 + * into a protected key. This happens in a workqueue context, so sleeping 110 + * is allowed but memory allocations causing IO operations are not permitted. 111 + * To accomplish this, an AP message memory pool with pre-allocated space 112 + * is established. When ap_init_apmsg() with use_mempool set to true is 113 + * called, instead of kmalloc() the ap message buffer is allocated from 114 + * the ap_msg_pool. This pool only holds a limited amount of buffers: 115 + * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and 116 + * exactly one of these items (if available) is returned if ap_init_apmsg() 117 + * with the use_mempool arg set to true is called. When this pool is exhausted 118 + * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without 119 + * any attempt to allocate memory and the caller has to deal with that. 120 + */ 121 + static mempool_t *ap_msg_pool; 122 + static unsigned int ap_msg_pool_min_items = 8; 123 + module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440); 124 + MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items"); 105 125 106 126 /* 107 127 * AP bus rescan related things. ··· 568 546 569 547 #define is_card_dev(x) ((x)->parent == ap_root_device) 570 548 #define is_queue_dev(x) ((x)->parent != ap_root_device) 549 + 550 + /* 551 + * ap_init_apmsg() - Initialize ap_message. 
552 + */ 553 + int ap_init_apmsg(struct ap_message *ap_msg, u32 flags) 554 + { 555 + unsigned int maxmsgsize; 556 + 557 + memset(ap_msg, 0, sizeof(*ap_msg)); 558 + ap_msg->flags = flags; 559 + 560 + if (flags & AP_MSG_FLAG_MEMPOOL) { 561 + ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool); 562 + if (!ap_msg->msg) 563 + return -ENOMEM; 564 + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; 565 + return 0; 566 + } 567 + 568 + maxmsgsize = atomic_read(&ap_max_msg_size); 569 + ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL); 570 + if (!ap_msg->msg) 571 + return -ENOMEM; 572 + ap_msg->bufsize = maxmsgsize; 573 + 574 + return 0; 575 + } 576 + EXPORT_SYMBOL(ap_init_apmsg); 577 + 578 + /* 579 + * ap_release_apmsg() - Release ap_message. 580 + */ 581 + void ap_release_apmsg(struct ap_message *ap_msg) 582 + { 583 + if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) { 584 + memzero_explicit(ap_msg->msg, ap_msg->bufsize); 585 + mempool_free(ap_msg->msg, ap_msg_pool); 586 + } else { 587 + kfree_sensitive(ap_msg->msg); 588 + } 589 + } 590 + EXPORT_SYMBOL(ap_release_apmsg); 571 591 572 592 /** 573 593 * ap_bus_match() ··· 2495 2431 /* init ap_queue hashtable */ 2496 2432 hash_init(ap_queues); 2497 2433 2434 + /* create ap msg buffer memory pool */ 2435 + ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items, 2436 + AP_DEFAULT_MAX_MSG_SIZE); 2437 + if (!ap_msg_pool) { 2438 + rc = -ENOMEM; 2439 + goto out; 2440 + } 2441 + 2498 2442 /* set up the AP permissions (ioctls, ap and aq masks) */ 2499 2443 ap_perms_init(); 2500 2444 ··· 2549 2477 out_bus: 2550 2478 bus_unregister(&ap_bus_type); 2551 2479 out: 2480 + mempool_destroy(ap_msg_pool); 2552 2481 ap_debug_exit(); 2553 2482 return rc; 2554 2483 } ··· 2560 2487 ap_irq_exit(); 2561 2488 root_device_unregister(ap_root_device); 2562 2489 bus_unregister(&ap_bus_type); 2490 + mempool_destroy(ap_msg_pool); 2563 2491 ap_debug_exit(); 2564 2492 } 2565 2493
+9 -21
drivers/s390/crypto/ap_bus.h
··· 214 214 215 215 typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); 216 216 217 + struct ap_response_type { 218 + struct completion work; 219 + int type; 220 + }; 221 + 217 222 struct ap_message { 218 223 struct list_head list; /* Request queueing. */ 219 224 unsigned long psmid; /* Message id. */ ··· 227 222 size_t bufsize; /* allocated msg buffer size */ 228 223 u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ 229 224 int rc; /* Return code for this message */ 230 - void *private; /* ap driver private pointer. */ 225 + struct ap_response_type response; 231 226 /* receive is called from tasklet context */ 232 227 void (*receive)(struct ap_queue *, struct ap_message *, 233 228 struct ap_message *); ··· 236 231 #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ 237 232 #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ 238 233 #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ 234 + #define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */ 239 235 240 - /** 241 - * ap_init_message() - Initialize ap_message. 242 - * Initialize a message before using. Otherwise this might result in 243 - * unexpected behaviour. 244 - */ 245 - static inline void ap_init_message(struct ap_message *ap_msg) 246 - { 247 - memset(ap_msg, 0, sizeof(*ap_msg)); 248 - } 249 - 250 - /** 251 - * ap_release_message() - Release ap_message. 252 - * Releases all memory used internal within the ap_message struct 253 - * Currently this is the message and private field. 
254 - */ 255 - static inline void ap_release_message(struct ap_message *ap_msg) 256 - { 257 - kfree_sensitive(ap_msg->msg); 258 - kfree_sensitive(ap_msg->private); 259 - } 236 + int ap_init_apmsg(struct ap_message *ap_msg, u32 flags); 237 + void ap_release_apmsg(struct ap_message *ap_msg); 260 238 261 239 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); 262 240 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
+26 -24
drivers/s390/crypto/pkey_api.c
··· 24 24 */ 25 25 static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 26 26 const u8 *key, size_t keylen, 27 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 27 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 28 + u32 xflags) 28 29 { 29 30 int rc; 30 31 ··· 33 32 rc = pkey_handler_key_to_protkey(apqns, nr_apqns, 34 33 key, keylen, 35 34 protkey, protkeylen, 36 - protkeytype); 35 + protkeytype, xflags); 37 36 38 37 /* if this did not work, try the slowpath way */ 39 38 if (rc == -ENODEV) { 40 39 rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, 41 40 key, keylen, 42 41 protkey, protkeylen, 43 - protkeytype); 42 + protkeytype, xflags); 44 43 if (rc) 45 44 rc = -ENODEV; 46 45 } ··· 53 52 * In-Kernel function: Transform a key blob (of any type) into a protected key 54 53 */ 55 54 int pkey_key2protkey(const u8 *key, u32 keylen, 56 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 55 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) 57 56 { 58 57 int rc; 59 58 60 59 rc = key2protkey(NULL, 0, key, keylen, 61 - protkey, protkeylen, protkeytype); 60 + protkey, protkeylen, protkeytype, xflags); 62 61 if (rc == -ENODEV) { 63 62 pkey_handler_request_modules(); 64 63 rc = key2protkey(NULL, 0, key, keylen, 65 - protkey, protkeylen, protkeytype); 64 + protkey, protkeylen, protkeytype, xflags); 66 65 } 67 66 68 67 return rc; ··· 104 103 keybuflen = sizeof(kgs.seckey.seckey); 105 104 rc = pkey_handler_gen_key(&apqn, 1, 106 105 kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, 107 - kgs.seckey.seckey, &keybuflen, NULL); 106 + kgs.seckey.seckey, &keybuflen, NULL, 0); 108 107 pr_debug("gen_key()=%d\n", rc); 109 108 if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) 110 109 rc = -EFAULT; ··· 130 129 kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, 131 130 kcs.clrkey.clrkey, 132 131 pkey_keytype_aes_to_size(kcs.keytype), 133 - kcs.seckey.seckey, &keybuflen, NULL); 132 + kcs.seckey.seckey, &keybuflen, NULL, 0); 134 133 pr_debug("clr_to_key()=%d\n", rc); 135 134 if 
(!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) 136 135 rc = -EFAULT; ··· 155 154 ksp.seckey.seckey, 156 155 sizeof(ksp.seckey.seckey), 157 156 ksp.protkey.protkey, 158 - &ksp.protkey.len, &ksp.protkey.type); 157 + &ksp.protkey.len, &ksp.protkey.type, 158 + 0); 159 159 pr_debug("key_to_protkey()=%d\n", rc); 160 160 if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 161 161 rc = -EFAULT; ··· 200 198 rc = key2protkey(NULL, 0, 201 199 tmpbuf, sizeof(*t) + keylen, 202 200 kcp.protkey.protkey, 203 - &kcp.protkey.len, &kcp.protkey.type); 201 + &kcp.protkey.len, &kcp.protkey.type, 0); 204 202 pr_debug("key2protkey()=%d\n", rc); 205 203 206 204 kfree_sensitive(tmpbuf); ··· 230 228 rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, 231 229 sizeof(kfc.seckey.seckey), 232 230 PKEY_FLAGS_MATCH_CUR_MKVP, 233 - apqns, &nr_apqns); 231 + apqns, &nr_apqns, 0); 234 232 if (rc == -ENODEV) 235 233 rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, 236 234 sizeof(kfc.seckey.seckey), 237 235 PKEY_FLAGS_MATCH_ALT_MKVP, 238 - apqns, &nr_apqns); 236 + apqns, &nr_apqns, 0); 239 237 pr_debug("apqns_for_key()=%d\n", rc); 240 238 if (rc) { 241 239 kfree(apqns); ··· 264 262 sizeof(ksp.seckey.seckey), 265 263 ksp.protkey.protkey, 266 264 &ksp.protkey.len, 267 - &ksp.protkey.type); 265 + &ksp.protkey.type, 0); 268 266 pr_debug("key_to_protkey()=%d\n", rc); 269 267 if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) 270 268 rc = -EFAULT; ··· 287 285 rc = pkey_handler_verify_key(kvk.seckey.seckey, 288 286 sizeof(kvk.seckey.seckey), 289 287 &kvk.cardnr, &kvk.domain, 290 - &keytype, &keybitsize, &flags); 288 + &keytype, &keybitsize, &flags, 0); 291 289 pr_debug("verify_key()=%d\n", rc); 292 290 if (!rc && keytype != PKEY_TYPE_CCA_DATA) 293 291 rc = -EINVAL; ··· 314 312 rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, 315 313 PKEY_TYPE_PROTKEY, 0, 0, 316 314 kgp.protkey.protkey, &kgp.protkey.len, 317 - &kgp.protkey.type); 315 + &kgp.protkey.type, 0); 318 316 pr_debug("gen_key()=%d\n", rc); 319 317 if (!rc && 
copy_to_user(ugp, &kgp, sizeof(kgp))) 320 318 rc = -EFAULT; ··· 356 354 memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); 357 355 358 356 rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), 359 - NULL, NULL, NULL, NULL, NULL); 357 + NULL, NULL, NULL, NULL, NULL, 0); 360 358 pr_debug("verify_key()=%d\n", rc); 361 359 362 360 kfree_sensitive(tmpbuf); ··· 379 377 ktp.protkey.len = sizeof(ktp.protkey.protkey); 380 378 rc = key2protkey(NULL, 0, kkey, ktp.keylen, 381 379 ktp.protkey.protkey, &ktp.protkey.len, 382 - &ktp.protkey.type); 380 + &ktp.protkey.type, 0); 383 381 pr_debug("key2protkey()=%d\n", rc); 384 382 kfree_sensitive(kkey); 385 383 if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) ··· 416 414 } 417 415 rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, 418 416 u, kgs.type, kgs.size, kgs.keygenflags, 419 - kkey, &klen, NULL); 417 + kkey, &klen, NULL, 0); 420 418 pr_debug("gen_key()=%d\n", rc); 421 419 kfree(apqns); 422 420 if (rc) { ··· 473 471 rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, 474 472 u, kcs.type, kcs.size, kcs.keygenflags, 475 473 kcs.clrkey.clrkey, kcs.size / 8, 476 - kkey, &klen, NULL); 474 + kkey, &klen, NULL, 0); 477 475 pr_debug("clr_to_key()=%d\n", rc); 478 476 kfree(apqns); 479 477 if (rc) { ··· 516 514 517 515 rc = pkey_handler_verify_key(kkey, kvk.keylen, 518 516 &kvk.cardnr, &kvk.domain, 519 - &kvk.type, &kvk.size, &kvk.flags); 517 + &kvk.type, &kvk.size, &kvk.flags, 0); 520 518 pr_debug("verify_key()=%d\n", rc); 521 519 522 520 kfree_sensitive(kkey); ··· 546 544 ktp.protkey.len = sizeof(ktp.protkey.protkey); 547 545 rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, 548 546 ktp.protkey.protkey, &ktp.protkey.len, 549 - &ktp.protkey.type); 547 + &ktp.protkey.type, 0); 550 548 pr_debug("key2protkey()=%d\n", rc); 551 549 kfree(apqns); 552 550 kfree_sensitive(kkey); ··· 581 579 return PTR_ERR(kkey); 582 580 } 583 581 rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, 584 - apqns, &nr_apqns); 582 + apqns, 
&nr_apqns, 0); 585 583 pr_debug("apqns_for_key()=%d\n", rc); 586 584 kfree_sensitive(kkey); 587 585 if (rc && rc != -ENOSPC) { ··· 628 626 } 629 627 rc = pkey_handler_apqns_for_keytype(kat.type, 630 628 kat.cur_mkvp, kat.alt_mkvp, 631 - kat.flags, apqns, &nr_apqns); 629 + kat.flags, apqns, &nr_apqns, 0); 632 630 pr_debug("apqns_for_keytype()=%d\n", rc); 633 631 if (rc && rc != -ENOSPC) { 634 632 kfree(apqns); ··· 680 678 return -ENOMEM; 681 679 } 682 680 rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, 683 - protkey, &protkeylen, &ktp.pkeytype); 681 + protkey, &protkeylen, &ktp.pkeytype, 0); 684 682 pr_debug("key2protkey()=%d\n", rc); 685 683 kfree(apqns); 686 684 kfree_sensitive(kkey);
+20 -14
drivers/s390/crypto/pkey_base.c
··· 150 150 151 151 int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 152 152 const u8 *key, u32 keylen, 153 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 153 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 154 + u32 xflags) 154 155 { 155 156 const struct pkey_handler *h; 156 157 int rc = -ENODEV; ··· 160 159 if (h && h->key_to_protkey) { 161 160 rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, 162 161 protkey, protkeylen, 163 - protkeytype); 162 + protkeytype, xflags); 164 163 } 165 164 pkey_handler_put(h); 166 165 ··· 178 177 size_t nr_apqns, 179 178 const u8 *key, u32 keylen, 180 179 u8 *protkey, u32 *protkeylen, 181 - u32 *protkeytype) 180 + u32 *protkeytype, u32 xflags) 182 181 { 183 182 const struct pkey_handler *h, *htmp[10]; 184 183 int i, n = 0, rc = -ENODEV; ··· 200 199 rc = h->slowpath_key_to_protkey(apqns, nr_apqns, 201 200 key, keylen, 202 201 protkey, protkeylen, 203 - protkeytype); 202 + protkeytype, xflags); 204 203 module_put(h->module); 205 204 } 206 205 ··· 211 210 int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 212 211 u32 keytype, u32 keysubtype, 213 212 u32 keybitsize, u32 flags, 214 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 213 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags) 215 214 { 216 215 const struct pkey_handler *h; 217 216 int rc = -ENODEV; ··· 220 219 if (h && h->gen_key) { 221 220 rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, 222 221 keybitsize, flags, 223 - keybuf, keybuflen, keyinfo); 222 + keybuf, keybuflen, keyinfo, xflags); 224 223 } 225 224 pkey_handler_put(h); 226 225 ··· 232 231 u32 keytype, u32 keysubtype, 233 232 u32 keybitsize, u32 flags, 234 233 const u8 *clrkey, u32 clrkeylen, 235 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 234 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, 235 + u32 xflags) 236 236 { 237 237 const struct pkey_handler *h; 238 238 int rc = -ENODEV; ··· 242 240 if (h && h->clr_to_key) { 243 241 rc = h->clr_to_key(apqns, 
nr_apqns, keytype, keysubtype, 244 242 keybitsize, flags, clrkey, clrkeylen, 245 - keybuf, keybuflen, keyinfo); 243 + keybuf, keybuflen, keyinfo, xflags); 246 244 } 247 245 pkey_handler_put(h); 248 246 ··· 252 250 253 251 int pkey_handler_verify_key(const u8 *key, u32 keylen, 254 252 u16 *card, u16 *dom, 255 - u32 *keytype, u32 *keybitsize, u32 *flags) 253 + u32 *keytype, u32 *keybitsize, u32 *flags, 254 + u32 xflags) 256 255 { 257 256 const struct pkey_handler *h; 258 257 int rc = -ENODEV; ··· 261 258 h = pkey_handler_get_keybased(key, keylen); 262 259 if (h && h->verify_key) { 263 260 rc = h->verify_key(key, keylen, card, dom, 264 - keytype, keybitsize, flags); 261 + keytype, keybitsize, flags, xflags); 265 262 } 266 263 pkey_handler_put(h); 267 264 ··· 270 267 EXPORT_SYMBOL(pkey_handler_verify_key); 271 268 272 269 int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, 273 - struct pkey_apqn *apqns, size_t *nr_apqns) 270 + struct pkey_apqn *apqns, size_t *nr_apqns, 271 + u32 xflags) 274 272 { 275 273 const struct pkey_handler *h; 276 274 int rc = -ENODEV; 277 275 278 276 h = pkey_handler_get_keybased(key, keylen); 279 277 if (h && h->apqns_for_key) 280 - rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); 278 + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns, 279 + xflags); 281 280 pkey_handler_put(h); 282 281 283 282 return rc; ··· 288 283 289 284 int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, 290 285 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 291 - struct pkey_apqn *apqns, size_t *nr_apqns) 286 + struct pkey_apqn *apqns, size_t *nr_apqns, 287 + u32 xflags) 292 288 { 293 289 const struct pkey_handler *h; 294 290 int rc = -ENODEV; ··· 298 292 if (h && h->apqns_for_keytype) { 299 293 rc = h->apqns_for_keytype(keysubtype, 300 294 cur_mkvp, alt_mkvp, flags, 301 - apqns, nr_apqns); 295 + apqns, nr_apqns, xflags); 302 296 } 303 297 pkey_handler_put(h); 304 298
+23 -14
drivers/s390/crypto/pkey_base.h
··· 159 159 bool (*is_supported_keytype)(enum pkey_key_type); 160 160 int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, 161 161 const u8 *key, u32 keylen, 162 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 162 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 163 + u32 xflags); 163 164 int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, 164 165 size_t nr_apqns, 165 166 const u8 *key, u32 keylen, 166 167 u8 *protkey, u32 *protkeylen, 167 - u32 *protkeytype); 168 + u32 *protkeytype, u32 xflags); 168 169 int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, 169 170 u32 keytype, u32 keysubtype, 170 171 u32 keybitsize, u32 flags, 171 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 172 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); 172 173 int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, 173 174 u32 keytype, u32 keysubtype, 174 175 u32 keybitsize, u32 flags, 175 176 const u8 *clrkey, u32 clrkeylen, 176 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 177 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); 177 178 int (*verify_key)(const u8 *key, u32 keylen, 178 179 u16 *card, u16 *dom, 179 - u32 *keytype, u32 *keybitsize, u32 *flags); 180 + u32 *keytype, u32 *keybitsize, u32 *flags, 181 + u32 xflags); 180 182 int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, 181 - struct pkey_apqn *apqns, size_t *nr_apqns); 183 + struct pkey_apqn *apqns, size_t *nr_apqns, 184 + u32 xflags); 182 185 int (*apqns_for_keytype)(enum pkey_key_type ktype, 183 186 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 184 - struct pkey_apqn *apqns, size_t *nr_apqns); 187 + struct pkey_apqn *apqns, size_t *nr_apqns, 188 + u32 xflags); 185 189 /* used internal by pkey base */ 186 190 struct list_head list; 187 191 }; ··· 203 199 204 200 int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 205 201 const u8 *key, u32 keylen, 206 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 202 + u8 *protkey, u32 
*protkeylen, u32 *protkeytype, 203 + u32 xflags); 207 204 int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, 208 205 size_t nr_apqns, 209 206 const u8 *key, u32 keylen, 210 207 u8 *protkey, u32 *protkeylen, 211 - u32 *protkeytype); 208 + u32 *protkeytype, u32 xflags); 212 209 int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 213 210 u32 keytype, u32 keysubtype, 214 211 u32 keybitsize, u32 flags, 215 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 212 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); 216 213 int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, 217 214 u32 keytype, u32 keysubtype, 218 215 u32 keybitsize, u32 flags, 219 216 const u8 *clrkey, u32 clrkeylen, 220 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); 217 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, 218 + u32 xflags); 221 219 int pkey_handler_verify_key(const u8 *key, u32 keylen, 222 220 u16 *card, u16 *dom, 223 - u32 *keytype, u32 *keybitsize, u32 *flags); 221 + u32 *keytype, u32 *keybitsize, u32 *flags, 222 + u32 xflags); 224 223 int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, 225 - struct pkey_apqn *apqns, size_t *nr_apqns); 224 + struct pkey_apqn *apqns, size_t *nr_apqns, 225 + u32 xflags); 226 226 int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, 227 227 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 228 - struct pkey_apqn *apqns, size_t *nr_apqns); 228 + struct pkey_apqn *apqns, size_t *nr_apqns, 229 + u32 xflags); 229 230 230 231 /* 231 232 * Unconditional try to load all handler modules
+67 -69
drivers/s390/crypto/pkey_cca.c
··· 70 70 } 71 71 72 72 static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, 73 - struct pkey_apqn *apqns, size_t *nr_apqns) 73 + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) 74 74 { 75 75 struct keytoken_header *hdr = (struct keytoken_header *)key; 76 - u32 _nr_apqns, *_apqns = NULL; 76 + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); 77 + u32 xflags; 77 78 int rc; 79 + 80 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; 78 81 79 82 if (!flags) 80 83 flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; ··· 110 107 /* unknown CCA internal token type */ 111 108 return -EINVAL; 112 109 } 113 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 110 + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 114 111 minhwtype, AES_MK_SET, 115 - cur_mkvp, old_mkvp, 1); 112 + cur_mkvp, old_mkvp, xflags); 116 113 if (rc) 117 114 goto out; 118 115 ··· 129 126 /* unknown CCA internal 2 token type */ 130 127 return -EINVAL; 131 128 } 132 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 129 + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 133 130 ZCRYPT_CEX7, APKA_MK_SET, 134 - cur_mkvp, old_mkvp, 1); 131 + cur_mkvp, old_mkvp, xflags); 135 132 if (rc) 136 133 goto out; 137 134 ··· 150 147 *nr_apqns = _nr_apqns; 151 148 152 149 out: 153 - kfree(_apqns); 154 150 pr_debug("rc=%d\n", rc); 155 151 return rc; 156 152 } 157 153 158 154 static int cca_apqns4type(enum pkey_key_type ktype, 159 155 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 160 - struct pkey_apqn *apqns, size_t *nr_apqns) 156 + struct pkey_apqn *apqns, size_t *nr_apqns, 157 + u32 pflags) 161 158 { 162 - u32 _nr_apqns, *_apqns = NULL; 159 + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); 160 + u32 xflags; 163 161 int rc; 162 + 163 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 164 164 165 165 zcrypt_wait_api_operational(); 166 166 ··· 177 171 old_mkvp = *((u64 *)alt_mkvp); 178 172 if (ktype == PKEY_TYPE_CCA_CIPHER) 179 173 minhwtype = ZCRYPT_CEX6; 180 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 174 + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 181 175 minhwtype, AES_MK_SET, 182 - cur_mkvp, old_mkvp, 1); 176 + cur_mkvp, old_mkvp, xflags); 183 177 if (rc) 184 178 goto out; 185 179 ··· 190 184 cur_mkvp = *((u64 *)cur_mkvp); 191 185 if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 192 186 old_mkvp = *((u64 *)alt_mkvp); 193 - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 187 + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 194 188 ZCRYPT_CEX7, APKA_MK_SET, 195 - cur_mkvp, old_mkvp, 1); 189 + cur_mkvp, old_mkvp, xflags); 196 190 if (rc) 197 191 goto out; 198 192 ··· 211 205 *nr_apqns = _nr_apqns; 212 206 213 207 out: 214 - kfree(_apqns); 215 208 pr_debug("rc=%d\n", rc); 216 209 return rc; 217 210 } 218 211 219 212 static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 220 213 const u8 *key, u32 keylen, 221 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 214 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 215 + u32 pflags) 222 216 { 223 217 struct keytoken_header *hdr = (struct keytoken_header *)key; 224 - struct pkey_apqn *local_apqns = NULL; 218 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 219 + u32 xflags; 225 220 int i, rc; 221 + 222 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 226 223 227 224 if (keylen < sizeof(*hdr)) 228 225 return -EINVAL; ··· 262 253 if (!apqns || (nr_apqns == 1 && 263 254 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 264 255 nr_apqns = MAXAPQNSINLIST; 265 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 266 - GFP_KERNEL); 267 - if (!local_apqns) 268 - return -ENOMEM; 269 - rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); 256 + rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); 270 257 if (rc) 271 258 goto out; 272 - apqns = local_apqns; 259 + apqns = _apqns; 273 260 } 274 261 275 262 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { ··· 273 268 hdr->version == TOKVER_CCA_AES) { 274 269 rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, 275 270 key, protkey, 276 - protkeylen, protkeytype); 271 + protkeylen, protkeytype, xflags); 277 272 } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 278 273 hdr->version == TOKVER_CCA_VLSC) { 279 274 rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, 280 275 key, protkey, 281 - protkeylen, protkeytype); 276 + protkeylen, protkeytype, xflags); 282 277 } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { 283 278 rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, 284 279 key, protkey, 285 - protkeylen, protkeytype); 280 + protkeylen, protkeytype, xflags); 286 281 } else { 287 282 rc = -EINVAL; 288 283 break; ··· 290 285 } 291 286 292 287 out: 293 - kfree(local_apqns); 294 288 pr_debug("rc=%d\n", rc); 295 289 return rc; 296 290 } ··· 306 302 static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 307 303 u32 keytype, u32 subtype, 308 304 u32 keybitsize, u32 flags, 309 - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 305 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) 310 306 { 311 - struct pkey_apqn *local_apqns = NULL; 307 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 312 308 int i, len, rc; 309 + u32 xflags; 310 + 311 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 313 312 314 313 /* check keytype, subtype, keybitsize */ 315 314 switch (keytype) { ··· 347 340 if (!apqns || (nr_apqns == 1 && 348 341 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 349 342 nr_apqns = MAXAPQNSINLIST; 350 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 351 - GFP_KERNEL); 352 - if (!local_apqns) 353 - return -ENOMEM; 354 343 rc = cca_apqns4type(subtype, NULL, NULL, 0, 355 - local_apqns, &nr_apqns); 344 + _apqns, &nr_apqns, pflags); 356 345 if (rc) 357 346 goto out; 358 - apqns = local_apqns; 347 + apqns = _apqns; 359 348 } 360 349 361 350 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 362 351 if (subtype == PKEY_TYPE_CCA_CIPHER) { 363 352 rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, 364 353 keybitsize, flags, 365 - keybuf, keybuflen); 354 + keybuf, keybuflen, xflags); 366 355 } else { 367 356 /* PKEY_TYPE_CCA_DATA */ 368 357 rc = cca_genseckey(apqns[i].card, apqns[i].domain, 369 - keybitsize, keybuf); 358 + keybitsize, keybuf, xflags); 370 359 *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); 371 360 } 372 361 } 373 362 374 363 out: 375 - kfree(local_apqns); 376 364 pr_debug("rc=%d\n", rc); 377 365 return rc; 378 366 } ··· 385 383 u32 keytype, u32 subtype, 386 384 u32 keybitsize, u32 flags, 387 385 const u8 *clrkey, u32 clrkeylen, 388 - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 386 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) 389 387 { 390 - struct pkey_apqn *local_apqns = NULL; 388 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 391 389 int i, len, rc; 390 + u32 xflags; 391 + 392 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 392 393 393 394 /* check keytype, subtype, clrkeylen, keybitsize */ 394 395 switch (keytype) { ··· 431 426 if (!apqns || (nr_apqns == 1 && 432 427 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 433 428 nr_apqns = MAXAPQNSINLIST; 434 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 435 - GFP_KERNEL); 436 - if (!local_apqns) 437 - return -ENOMEM; 438 429 rc = cca_apqns4type(subtype, NULL, NULL, 0, 439 - local_apqns, &nr_apqns); 430 + _apqns, &nr_apqns, pflags); 440 431 if (rc) 441 432 goto out; 442 - apqns = local_apqns; 433 + apqns = _apqns; 443 434 } 444 435 445 436 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 446 437 if (subtype == PKEY_TYPE_CCA_CIPHER) { 447 438 rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, 448 439 keybitsize, flags, clrkey, 449 - keybuf, keybuflen); 440 + keybuf, keybuflen, xflags); 450 441 } else { 451 442 /* PKEY_TYPE_CCA_DATA */ 452 443 rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, 453 - keybitsize, clrkey, keybuf); 444 + keybitsize, clrkey, keybuf, xflags); 454 445 *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); 455 446 } 456 447 } 457 448 458 449 out: 459 - kfree(local_apqns); 460 450 pr_debug("rc=%d\n", rc); 461 451 return rc; 462 452 } 463 453 464 454 static int cca_verifykey(const u8 *key, u32 keylen, 465 455 u16 *card, u16 *dom, 466 - u32 *keytype, u32 *keybitsize, u32 *flags) 456 + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) 467 457 { 468 458 struct keytoken_header *hdr = (struct keytoken_header *)key; 469 - u32 nr_apqns, *apqns = NULL; 459 + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); 460 + u32 xflags; 470 461 int rc; 462 + 463 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 471 464 472 465 if (keylen < sizeof(*hdr)) 473 466 return -EINVAL; ··· 481 478 goto out; 482 479 *keytype = PKEY_TYPE_CCA_DATA; 483 480 *keybitsize = t->bitsize; 484 - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 481 + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, 485 482 ZCRYPT_CEX3C, AES_MK_SET, 486 - t->mkvp, 0, 1); 483 + t->mkvp, 0, xflags); 487 484 if (!rc) 488 485 *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 489 486 if (rc == -ENODEV) { 490 - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 487 + nr_apqns = ARRAY_SIZE(apqns); 488 + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, 491 489 ZCRYPT_CEX3C, AES_MK_SET, 492 - 0, t->mkvp, 1); 490 + 0, t->mkvp, xflags); 493 491 if (!rc) 494 492 *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 495 493 } ··· 515 511 *keybitsize = PKEY_SIZE_AES_192; 516 512 else if (!t->plfver && t->wpllen == 640) 517 513 *keybitsize = PKEY_SIZE_AES_256; 518 - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 514 + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, 519 515 ZCRYPT_CEX6, AES_MK_SET, 520 - t->mkvp0, 0, 1); 516 + t->mkvp0, 0, xflags); 521 517 if (!rc) 522 518 *flags = PKEY_FLAGS_MATCH_CUR_MKVP; 523 519 if (rc == -ENODEV) { 524 - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, 520 + nr_apqns = ARRAY_SIZE(apqns); 521 + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, 525 522 ZCRYPT_CEX6, AES_MK_SET, 526 - 0, t->mkvp0, 1); 523 + 0, t->mkvp0, xflags); 527 524 if (!rc) 528 525 *flags = PKEY_FLAGS_MATCH_ALT_MKVP; 529 526 } ··· 540 535 } 541 536 542 537 out: 543 - kfree(apqns); 544 538 pr_debug("rc=%d\n", rc); 545 539 return rc; 546 540 } ··· 555 551 size_t nr_apqns, 556 552 const u8 *key, u32 keylen, 557 553 u8 *protkey, u32 *protkeylen, 558 - u32 *protkeytype) 554 + u32 *protkeytype, u32 pflags) 559 555 { 560 556 const struct keytoken_header *hdr = (const struct keytoken_header *)key; 561 557 const struct clearkeytoken *t = (const struct clearkeytoken *)key; 558 + u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */ 
562 559 u32 tmplen, keysize = 0; 563 - u8 *tmpbuf; 564 560 int i, rc; 565 561 566 562 if (keylen < sizeof(*hdr)) ··· 572 568 if (!keysize || t->len != keysize) 573 569 return -EINVAL; 574 570 575 - /* alloc tmp key buffer */ 576 - tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); 577 - if (!tmpbuf) 578 - return -ENOMEM; 579 - 580 571 /* try two times in case of failure */ 581 572 for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { 582 573 tmplen = SECKEYBLOBSIZE; 583 574 rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, 584 575 8 * keysize, 0, t->clearkey, t->len, 585 - tmpbuf, &tmplen, NULL); 576 + tmpbuf, &tmplen, NULL, pflags); 586 577 pr_debug("cca_clr2key()=%d\n", rc); 587 578 if (rc) 588 579 continue; 589 580 rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, 590 - protkey, protkeylen, protkeytype); 581 + protkey, protkeylen, protkeytype, pflags); 591 582 pr_debug("cca_key2protkey()=%d\n", rc); 592 583 } 593 584 594 - kfree(tmpbuf); 595 585 pr_debug("rc=%d\n", rc); 596 586 return rc; 597 587 }
+56 -61
drivers/s390/crypto/pkey_ep11.c
··· 70 70 } 71 71 72 72 static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, 73 - struct pkey_apqn *apqns, size_t *nr_apqns) 73 + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) 74 74 { 75 75 struct keytoken_header *hdr = (struct keytoken_header *)key; 76 - u32 _nr_apqns, *_apqns = NULL; 76 + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); 77 + u32 xflags; 77 78 int rc; 79 + 80 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; 78 81 79 82 if (!flags) 80 83 flags = PKEY_FLAGS_MATCH_CUR_MKVP; ··· 101 98 minhwtype = ZCRYPT_CEX7; 102 99 api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 103 100 } 104 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 105 - minhwtype, api, kb->wkvp); 101 + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 102 + minhwtype, api, kb->wkvp, xflags); 106 103 if (rc) 107 104 goto out; 108 105 ··· 118 115 minhwtype = ZCRYPT_CEX7; 119 116 api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 120 117 } 121 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 122 - minhwtype, api, kb->wkvp); 118 + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 119 + minhwtype, api, kb->wkvp, xflags); 123 120 if (rc) 124 121 goto out; 125 122 ··· 138 135 *nr_apqns = _nr_apqns; 139 136 140 137 out: 141 - kfree(_apqns); 142 138 pr_debug("rc=%d\n", rc); 143 139 return rc; 144 140 } 145 141 146 142 static int ep11_apqns4type(enum pkey_key_type ktype, 147 143 u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, 148 - struct pkey_apqn *apqns, size_t *nr_apqns) 144 + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) 149 145 { 150 - u32 _nr_apqns, *_apqns = NULL; 146 + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); 147 + u32 xflags; 151 148 int rc; 149 + 150 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 152 151 153 152 zcrypt_wait_api_operational(); 154 153 ··· 163 158 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 164 159 wkvp = cur_mkvp; 165 160 api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 166 - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 167 - ZCRYPT_CEX7, api, wkvp); 161 + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 162 + ZCRYPT_CEX7, api, wkvp, xflags); 168 163 if (rc) 169 164 goto out; 170 165 ··· 183 178 *nr_apqns = _nr_apqns; 184 179 185 180 out: 186 - kfree(_apqns); 187 181 pr_debug("rc=%d\n", rc); 188 182 return rc; 189 183 } 190 184 191 185 static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, 192 186 const u8 *key, u32 keylen, 193 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 187 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 188 + u32 pflags) 194 189 { 195 190 struct keytoken_header *hdr = (struct keytoken_header *)key; 196 - struct pkey_apqn *local_apqns = NULL; 191 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 192 + u32 xflags; 197 193 int i, rc; 194 + 195 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 198 196 199 197 if (keylen < sizeof(*hdr)) 200 198 return -EINVAL; ··· 233 225 if (!apqns || (nr_apqns == 1 && 234 226 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 235 227 nr_apqns = MAXAPQNSINLIST; 236 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 237 - GFP_KERNEL); 238 - if (!local_apqns) 239 - return -ENOMEM; 240 - rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); 228 + rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); 241 229 if (rc) 242 230 goto out; 243 - apqns = local_apqns; 231 + apqns = _apqns; 244 232 } 245 233 246 234 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { ··· 245 241 is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 246 242 rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 247 243 key, hdr->len, protkey, 248 - protkeylen, protkeytype); 244 + protkeylen, protkeytype, xflags); 249 245 } else if (hdr->type == TOKTYPE_NON_CCA && 250 246 hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 251 247 is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 252 248 rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 253 249 key, hdr->len, protkey, 254 - protkeylen, protkeytype); 250 + protkeylen, protkeytype, xflags); 255 251 } else if (hdr->type == TOKTYPE_NON_CCA && 256 252 hdr->version == TOKVER_EP11_AES && 257 253 is_ep11_keyblob(key)) { 258 254 rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, 259 255 key, hdr->len, protkey, 260 - protkeylen, protkeytype); 256 + protkeylen, protkeytype, xflags); 261 257 } else { 262 258 rc = -EINVAL; 263 259 break; ··· 265 261 } 266 262 267 263 out: 268 - kfree(local_apqns); 269 264 pr_debug("rc=%d\n", rc); 270 265 return rc; 271 266 } ··· 281 278 static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, 282 279 u32 keytype, u32 subtype, 283 280 u32 keybitsize, u32 flags, 284 - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 281 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) 285 282 { 286 
- struct pkey_apqn *local_apqns = NULL; 283 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 287 284 int i, len, rc; 285 + u32 xflags; 286 + 287 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; 288 288 289 289 /* check keytype, subtype, keybitsize */ 290 290 switch (keytype) { ··· 322 316 if (!apqns || (nr_apqns == 1 && 323 317 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 324 318 nr_apqns = MAXAPQNSINLIST; 325 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 326 - GFP_KERNEL); 327 - if (!local_apqns) 328 - return -ENOMEM; 329 319 rc = ep11_apqns4type(subtype, NULL, NULL, 0, 330 - local_apqns, &nr_apqns); 320 + _apqns, &nr_apqns, pflags); 331 321 if (rc) 332 322 goto out; 333 - apqns = local_apqns; 323 + apqns = _apqns; 334 324 } 335 325 336 326 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 337 327 rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, 338 328 keybitsize, flags, 339 - keybuf, keybuflen, subtype); 329 + keybuf, keybuflen, subtype, xflags); 340 330 } 341 331 342 332 out: 343 - kfree(local_apqns); 344 333 pr_debug("rc=%d\n", rc); 345 334 return rc; 346 335 } ··· 353 352 u32 keytype, u32 subtype, 354 353 u32 keybitsize, u32 flags, 355 354 const u8 *clrkey, u32 clrkeylen, 356 - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) 355 + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) 357 356 { 358 - struct pkey_apqn *local_apqns = NULL; 357 + struct pkey_apqn _apqns[MAXAPQNSINLIST]; 359 358 int i, len, rc; 359 + u32 xflags; 360 + 361 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; 360 362 361 363 /* check keytype, subtype, clrkeylen, keybitsize */ 362 364 switch (keytype) { ··· 399 395 if (!apqns || (nr_apqns == 1 && 400 396 apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { 401 397 nr_apqns = MAXAPQNSINLIST; 402 - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), 403 - GFP_KERNEL); 404 - if (!local_apqns) 405 - return -ENOMEM; 406 398 rc = ep11_apqns4type(subtype, NULL, NULL, 0, 407 - local_apqns, &nr_apqns); 399 + _apqns, &nr_apqns, pflags); 408 400 if (rc) 409 401 goto out; 410 - apqns = local_apqns; 402 + apqns = _apqns; 411 403 } 412 404 413 405 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 414 406 rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, 415 407 keybitsize, flags, clrkey, 416 - keybuf, keybuflen, subtype); 408 + keybuf, keybuflen, subtype, xflags); 417 409 } 418 410 419 411 out: 420 - kfree(local_apqns); 421 412 pr_debug("rc=%d\n", rc); 422 413 return rc; 423 414 } 424 415 425 416 static int ep11_verifykey(const u8 *key, u32 keylen, 426 417 u16 *card, u16 *dom, 427 - u32 *keytype, u32 *keybitsize, u32 *flags) 418 + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) 428 419 { 429 420 struct keytoken_header *hdr = (struct keytoken_header *)key; 430 - u32 nr_apqns, *apqns = NULL; 421 + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); 422 + u32 xflags; 431 423 int rc; 424 + 425 + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; 432 426 433 427 if (keylen < sizeof(*hdr)) 434 428 return -EINVAL; ··· 445 443 *keybitsize = kb->head.bitlen; 446 444 447 445 api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; 448 - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, 446 + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, 449 447 ZCRYPT_CEX7, api, 450 - ep11_kb_wkvp(key, keylen)); 448 + ep11_kb_wkvp(key, keylen), xflags); 451 449 if (rc) 452 450 goto out; 453 451 ··· 469 467 *keybitsize = kh->bitlen; 470 468 471 469 api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; 472 - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, 470 + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, 473 471 ZCRYPT_CEX7, api, 474 - ep11_kb_wkvp(key, keylen)); 472 + ep11_kb_wkvp(key, keylen), xflags); 475 473 if (rc) 476 474 goto out; 477 475 ··· 486 484 } 487 485 488 486 out: 489 - kfree(apqns); 490 487 pr_debug("rc=%d\n", rc); 491 488 return rc; 492 489 } ··· 501 500 size_t nr_apqns, 502 501 const u8 *key, u32 keylen, 503 502 u8 *protkey, u32 *protkeylen, 504 - u32 *protkeytype) 503 + u32 *protkeytype, u32 pflags) 505 504 { 506 505 const struct keytoken_header *hdr = (const struct keytoken_header *)key; 507 506 const struct clearkeytoken *t = (const struct clearkeytoken *)key; 507 + u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */ 508 508 u32 tmplen, keysize = 0; 509 - u8 *tmpbuf; 510 509 int i, rc; 511 510 512 511 if (keylen < sizeof(*hdr)) ··· 518 517 if (!keysize || t->len != keysize) 519 518 return -EINVAL; 520 519 521 - /* alloc tmp key buffer */ 522 - tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); 523 - if (!tmpbuf) 524 - return -ENOMEM; 525 - 526 520 /* try two times in case of failure */ 527 521 for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { 528 522 tmplen = MAXEP11AESKEYBLOBSIZE; 529 523 rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, 530 524 8 * keysize, 0, t->clearkey, t->len, 531 - tmpbuf, &tmplen, NULL); 525 + tmpbuf, &tmplen, NULL, pflags); 532 526 pr_debug("ep11_clr2key()=%d\n", rc); 533 527 if (rc) 534 528 continue; 535 529 rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, 536 - protkey, protkeylen, protkeytype); 530 + protkey, protkeylen, protkeytype, pflags); 537 531 pr_debug("ep11_key2protkey()=%d\n", rc); 538 532 } 539 533 540 - kfree(tmpbuf); 541 534 pr_debug("rc=%d\n", rc); 542 535 return rc; 543 536 }
+6 -3
drivers/s390/crypto/pkey_pckmo.c
··· 406 406 static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, 407 407 size_t _nr_apqns, 408 408 const u8 *key, u32 keylen, 409 - u8 *protkey, u32 *protkeylen, u32 *keyinfo) 409 + u8 *protkey, u32 *protkeylen, u32 *keyinfo, 410 + u32 _xflags __always_unused) 410 411 { 411 412 return pckmo_key2protkey(key, keylen, 412 413 protkey, protkeylen, keyinfo); ··· 416 415 static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, 417 416 u32 keytype, u32 keysubtype, 418 417 u32 _keybitsize, u32 _flags, 419 - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) 418 + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, 419 + u32 _xflags __always_unused) 420 420 { 421 421 return pckmo_gen_protkey(keytype, keysubtype, 422 422 keybuf, keybuflen, keyinfo); ··· 425 423 426 424 static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, 427 425 u16 *_card, u16 *_dom, 428 - u32 *_keytype, u32 *_keybitsize, u32 *_flags) 426 + u32 *_keytype, u32 *_keybitsize, 427 + u32 *_flags, u32 _xflags __always_unused) 429 428 { 430 429 return pckmo_verify_key(key, keylen); 431 430 }
+2 -2
drivers/s390/crypto/pkey_sysfs.c
··· 29 29 rc = pkey_handler_gen_key(NULL, 0, 30 30 keytype, keysubtype, 31 31 keybitsize, flags, 32 - keybuf, keybuflen, keyinfo); 32 + keybuf, keybuflen, keyinfo, 0); 33 33 if (rc == -ENODEV) { 34 34 pkey_handler_request_modules(); 35 35 rc = pkey_handler_gen_key(NULL, 0, 36 36 keytype, keysubtype, 37 37 keybitsize, flags, 38 - keybuf, keybuflen, keyinfo); 38 + keybuf, keybuflen, keyinfo, 0); 39 39 } 40 40 41 41 return rc;
+39 -5
drivers/s390/crypto/pkey_uv.c
··· 21 21 MODULE_DESCRIPTION("s390 protected key UV handler"); 22 22 23 23 /* 24 + * One pre-allocated uv_secret_list for use with uv_find_secret() 25 + */ 26 + static struct uv_secret_list *uv_list; 27 + static DEFINE_MUTEX(uv_list_mutex); 28 + 29 + /* 24 30 * UV secret token struct and defines. 25 31 */ 26 32 ··· 91 85 } 92 86 } 93 87 88 + static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], 89 + struct uv_secret_list_item_hdr *secret) 90 + { 91 + int rc; 92 + 93 + mutex_lock(&uv_list_mutex); 94 + memset(uv_list, 0, sizeof(*uv_list)); 95 + rc = uv_find_secret(secret_id, uv_list, secret); 96 + mutex_unlock(&uv_list_mutex); 97 + 98 + return rc; 99 + } 100 + 94 101 static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN], 95 102 u16 *secret_type, u8 *buf, u32 *buflen) 96 103 { 97 104 struct uv_secret_list_item_hdr secret_meta_data; 98 105 int rc; 99 106 100 - rc = uv_get_secret_metadata(secret_id, &secret_meta_data); 107 + rc = get_secret_metadata(secret_id, &secret_meta_data); 101 108 if (rc) 102 109 return rc; 103 110 ··· 191 172 static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused, 192 173 size_t _nr_apqns __always_unused, 193 174 const u8 *key, u32 keylen, 194 - u8 *protkey, u32 *protkeylen, u32 *keyinfo) 175 + u8 *protkey, u32 *protkeylen, u32 *keyinfo, 176 + u32 _xflags __always_unused) 195 177 { 196 178 struct uvsecrettoken *t = (struct uvsecrettoken *)key; 197 179 u32 pkeysize, pkeytype; ··· 234 214 static int uv_verifykey(const u8 *key, u32 keylen, 235 215 u16 *_card __always_unused, 236 216 u16 *_dom __always_unused, 237 - u32 *keytype, u32 *keybitsize, u32 *flags) 217 + u32 *keytype, u32 *keybitsize, u32 *flags, 218 + u32 xflags __always_unused) 238 219 { 239 220 struct uvsecrettoken *t = (struct uvsecrettoken *)key; 240 221 struct uv_secret_list_item_hdr secret_meta_data; ··· 246 225 if (rc) 247 226 goto out; 248 227 249 - rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data); 228 + rc = 
get_secret_metadata(t->secret_id, &secret_meta_data); 250 229 if (rc) 251 230 goto out; 252 231 ··· 284 263 */ 285 264 static int __init pkey_uv_init(void) 286 265 { 266 + int rc; 267 + 287 268 if (!is_prot_virt_guest()) 288 269 return -ENODEV; 289 270 290 271 if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list)) 291 272 return -ENODEV; 292 273 293 - return pkey_handler_register(&uv_handler); 274 + uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL); 275 + if (!uv_list) 276 + return -ENOMEM; 277 + 278 + rc = pkey_handler_register(&uv_handler); 279 + if (rc) 280 + kfree(uv_list); 281 + 282 + return rc; 294 283 } 295 284 296 285 /* ··· 309 278 static void __exit pkey_uv_exit(void) 310 279 { 311 280 pkey_handler_unregister(&uv_handler); 281 + mutex_lock(&uv_list_mutex); 282 + kvfree(uv_list); 283 + mutex_unlock(&uv_list_mutex); 312 284 } 313 285 314 286 module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init);
+105 -62
drivers/s390/crypto/zcrypt_api.c
··· 50 50 "Copyright IBM Corp. 2001, 2012"); 51 51 MODULE_LICENSE("GPL"); 52 52 53 + unsigned int zcrypt_mempool_threshold = 5; 54 + module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440); 55 + MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)"); 56 + 53 57 /* 54 58 * zcrypt tracepoint functions 55 59 */ ··· 646 642 struct zcrypt_queue *zq, *pref_zq; 647 643 struct ap_message ap_msg; 648 644 unsigned int wgt = 0, pref_wgt = 0; 649 - unsigned int func_code; 650 - int cpen, qpen, qid = 0, rc = -ENODEV; 645 + unsigned int func_code = 0; 646 + int cpen, qpen, qid = 0, rc; 651 647 struct module *mod; 652 648 653 649 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 654 650 655 - ap_init_message(&ap_msg); 651 + rc = ap_init_apmsg(&ap_msg, 0); 652 + if (rc) 653 + goto out; 656 654 657 655 if (mex->outputdatalength < mex->inputdatalength) { 658 - func_code = 0; 659 656 rc = -EINVAL; 660 657 goto out; 661 658 } ··· 733 728 spin_unlock(&zcrypt_list_lock); 734 729 735 730 out: 736 - ap_release_message(&ap_msg); 731 + ap_release_apmsg(&ap_msg); 737 732 if (tr) { 738 733 tr->last_rc = rc; 739 734 tr->last_qid = qid; ··· 751 746 struct zcrypt_queue *zq, *pref_zq; 752 747 struct ap_message ap_msg; 753 748 unsigned int wgt = 0, pref_wgt = 0; 754 - unsigned int func_code; 755 - int cpen, qpen, qid = 0, rc = -ENODEV; 749 + unsigned int func_code = 0; 750 + int cpen, qpen, qid = 0, rc; 756 751 struct module *mod; 757 752 758 753 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 759 754 760 - ap_init_message(&ap_msg); 755 + rc = ap_init_apmsg(&ap_msg, 0); 756 + if (rc) 757 + goto out; 761 758 762 759 if (crt->outputdatalength < crt->inputdatalength) { 763 - func_code = 0; 764 760 rc = -EINVAL; 765 761 goto out; 766 762 } ··· 838 832 spin_unlock(&zcrypt_list_lock); 839 833 840 834 out: 841 - ap_release_message(&ap_msg); 835 + ap_release_apmsg(&ap_msg); 842 836 if (tr) { 843 837 tr->last_rc = rc; 844 838 tr->last_qid = qid; ··· 848 
842 return rc; 849 843 } 850 844 851 - static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 845 + static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms, 852 846 struct zcrypt_track *tr, 853 847 struct ica_xcRB *xcrb) 854 848 { 849 + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; 855 850 struct zcrypt_card *zc, *pref_zc; 856 851 struct zcrypt_queue *zq, *pref_zq; 857 852 struct ap_message ap_msg; 858 853 unsigned int wgt = 0, pref_wgt = 0; 859 - unsigned int func_code; 854 + unsigned int func_code = 0; 860 855 unsigned short *domain, tdom; 861 - int cpen, qpen, qid = 0, rc = -ENODEV; 856 + int cpen, qpen, qid = 0, rc; 862 857 struct module *mod; 863 858 864 859 trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); 865 860 866 861 xcrb->status = 0; 867 - ap_init_message(&ap_msg); 862 + 863 + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 864 + AP_MSG_FLAG_MEMPOOL : 0); 865 + if (rc) 866 + goto out; 868 867 869 868 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 870 869 if (rc) ··· 973 962 spin_unlock(&zcrypt_list_lock); 974 963 975 964 out: 976 - ap_release_message(&ap_msg); 965 + ap_release_apmsg(&ap_msg); 977 966 if (tr) { 978 967 tr->last_rc = rc; 979 968 tr->last_qid = qid; ··· 983 972 return rc; 984 973 } 985 974 986 - long zcrypt_send_cprb(struct ica_xcRB *xcrb) 975 + long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags) 987 976 { 988 977 struct zcrypt_track tr; 989 978 int rc; ··· 991 980 memset(&tr, 0, sizeof(tr)); 992 981 993 982 do { 994 - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); 983 + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); 995 984 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 996 985 997 986 /* on ENODEV failure: retry once again after a requested rescan */ 998 987 if (rc == -ENODEV && zcrypt_process_rescan()) 999 988 do { 1000 - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); 989 + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); 1001 990 
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1002 991 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1003 992 rc = -EIO; ··· 1035 1024 return false; 1036 1025 } 1037 1026 1038 - static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, 1027 + static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms, 1039 1028 struct zcrypt_track *tr, 1040 1029 struct ep11_urb *xcrb) 1041 1030 { 1031 + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; 1042 1032 struct zcrypt_card *zc, *pref_zc; 1043 1033 struct zcrypt_queue *zq, *pref_zq; 1044 - struct ep11_target_dev *targets; 1034 + struct ep11_target_dev *targets = NULL; 1045 1035 unsigned short target_num; 1046 1036 unsigned int wgt = 0, pref_wgt = 0; 1047 - unsigned int func_code, domain; 1037 + unsigned int func_code = 0, domain; 1048 1038 struct ap_message ap_msg; 1049 - int cpen, qpen, qid = 0, rc = -ENODEV; 1039 + int cpen, qpen, qid = 0, rc; 1050 1040 struct module *mod; 1051 1041 1052 1042 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 1053 1043 1054 - ap_init_message(&ap_msg); 1044 + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
1045 + AP_MSG_FLAG_MEMPOOL : 0); 1046 + if (rc) 1047 + goto out; 1055 1048 1056 1049 target_num = (unsigned short)xcrb->targets_num; 1057 1050 1058 1051 /* empty list indicates autoselect (all available targets) */ 1059 - targets = NULL; 1052 + rc = -ENOMEM; 1060 1053 if (target_num != 0) { 1061 - struct ep11_target_dev __user *uptr; 1062 - 1063 - targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 1064 - if (!targets) { 1065 - func_code = 0; 1066 - rc = -ENOMEM; 1067 - goto out; 1068 - } 1069 - 1070 - uptr = (struct ep11_target_dev __force __user *)xcrb->targets; 1071 - if (z_copy_from_user(userspace, targets, uptr, 1072 - target_num * sizeof(*targets))) { 1073 - func_code = 0; 1074 - rc = -EFAULT; 1075 - goto out_free; 1054 + if (userspace) { 1055 + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 1056 + if (!targets) 1057 + goto out; 1058 + if (copy_from_user(targets, xcrb->targets, 1059 + target_num * sizeof(*targets))) { 1060 + rc = -EFAULT; 1061 + goto out; 1062 + } 1063 + } else { 1064 + targets = (struct ep11_target_dev __force __kernel *)xcrb->targets; 1076 1065 } 1077 1066 } 1078 1067 1079 1068 rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 1080 1069 if (rc) 1081 - goto out_free; 1070 + goto out; 1082 1071 print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, 1083 1072 ap_msg.msg, ap_msg.len, false); 1084 1073 ··· 1086 1075 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { 1087 1076 if (!test_bit_inv(domain, perms->adm)) { 1088 1077 rc = -ENODEV; 1089 - goto out_free; 1078 + goto out; 1090 1079 } 1091 1080 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { 1092 1081 rc = -EOPNOTSUPP; 1093 - goto out_free; 1082 + goto out; 1094 1083 } 1095 1084 } 1096 1085 ··· 1158 1147 pr_debug("no match for address ff.ffff => ENODEV\n"); 1159 1148 } 1160 1149 rc = -ENODEV; 1161 - goto out_free; 1150 + goto out; 1162 1151 } 1163 1152 1164 1153 qid = pref_zq->queue->qid; ··· 1172 1161 zcrypt_drop_queue(pref_zc, pref_zq, mod, 
wgt); 1173 1162 spin_unlock(&zcrypt_list_lock); 1174 1163 1175 - out_free: 1176 - kfree(targets); 1177 1164 out: 1178 - ap_release_message(&ap_msg); 1165 + if (userspace) 1166 + kfree(targets); 1167 + ap_release_apmsg(&ap_msg); 1179 1168 if (tr) { 1180 1169 tr->last_rc = rc; 1181 1170 tr->last_qid = qid; ··· 1185 1174 return rc; 1186 1175 } 1187 1176 1188 - long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 1177 + long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags) 1189 1178 { 1190 1179 struct zcrypt_track tr; 1191 1180 int rc; ··· 1193 1182 memset(&tr, 0, sizeof(tr)); 1194 1183 1195 1184 do { 1196 - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); 1185 + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); 1197 1186 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1198 1187 1199 1188 /* on ENODEV failure: retry once again after a requested rescan */ 1200 1189 if (rc == -ENODEV && zcrypt_process_rescan()) 1201 1190 do { 1202 - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); 1191 + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); 1203 1192 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1204 1193 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1205 1194 rc = -EIO; ··· 1215 1204 struct zcrypt_card *zc, *pref_zc; 1216 1205 struct zcrypt_queue *zq, *pref_zq; 1217 1206 unsigned int wgt = 0, pref_wgt = 0; 1218 - unsigned int func_code; 1207 + unsigned int func_code = 0; 1219 1208 struct ap_message ap_msg; 1220 1209 unsigned int domain; 1221 1210 int qid = 0, rc = -ENODEV; ··· 1223 1212 1224 1213 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1225 1214 1226 - ap_init_message(&ap_msg); 1215 + rc = ap_init_apmsg(&ap_msg, 0); 1216 + if (rc) 1217 + goto out; 1227 1218 rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); 1228 1219 if (rc) 1229 1220 goto out; ··· 1271 1258 spin_unlock(&zcrypt_list_lock); 1272 1259 1273 1260 out: 1274 - ap_release_message(&ap_msg); 1261 + 
ap_release_apmsg(&ap_msg); 1275 1262 trace_s390_zcrypt_rep(buffer, func_code, rc, 1276 1263 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1277 1264 return rc; ··· 1304 1291 spin_unlock(&zcrypt_list_lock); 1305 1292 } 1306 1293 1307 - void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) 1294 + void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, 1295 + int maxcard, int maxqueue) 1308 1296 { 1309 1297 struct zcrypt_card *zc; 1310 1298 struct zcrypt_queue *zq; 1311 1299 struct zcrypt_device_status_ext *stat; 1312 1300 int card, queue; 1313 1301 1302 + maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT); 1303 + maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT); 1304 + 1314 1305 spin_lock(&zcrypt_list_lock); 1315 1306 for_each_zcrypt_card(zc) { 1316 1307 for_each_zcrypt_queue(zq, zc) { 1317 1308 card = AP_QID_CARD(zq->queue->qid); 1318 1309 queue = AP_QID_QUEUE(zq->queue->qid); 1319 - stat = &devstatus[card * AP_DOMAINS + queue]; 1310 + if (card >= maxcard || queue >= maxqueue) 1311 + continue; 1312 + stat = &devstatus[card * maxqueue + queue]; 1320 1313 stat->hwtype = zc->card->ap_dev.device_type; 1321 1314 stat->functions = zc->card->hwinfo.fac >> 26; 1322 1315 stat->qid = zq->queue->qid; ··· 1542 1523 int rc; 1543 1524 struct ica_xcRB xcrb; 1544 1525 struct zcrypt_track tr; 1526 + u32 xflags = ZCRYPT_XFLAG_USERSPACE; 1545 1527 struct ica_xcRB __user *uxcrb = (void __user *)arg; 1546 1528 1547 1529 memset(&tr, 0, sizeof(tr)); ··· 1550 1530 return -EFAULT; 1551 1531 1552 1532 do { 1553 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1533 + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); 1554 1534 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1555 1535 1556 1536 /* on ENODEV failure: retry once again after a requested rescan */ 1557 1537 if (rc == -ENODEV && zcrypt_process_rescan()) 1558 1538 do { 1559 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1539 + rc = _zcrypt_send_cprb(xflags, perms, 
&tr, &xcrb); 1560 1540 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1561 1541 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1562 1542 rc = -EIO; ··· 1573 1553 int rc; 1574 1554 struct ep11_urb xcrb; 1575 1555 struct zcrypt_track tr; 1556 + u32 xflags = ZCRYPT_XFLAG_USERSPACE; 1576 1557 struct ep11_urb __user *uxcrb = (void __user *)arg; 1577 1558 1578 1559 memset(&tr, 0, sizeof(tr)); ··· 1581 1560 return -EFAULT; 1582 1561 1583 1562 do { 1584 - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1563 + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); 1585 1564 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1586 1565 1587 1566 /* on ENODEV failure: retry once again after a requested rescan */ 1588 1567 if (rc == -ENODEV && zcrypt_process_rescan()) 1589 1568 do { 1590 - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1569 + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); 1591 1570 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1592 1571 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1593 1572 rc = -EIO; ··· 1628 1607 GFP_KERNEL); 1629 1608 if (!device_status) 1630 1609 return -ENOMEM; 1631 - zcrypt_device_status_mask_ext(device_status); 1610 + zcrypt_device_status_mask_ext(device_status, 1611 + MAX_ZDEV_CARDIDS_EXT, 1612 + MAX_ZDEV_DOMAINS_EXT); 1632 1613 if (copy_to_user((char __user *)arg, device_status, 1633 1614 total_size)) 1634 1615 rc = -EFAULT; ··· 1850 1827 unsigned int cmd, unsigned long arg) 1851 1828 { 1852 1829 struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); 1830 + u32 xflags = ZCRYPT_XFLAG_USERSPACE; 1853 1831 struct compat_ica_xcrb xcrb32; 1854 1832 struct zcrypt_track tr; 1855 1833 struct ica_xcRB xcrb64; ··· 1880 1856 xcrb64.priority_window = xcrb32.priority_window; 1881 1857 xcrb64.status = xcrb32.status; 1882 1858 do { 1883 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1859 + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); 
1884 1860 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1885 1861 1886 1862 /* on ENODEV failure: retry once again after a requested rescan */ 1887 1863 if (rc == -ENODEV && zcrypt_process_rescan()) 1888 1864 do { 1889 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1865 + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); 1890 1866 } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1891 1867 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1892 1868 rc = -EIO; ··· 2156 2132 { 2157 2133 int rc; 2158 2134 2135 + /* make sure the mempool threshold is >= 1 */ 2136 + if (zcrypt_mempool_threshold < 1) { 2137 + rc = -EINVAL; 2138 + goto out; 2139 + } 2140 + 2159 2141 rc = zcrypt_debug_init(); 2160 2142 if (rc) 2161 2143 goto out; 2162 2144 2163 2145 rc = zcdn_init(); 2164 2146 if (rc) 2165 - goto out; 2147 + goto out_zcdn_init_failed; 2148 + 2149 + rc = zcrypt_ccamisc_init(); 2150 + if (rc) 2151 + goto out_ccamisc_init_failed; 2152 + 2153 + rc = zcrypt_ep11misc_init(); 2154 + if (rc) 2155 + goto out_ep11misc_init_failed; 2166 2156 2167 2157 /* Register the request sprayer. */ 2168 2158 rc = misc_register(&zcrypt_misc_device); ··· 2189 2151 return 0; 2190 2152 2191 2153 out_misc_register_failed: 2154 + zcrypt_ep11misc_exit(); 2155 + out_ep11misc_init_failed: 2156 + zcrypt_ccamisc_exit(); 2157 + out_ccamisc_init_failed: 2192 2158 zcdn_exit(); 2159 + out_zcdn_init_failed: 2193 2160 zcrypt_debug_exit(); 2194 2161 out: 2195 2162 return rc;
+13 -3
drivers/s390/crypto/zcrypt_api.h
··· 76 76 #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 77 77 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 78 78 79 + /* 80 + * xflags - to be used with zcrypt_send_cprb() and 81 + * zcrypt_send_ep11_cprb() for the xflags parameter. 82 + */ 83 + #define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */ 84 + #define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */ 85 + 79 86 struct zcrypt_ops { 80 87 long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, 81 88 struct ap_message *); ··· 139 132 extern spinlock_t zcrypt_list_lock; 140 133 extern struct list_head zcrypt_card_list; 141 134 135 + extern unsigned int zcrypt_mempool_threshold; 136 + 142 137 #define for_each_zcrypt_card(_zc) \ 143 138 list_for_each_entry(_zc, &zcrypt_card_list, list) 144 139 ··· 170 161 struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); 171 162 int zcrypt_api_init(void); 172 163 void zcrypt_api_exit(void); 173 - long zcrypt_send_cprb(struct ica_xcRB *xcRB); 174 - long zcrypt_send_ep11_cprb(struct ep11_urb *urb); 175 - void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); 164 + long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags); 165 + long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags); 166 + void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, 167 + int maxcard, int maxqueue); 176 168 int zcrypt_device_status_ext(int card, int queue, 177 169 struct zcrypt_device_status_ext *devstatus); 178 170
+167 -319
drivers/s390/crypto/zcrypt_ccamisc.c
··· 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 13 #include <linux/init.h> 14 + #include <linux/mempool.h> 14 15 #include <linux/module.h> 15 16 #include <linux/slab.h> 16 17 #include <linux/random.h> ··· 30 29 /* Size of vardata block used for some of the cca requests/replies */ 31 30 #define VARDATASIZE 4096 32 31 33 - struct cca_info_list_entry { 34 - struct list_head list; 35 - u16 cardnr; 36 - u16 domain; 37 - struct cca_info info; 38 - }; 32 + /* 33 + * Cprb memory pool held for urgent cases where no memory 34 + * can be allocated via kmalloc. This pool is only used 35 + * when alloc_and_prep_cprbmem() is called with the xflag 36 + * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold 37 + * space for request AND reply! 38 + */ 39 + #define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024) 40 + static mempool_t *cprb_mempool; 39 41 40 - /* a list with cca_info_list_entry entries */ 41 - static LIST_HEAD(cca_info_list); 42 - static DEFINE_SPINLOCK(cca_info_list_lock); 42 + /* 43 + * This is a pre-allocated memory for the device status array 44 + * used within the findcard() functions. It is currently 45 + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is 46 + * controlled via dev_status_mem_mutex. Needs adaption if more 47 + * than 128 cards or domains to be are supported. 
48 + */ 49 + #define ZCRYPT_DEV_STATUS_CARD_MAX 128 50 + #define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 51 + #define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ 52 + ZCRYPT_DEV_STATUS_QUEUE_MAX) 53 + #define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ 54 + sizeof(struct zcrypt_device_status_ext)) 55 + static void *dev_status_mem; 56 + static DEFINE_MUTEX(dev_status_mem_mutex); 43 57 44 58 /* 45 59 * Simple check if the token is a valid CCA secure AES data key ··· 235 219 static int alloc_and_prep_cprbmem(size_t paramblen, 236 220 u8 **p_cprb_mem, 237 221 struct CPRBX **p_req_cprb, 238 - struct CPRBX **p_rep_cprb) 222 + struct CPRBX **p_rep_cprb, 223 + u32 xflags) 239 224 { 240 - u8 *cprbmem; 225 + u8 *cprbmem = NULL; 241 226 size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; 227 + size_t len = 2 * cprbplusparamblen; 242 228 struct CPRBX *preqcblk, *prepcblk; 243 229 244 230 /* 245 231 * allocate consecutive memory for request CPRB, request param 246 232 * block, reply CPRB and reply param block 247 233 */ 248 - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); 234 + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { 235 + if (len <= CPRB_MEMPOOL_ITEM_SIZE) 236 + cprbmem = mempool_alloc_preallocated(cprb_mempool); 237 + } else { 238 + cprbmem = kmalloc(len, GFP_KERNEL); 239 + } 249 240 if (!cprbmem) 250 241 return -ENOMEM; 242 + memset(cprbmem, 0, len); 251 243 252 244 preqcblk = (struct CPRBX *)cprbmem; 253 245 prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); ··· 285 261 * with zeros before freeing (useful if there was some 286 262 * clear key material in there). 
287 263 */ 288 - static void free_cprbmem(void *mem, size_t paramblen, int scrub) 264 + static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags) 289 265 { 290 - if (scrub) 266 + if (mem && scrub) 291 267 memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); 292 - kfree(mem); 268 + 269 + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) 270 + mempool_free(mem, cprb_mempool); 271 + else 272 + kfree(mem); 293 273 } 294 274 295 275 /* ··· 318 290 * Generate (random) CCA AES DATA secure key. 319 291 */ 320 292 int cca_genseckey(u16 cardnr, u16 domain, 321 - u32 keybitsize, u8 *seckey) 293 + u32 keybitsize, u8 *seckey, u32 xflags) 322 294 { 323 295 int i, rc, keysize; 324 296 int seckeysize; ··· 360 332 } __packed * prepparm; 361 333 362 334 /* get already prepared memory for 2 cprbs with param block each */ 363 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 335 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 336 + &preqcblk, &prepcblk, xflags); 364 337 if (rc) 365 338 return rc; 366 339 ··· 408 379 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 409 380 410 381 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 411 - rc = zcrypt_send_cprb(&xcrb); 382 + rc = zcrypt_send_cprb(&xcrb, xflags); 412 383 if (rc) { 413 384 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", 414 385 __func__, (int)cardnr, (int)domain, rc); ··· 453 424 memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); 454 425 455 426 out: 456 - free_cprbmem(mem, PARMBSIZE, 0); 427 + free_cprbmem(mem, PARMBSIZE, false, xflags); 457 428 return rc; 458 429 } 459 430 EXPORT_SYMBOL(cca_genseckey); ··· 462 433 * Generate an CCA AES DATA secure key with given key value. 
463 434 */ 464 435 int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, 465 - const u8 *clrkey, u8 *seckey) 436 + const u8 *clrkey, u8 *seckey, u32 xflags) 466 437 { 467 438 int rc, keysize, seckeysize; 468 439 u8 *mem, *ptr; ··· 502 473 } __packed * prepparm; 503 474 504 475 /* get already prepared memory for 2 cprbs with param block each */ 505 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 476 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 477 + &preqcblk, &prepcblk, xflags); 506 478 if (rc) 507 479 return rc; 508 480 ··· 547 517 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 548 518 549 519 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 550 - rc = zcrypt_send_cprb(&xcrb); 520 + rc = zcrypt_send_cprb(&xcrb, xflags); 551 521 if (rc) { 552 522 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 553 523 __func__, (int)cardnr, (int)domain, rc); ··· 593 563 memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); 594 564 595 565 out: 596 - free_cprbmem(mem, PARMBSIZE, 1); 566 + free_cprbmem(mem, PARMBSIZE, true, xflags); 597 567 return rc; 598 568 } 599 569 EXPORT_SYMBOL(cca_clr2seckey); ··· 603 573 */ 604 574 int cca_sec2protkey(u16 cardnr, u16 domain, 605 575 const u8 *seckey, u8 *protkey, u32 *protkeylen, 606 - u32 *protkeytype) 576 + u32 *protkeytype, u32 xflags) 607 577 { 608 578 int rc; 609 579 u8 *mem, *ptr; ··· 649 619 } __packed * prepparm; 650 620 651 621 /* get already prepared memory for 2 cprbs with param block each */ 652 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 622 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 623 + &preqcblk, &prepcblk, xflags); 653 624 if (rc) 654 625 return rc; 655 626 ··· 675 644 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 676 645 677 646 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 678 - rc = zcrypt_send_cprb(&xcrb); 647 + rc = zcrypt_send_cprb(&xcrb, xflags); 679 648 if (rc) { 680 649 ZCRYPT_DBF_ERR("%s 
zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 681 650 __func__, (int)cardnr, (int)domain, rc); ··· 743 712 *protkeylen = prepparm->lv3.ckb.len; 744 713 745 714 out: 746 - free_cprbmem(mem, PARMBSIZE, 0); 715 + free_cprbmem(mem, PARMBSIZE, true, xflags); 747 716 return rc; 748 717 } 749 718 EXPORT_SYMBOL(cca_sec2protkey); ··· 768 737 * Generate (random) CCA AES CIPHER secure key. 769 738 */ 770 739 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 771 - u8 *keybuf, u32 *keybufsize) 740 + u8 *keybuf, u32 *keybufsize, u32 xflags) 772 741 { 773 742 int rc; 774 743 u8 *mem, *ptr; ··· 844 813 struct cipherkeytoken *t; 845 814 846 815 /* get already prepared memory for 2 cprbs with param block each */ 847 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 816 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 817 + &preqcblk, &prepcblk, xflags); 848 818 if (rc) 849 819 return rc; 850 820 ··· 904 872 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 905 873 906 874 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 907 - rc = zcrypt_send_cprb(&xcrb); 875 + rc = zcrypt_send_cprb(&xcrb, xflags); 908 876 if (rc) { 909 877 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 910 878 __func__, (int)cardnr, (int)domain, rc); ··· 955 923 *keybufsize = t->len; 956 924 957 925 out: 958 - free_cprbmem(mem, PARMBSIZE, 0); 926 + free_cprbmem(mem, PARMBSIZE, false, xflags); 959 927 return rc; 960 928 } 961 929 EXPORT_SYMBOL(cca_gencipherkey); ··· 970 938 const u8 *clr_key_value, 971 939 int clr_key_bit_size, 972 940 u8 *key_token, 973 - int *key_token_size) 941 + int *key_token_size, 942 + u32 xflags) 974 943 { 975 944 int rc, n; 976 945 u8 *mem, *ptr; ··· 1022 989 int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 
0 : 1; 1023 990 1024 991 /* get already prepared memory for 2 cprbs with param block each */ 1025 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 992 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 993 + &preqcblk, &prepcblk, xflags); 1026 994 if (rc) 1027 995 return rc; 1028 996 ··· 1072 1038 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 1073 1039 1074 1040 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 1075 - rc = zcrypt_send_cprb(&xcrb); 1041 + rc = zcrypt_send_cprb(&xcrb, xflags); 1076 1042 if (rc) { 1077 1043 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1078 1044 __func__, (int)cardnr, (int)domain, rc); ··· 1111 1077 *key_token_size = t->len; 1112 1078 1113 1079 out: 1114 - free_cprbmem(mem, PARMBSIZE, 0); 1080 + free_cprbmem(mem, PARMBSIZE, false, xflags); 1115 1081 return rc; 1116 1082 } 1117 1083 ··· 1119 1085 * Build CCA AES CIPHER secure key with a given clear key value. 1120 1086 */ 1121 1087 int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, 1122 - const u8 *clrkey, u8 *keybuf, u32 *keybufsize) 1088 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags) 1123 1089 { 1124 1090 int rc; 1125 - u8 *token; 1091 + void *mem; 1126 1092 int tokensize; 1127 - u8 exorbuf[32]; 1093 + u8 *token, exorbuf[32]; 1128 1094 struct cipherkeytoken *t; 1129 1095 1130 1096 /* fill exorbuf with random data */ 1131 1097 get_random_bytes(exorbuf, sizeof(exorbuf)); 1132 1098 1133 - /* allocate space for the key token to build */ 1134 - token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); 1135 - if (!token) 1099 + /* 1100 + * Allocate space for the key token to build. 1101 + * Also we only need up to MAXCCAVLSCTOKENSIZE bytes for this 1102 + * we use the already existing cprb mempool to solve this 1103 + * short term memory requirement. 1104 + */ 1105 + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
1106 + mempool_alloc_preallocated(cprb_mempool) : 1107 + mempool_alloc(cprb_mempool, GFP_KERNEL); 1108 + if (!mem) 1136 1109 return -ENOMEM; 1137 1110 1138 1111 /* prepare the token with the key skeleton */ 1112 + token = (u8 *)mem; 1139 1113 tokensize = SIZEOF_SKELETON; 1140 1114 memcpy(token, aes_cipher_key_skeleton, tokensize); 1141 1115 ··· 1162 1120 * 4/4 COMPLETE the secure cipher key import 1163 1121 */ 1164 1122 rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", 1165 - exorbuf, keybitsize, token, &tokensize); 1123 + exorbuf, keybitsize, token, &tokensize, xflags); 1166 1124 if (rc) { 1167 1125 ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", 1168 1126 __func__, rc); 1169 1127 goto out; 1170 1128 } 1171 1129 rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, 1172 - clrkey, keybitsize, token, &tokensize); 1130 + clrkey, keybitsize, token, &tokensize, xflags); 1173 1131 if (rc) { 1174 1132 ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", 1175 1133 __func__, rc); 1176 1134 goto out; 1177 1135 } 1178 1136 rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, 1179 - exorbuf, keybitsize, token, &tokensize); 1137 + exorbuf, keybitsize, token, &tokensize, xflags); 1180 1138 if (rc) { 1181 1139 ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", 1182 1140 __func__, rc); 1183 1141 goto out; 1184 1142 } 1185 1143 rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, 1186 - NULL, keybitsize, token, &tokensize); 1144 + NULL, keybitsize, token, &tokensize, xflags); 1187 1145 if (rc) { 1188 1146 ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", 1189 1147 __func__, rc); ··· 1200 1158 *keybufsize = tokensize; 1201 1159 1202 1160 out: 1203 - kfree(token); 1161 + mempool_free(mem, cprb_mempool); 1204 1162 return rc; 1205 1163 } 1206 1164 EXPORT_SYMBOL(cca_clr2cipherkey); ··· 1209 1167 * Derive proteced key from CCA AES cipher secure key. 
1210 1168 */ 1211 1169 int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, 1212 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1170 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 1171 + u32 xflags) 1213 1172 { 1214 1173 int rc; 1215 1174 u8 *mem, *ptr; ··· 1262 1219 int keytoklen = ((struct cipherkeytoken *)ckey)->len; 1263 1220 1264 1221 /* get already prepared memory for 2 cprbs with param block each */ 1265 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 1222 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 1223 + &preqcblk, &prepcblk, xflags); 1266 1224 if (rc) 1267 1225 return rc; 1268 1226 ··· 1293 1249 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 1294 1250 1295 1251 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 1296 - rc = zcrypt_send_cprb(&xcrb); 1252 + rc = zcrypt_send_cprb(&xcrb, xflags); 1297 1253 if (rc) { 1298 1254 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1299 1255 __func__, (int)cardnr, (int)domain, rc); ··· 1367 1323 *protkeylen = prepparm->vud.ckb.keylen; 1368 1324 1369 1325 out: 1370 - free_cprbmem(mem, PARMBSIZE, 0); 1326 + free_cprbmem(mem, PARMBSIZE, true, xflags); 1371 1327 return rc; 1372 1328 } 1373 1329 EXPORT_SYMBOL(cca_cipher2protkey); ··· 1376 1332 * Derive protected key from CCA ECC secure private key. 
1377 1333 */ 1378 1334 int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, 1379 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1335 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) 1380 1336 { 1381 1337 int rc; 1382 1338 u8 *mem, *ptr; ··· 1426 1382 int keylen = ((struct eccprivkeytoken *)key)->len; 1427 1383 1428 1384 /* get already prepared memory for 2 cprbs with param block each */ 1429 - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); 1385 + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, 1386 + &preqcblk, &prepcblk, xflags); 1430 1387 if (rc) 1431 1388 return rc; 1432 1389 ··· 1457 1412 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 1458 1413 1459 1414 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ 1460 - rc = zcrypt_send_cprb(&xcrb); 1415 + rc = zcrypt_send_cprb(&xcrb, xflags); 1461 1416 if (rc) { 1462 1417 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1463 1418 __func__, (int)cardnr, (int)domain, rc); ··· 1515 1470 *protkeytype = PKEY_KEYTYPE_ECC; 1516 1471 1517 1472 out: 1518 - free_cprbmem(mem, PARMBSIZE, 0); 1473 + free_cprbmem(mem, PARMBSIZE, true, xflags); 1519 1474 return rc; 1520 1475 } 1521 1476 EXPORT_SYMBOL(cca_ecc2protkey); ··· 1526 1481 int cca_query_crypto_facility(u16 cardnr, u16 domain, 1527 1482 const char *keyword, 1528 1483 u8 *rarray, size_t *rarraylen, 1529 - u8 *varray, size_t *varraylen) 1484 + u8 *varray, size_t *varraylen, 1485 + u32 xflags) 1530 1486 { 1531 1487 int rc; 1532 1488 u16 len; ··· 1551 1505 } __packed * prepparm; 1552 1506 1553 1507 /* get already prepared memory for 2 cprbs with param block each */ 1554 - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); 1508 + rc = alloc_and_prep_cprbmem(parmbsize, &mem, 1509 + &preqcblk, &prepcblk, xflags); 1555 1510 if (rc) 1556 1511 return rc; 1557 1512 ··· 1573 1526 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); 1574 1527 1575 1528 /* forward xcrb with request CPRB and reply CPRB 
to zcrypt dd */ 1576 - rc = zcrypt_send_cprb(&xcrb); 1529 + rc = zcrypt_send_cprb(&xcrb, xflags); 1577 1530 if (rc) { 1578 1531 ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1579 1532 __func__, (int)cardnr, (int)domain, rc); ··· 1620 1573 } 1621 1574 1622 1575 out: 1623 - free_cprbmem(mem, parmbsize, 0); 1576 + free_cprbmem(mem, parmbsize, false, xflags); 1624 1577 return rc; 1625 1578 } 1626 1579 EXPORT_SYMBOL(cca_query_crypto_facility); 1627 1580 1628 - static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) 1629 - { 1630 - int rc = -ENOENT; 1631 - struct cca_info_list_entry *ptr; 1632 - 1633 - spin_lock_bh(&cca_info_list_lock); 1634 - list_for_each_entry(ptr, &cca_info_list, list) { 1635 - if (ptr->cardnr == cardnr && ptr->domain == domain) { 1636 - memcpy(ci, &ptr->info, sizeof(*ci)); 1637 - rc = 0; 1638 - break; 1639 - } 1640 - } 1641 - spin_unlock_bh(&cca_info_list_lock); 1642 - 1643 - return rc; 1644 - } 1645 - 1646 - static void cca_info_cache_update(u16 cardnr, u16 domain, 1647 - const struct cca_info *ci) 1648 - { 1649 - int found = 0; 1650 - struct cca_info_list_entry *ptr; 1651 - 1652 - spin_lock_bh(&cca_info_list_lock); 1653 - list_for_each_entry(ptr, &cca_info_list, list) { 1654 - if (ptr->cardnr == cardnr && 1655 - ptr->domain == domain) { 1656 - memcpy(&ptr->info, ci, sizeof(*ci)); 1657 - found = 1; 1658 - break; 1659 - } 1660 - } 1661 - if (!found) { 1662 - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); 1663 - if (!ptr) { 1664 - spin_unlock_bh(&cca_info_list_lock); 1665 - return; 1666 - } 1667 - ptr->cardnr = cardnr; 1668 - ptr->domain = domain; 1669 - memcpy(&ptr->info, ci, sizeof(*ci)); 1670 - list_add(&ptr->list, &cca_info_list); 1671 - } 1672 - spin_unlock_bh(&cca_info_list_lock); 1673 - } 1674 - 1675 - static void cca_info_cache_scrub(u16 cardnr, u16 domain) 1676 - { 1677 - struct cca_info_list_entry *ptr; 1678 - 1679 - spin_lock_bh(&cca_info_list_lock); 1680 - list_for_each_entry(ptr, 
&cca_info_list, list) { 1681 - if (ptr->cardnr == cardnr && 1682 - ptr->domain == domain) { 1683 - list_del(&ptr->list); 1684 - kfree(ptr); 1685 - break; 1686 - } 1687 - } 1688 - spin_unlock_bh(&cca_info_list_lock); 1689 - } 1690 - 1691 - static void __exit mkvp_cache_free(void) 1692 - { 1693 - struct cca_info_list_entry *ptr, *pnext; 1694 - 1695 - spin_lock_bh(&cca_info_list_lock); 1696 - list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { 1697 - list_del(&ptr->list); 1698 - kfree(ptr); 1699 - } 1700 - spin_unlock_bh(&cca_info_list_lock); 1701 - } 1702 - 1703 1581 /* 1704 - * Fetch cca_info values via query_crypto_facility from adapter. 1582 + * Fetch cca_info values about a CCA queue via 1583 + * query_crypto_facility from adapter. 1705 1584 */ 1706 - static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) 1585 + int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags) 1707 1586 { 1587 + void *mem; 1708 1588 int rc, found = 0; 1709 1589 size_t rlen, vlen; 1710 - u8 *rarray, *varray, *pg; 1590 + u8 *rarray, *varray; 1711 1591 struct zcrypt_device_status_ext devstat; 1712 1592 1713 1593 memset(ci, 0, sizeof(*ci)); ··· 1645 1671 return rc; 1646 1672 ci->hwtype = devstat.hwtype; 1647 1673 1648 - /* prep page for rule array and var array use */ 1649 - pg = (u8 *)__get_free_page(GFP_KERNEL); 1650 - if (!pg) 1674 + /* 1675 + * Prep memory for rule array and var array use. 1676 + * Use the cprb mempool for this. 1677 + */ 1678 + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
1679 + mempool_alloc_preallocated(cprb_mempool) : 1680 + mempool_alloc(cprb_mempool, GFP_KERNEL); 1681 + if (!mem) 1651 1682 return -ENOMEM; 1652 - rarray = pg; 1653 - varray = pg + PAGE_SIZE / 2; 1683 + rarray = (u8 *)mem; 1684 + varray = (u8 *)mem + PAGE_SIZE / 2; 1654 1685 rlen = vlen = PAGE_SIZE / 2; 1655 1686 1656 1687 /* QF for this card/domain */ 1657 1688 rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", 1658 - rarray, &rlen, varray, &vlen); 1689 + rarray, &rlen, varray, &vlen, xflags); 1659 1690 if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { 1660 1691 memcpy(ci->serial, rarray, 8); 1661 1692 ci->new_asym_mk_state = (char)rarray[4 * 8]; ··· 1687 1708 goto out; 1688 1709 rlen = vlen = PAGE_SIZE / 2; 1689 1710 rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", 1690 - rarray, &rlen, varray, &vlen); 1711 + rarray, &rlen, varray, &vlen, xflags); 1691 1712 if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { 1692 1713 ci->new_apka_mk_state = (char)rarray[10 * 8]; 1693 1714 ci->cur_apka_mk_state = (char)rarray[11 * 8]; ··· 1702 1723 } 1703 1724 1704 1725 out: 1705 - free_page((unsigned long)pg); 1726 + mempool_free(mem, cprb_mempool); 1706 1727 return found == 2 ? 0 : -ENOENT; 1707 - } 1708 - 1709 - /* 1710 - * Fetch cca information about a CCA queue. 1711 - */ 1712 - int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) 1713 - { 1714 - int rc; 1715 - 1716 - rc = cca_info_cache_fetch(card, dom, ci); 1717 - if (rc || verify) { 1718 - rc = fetch_cca_info(card, dom, ci); 1719 - if (rc == 0) 1720 - cca_info_cache_update(card, dom, ci); 1721 - } 1722 - 1723 - return rc; 1724 1728 } 1725 1729 EXPORT_SYMBOL(cca_get_info); 1726 1730 1727 - /* 1728 - * Search for a matching crypto card based on the 1729 - * Master Key Verification Pattern given. 
1730 - */ 1731 - static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, 1732 - int verify, int minhwtype) 1733 - { 1734 - struct zcrypt_device_status_ext *device_status; 1735 - u16 card, dom; 1736 - struct cca_info ci; 1737 - int i, rc, oi = -1; 1738 - 1739 - /* mkvp must not be zero, minhwtype needs to be >= 0 */ 1740 - if (mkvp == 0 || minhwtype < 0) 1741 - return -EINVAL; 1742 - 1743 - /* fetch status of all crypto cards */ 1744 - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, 1745 - sizeof(struct zcrypt_device_status_ext), 1746 - GFP_KERNEL); 1747 - if (!device_status) 1748 - return -ENOMEM; 1749 - zcrypt_device_status_mask_ext(device_status); 1750 - 1751 - /* walk through all crypto cards */ 1752 - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { 1753 - card = AP_QID_CARD(device_status[i].qid); 1754 - dom = AP_QID_QUEUE(device_status[i].qid); 1755 - if (device_status[i].online && 1756 - device_status[i].functions & 0x04) { 1757 - /* enabled CCA card, check current mkvp from cache */ 1758 - if (cca_info_cache_fetch(card, dom, &ci) == 0 && 1759 - ci.hwtype >= minhwtype && 1760 - ci.cur_aes_mk_state == '2' && 1761 - ci.cur_aes_mkvp == mkvp) { 1762 - if (!verify) 1763 - break; 1764 - /* verify: refresh card info */ 1765 - if (fetch_cca_info(card, dom, &ci) == 0) { 1766 - cca_info_cache_update(card, dom, &ci); 1767 - if (ci.hwtype >= minhwtype && 1768 - ci.cur_aes_mk_state == '2' && 1769 - ci.cur_aes_mkvp == mkvp) 1770 - break; 1771 - } 1772 - } 1773 - } else { 1774 - /* Card is offline and/or not a CCA card. 
*/ 1775 - /* del mkvp entry from cache if it exists */ 1776 - cca_info_cache_scrub(card, dom); 1777 - } 1778 - } 1779 - if (i >= MAX_ZDEV_ENTRIES_EXT) { 1780 - /* nothing found, so this time without cache */ 1781 - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { 1782 - if (!(device_status[i].online && 1783 - device_status[i].functions & 0x04)) 1784 - continue; 1785 - card = AP_QID_CARD(device_status[i].qid); 1786 - dom = AP_QID_QUEUE(device_status[i].qid); 1787 - /* fresh fetch mkvp from adapter */ 1788 - if (fetch_cca_info(card, dom, &ci) == 0) { 1789 - cca_info_cache_update(card, dom, &ci); 1790 - if (ci.hwtype >= minhwtype && 1791 - ci.cur_aes_mk_state == '2' && 1792 - ci.cur_aes_mkvp == mkvp) 1793 - break; 1794 - if (ci.hwtype >= minhwtype && 1795 - ci.old_aes_mk_state == '2' && 1796 - ci.old_aes_mkvp == mkvp && 1797 - oi < 0) 1798 - oi = i; 1799 - } 1800 - } 1801 - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { 1802 - /* old mkvp matched, use this card then */ 1803 - card = AP_QID_CARD(device_status[oi].qid); 1804 - dom = AP_QID_QUEUE(device_status[oi].qid); 1805 - } 1806 - } 1807 - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { 1808 - if (pcardnr) 1809 - *pcardnr = card; 1810 - if (pdomain) 1811 - *pdomain = dom; 1812 - rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); 1813 - } else { 1814 - rc = -ENODEV; 1815 - } 1816 - 1817 - kvfree(device_status); 1818 - return rc; 1819 - } 1820 - 1821 - /* 1822 - * Search for a matching crypto card based on the Master Key 1823 - * Verification Pattern provided inside a secure key token. 
1824 - */ 1825 - int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) 1826 - { 1827 - u64 mkvp; 1828 - int minhwtype = 0; 1829 - const struct keytoken_header *hdr = (struct keytoken_header *)key; 1830 - 1831 - if (hdr->type != TOKTYPE_CCA_INTERNAL) 1832 - return -EINVAL; 1833 - 1834 - switch (hdr->version) { 1835 - case TOKVER_CCA_AES: 1836 - mkvp = ((struct secaeskeytoken *)key)->mkvp; 1837 - break; 1838 - case TOKVER_CCA_VLSC: 1839 - mkvp = ((struct cipherkeytoken *)key)->mkvp0; 1840 - minhwtype = AP_DEVICE_TYPE_CEX6; 1841 - break; 1842 - default: 1843 - return -EINVAL; 1844 - } 1845 - 1846 - return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); 1847 - } 1848 - EXPORT_SYMBOL(cca_findcard); 1849 - 1850 - int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 1731 + int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 1851 1732 int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, 1852 - int verify) 1733 + u32 xflags) 1853 1734 { 1854 1735 struct zcrypt_device_status_ext *device_status; 1855 - u32 *_apqns = NULL, _nr_apqns = 0; 1856 - int i, card, dom, curmatch, oldmatch, rc = 0; 1736 + int i, card, dom, curmatch, oldmatch; 1857 1737 struct cca_info ci; 1738 + u32 _nr_apqns = 0; 1858 1739 1859 - /* fetch status of all crypto cards */ 1860 - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, 1861 - sizeof(struct zcrypt_device_status_ext), 1862 - GFP_KERNEL); 1863 - if (!device_status) 1864 - return -ENOMEM; 1865 - zcrypt_device_status_mask_ext(device_status); 1740 + /* occupy the device status memory */ 1741 + mutex_lock(&dev_status_mem_mutex); 1742 + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); 1743 + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; 1866 1744 1867 - /* allocate 1k space for up to 256 apqns */ 1868 - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); 1869 - if (!_apqns) { 1870 - kvfree(device_status); 1871 - return -ENOMEM; 1872 - } 1745 + /* fetch crypto device 
status into this struct */ 1746 + zcrypt_device_status_mask_ext(device_status, 1747 + ZCRYPT_DEV_STATUS_CARD_MAX, 1748 + ZCRYPT_DEV_STATUS_QUEUE_MAX); 1873 1749 1874 1750 /* walk through all the crypto apqnss */ 1875 - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { 1751 + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { 1876 1752 card = AP_QID_CARD(device_status[i].qid); 1877 1753 dom = AP_QID_QUEUE(device_status[i].qid); 1878 1754 /* check online state */ ··· 1743 1909 if (domain != 0xFFFF && dom != domain) 1744 1910 continue; 1745 1911 /* get cca info on this apqn */ 1746 - if (cca_get_info(card, dom, &ci, verify)) 1912 + if (cca_get_info(card, dom, &ci, xflags)) 1747 1913 continue; 1748 1914 /* current master key needs to be valid */ 1749 1915 if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') ··· 1773 1939 continue; 1774 1940 } 1775 1941 /* apqn passed all filtering criterons, add to the array */ 1776 - if (_nr_apqns < 256) 1777 - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1942 + if (_nr_apqns < *nr_apqns) 1943 + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1778 1944 } 1779 1945 1780 - /* nothing found ? */ 1781 - if (!_nr_apqns) { 1782 - kfree(_apqns); 1783 - rc = -ENODEV; 1784 - } else { 1785 - /* no re-allocation, simple return the _apqns array */ 1786 - *apqns = _apqns; 1787 - *nr_apqns = _nr_apqns; 1788 - rc = 0; 1789 - } 1946 + *nr_apqns = _nr_apqns; 1790 1947 1791 - kvfree(device_status); 1792 - return rc; 1948 + /* release the device status memory */ 1949 + mutex_unlock(&dev_status_mem_mutex); 1950 + 1951 + return _nr_apqns ? 0 : -ENODEV; 1793 1952 } 1794 1953 EXPORT_SYMBOL(cca_findcard2); 1795 1954 1796 - void __exit zcrypt_ccamisc_exit(void) 1955 + int __init zcrypt_ccamisc_init(void) 1797 1956 { 1798 - mkvp_cache_free(); 1957 + /* Pre-allocate a small memory pool for cca cprbs. 
*/ 1958 + cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold, 1959 + CPRB_MEMPOOL_ITEM_SIZE); 1960 + if (!cprb_mempool) 1961 + return -ENOMEM; 1962 + 1963 + /* Pre-allocate one crypto status card struct used in findcard() */ 1964 + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); 1965 + if (!dev_status_mem) { 1966 + mempool_destroy(cprb_mempool); 1967 + return -ENOMEM; 1968 + } 1969 + 1970 + return 0; 1971 + } 1972 + 1973 + void zcrypt_ccamisc_exit(void) 1974 + { 1975 + mutex_lock(&dev_status_mem_mutex); 1976 + kvfree(dev_status_mem); 1977 + mutex_unlock(&dev_status_mem_mutex); 1978 + mempool_destroy(cprb_mempool); 1799 1979 }
+20 -29
drivers/s390/crypto/zcrypt_ccamisc.h
··· 160 160 /* 161 161 * Generate (random) CCA AES DATA secure key. 162 162 */ 163 - int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); 163 + int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey, 164 + u32 xflags); 164 165 165 166 /* 166 167 * Generate CCA AES DATA secure key with given clear key value. 167 168 */ 168 169 int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, 169 - const u8 *clrkey, u8 *seckey); 170 + const u8 *clrkey, u8 *seckey, u32 xflags); 170 171 171 172 /* 172 173 * Derive proteced key from an CCA AES DATA secure key. 173 174 */ 174 175 int cca_sec2protkey(u16 cardnr, u16 domain, 175 176 const u8 *seckey, u8 *protkey, u32 *protkeylen, 176 - u32 *protkeytype); 177 + u32 *protkeytype, u32 xflags); 177 178 178 179 /* 179 180 * Generate (random) CCA AES CIPHER secure key. 180 181 */ 181 182 int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 182 - u8 *keybuf, u32 *keybufsize); 183 + u8 *keybuf, u32 *keybufsize, u32 xflags); 183 184 184 185 /* 185 186 * Derive proteced key from CCA AES cipher secure key. 186 187 */ 187 188 int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, 188 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 189 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 190 + u32 xflags); 189 191 190 192 /* 191 193 * Build CCA AES CIPHER secure key with a given clear key value. 192 194 */ 193 195 int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 194 - const u8 *clrkey, u8 *keybuf, u32 *keybufsize); 196 + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, 197 + u32 xflags); 195 198 196 199 /* 197 200 * Derive proteced key from CCA ECC secure private key. 
198 201 */ 199 202 int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, 200 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 203 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags); 201 204 202 205 /* 203 206 * Query cryptographic facility from CCA adapter ··· 208 205 int cca_query_crypto_facility(u16 cardnr, u16 domain, 209 206 const char *keyword, 210 207 u8 *rarray, size_t *rarraylen, 211 - u8 *varray, size_t *varraylen); 212 - 213 - /* 214 - * Search for a matching crypto card based on the Master Key 215 - * Verification Pattern provided inside a secure key. 216 - * Works with CCA AES data and cipher keys. 217 - * Returns < 0 on failure, 0 if CURRENT MKVP matches and 218 - * 1 if OLD MKVP matches. 219 - */ 220 - int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); 208 + u8 *varray, size_t *varraylen, 209 + u32 xflags); 221 210 222 211 /* 223 212 * Build a list of cca apqns meeting the following constrains: ··· 219 224 * - if minhwtype > 0 only apqns with hwtype >= minhwtype 220 225 * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp 221 226 * - if old_mkvp != 0 only apqns where old_mkvp == mkvp 222 - * - if verify is enabled and a cur_mkvp and/or old_mkvp 223 - * value is given, then refetch the cca_info and make sure the current 224 - * cur_mkvp or old_mkvp values of the apqn are used. 225 227 * The mktype determines which set of master keys to use: 226 228 * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set 227 - * The array of apqn entries is allocated with kmalloc and returned in *apqns; 228 - * the number of apqns stored into the list is returned in *nr_apqns. One apqn 229 - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and 230 - * may be casted to struct pkey_apqn. The return value is either 0 for success 231 - * or a negative errno value. If no apqn meeting the criteria is found, 232 - * -ENODEV is returned. 
229 + * The caller should set *nr_apqns to the nr of elements available in *apqns. 230 + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. 231 + * The return value is either 0 for success or a negative errno value. 232 + * If no apqn meeting the criteria is found, -ENODEV is returned. 233 233 */ 234 - int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 234 + int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 235 235 int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, 236 - int verify); 236 + u32 xflags); 237 237 238 238 #define AES_MK_SET 0 239 239 #define APKA_MK_SET 1 ··· 260 270 /* 261 271 * Fetch cca information about an CCA queue. 262 272 */ 263 - int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); 273 + int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags); 264 274 275 + int zcrypt_ccamisc_init(void); 265 276 void zcrypt_ccamisc_exit(void); 266 277 267 278 #endif /* _ZCRYPT_CCAMISC_H_ */
+18 -23
drivers/s390/crypto/zcrypt_cex4.c
··· 79 79 struct device_attribute *attr, 80 80 char *buf) 81 81 { 82 - struct zcrypt_card *zc = dev_get_drvdata(dev); 83 - struct cca_info ci; 84 82 struct ap_card *ac = to_ap_card(dev); 83 + struct cca_info ci; 85 84 86 85 memset(&ci, 0, sizeof(ci)); 87 86 88 87 if (ap_domain_index >= 0) 89 - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); 88 + cca_get_info(ac->id, ap_domain_index, &ci, 0); 90 89 91 90 return sysfs_emit(buf, "%s\n", ci.serial); 92 91 } ··· 109 110 struct device_attribute *attr, 110 111 char *buf) 111 112 { 112 - struct zcrypt_queue *zq = dev_get_drvdata(dev); 113 - int n = 0; 114 - struct cca_info ci; 115 - static const char * const cao_state[] = { "invalid", "valid" }; 116 113 static const char * const new_state[] = { "empty", "partial", "full" }; 114 + static const char * const cao_state[] = { "invalid", "valid" }; 115 + struct zcrypt_queue *zq = dev_get_drvdata(dev); 116 + struct cca_info ci; 117 + int n = 0; 117 118 118 119 memset(&ci, 0, sizeof(ci)); 119 120 120 121 cca_get_info(AP_QID_CARD(zq->queue->qid), 121 122 AP_QID_QUEUE(zq->queue->qid), 122 - &ci, zq->online); 123 + &ci, 0); 123 124 124 125 if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') 125 126 n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n", ··· 209 210 struct device_attribute *attr, 210 211 char *buf) 211 212 { 212 - struct zcrypt_card *zc = dev_get_drvdata(dev); 213 - struct ep11_card_info ci; 214 213 struct ap_card *ac = to_ap_card(dev); 214 + struct ep11_card_info ci; 215 215 216 216 memset(&ci, 0, sizeof(ci)); 217 217 218 - ep11_get_card_info(ac->id, &ci, zc->online); 218 + ep11_get_card_info(ac->id, &ci, 0); 219 219 220 220 if (ci.API_ord_nr > 0) 221 221 return sysfs_emit(buf, "%u\n", ci.API_ord_nr); ··· 229 231 struct device_attribute *attr, 230 232 char *buf) 231 233 { 232 - struct zcrypt_card *zc = dev_get_drvdata(dev); 233 - struct ep11_card_info ci; 234 234 struct ap_card *ac = to_ap_card(dev); 235 + struct ep11_card_info ci; 235 236 236 237 
memset(&ci, 0, sizeof(ci)); 237 238 238 - ep11_get_card_info(ac->id, &ci, zc->online); 239 + ep11_get_card_info(ac->id, &ci, 0); 239 240 240 241 if (ci.FW_version > 0) 241 242 return sysfs_emit(buf, "%d.%d\n", ··· 251 254 struct device_attribute *attr, 252 255 char *buf) 253 256 { 254 - struct zcrypt_card *zc = dev_get_drvdata(dev); 255 - struct ep11_card_info ci; 256 257 struct ap_card *ac = to_ap_card(dev); 258 + struct ep11_card_info ci; 257 259 258 260 memset(&ci, 0, sizeof(ci)); 259 261 260 - ep11_get_card_info(ac->id, &ci, zc->online); 262 + ep11_get_card_info(ac->id, &ci, 0); 261 263 262 264 if (ci.serial[0]) 263 265 return sysfs_emit(buf, "%16.16s\n", ci.serial); ··· 287 291 struct device_attribute *attr, 288 292 char *buf) 289 293 { 290 - struct zcrypt_card *zc = dev_get_drvdata(dev); 291 - int i, n = 0; 292 - struct ep11_card_info ci; 293 294 struct ap_card *ac = to_ap_card(dev); 295 + struct ep11_card_info ci; 296 + int i, n = 0; 294 297 295 298 memset(&ci, 0, sizeof(ci)); 296 299 297 - ep11_get_card_info(ac->id, &ci, zc->online); 300 + ep11_get_card_info(ac->id, &ci, 0); 298 301 299 302 for (i = 0; ep11_op_modes[i].mode_txt; i++) { 300 303 if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { ··· 343 348 if (zq->online) 344 349 ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), 345 350 AP_QID_QUEUE(zq->queue->qid), 346 - &di); 351 + &di, 0); 347 352 348 353 if (di.cur_wk_state == '0') { 349 354 n = sysfs_emit(buf, "WK CUR: %s -\n", ··· 390 395 if (zq->online) 391 396 ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), 392 397 AP_QID_QUEUE(zq->queue->qid), 393 - &di); 398 + &di, 0); 394 399 395 400 for (i = 0; ep11_op_modes[i].mode_txt; i++) { 396 401 if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+212 -242
drivers/s390/crypto/zcrypt_ep11misc.c
··· 10 10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 11 12 12 #include <linux/init.h> 13 + #include <linux/mempool.h> 13 14 #include <linux/module.h> 14 - #include <linux/slab.h> 15 15 #include <linux/random.h> 16 + #include <linux/slab.h> 16 17 #include <asm/zcrypt.h> 17 18 #include <asm/pkey.h> 18 19 #include <crypto/aes.h> ··· 31 30 static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 32 31 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; 33 32 34 - /* ep11 card info cache */ 35 - struct card_list_entry { 36 - struct list_head list; 37 - u16 cardnr; 38 - struct ep11_card_info info; 39 - }; 40 - static LIST_HEAD(card_list); 41 - static DEFINE_SPINLOCK(card_list_lock); 33 + /* 34 + * Cprb memory pool held for urgent cases where no memory 35 + * can be allocated via kmalloc. This pool is only used when 36 + * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC. 37 + */ 38 + #define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024) 39 + static mempool_t *cprb_mempool; 42 40 43 - static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) 44 - { 45 - int rc = -ENOENT; 46 - struct card_list_entry *ptr; 47 - 48 - spin_lock_bh(&card_list_lock); 49 - list_for_each_entry(ptr, &card_list, list) { 50 - if (ptr->cardnr == cardnr) { 51 - memcpy(ci, &ptr->info, sizeof(*ci)); 52 - rc = 0; 53 - break; 54 - } 55 - } 56 - spin_unlock_bh(&card_list_lock); 57 - 58 - return rc; 59 - } 60 - 61 - static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) 62 - { 63 - int found = 0; 64 - struct card_list_entry *ptr; 65 - 66 - spin_lock_bh(&card_list_lock); 67 - list_for_each_entry(ptr, &card_list, list) { 68 - if (ptr->cardnr == cardnr) { 69 - memcpy(&ptr->info, ci, sizeof(*ci)); 70 - found = 1; 71 - break; 72 - } 73 - } 74 - if (!found) { 75 - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); 76 - if (!ptr) { 77 - spin_unlock_bh(&card_list_lock); 78 - return; 79 - } 80 - ptr->cardnr = cardnr; 81 - memcpy(&ptr->info, ci, sizeof(*ci)); 82 - 
list_add(&ptr->list, &card_list); 83 - } 84 - spin_unlock_bh(&card_list_lock); 85 - } 86 - 87 - static void card_cache_scrub(u16 cardnr) 88 - { 89 - struct card_list_entry *ptr; 90 - 91 - spin_lock_bh(&card_list_lock); 92 - list_for_each_entry(ptr, &card_list, list) { 93 - if (ptr->cardnr == cardnr) { 94 - list_del(&ptr->list); 95 - kfree(ptr); 96 - break; 97 - } 98 - } 99 - spin_unlock_bh(&card_list_lock); 100 - } 101 - 102 - static void __exit card_cache_free(void) 103 - { 104 - struct card_list_entry *ptr, *pnext; 105 - 106 - spin_lock_bh(&card_list_lock); 107 - list_for_each_entry_safe(ptr, pnext, &card_list, list) { 108 - list_del(&ptr->list); 109 - kfree(ptr); 110 - } 111 - spin_unlock_bh(&card_list_lock); 112 - } 41 + /* 42 + * This is a pre-allocated memory for the device status array 43 + * used within the ep11_findcard2() function. It is currently 44 + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is 45 + * controlled via dev_status_mem_mutex. Needs adaption if more 46 + * than 128 cards or domains to be are supported. 47 + */ 48 + #define ZCRYPT_DEV_STATUS_CARD_MAX 128 49 + #define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 50 + #define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ 51 + ZCRYPT_DEV_STATUS_QUEUE_MAX) 52 + #define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ 53 + sizeof(struct zcrypt_device_status_ext)) 54 + static void *dev_status_mem; 55 + static DEFINE_MUTEX(dev_status_mem_mutex); 113 56 114 57 static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, 115 58 struct ep11kblob_header **kbhdr, size_t *kbhdrsize, ··· 356 411 /* 357 412 * Allocate and prepare ep11 cprb plus additional payload. 
358 413 */ 359 - static inline struct ep11_cprb *alloc_cprb(size_t payload_len) 414 + static void *alloc_cprbmem(size_t payload_len, u32 xflags) 360 415 { 361 416 size_t len = sizeof(struct ep11_cprb) + payload_len; 362 - struct ep11_cprb *cprb; 417 + struct ep11_cprb *cprb = NULL; 363 418 364 - cprb = kzalloc(len, GFP_KERNEL); 419 + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { 420 + if (len <= CPRB_MEMPOOL_ITEM_SIZE) 421 + cprb = mempool_alloc_preallocated(cprb_mempool); 422 + } else { 423 + cprb = kmalloc(len, GFP_KERNEL); 424 + } 365 425 if (!cprb) 366 426 return NULL; 427 + memset(cprb, 0, len); 367 428 368 429 cprb->cprb_len = sizeof(struct ep11_cprb); 369 430 cprb->cprb_ver_id = 0x04; ··· 378 427 cprb->payload_len = payload_len; 379 428 380 429 return cprb; 430 + } 431 + 432 + /* 433 + * Free ep11 cprb buffer space. 434 + */ 435 + static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags) 436 + { 437 + if (mem && scrub) 438 + memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len); 439 + 440 + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) 441 + mempool_free(mem, cprb_mempool); 442 + else 443 + kfree(mem); 381 444 } 382 445 383 446 /* ··· 454 489 struct ep11_cprb *req, size_t req_len, 455 490 struct ep11_cprb *rep, size_t rep_len) 456 491 { 492 + memset(u, 0, sizeof(*u)); 457 493 u->targets = (u8 __user *)t; 458 494 u->targets_num = nt; 459 495 u->req = (u8 __user *)req; ··· 549 583 * Helper function which does an ep11 query with given query type. 
550 584 */ 551 585 static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, 552 - size_t buflen, u8 *buf) 586 + size_t buflen, u8 *buf, u32 xflags) 553 587 { 554 588 struct ep11_info_req_pl { 555 589 struct pl_head head; ··· 571 605 } __packed * rep_pl; 572 606 struct ep11_cprb *req = NULL, *rep = NULL; 573 607 struct ep11_target_dev target; 574 - struct ep11_urb *urb = NULL; 608 + struct ep11_urb urb; 575 609 int api = EP11_API_V1, rc = -ENOMEM; 576 610 577 611 /* request cprb and payload */ 578 - req = alloc_cprb(sizeof(struct ep11_info_req_pl)); 612 + req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags); 579 613 if (!req) 580 614 goto out; 581 615 req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); ··· 587 621 req_pl->query_subtype_len = sizeof(u32); 588 622 589 623 /* reply cprb and payload */ 590 - rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); 624 + rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags); 591 625 if (!rep) 592 626 goto out; 593 627 rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 594 628 595 629 /* urb and target */ 596 - urb = kmalloc(sizeof(*urb), GFP_KERNEL); 597 - if (!urb) 598 - goto out; 599 630 target.ap_id = cardnr; 600 631 target.dom_id = domain; 601 - prep_urb(urb, &target, 1, 632 + prep_urb(&urb, &target, 1, 602 633 req, sizeof(*req) + sizeof(*req_pl), 603 634 rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); 604 635 605 - rc = zcrypt_send_ep11_cprb(urb); 636 + rc = zcrypt_send_ep11_cprb(&urb, xflags); 606 637 if (rc) { 607 638 ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 608 639 __func__, (int)cardnr, (int)domain, rc); ··· 630 667 memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); 631 668 632 669 out: 633 - kfree(req); 634 - kfree(rep); 635 - kfree(urb); 670 + free_cprbmem(req, 0, false, xflags); 671 + free_cprbmem(rep, 0, false, xflags); 636 672 return rc; 637 673 } 638 674 639 675 /* 640 676 * Provide information 
about an EP11 card. 641 677 */ 642 - int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) 678 + int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags) 643 679 { 644 680 int rc; 645 681 struct ep11_module_query_info { ··· 668 706 u32 max_CP_index; 669 707 } __packed * pmqi = NULL; 670 708 671 - rc = card_cache_fetch(card, info); 672 - if (rc || verify) { 673 - pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); 674 - if (!pmqi) 675 - return -ENOMEM; 676 - rc = ep11_query_info(card, AUTOSEL_DOM, 677 - 0x01 /* module info query */, 678 - sizeof(*pmqi), (u8 *)pmqi); 679 - if (rc) { 680 - if (rc == -ENODEV) 681 - card_cache_scrub(card); 682 - goto out; 683 - } 684 - memset(info, 0, sizeof(*info)); 685 - info->API_ord_nr = pmqi->API_ord_nr; 686 - info->FW_version = 687 - (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; 688 - memcpy(info->serial, pmqi->serial, sizeof(info->serial)); 689 - info->op_mode = pmqi->op_mode; 690 - card_cache_update(card, info); 691 - } 709 + /* use the cprb mempool to satisfy this short term mem alloc */ 710 + pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 711 + mempool_alloc_preallocated(cprb_mempool) : 712 + mempool_alloc(cprb_mempool, GFP_KERNEL); 713 + if (!pmqi) 714 + return -ENOMEM; 715 + rc = ep11_query_info(card, AUTOSEL_DOM, 716 + 0x01 /* module info query */, 717 + sizeof(*pmqi), (u8 *)pmqi, xflags); 718 + if (rc) 719 + goto out; 720 + 721 + memset(info, 0, sizeof(*info)); 722 + info->API_ord_nr = pmqi->API_ord_nr; 723 + info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; 724 + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); 725 + info->op_mode = pmqi->op_mode; 692 726 693 727 out: 694 - kfree(pmqi); 728 + mempool_free(pmqi, cprb_mempool); 695 729 return rc; 696 730 } 697 731 EXPORT_SYMBOL(ep11_get_card_info); ··· 695 737 /* 696 738 * Provide information about a domain within an EP11 card. 
697 739 */ 698 - int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) 740 + int ep11_get_domain_info(u16 card, u16 domain, 741 + struct ep11_domain_info *info, u32 xflags) 699 742 { 700 743 int rc; 701 744 struct ep11_domain_query_info { ··· 705 746 u8 new_WK_VP[32]; 706 747 u32 dom_flags; 707 748 u64 op_mode; 708 - } __packed * p_dom_info; 709 - 710 - p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); 711 - if (!p_dom_info) 712 - return -ENOMEM; 749 + } __packed dom_query_info; 713 750 714 751 rc = ep11_query_info(card, domain, 0x03 /* domain info query */, 715 - sizeof(*p_dom_info), (u8 *)p_dom_info); 752 + sizeof(dom_query_info), (u8 *)&dom_query_info, 753 + xflags); 716 754 if (rc) 717 755 goto out; 718 756 719 757 memset(info, 0, sizeof(*info)); 720 758 info->cur_wk_state = '0'; 721 759 info->new_wk_state = '0'; 722 - if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { 723 - if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { 760 + if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) { 761 + if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) { 724 762 info->cur_wk_state = '1'; 725 - memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); 763 + memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32); 726 764 } 727 - if (p_dom_info->dom_flags & 0x04 || /* new wk present */ 728 - p_dom_info->dom_flags & 0x08 /* new wk committed */) { 765 + if (dom_query_info.dom_flags & 0x04 || /* new wk present */ 766 + dom_query_info.dom_flags & 0x08 /* new wk committed */) { 729 767 info->new_wk_state = 730 - p_dom_info->dom_flags & 0x08 ? '2' : '1'; 731 - memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); 768 + dom_query_info.dom_flags & 0x08 ? 
'2' : '1'; 769 + memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32); 732 770 } 733 771 } 734 - info->op_mode = p_dom_info->op_mode; 772 + info->op_mode = dom_query_info.op_mode; 735 773 736 774 out: 737 - kfree(p_dom_info); 738 775 return rc; 739 776 } 740 777 EXPORT_SYMBOL(ep11_get_domain_info); ··· 743 788 744 789 static int _ep11_genaeskey(u16 card, u16 domain, 745 790 u32 keybitsize, u32 keygenflags, 746 - u8 *keybuf, size_t *keybufsize) 791 + u8 *keybuf, size_t *keybufsize, u32 xflags) 747 792 { 748 793 struct keygen_req_pl { 749 794 struct pl_head head; ··· 778 823 struct ep11_cprb *req = NULL, *rep = NULL; 779 824 size_t req_pl_size, pinblob_size = 0; 780 825 struct ep11_target_dev target; 781 - struct ep11_urb *urb = NULL; 826 + struct ep11_urb urb; 782 827 int api, rc = -ENOMEM; 783 828 u8 *p; 784 829 ··· 806 851 pinblob_size = EP11_PINBLOB_V1_BYTES; 807 852 } 808 853 req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); 809 - req = alloc_cprb(req_pl_size); 854 + req = alloc_cprbmem(req_pl_size, xflags); 810 855 if (!req) 811 856 goto out; 812 857 req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); ··· 832 877 *p++ = pinblob_size; 833 878 834 879 /* reply cprb and payload */ 835 - rep = alloc_cprb(sizeof(struct keygen_rep_pl)); 880 + rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags); 836 881 if (!rep) 837 882 goto out; 838 883 rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 839 884 840 885 /* urb and target */ 841 - urb = kmalloc(sizeof(*urb), GFP_KERNEL); 842 - if (!urb) 843 - goto out; 844 886 target.ap_id = card; 845 887 target.dom_id = domain; 846 - prep_urb(urb, &target, 1, 888 + prep_urb(&urb, &target, 1, 847 889 req, sizeof(*req) + req_pl_size, 848 890 rep, sizeof(*rep) + sizeof(*rep_pl)); 849 891 850 - rc = zcrypt_send_ep11_cprb(urb); 892 + rc = zcrypt_send_ep11_cprb(&urb, xflags); 851 893 if (rc) { 852 894 ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 853 895 
__func__, (int)card, (int)domain, rc); ··· 877 925 *keybufsize = rep_pl->data_len; 878 926 879 927 out: 880 - kfree(req); 881 - kfree(rep); 882 - kfree(urb); 928 + free_cprbmem(req, 0, false, xflags); 929 + free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags); 883 930 return rc; 884 931 } 885 932 886 933 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 887 - u8 *keybuf, u32 *keybufsize, u32 keybufver) 934 + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags) 888 935 { 889 936 struct ep11kblob_header *hdr; 890 937 size_t hdr_size, pl_size; ··· 904 953 return rc; 905 954 906 955 rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags, 907 - pl, &pl_size); 956 + pl, &pl_size, xflags); 908 957 if (rc) 909 958 return rc; 910 959 ··· 924 973 u16 mode, u32 mech, const u8 *iv, 925 974 const u8 *key, size_t keysize, 926 975 const u8 *inbuf, size_t inbufsize, 927 - u8 *outbuf, size_t *outbufsize) 976 + u8 *outbuf, size_t *outbufsize, 977 + u32 xflags) 928 978 { 929 979 struct crypt_req_pl { 930 980 struct pl_head head; ··· 952 1000 } __packed * rep_pl; 953 1001 struct ep11_cprb *req = NULL, *rep = NULL; 954 1002 struct ep11_target_dev target; 955 - struct ep11_urb *urb = NULL; 956 - size_t req_pl_size, rep_pl_size; 1003 + struct ep11_urb urb; 1004 + size_t req_pl_size, rep_pl_size = 0; 957 1005 int n, api = EP11_API_V1, rc = -ENOMEM; 958 1006 u8 *p; 959 1007 ··· 964 1012 /* request cprb and payload */ 965 1013 req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 
16 : 0) 966 1014 + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); 967 - req = alloc_cprb(req_pl_size); 1015 + req = alloc_cprbmem(req_pl_size, xflags); 968 1016 if (!req) 969 1017 goto out; 970 1018 req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); ··· 986 1034 987 1035 /* reply cprb and payload, assume out data size <= in data size + 32 */ 988 1036 rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); 989 - rep = alloc_cprb(rep_pl_size); 1037 + rep = alloc_cprbmem(rep_pl_size, xflags); 990 1038 if (!rep) 991 1039 goto out; 992 1040 rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 993 1041 994 1042 /* urb and target */ 995 - urb = kmalloc(sizeof(*urb), GFP_KERNEL); 996 - if (!urb) 997 - goto out; 998 1043 target.ap_id = card; 999 1044 target.dom_id = domain; 1000 - prep_urb(urb, &target, 1, 1045 + prep_urb(&urb, &target, 1, 1001 1046 req, sizeof(*req) + req_pl_size, 1002 1047 rep, sizeof(*rep) + rep_pl_size); 1003 1048 1004 - rc = zcrypt_send_ep11_cprb(urb); 1049 + rc = zcrypt_send_ep11_cprb(&urb, xflags); 1005 1050 if (rc) { 1006 1051 ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1007 1052 __func__, (int)card, (int)domain, rc); ··· 1044 1095 *outbufsize = n; 1045 1096 1046 1097 out: 1047 - kfree(req); 1048 - kfree(rep); 1049 - kfree(urb); 1098 + free_cprbmem(req, req_pl_size, true, xflags); 1099 + free_cprbmem(rep, rep_pl_size, true, xflags); 1050 1100 return rc; 1051 1101 } 1052 1102 ··· 1054 1106 const u8 *enckey, size_t enckeysize, 1055 1107 u32 mech, const u8 *iv, 1056 1108 u32 keybitsize, u32 keygenflags, 1057 - u8 *keybuf, size_t *keybufsize) 1109 + u8 *keybuf, size_t *keybufsize, u32 xflags) 1058 1110 { 1059 1111 struct uw_req_pl { 1060 1112 struct pl_head head; ··· 1091 1143 struct ep11_cprb *req = NULL, *rep = NULL; 1092 1144 size_t req_pl_size, pinblob_size = 0; 1093 1145 struct ep11_target_dev target; 1094 - struct ep11_urb *urb = NULL; 1146 + struct ep11_urb urb; 1095 1147 int api, 
rc = -ENOMEM; 1096 1148 u8 *p; 1097 1149 ··· 1109 1161 req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0) 1110 1162 + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) 1111 1163 + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); 1112 - req = alloc_cprb(req_pl_size); 1164 + req = alloc_cprbmem(req_pl_size, xflags); 1113 1165 if (!req) 1114 1166 goto out; 1115 1167 req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); ··· 1145 1197 p += asn1tag_write(p, 0x04, enckey, enckeysize); 1146 1198 1147 1199 /* reply cprb and payload */ 1148 - rep = alloc_cprb(sizeof(struct uw_rep_pl)); 1200 + rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags); 1149 1201 if (!rep) 1150 1202 goto out; 1151 1203 rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1152 1204 1153 1205 /* urb and target */ 1154 - urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1155 - if (!urb) 1156 - goto out; 1157 1206 target.ap_id = card; 1158 1207 target.dom_id = domain; 1159 - prep_urb(urb, &target, 1, 1208 + prep_urb(&urb, &target, 1, 1160 1209 req, sizeof(*req) + req_pl_size, 1161 1210 rep, sizeof(*rep) + sizeof(*rep_pl)); 1162 1211 1163 - rc = zcrypt_send_ep11_cprb(urb); 1212 + rc = zcrypt_send_ep11_cprb(&urb, xflags); 1164 1213 if (rc) { 1165 1214 ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1166 1215 __func__, (int)card, (int)domain, rc); ··· 1190 1245 *keybufsize = rep_pl->data_len; 1191 1246 1192 1247 out: 1193 - kfree(req); 1194 - kfree(rep); 1195 - kfree(urb); 1248 + free_cprbmem(req, req_pl_size, true, xflags); 1249 + free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags); 1196 1250 return rc; 1197 1251 } 1198 1252 ··· 1201 1257 u32 mech, const u8 *iv, 1202 1258 u32 keybitsize, u32 keygenflags, 1203 1259 u8 *keybuf, u32 *keybufsize, 1204 - u8 keybufver) 1260 + u8 keybufver, u32 xflags) 1205 1261 { 1206 1262 struct ep11kblob_header *hdr; 1207 1263 size_t hdr_size, pl_size; ··· 1215 1271 1216 1272 rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, 1217 
1273 mech, iv, keybitsize, keygenflags, 1218 - pl, &pl_size); 1274 + pl, &pl_size, xflags); 1219 1275 if (rc) 1220 1276 return rc; 1221 1277 ··· 1234 1290 static int _ep11_wrapkey(u16 card, u16 domain, 1235 1291 const u8 *key, size_t keysize, 1236 1292 u32 mech, const u8 *iv, 1237 - u8 *databuf, size_t *datasize) 1293 + u8 *databuf, size_t *datasize, u32 xflags) 1238 1294 { 1239 1295 struct wk_req_pl { 1240 1296 struct pl_head head; ··· 1263 1319 } __packed * rep_pl; 1264 1320 struct ep11_cprb *req = NULL, *rep = NULL; 1265 1321 struct ep11_target_dev target; 1266 - struct ep11_urb *urb = NULL; 1322 + struct ep11_urb urb; 1267 1323 size_t req_pl_size; 1268 1324 int api, rc = -ENOMEM; 1269 1325 u8 *p; ··· 1271 1327 /* request cprb and payload */ 1272 1328 req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0) 1273 1329 + ASN1TAGLEN(keysize) + 4; 1274 - req = alloc_cprb(req_pl_size); 1330 + req = alloc_cprbmem(req_pl_size, xflags); 1275 1331 if (!req) 1276 1332 goto out; 1277 1333 if (!mech || mech == 0x80060001) ··· 1301 1357 *p++ = 0; 1302 1358 1303 1359 /* reply cprb and payload */ 1304 - rep = alloc_cprb(sizeof(struct wk_rep_pl)); 1360 + rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags); 1305 1361 if (!rep) 1306 1362 goto out; 1307 1363 rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1308 1364 1309 1365 /* urb and target */ 1310 - urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1311 - if (!urb) 1312 - goto out; 1313 1366 target.ap_id = card; 1314 1367 target.dom_id = domain; 1315 - prep_urb(urb, &target, 1, 1368 + prep_urb(&urb, &target, 1, 1316 1369 req, sizeof(*req) + req_pl_size, 1317 1370 rep, sizeof(*rep) + sizeof(*rep_pl)); 1318 1371 1319 - rc = zcrypt_send_ep11_cprb(urb); 1372 + rc = zcrypt_send_ep11_cprb(&urb, xflags); 1320 1373 if (rc) { 1321 1374 ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1322 1375 __func__, (int)card, (int)domain, rc); ··· 1346 1405 *datasize = rep_pl->data_len; 1347 1406 1348 1407 out: 1349 - 
kfree(req); 1350 - kfree(rep); 1351 - kfree(urb); 1408 + free_cprbmem(req, req_pl_size, true, xflags); 1409 + free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags); 1352 1410 return rc; 1353 1411 } 1354 1412 1355 1413 int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 1356 1414 const u8 *clrkey, u8 *keybuf, u32 *keybufsize, 1357 - u32 keytype) 1415 + u32 keytype, u32 xflags) 1358 1416 { 1359 1417 int rc; 1360 - u8 encbuf[64], *kek = NULL; 1418 + void *mem; 1419 + u8 encbuf[64], *kek; 1361 1420 size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); 1362 1421 1363 1422 if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { ··· 1368 1427 return -EINVAL; 1369 1428 } 1370 1429 1371 - /* allocate memory for the temp kek */ 1430 + /* 1431 + * Allocate space for the temp kek. 1432 + * Also we only need up to MAXEP11AESKEYBLOBSIZE bytes for this 1433 + * we use the already existing cprb mempool to solve this 1434 + * short term memory requirement. 1435 + */ 1436 + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
1437 + mempool_alloc_preallocated(cprb_mempool) : 1438 + mempool_alloc(cprb_mempool, GFP_KERNEL); 1439 + if (!mem) 1440 + return -ENOMEM; 1441 + kek = (u8 *)mem; 1372 1442 keklen = MAXEP11AESKEYBLOBSIZE; 1373 - kek = kmalloc(keklen, GFP_ATOMIC); 1374 - if (!kek) { 1375 - rc = -ENOMEM; 1376 - goto out; 1377 - } 1378 1443 1379 1444 /* Step 1: generate AES 256 bit random kek key */ 1380 1445 rc = _ep11_genaeskey(card, domain, 256, 1381 1446 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ 1382 - kek, &keklen); 1447 + kek, &keklen, xflags); 1383 1448 if (rc) { 1384 1449 ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", 1385 1450 __func__, rc); ··· 1394 1447 1395 1448 /* Step 2: encrypt clear key value with the kek key */ 1396 1449 rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, 1397 - clrkey, clrkeylen, encbuf, &encbuflen); 1450 + clrkey, clrkeylen, encbuf, &encbuflen, xflags); 1398 1451 if (rc) { 1399 1452 ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", 1400 1453 __func__, rc); ··· 1404 1457 /* Step 3: import the encrypted key value as a new key */ 1405 1458 rc = ep11_unwrapkey(card, domain, kek, keklen, 1406 1459 encbuf, encbuflen, 0, def_iv, 1407 - keybitsize, 0, keybuf, keybufsize, keytype); 1460 + keybitsize, 0, keybuf, keybufsize, keytype, xflags); 1408 1461 if (rc) { 1409 - ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n", 1462 + ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n", 1410 1463 __func__, rc); 1411 1464 goto out; 1412 1465 } 1413 1466 1414 1467 out: 1415 - kfree(kek); 1468 + mempool_free(mem, cprb_mempool); 1416 1469 return rc; 1417 1470 } 1418 1471 EXPORT_SYMBOL(ep11_clr2keyblob); 1419 1472 1420 1473 int ep11_kblob2protkey(u16 card, u16 dom, 1421 1474 const u8 *keyblob, u32 keybloblen, 1422 - u8 *protkey, u32 *protkeylen, u32 *protkeytype) 1475 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 1476 + u32 xflags) 1423 1477 { 1424 1478 struct ep11kblob_header *hdr; 1425 1479 struct 
ep11keyblob *key; ··· 1446 1498 } 1447 1499 /* !!! hdr is no longer a valid header !!! */ 1448 1500 1449 - /* alloc temp working buffer */ 1501 + /* need a temp working buffer */ 1450 1502 wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); 1451 - wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); 1452 - if (!wkbuf) 1453 - return -ENOMEM; 1503 + if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) { 1504 + /* this should never happen */ 1505 + rc = -ENOMEM; 1506 + ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n", 1507 + __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc); 1508 + return rc; 1509 + } 1510 + /* use the cprb mempool to satisfy this short term mem allocation */ 1511 + wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 1512 + mempool_alloc_preallocated(cprb_mempool) : 1513 + mempool_alloc(cprb_mempool, GFP_ATOMIC); 1514 + if (!wkbuf) { 1515 + rc = -ENOMEM; 1516 + ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n", 1517 + __func__, rc); 1518 + return rc; 1519 + } 1454 1520 1455 1521 /* ep11 secure key -> protected key + info */ 1456 1522 rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, 1457 - 0, def_iv, wkbuf, &wkbuflen); 1523 + 0, def_iv, wkbuf, &wkbuflen, xflags); 1458 1524 if (rc) { 1459 1525 ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", 1460 1526 __func__, rc); ··· 1535 1573 *protkeylen = wki->pkeysize; 1536 1574 1537 1575 out: 1538 - kfree(wkbuf); 1576 + mempool_free(wkbuf, cprb_mempool); 1539 1577 return rc; 1540 1578 } 1541 1579 EXPORT_SYMBOL(ep11_kblob2protkey); 1542 1580 1543 - int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 1544 - int minhwtype, int minapi, const u8 *wkvp) 1581 + int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 1582 + int minhwtype, int minapi, const u8 *wkvp, u32 xflags) 1545 1583 { 1546 1584 struct zcrypt_device_status_ext *device_status; 1547 - u32 *_apqns = NULL, _nr_apqns = 0; 1548 - int i, card, dom, rc = -ENOMEM; 1549 1585 struct 
ep11_domain_info edi; 1550 1586 struct ep11_card_info eci; 1587 + u32 _nr_apqns = 0; 1588 + int i, card, dom; 1551 1589 1552 - /* fetch status of all crypto cards */ 1553 - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, 1554 - sizeof(struct zcrypt_device_status_ext), 1555 - GFP_KERNEL); 1556 - if (!device_status) 1557 - return -ENOMEM; 1558 - zcrypt_device_status_mask_ext(device_status); 1590 + /* occupy the device status memory */ 1591 + mutex_lock(&dev_status_mem_mutex); 1592 + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); 1593 + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; 1559 1594 1560 - /* allocate 1k space for up to 256 apqns */ 1561 - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); 1562 - if (!_apqns) { 1563 - kvfree(device_status); 1564 - return -ENOMEM; 1565 - } 1595 + /* fetch crypto device status into this struct */ 1596 + zcrypt_device_status_mask_ext(device_status, 1597 + ZCRYPT_DEV_STATUS_CARD_MAX, 1598 + ZCRYPT_DEV_STATUS_QUEUE_MAX); 1566 1599 1567 1600 /* walk through all the crypto apqnss */ 1568 - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { 1601 + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { 1569 1602 card = AP_QID_CARD(device_status[i].qid); 1570 1603 dom = AP_QID_QUEUE(device_status[i].qid); 1571 1604 /* check online state */ ··· 1580 1623 continue; 1581 1624 /* check min api version if given */ 1582 1625 if (minapi > 0) { 1583 - if (ep11_get_card_info(card, &eci, 0)) 1626 + if (ep11_get_card_info(card, &eci, xflags)) 1584 1627 continue; 1585 1628 if (minapi > eci.API_ord_nr) 1586 1629 continue; 1587 1630 } 1588 1631 /* check wkvp if given */ 1589 1632 if (wkvp) { 1590 - if (ep11_get_domain_info(card, dom, &edi)) 1633 + if (ep11_get_domain_info(card, dom, &edi, xflags)) 1591 1634 continue; 1592 1635 if (edi.cur_wk_state != '1') 1593 1636 continue; ··· 1595 1638 continue; 1596 1639 } 1597 1640 /* apqn passed all filtering criterons, add to the array */ 1598 - if (_nr_apqns < 256) 1599 - 
_apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1641 + if (_nr_apqns < *nr_apqns) 1642 + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1600 1643 } 1601 1644 1602 - /* nothing found ? */ 1603 - if (!_nr_apqns) { 1604 - kfree(_apqns); 1605 - rc = -ENODEV; 1606 - } else { 1607 - /* no re-allocation, simple return the _apqns array */ 1608 - *apqns = _apqns; 1609 - *nr_apqns = _nr_apqns; 1610 - rc = 0; 1611 - } 1645 + *nr_apqns = _nr_apqns; 1612 1646 1613 - kvfree(device_status); 1614 - return rc; 1647 + mutex_unlock(&dev_status_mem_mutex); 1648 + 1649 + return _nr_apqns ? 0 : -ENODEV; 1615 1650 } 1616 1651 EXPORT_SYMBOL(ep11_findcard2); 1617 1652 1618 - void __exit zcrypt_ep11misc_exit(void) 1653 + int __init zcrypt_ep11misc_init(void) 1619 1654 { 1620 - card_cache_free(); 1655 + /* Pre-allocate a small memory pool for ep11 cprbs. */ 1656 + cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold, 1657 + CPRB_MEMPOOL_ITEM_SIZE); 1658 + if (!cprb_mempool) 1659 + return -ENOMEM; 1660 + 1661 + /* Pre-allocate one crypto status card struct used in ep11_findcard2() */ 1662 + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); 1663 + if (!dev_status_mem) { 1664 + mempool_destroy(cprb_mempool); 1665 + return -ENOMEM; 1666 + } 1667 + 1668 + return 0; 1669 + } 1670 + 1671 + void zcrypt_ep11misc_exit(void) 1672 + { 1673 + mutex_lock(&dev_status_mem_mutex); 1674 + kvfree(dev_status_mem); 1675 + mutex_unlock(&dev_status_mem_mutex); 1676 + mempool_destroy(cprb_mempool); 1621 1677 }
+14 -13
drivers/s390/crypto/zcrypt_ep11misc.h
··· 104 104 /* 105 105 * Provide information about an EP11 card. 106 106 */ 107 - int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); 107 + int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags); 108 108 109 109 /* 110 110 * Provide information about a domain within an EP11 card. 111 111 */ 112 - int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); 112 + int ep11_get_domain_info(u16 card, u16 domain, 113 + struct ep11_domain_info *info, u32 xflags); 113 114 114 115 /* 115 116 * Generate (random) EP11 AES secure key. 116 117 */ 117 118 int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, 118 - u8 *keybuf, u32 *keybufsize, u32 keybufver); 119 + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags); 119 120 120 121 /* 121 122 * Generate EP11 AES secure key with given clear key value. 122 123 */ 123 124 int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, 124 125 const u8 *clrkey, u8 *keybuf, u32 *keybufsize, 125 - u32 keytype); 126 + u32 keytype, u32 xflags); 126 127 127 128 /* 128 129 * Build a list of ep11 apqns meeting the following constrains: ··· 137 136 * key for this domain. When a wkvp is given there will always be a re-fetch 138 137 * of the domain info for the potential apqn - so this triggers an request 139 138 * reply to each apqn eligible. 140 - * The array of apqn entries is allocated with kmalloc and returned in *apqns; 141 - * the number of apqns stored into the list is returned in *nr_apqns. One apqn 142 - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and 143 - * may be casted to struct pkey_apqn. The return value is either 0 for success 144 - * or a negative errno value. If no apqn meeting the criteria is found, 145 - * -ENODEV is returned. 139 + * The caller should set *nr_apqns to the nr of elements available in *apqns. 140 + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. 
141 + * The return value is either 0 for success or a negative errno value. 142 + * If no apqn meeting the criteria is found, -ENODEV is returned. 146 143 */ 147 - int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 148 - int minhwtype, int minapi, const u8 *wkvp); 144 + int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, 145 + int minhwtype, int minapi, const u8 *wkvp, u32 xflags); 149 146 150 147 /* 151 148 * Derive proteced key from EP11 key blob (AES and ECC keys). 152 149 */ 153 150 int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, 154 - u8 *protkey, u32 *protkeylen, u32 *protkeytype); 151 + u8 *protkey, u32 *protkeylen, u32 *protkeytype, 152 + u32 xflags); 155 153 154 + int zcrypt_ep11misc_init(void); 156 155 void zcrypt_ep11misc_exit(void); 157 156 158 157 #endif /* _ZCRYPT_EP11MISC_H_ */
+17 -19
drivers/s390/crypto/zcrypt_msgtype50.c
··· 438 438 msg->len = sizeof(error_reply); 439 439 } 440 440 out: 441 - complete((struct completion *)msg->private); 441 + complete(&msg->response.work); 442 442 } 443 443 444 444 static atomic_t zcrypt_step = ATOMIC_INIT(0); ··· 449 449 * @zq: pointer to zcrypt_queue structure that identifies the 450 450 * CEXxA device to the request distributor 451 451 * @mex: pointer to the modexpo request buffer 452 + * This function assumes that ap_msg has been initialized with 453 + * ap_init_apmsg() and thus a valid buffer with the size of 454 + * ap_msg->bufsize is available within ap_msg. Also the caller has 455 + * to make sure ap_release_apmsg() is always called even on failure. 452 456 */ 453 457 static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, 454 458 struct ica_rsa_modexpo *mex, 455 459 struct ap_message *ap_msg) 456 460 { 457 - struct completion work; 458 461 int rc; 459 462 460 - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; 461 - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); 462 - if (!ap_msg->msg) 463 - return -ENOMEM; 463 + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) 464 + return -EMSGSIZE; 464 465 ap_msg->receive = zcrypt_msgtype50_receive; 465 466 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 466 467 atomic_inc_return(&zcrypt_step); 467 - ap_msg->private = &work; 468 468 rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); 469 469 if (rc) 470 470 goto out; 471 - init_completion(&work); 471 + init_completion(&ap_msg->response.work); 472 472 rc = ap_queue_message(zq->queue, ap_msg); 473 473 if (rc) 474 474 goto out; 475 - rc = wait_for_completion_interruptible(&work); 475 + rc = wait_for_completion_interruptible(&ap_msg->response.work); 476 476 if (rc == 0) { 477 477 rc = ap_msg->rc; 478 478 if (rc == 0) ··· 485 485 } 486 486 487 487 out: 488 - ap_msg->private = NULL; 489 488 if (rc) 490 489 pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", 491 490 AP_QID_CARD(zq->queue->qid), ··· 498 499 * @zq: pointer to zcrypt_queue 
structure that identifies the 499 500 * CEXxA device to the request distributor 500 501 * @crt: pointer to the modexpoc_crt request buffer 502 + * This function assumes that ap_msg has been initialized with 503 + * ap_init_apmsg() and thus a valid buffer with the size of 504 + * ap_msg->bufsize is available within ap_msg. Also the caller has 505 + * to make sure ap_release_apmsg() is always called even on failure. 501 506 */ 502 507 static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, 503 508 struct ica_rsa_modexpo_crt *crt, 504 509 struct ap_message *ap_msg) 505 510 { 506 - struct completion work; 507 511 int rc; 508 512 509 - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; 510 - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); 511 - if (!ap_msg->msg) 512 - return -ENOMEM; 513 + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) 514 + return -EMSGSIZE; 513 515 ap_msg->receive = zcrypt_msgtype50_receive; 514 516 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 515 517 atomic_inc_return(&zcrypt_step); 516 - ap_msg->private = &work; 517 518 rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); 518 519 if (rc) 519 520 goto out; 520 - init_completion(&work); 521 + init_completion(&ap_msg->response.work); 521 522 rc = ap_queue_message(zq->queue, ap_msg); 522 523 if (rc) 523 524 goto out; 524 - rc = wait_for_completion_interruptible(&work); 525 + rc = wait_for_completion_interruptible(&ap_msg->response.work); 525 526 if (rc == 0) { 526 527 rc = ap_msg->rc; 527 528 if (rc == 0) ··· 534 535 } 535 536 536 537 out: 537 - ap_msg->private = NULL; 538 538 if (rc) 539 539 pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", 540 540 AP_QID_CARD(zq->queue->qid),
+43 -66
drivers/s390/crypto/zcrypt_msgtype6.c
··· 31 31 32 32 #define CEIL4(x) ((((x) + 3) / 4) * 4) 33 33 34 - struct response_type { 35 - struct completion work; 36 - int type; 37 - }; 38 - 39 34 #define CEXXC_RESPONSE_TYPE_ICA 0 40 35 #define CEXXC_RESPONSE_TYPE_XCRB 1 41 36 #define CEXXC_RESPONSE_TYPE_EP11 2 ··· 851 856 .type = TYPE82_RSP_CODE, 852 857 .reply_code = REP82_ERROR_MACHINE_FAILURE, 853 858 }; 854 - struct response_type *resp_type = msg->private; 859 + struct ap_response_type *resp_type = &msg->response; 855 860 struct type86x_reply *t86r; 856 861 int len; 857 862 ··· 915 920 .type = TYPE82_RSP_CODE, 916 921 .reply_code = REP82_ERROR_MACHINE_FAILURE, 917 922 }; 918 - struct response_type *resp_type = msg->private; 923 + struct ap_response_type *resp_type = &msg->response; 919 924 struct type86_ep11_reply *t86r; 920 925 int len; 921 926 ··· 962 967 struct ica_rsa_modexpo *mex, 963 968 struct ap_message *ap_msg) 964 969 { 965 - struct response_type resp_type = { 966 - .type = CEXXC_RESPONSE_TYPE_ICA, 967 - }; 970 + struct ap_response_type *resp_type = &ap_msg->response; 968 971 int rc; 969 972 970 973 ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); ··· 972 979 ap_msg->receive = zcrypt_msgtype6_receive; 973 980 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 974 981 atomic_inc_return(&zcrypt_step); 975 - ap_msg->private = &resp_type; 976 982 rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); 977 983 if (rc) 978 984 goto out_free; 979 - init_completion(&resp_type.work); 985 + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; 986 + init_completion(&resp_type->work); 980 987 rc = ap_queue_message(zq->queue, ap_msg); 981 988 if (rc) 982 989 goto out_free; 983 - rc = wait_for_completion_interruptible(&resp_type.work); 990 + rc = wait_for_completion_interruptible(&resp_type->work); 984 991 if (rc == 0) { 985 992 rc = ap_msg->rc; 986 993 if (rc == 0) ··· 994 1001 995 1002 out_free: 996 1003 free_page((unsigned long)ap_msg->msg); 997 - ap_msg->private = NULL; 998 1004 ap_msg->msg = NULL; 999 1005 
return rc; 1000 1006 } ··· 1009 1017 struct ica_rsa_modexpo_crt *crt, 1010 1018 struct ap_message *ap_msg) 1011 1019 { 1012 - struct response_type resp_type = { 1013 - .type = CEXXC_RESPONSE_TYPE_ICA, 1014 - }; 1020 + struct ap_response_type *resp_type = &ap_msg->response; 1015 1021 int rc; 1016 1022 1017 1023 ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); ··· 1019 1029 ap_msg->receive = zcrypt_msgtype6_receive; 1020 1030 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 1021 1031 atomic_inc_return(&zcrypt_step); 1022 - ap_msg->private = &resp_type; 1023 1032 rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); 1024 1033 if (rc) 1025 1034 goto out_free; 1026 - init_completion(&resp_type.work); 1035 + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; 1036 + init_completion(&resp_type->work); 1027 1037 rc = ap_queue_message(zq->queue, ap_msg); 1028 1038 if (rc) 1029 1039 goto out_free; 1030 - rc = wait_for_completion_interruptible(&resp_type.work); 1040 + rc = wait_for_completion_interruptible(&resp_type->work); 1031 1041 if (rc == 0) { 1032 1042 rc = ap_msg->rc; 1033 1043 if (rc == 0) ··· 1041 1051 1042 1052 out_free: 1043 1053 free_page((unsigned long)ap_msg->msg); 1044 - ap_msg->private = NULL; 1045 1054 ap_msg->msg = NULL; 1046 1055 return rc; 1047 1056 } ··· 1050 1061 * Prepare a CCA AP msg: fetch the required data from userspace, 1051 1062 * prepare the AP msg, fill some info into the ap_message struct, 1052 1063 * extract some data from the CPRB and give back to the caller. 1053 - * This function allocates memory and needs an ap_msg prepared 1054 - * by the caller with ap_init_message(). Also the caller has to 1055 - * make sure ap_release_message() is always called even on failure. 1064 + * This function assumes that ap_msg has been initialized with 1065 + * ap_init_apmsg() and thus a valid buffer with the size of 1066 + * ap_msg->bufsize is available within ap_msg. 
Also the caller has 1067 + * to make sure ap_release_apmsg() is always called even on failure. 1056 1068 */ 1057 1069 int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, 1058 1070 struct ap_message *ap_msg, 1059 1071 unsigned int *func_code, unsigned short **dom) 1060 1072 { 1061 - struct response_type resp_type = { 1062 - .type = CEXXC_RESPONSE_TYPE_XCRB, 1063 - }; 1073 + struct ap_response_type *resp_type = &ap_msg->response; 1064 1074 1065 - ap_msg->bufsize = atomic_read(&ap_max_msg_size); 1066 - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); 1067 - if (!ap_msg->msg) 1068 - return -ENOMEM; 1069 1075 ap_msg->receive = zcrypt_msgtype6_receive; 1070 1076 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 1071 1077 atomic_inc_return(&zcrypt_step); 1072 - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1073 - if (!ap_msg->private) 1074 - return -ENOMEM; 1078 + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; 1075 1079 return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); 1076 1080 } 1077 1081 ··· 1079 1097 struct ica_xcRB *xcrb, 1080 1098 struct ap_message *ap_msg) 1081 1099 { 1082 - struct response_type *rtype = ap_msg->private; 1100 + struct ap_response_type *resp_type = &ap_msg->response; 1083 1101 struct { 1084 1102 struct type6_hdr hdr; 1085 1103 struct CPRBX cprbx; ··· 1110 1128 msg->hdr.fromcardlen1 -= delta; 1111 1129 } 1112 1130 1113 - init_completion(&rtype->work); 1131 + init_completion(&resp_type->work); 1114 1132 rc = ap_queue_message(zq->queue, ap_msg); 1115 1133 if (rc) 1116 1134 goto out; 1117 - rc = wait_for_completion_interruptible(&rtype->work); 1135 + rc = wait_for_completion_interruptible(&resp_type->work); 1118 1136 if (rc == 0) { 1119 1137 rc = ap_msg->rc; 1120 1138 if (rc == 0) ··· 1140 1158 * Prepare an EP11 AP msg: fetch the required data from userspace, 1141 1159 * prepare the AP msg, fill some info into the ap_message struct, 1142 1160 * extract some data from the CPRB and give back 
to the caller. 1143 - * This function allocates memory and needs an ap_msg prepared 1144 - * by the caller with ap_init_message(). Also the caller has to 1145 - * make sure ap_release_message() is always called even on failure. 1161 + * This function assumes that ap_msg has been initialized with 1162 + * ap_init_apmsg() and thus a valid buffer with the size of 1163 + * ap_msg->bufsize is available within ap_msg. Also the caller has 1164 + * to make sure ap_release_apmsg() is always called even on failure. 1146 1165 */ 1147 1166 int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, 1148 1167 struct ap_message *ap_msg, 1149 1168 unsigned int *func_code, unsigned int *domain) 1150 1169 { 1151 - struct response_type resp_type = { 1152 - .type = CEXXC_RESPONSE_TYPE_EP11, 1153 - }; 1170 + struct ap_response_type *resp_type = &ap_msg->response; 1154 1171 1155 - ap_msg->bufsize = atomic_read(&ap_max_msg_size); 1156 - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); 1157 - if (!ap_msg->msg) 1158 - return -ENOMEM; 1159 1172 ap_msg->receive = zcrypt_msgtype6_receive_ep11; 1160 1173 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 1161 1174 atomic_inc_return(&zcrypt_step); 1162 - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1163 - if (!ap_msg->private) 1164 - return -ENOMEM; 1175 + resp_type->type = CEXXC_RESPONSE_TYPE_EP11; 1165 1176 return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, 1166 1177 func_code, domain); 1167 1178 } ··· 1172 1197 { 1173 1198 int rc; 1174 1199 unsigned int lfmt; 1175 - struct response_type *rtype = ap_msg->private; 1200 + struct ap_response_type *resp_type = &ap_msg->response; 1176 1201 struct { 1177 1202 struct type6_hdr hdr; 1178 1203 struct ep11_cprb cprbx; ··· 1226 1251 msg->hdr.fromcardlen1 = zq->reply.bufsize - 1227 1252 sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); 1228 1253 1229 - init_completion(&rtype->work); 1254 + init_completion(&resp_type->work); 1230 1255 rc = 
ap_queue_message(zq->queue, ap_msg); 1231 1256 if (rc) 1232 1257 goto out; 1233 - rc = wait_for_completion_interruptible(&rtype->work); 1258 + rc = wait_for_completion_interruptible(&resp_type->work); 1234 1259 if (rc == 0) { 1235 1260 rc = ap_msg->rc; 1236 1261 if (rc == 0) ··· 1251 1276 return rc; 1252 1277 } 1253 1278 1279 + /* 1280 + * Prepare a CEXXC get random request ap message. 1281 + * This function assumes that ap_msg has been initialized with 1282 + * ap_init_apmsg() and thus a valid buffer with the size of 1283 + * ap_max_msg_size is available within ap_msg. Also the caller has 1284 + * to make sure ap_release_apmsg() is always called even on failure. 1285 + */ 1254 1286 int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, 1255 1287 unsigned int *domain) 1256 1288 { 1257 - struct response_type resp_type = { 1258 - .type = CEXXC_RESPONSE_TYPE_XCRB, 1259 - }; 1289 + struct ap_response_type *resp_type = &ap_msg->response; 1260 1290 1261 - ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; 1262 - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); 1263 - if (!ap_msg->msg) 1264 - return -ENOMEM; 1291 + if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE) 1292 + return -EMSGSIZE; 1265 1293 ap_msg->receive = zcrypt_msgtype6_receive; 1266 1294 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 1267 1295 atomic_inc_return(&zcrypt_step); 1268 - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1269 - if (!ap_msg->private) 1270 - return -ENOMEM; 1296 + 1297 + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; 1271 1298 1272 1299 rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1273 1300 ··· 1296 1319 short int verb_length; 1297 1320 short int key_length; 1298 1321 } __packed * msg = ap_msg->msg; 1299 - struct response_type *rtype = ap_msg->private; 1322 + struct ap_response_type *resp_type = &ap_msg->response; 1300 1323 int rc; 1301 1324 1302 1325 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 1303 1326 1304 - 
init_completion(&rtype->work); 1327 + init_completion(&resp_type->work); 1305 1328 rc = ap_queue_message(zq->queue, ap_msg); 1306 1329 if (rc) 1307 1330 goto out; 1308 - rc = wait_for_completion_interruptible(&rtype->work); 1331 + rc = wait_for_completion_interruptible(&resp_type->work); 1309 1332 if (rc == 0) { 1310 1333 rc = ap_msg->rc; 1311 1334 if (rc == 0)
+1 -1
drivers/s390/net/ctcm_mpc.c
··· 179 179 ctcm_pr_debug(" %s (+%s) : %s [%s]\n", 180 180 addr, boff, bhex, basc); 181 181 dup = 0; 182 - strcpy(duphex, bhex); 182 + strscpy(duphex, bhex); 183 183 } else 184 184 dup++; 185 185
+3 -50
drivers/watchdog/diag288_wdt.c
··· 29 29 #include <linux/watchdog.h> 30 30 #include <asm/machine.h> 31 31 #include <asm/ebcdic.h> 32 + #include <asm/diag288.h> 32 33 #include <asm/diag.h> 33 34 #include <linux/io.h> 34 35 35 36 #define MAX_CMDLEN 240 36 37 #define DEFAULT_CMD "SYSTEM RESTART" 37 - 38 - #define MIN_INTERVAL 15 /* Minimal time supported by diag88 */ 39 - #define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ 40 - 41 - #define WDT_DEFAULT_TIMEOUT 30 42 - 43 - /* Function codes - init, change, cancel */ 44 - #define WDT_FUNC_INIT 0 45 - #define WDT_FUNC_CHANGE 1 46 - #define WDT_FUNC_CANCEL 2 47 - #define WDT_FUNC_CONCEAL 0x80000000 48 - 49 - /* Action codes for LPAR watchdog */ 50 - #define LPARWDT_RESTART 0 51 38 52 39 static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD; 53 40 static bool conceal_on; ··· 62 75 static int diag288(unsigned int func, unsigned int timeout, 63 76 unsigned long action, unsigned int len) 64 77 { 65 - union register_pair r1 = { .even = func, .odd = timeout, }; 66 - union register_pair r3 = { .even = action, .odd = len, }; 67 - int err; 68 - 69 78 diag_stat_inc(DIAG_STAT_X288); 70 - 71 - err = -EINVAL; 72 - asm volatile( 73 - " diag %[r1],%[r3],0x288\n" 74 - "0: la %[err],0\n" 75 - "1:\n" 76 - EX_TABLE(0b, 1b) 77 - : [err] "+d" (err) 78 - : [r1] "d" (r1.pair), [r3] "d" (r3.pair) 79 - : "cc", "memory"); 80 - return err; 79 + return __diag288(func, timeout, action, len); 81 80 } 82 81 83 82 static int diag288_str(unsigned int func, unsigned int timeout, char *cmd) ··· 162 189 163 190 static int __init diag288_init(void) 164 191 { 165 - int ret; 166 - 167 192 watchdog_set_nowayout(&wdt_dev, nowayout_info); 168 193 169 194 if (machine_is_vm()) { ··· 170 199 pr_err("The watchdog cannot be initialized\n"); 171 200 return -ENOMEM; 172 201 } 173 - 174 - ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN"); 175 - if (ret != 0) { 176 - pr_err("The watchdog cannot be initialized\n"); 177 - kfree(cmd_buf); 178 - return -EINVAL; 179 - } 180 - } else { 
181 - if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT, 182 - LPARWDT_RESTART, 0)) { 183 - pr_err("The watchdog cannot be initialized\n"); 184 - return -EINVAL; 185 - } 186 - } 187 - 188 - if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) { 189 - pr_err("The watchdog cannot be deactivated\n"); 190 - return -EINVAL; 191 202 } 192 203 193 204 return watchdog_register_device(&wdt_dev); ··· 181 228 kfree(cmd_buf); 182 229 } 183 230 184 - module_init(diag288_init); 231 + module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init); 185 232 module_exit(diag288_exit);