Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - Add support for RSA on the CCP

Wire up the CCP as an RSA cipher provider.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Gary R Hook; committed by Herbert Xu.
ceeec0af 333706b8

+347
+1
drivers/crypto/ccp/Makefile
··· 15 15 ccp-crypto-aes-xts.o \ 16 16 ccp-crypto-aes-galois.o \ 17 17 ccp-crypto-des3.o \ 18 + ccp-crypto-rsa.o \ 18 19 ccp-crypto-sha.o
+19
drivers/crypto/ccp/ccp-crypto-main.c
··· 17 17 #include <linux/ccp.h> 18 18 #include <linux/scatterlist.h> 19 19 #include <crypto/internal/hash.h> 20 + #include <crypto/internal/akcipher.h> 20 21 21 22 #include "ccp-crypto.h" 22 23 ··· 38 37 module_param(des3_disable, uint, 0444); 39 38 MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value"); 40 39 40 + static unsigned int rsa_disable; 41 + module_param(rsa_disable, uint, 0444); 42 + MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); 43 + 41 44 /* List heads for the supported algorithms */ 42 45 static LIST_HEAD(hash_algs); 43 46 static LIST_HEAD(cipher_algs); 44 47 static LIST_HEAD(aead_algs); 48 + static LIST_HEAD(akcipher_algs); 45 49 46 50 /* For any tfm, requests for that tfm must be returned on the order 47 51 * received. With multiple queues available, the CCP can process more ··· 364 358 return ret; 365 359 } 366 360 361 + if (!rsa_disable) { 362 + ret = ccp_register_rsa_algs(&akcipher_algs); 363 + if (ret) 364 + return ret; 365 + } 366 + 367 367 return 0; 368 368 } 369 369 ··· 378 366 struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; 379 367 struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; 380 368 struct ccp_crypto_aead *aead_alg, *aead_tmp; 369 + struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp; 381 370 382 371 list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { 383 372 crypto_unregister_ahash(&ahash_alg->alg); ··· 396 383 crypto_unregister_aead(&aead_alg->alg); 397 384 list_del(&aead_alg->entry); 398 385 kfree(aead_alg); 386 + } 387 + 388 + list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) { 389 + crypto_unregister_akcipher(&akc_alg->alg); 390 + list_del(&akc_alg->entry); 391 + kfree(akc_alg); 399 392 } 400 393 } 401 394
+296
drivers/crypto/ccp/ccp-crypto-rsa.c
··· 1 + /* 2 + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support 3 + * 4 + * Copyright (C) 2017 Advanced Micro Devices, Inc. 5 + * 6 + * Author: Gary R Hook <gary.hook@amd.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/sched.h> 15 + #include <linux/scatterlist.h> 16 + #include <linux/crypto.h> 17 + #include <crypto/algapi.h> 18 + #include <crypto/internal/rsa.h> 19 + #include <crypto/internal/akcipher.h> 20 + #include <crypto/akcipher.h> 21 + #include <crypto/scatterwalk.h> 22 + 23 + #include "ccp-crypto.h" 24 + 25 + static inline struct akcipher_request *akcipher_request_cast( 26 + struct crypto_async_request *req) 27 + { 28 + return container_of(req, struct akcipher_request, base); 29 + } 30 + 31 + static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen, 32 + const u8 *buf, size_t sz) 33 + { 34 + int nskip; 35 + 36 + for (nskip = 0; nskip < sz; nskip++) 37 + if (buf[nskip]) 38 + break; 39 + *kplen = sz - nskip; 40 + *kpbuf = kzalloc(*kplen, GFP_KERNEL); 41 + if (!*kpbuf) 42 + return -ENOMEM; 43 + memcpy(*kpbuf, buf + nskip, *kplen); 44 + 45 + return 0; 46 + } 47 + 48 + static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) 49 + { 50 + struct akcipher_request *req = akcipher_request_cast(async_req); 51 + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); 52 + 53 + if (ret) 54 + return ret; 55 + 56 + req->dst_len = rctx->cmd.u.rsa.key_size >> 3; 57 + 58 + return 0; 59 + } 60 + 61 + static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) 62 + { 63 + return CCP_RSA_MAXMOD; 64 + } 65 + 66 + static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) 67 + { 68 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 69 + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); 
70 + struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req); 71 + int ret = 0; 72 + 73 + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); 74 + INIT_LIST_HEAD(&rctx->cmd.entry); 75 + rctx->cmd.engine = CCP_ENGINE_RSA; 76 + 77 + rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */ 78 + if (encrypt) { 79 + rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg; 80 + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len; 81 + } else { 82 + rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg; 83 + rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len; 84 + } 85 + rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg; 86 + rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len; 87 + rctx->cmd.u.rsa.src = req->src; 88 + rctx->cmd.u.rsa.src_len = req->src_len; 89 + rctx->cmd.u.rsa.dst = req->dst; 90 + 91 + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); 92 + 93 + return ret; 94 + } 95 + 96 + static int ccp_rsa_encrypt(struct akcipher_request *req) 97 + { 98 + return ccp_rsa_crypt(req, true); 99 + } 100 + 101 + static int ccp_rsa_decrypt(struct akcipher_request *req) 102 + { 103 + return ccp_rsa_crypt(req, false); 104 + } 105 + 106 + static int ccp_check_key_length(unsigned int len) 107 + { 108 + /* In bits */ 109 + if (len < 8 || len > 4096) 110 + return -EINVAL; 111 + return 0; 112 + } 113 + 114 + static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) 115 + { 116 + /* Clean up old key data */ 117 + kzfree(ctx->u.rsa.e_buf); 118 + ctx->u.rsa.e_buf = NULL; 119 + ctx->u.rsa.e_len = 0; 120 + kzfree(ctx->u.rsa.n_buf); 121 + ctx->u.rsa.n_buf = NULL; 122 + ctx->u.rsa.n_len = 0; 123 + kzfree(ctx->u.rsa.d_buf); 124 + ctx->u.rsa.d_buf = NULL; 125 + ctx->u.rsa.d_len = 0; 126 + } 127 + 128 + static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key, 129 + unsigned int keylen, bool private) 130 + { 131 + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); 132 + struct rsa_key raw_key; 133 + int ret; 134 + 135 + ccp_rsa_free_key_bufs(ctx); 136 + memset(&raw_key, 0, sizeof(raw_key)); 137 + 138 + /* Code borrowed from crypto/rsa.c */ 139 + if 
(private) 140 + ret = rsa_parse_priv_key(&raw_key, key, keylen); 141 + else 142 + ret = rsa_parse_pub_key(&raw_key, key, keylen); 143 + if (ret) 144 + goto n_key; 145 + 146 + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len, 147 + raw_key.n, raw_key.n_sz); 148 + if (ret) 149 + goto key_err; 150 + sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len); 151 + 152 + ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */ 153 + if (ccp_check_key_length(ctx->u.rsa.key_len)) { 154 + ret = -EINVAL; 155 + goto key_err; 156 + } 157 + 158 + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len, 159 + raw_key.e, raw_key.e_sz); 160 + if (ret) 161 + goto key_err; 162 + sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len); 163 + 164 + if (private) { 165 + ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf, 166 + &ctx->u.rsa.d_len, 167 + raw_key.d, raw_key.d_sz); 168 + if (ret) 169 + goto key_err; 170 + sg_init_one(&ctx->u.rsa.d_sg, 171 + ctx->u.rsa.d_buf, ctx->u.rsa.d_len); 172 + } 173 + 174 + return 0; 175 + 176 + key_err: 177 + ccp_rsa_free_key_bufs(ctx); 178 + 179 + n_key: 180 + return ret; 181 + } 182 + 183 + static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, 184 + unsigned int keylen) 185 + { 186 + return ccp_rsa_setkey(tfm, key, keylen, true); 187 + } 188 + 189 + static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, 190 + unsigned int keylen) 191 + { 192 + return ccp_rsa_setkey(tfm, key, keylen, false); 193 + } 194 + 195 + static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) 196 + { 197 + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); 198 + 199 + akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx)); 200 + ctx->complete = ccp_rsa_complete; 201 + 202 + return 0; 203 + } 204 + 205 + static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm) 206 + { 207 + struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base); 208 + 209 + ccp_rsa_free_key_bufs(ctx); 210 + } 211 + 
212 + static struct akcipher_alg ccp_rsa_defaults = { 213 + .encrypt = ccp_rsa_encrypt, 214 + .decrypt = ccp_rsa_decrypt, 215 + .sign = ccp_rsa_decrypt, 216 + .verify = ccp_rsa_encrypt, 217 + .set_pub_key = ccp_rsa_setpubkey, 218 + .set_priv_key = ccp_rsa_setprivkey, 219 + .max_size = ccp_rsa_maxsize, 220 + .init = ccp_rsa_init_tfm, 221 + .exit = ccp_rsa_exit_tfm, 222 + .base = { 223 + .cra_name = "rsa", 224 + .cra_driver_name = "rsa-ccp", 225 + .cra_priority = CCP_CRA_PRIORITY, 226 + .cra_module = THIS_MODULE, 227 + .cra_ctxsize = 2 * sizeof(struct ccp_ctx), 228 + }, 229 + }; 230 + 231 + struct ccp_rsa_def { 232 + unsigned int version; 233 + const char *name; 234 + const char *driver_name; 235 + unsigned int reqsize; 236 + struct akcipher_alg *alg_defaults; 237 + }; 238 + 239 + static struct ccp_rsa_def rsa_algs[] = { 240 + { 241 + .version = CCP_VERSION(3, 0), 242 + .name = "rsa", 243 + .driver_name = "rsa-ccp", 244 + .reqsize = sizeof(struct ccp_rsa_req_ctx), 245 + .alg_defaults = &ccp_rsa_defaults, 246 + } 247 + }; 248 + 249 + int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def) 250 + { 251 + struct ccp_crypto_akcipher_alg *ccp_alg; 252 + struct akcipher_alg *alg; 253 + int ret; 254 + 255 + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); 256 + if (!ccp_alg) 257 + return -ENOMEM; 258 + 259 + INIT_LIST_HEAD(&ccp_alg->entry); 260 + 261 + alg = &ccp_alg->alg; 262 + *alg = *def->alg_defaults; 263 + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); 264 + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 265 + def->driver_name); 266 + ret = crypto_register_akcipher(alg); 267 + if (ret) { 268 + pr_err("%s akcipher algorithm registration error (%d)\n", 269 + alg->base.cra_name, ret); 270 + kfree(ccp_alg); 271 + return ret; 272 + } 273 + 274 + list_add(&ccp_alg->entry, head); 275 + 276 + return 0; 277 + } 278 + 279 + int ccp_register_rsa_algs(struct list_head *head) 280 + { 281 + int i, ret; 282 + unsigned int 
ccpversion = ccp_version(); 283 + 284 + /* Register the RSA algorithm in standard mode 285 + * This works for CCP v3 and later 286 + */ 287 + for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) { 288 + if (rsa_algs[i].version > ccpversion) 289 + continue; 290 + ret = ccp_register_rsa_alg(head, &rsa_algs[i]); 291 + if (ret) 292 + return ret; 293 + } 294 + 295 + return 0; 296 + }
+31
drivers/crypto/ccp/ccp-crypto.h
··· 24 24 #include <crypto/ctr.h> 25 25 #include <crypto/hash.h> 26 26 #include <crypto/sha.h> 27 + #include <crypto/akcipher.h> 28 + #include <crypto/internal/rsa.h> 27 29 28 30 #define CCP_LOG_LEVEL KERN_INFO 29 31 ··· 58 56 char child_alg[CRYPTO_MAX_ALG_NAME]; 59 57 60 58 struct ahash_alg alg; 59 + }; 60 + 61 + struct ccp_crypto_akcipher_alg { 62 + struct list_head entry; 63 + 64 + struct akcipher_alg alg; 61 65 }; 62 66 63 67 static inline struct ccp_crypto_ablkcipher_alg * ··· 235 227 u8 buf[MAX_SHA_BLOCK_SIZE]; 236 228 }; 237 229 230 + /***** RSA related defines *****/ 231 + 232 + struct ccp_rsa_ctx { 233 + unsigned int key_len; /* in bits */ 234 + struct scatterlist e_sg; 235 + u8 *e_buf; 236 + unsigned int e_len; 237 + struct scatterlist n_sg; 238 + u8 *n_buf; 239 + unsigned int n_len; 240 + struct scatterlist d_sg; 241 + u8 *d_buf; 242 + unsigned int d_len; 243 + }; 244 + 245 + struct ccp_rsa_req_ctx { 246 + struct ccp_cmd cmd; 247 + }; 248 + 249 + #define CCP_RSA_MAXMOD (4 * 1024 / 8) 250 + 238 251 /***** Common Context Structure *****/ 239 252 struct ccp_ctx { 240 253 int (*complete)(struct crypto_async_request *req, int ret); 241 254 242 255 union { 243 256 struct ccp_aes_ctx aes; 257 + struct ccp_rsa_ctx rsa; 244 258 struct ccp_sha_ctx sha; 245 259 struct ccp_des3_ctx des3; 246 260 } u; ··· 279 249 int ccp_register_aes_aeads(struct list_head *head); 280 250 int ccp_register_sha_algs(struct list_head *head); 281 251 int ccp_register_des3_algs(struct list_head *head); 252 + int ccp_register_rsa_algs(struct list_head *head); 282 253 283 254 #endif