
crypto: aspeed - Add ACRY RSA driver

The ACRY engine is designed to accelerate the throughput of
ECDSA/RSA signature generation and verification.

This patch adds an ACRY RSA engine driver for hardware
acceleration.

Signed-off-by: Neal Liu <neal_liu@aspeedtech.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Neal Liu; committed by Herbert Xu
2f1cf4e5 d07bd950
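
For context, here is a minimal, hypothetical sketch of how a kernel consumer could exercise this driver through the generic akcipher API. The driver below registers "rsa" (driver name "aspeed-rsa", priority 300), so a plain "rsa" allocation resolves to it when the hardware is present. The module name, the 2048-bit buffer sizes, and the test_pub_key blob are illustrative assumptions, not part of the patch:

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

/* Hypothetical BER-encoded RSA public key (contents omitted). */
static const u8 test_pub_key[] = { 0x30 /* ... */ };

static u8 in_buf[256];	/* 2048-bit input, big-endian */
static u8 out_buf[256];	/* 2048-bit result */

static int __init acry_smoke_init(void)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* "rsa" resolves to "aspeed-rsa" when the ACRY engine is present */
	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, test_pub_key,
					  sizeof(test_pub_key));
	if (ret)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&src, in_buf, sizeof(in_buf));
	sg_init_one(&dst, out_buf, sizeof(out_buf));
	akcipher_request_set_crypt(req, &src, &dst,
				   sizeof(in_buf), sizeof(out_buf));
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/* Raw RSA encryption: out = in^e mod n (async, wait for result) */
	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}
module_init(acry_smoke_init);

static void __exit acry_smoke_exit(void) { }
module_exit(acry_smoke_exit);

MODULE_LICENSE("GPL");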

 drivers/crypto/aspeed/Kconfig       |  11 +
 drivers/crypto/aspeed/Makefile      |   2 +
 drivers/crypto/aspeed/aspeed-acry.c | 828 ++++++++++++++++++++++++++++++++
 3 files changed, 841 insertions(+)

drivers/crypto/aspeed/Kconfig
···
	  crypto driver.
	  Supports AES/DES symmetric-key encryption and decryption
	  with ECB/CBC/CFB/OFB/CTR options.

config CRYPTO_DEV_ASPEED_ACRY
	bool "Enable Aspeed ACRY RSA Engine"
	depends on CRYPTO_DEV_ASPEED
	select CRYPTO_ENGINE
	select CRYPTO_RSA
	help
	  Select here to enable Aspeed ECC/RSA Engine (ACRY)
	  RSA driver.
	  Supports 256 bits to 4096 bits RSA encryption/decryption
	  and signature/verification.

drivers/crypto/aspeed/Makefile
···
aspeed_crypto-objs := aspeed-hace.o \
		      $(hace-hash-y) \
		      $(hace-crypto-y)

obj-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o

drivers/crypto/aspeed/aspeed-acry.c (new file)

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 Aspeed Technology Inc.
 */
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/count_zeros.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/regmap.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...)	\
	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define ACRY_DBG(d, fmt, ...)	\
	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

/*****************************
 *                           *
 * ACRY register definitions *
 *                           *
 *****************************/
#define ASPEED_ACRY_TRIGGER		0x000	/* ACRY Engine Control: trigger */
#define ASPEED_ACRY_DMA_CMD		0x048	/* ACRY Engine Control: Command */
#define ASPEED_ACRY_DMA_SRC_BASE	0x04C	/* ACRY DRAM base address for DMA */
#define ASPEED_ACRY_DMA_LEN		0x050	/* ACRY Data Length of DMA */
#define ASPEED_ACRY_RSA_KEY_LEN		0x058	/* ACRY RSA Exp/Mod Key Length (Bits) */
#define ASPEED_ACRY_INT_MASK		0x3F8	/* ACRY Interrupt Mask */
#define ASPEED_ACRY_STATUS		0x3FC	/* ACRY Interrupt Status */

/* rsa trigger */
#define  ACRY_CMD_RSA_TRIGGER		BIT(0)
#define  ACRY_CMD_DMA_RSA_TRIGGER	BIT(1)

/* rsa dma cmd */
#define  ACRY_CMD_DMA_SRAM_MODE_RSA	(0x3 << 4)
#define  ACRY_CMD_DMEM_AHB		BIT(8)
#define  ACRY_CMD_DMA_SRAM_AHB_ENGINE	0

/* rsa key len */
#define  RSA_E_BITS_LEN(x)		((x) << 16)
#define  RSA_M_BITS_LEN(x)		(x)
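
/*
 * Example: a 2048-bit modulus (nm = 2048) with a 17-bit public
 * exponent (ne = 17) packs into the key-length register as
 * RSA_E_BITS_LEN(17) | RSA_M_BITS_LEN(2048), i.e. (17 << 16) | 2048;
 * see the write to ASPEED_ACRY_RSA_KEY_LEN in aspeed_acry_rsa_trigger().
 */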

/* acry isr */
#define  ACRY_RSA_ISR			BIT(1)

#define ASPEED_ACRY_BUFF_SIZE		0x1800	/* DMA buffer size */
#define ASPEED_ACRY_SRAM_MAX_LEN	2048	/* ACRY SRAM maximum length (Bytes) */
#define ASPEED_ACRY_RSA_MAX_KEY_LEN	512	/* ACRY RSA maximum key length (Bytes) */

#define CRYPTO_FLAGS_BUSY		BIT(1)
#define BYTES_PER_DWORD			4

/*****************************
 *                           *
 * AHBC register definitions *
 *                           *
 *****************************/
#define AHBC_REGION_PROT		0x240
#define REGION_ACRYM			BIT(23)

#define ast_acry_write(acry, val, offset)	\
	writel((val), (acry)->regs + (offset))

#define ast_acry_read(acry, offset)		\
	readl((acry)->regs + (offset))

struct aspeed_acry_dev;

typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);

struct aspeed_acry_dev {
	void __iomem			*regs;
	struct device			*dev;
	int				irq;
	struct clk			*clk;
	struct regmap			*ahbc;

	struct akcipher_request		*req;
	struct tasklet_struct		done_task;
	aspeed_acry_fn_t		resume;
	unsigned long			flags;

	/* ACRY output SRAM buffer */
	void __iomem			*acry_sram;

	/* ACRY input DMA buffer */
	void				*buf_addr;
	dma_addr_t			buf_dma_addr;

	struct crypto_engine		*crypt_engine_rsa;

	/* ACRY SRAM memory mapped */
	int				exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int				data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};

struct aspeed_acry_ctx {
	struct crypto_engine_ctx	enginectx;
	struct aspeed_acry_dev		*acry_dev;

	struct rsa_key			key;
	int				enc;
	u8				*n;
	u8				*e;
	u8				*d;
	size_t				n_sz;
	size_t				e_sz;
	size_t				d_sz;

	aspeed_acry_fn_t		trigger;

	struct crypto_akcipher		*fallback_tfm;
};

struct aspeed_acry_alg {
	struct aspeed_acry_dev		*acry_dev;
	struct akcipher_alg		akcipher;
};

enum aspeed_rsa_key_mode {
	ASPEED_RSA_EXP_MODE = 0,
	ASPEED_RSA_MOD_MODE,
	ASPEED_RSA_DATA_MODE,
};

static inline struct akcipher_request *
akcipher_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}

static int aspeed_acry_do_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int err;

	akcipher_request_set_tfm(req, ctx->fallback_tfm);

	if (ctx->enc)
		err = crypto_akcipher_encrypt(req);
	else
		err = crypto_akcipher_decrypt(req);

	akcipher_request_set_tfm(req, cipher);

	return err;
}

static bool aspeed_acry_need_fallback(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);

	return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
}

static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
				    struct akcipher_request *req)
{
	if (aspeed_acry_need_fallback(req)) {
		ACRY_DBG(acry_dev, "SW fallback\n");
		return aspeed_acry_do_fallback(req);
	}

	return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
}

static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = akcipher_request_cast(areq);
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	acry_dev->req = req;
	acry_dev->flags |= CRYPTO_FLAGS_BUSY;

	return ctx->trigger(acry_dev);
}

static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
{
	struct akcipher_request *req = acry_dev->req;

	acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;

	crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);

	return err;
}
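
/*
 * The engine expects the input number in SRAM least-significant byte
 * first, while the scatterlist carries it big-endian (MSB first), so
 * aspeed_acry_rsa_sg_copy_to_buffer() below walks the source backwards
 * and scatters each byte through data_byte_mapping[]; unused tail
 * bytes are zero-filled.
 */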

/*
 * Copy data to the DMA buffer for engine use.
 */
static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
					      u8 *buf, struct scatterlist *src,
					      size_t nbytes)
{
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int i = 0, j;
	int data_idx;

	ACRY_DBG(acry_dev, "\n");

	scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);

	for (j = nbytes - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = dram_buffer[j];
		i++;
	}

	for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = 0;
	}
}

/*
 * Copy Exp/Mod to the DMA buffer for engine use.
 *
 * Params:
 * - mode 0 : Exponential
 * - mode 1 : Modulus
 *
 * Example:
 * - DRAM memory layout:
 *   D[0], D[4], D[8], D[12]
 * - ACRY SRAM memory layout should reverse the order of source data:
 *   D[12], D[8], D[4], D[0]
 */
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
				    const void *xbuf, size_t nbytes,
				    enum aspeed_rsa_key_mode mode)
{
	const u8 *src = xbuf;
	__le32 *dw_buf = (__le32 *)buf;
	int nbits, ndw;
	int i, j, idx;
	u32 data = 0;

	ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

	if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return -ENOMEM;

	/* Remove the leading zeros */
	while (nbytes > 0 && src[0] == 0) {
		src++;
		nbytes--;
	}

	nbits = nbytes * 8;
	if (nbytes > 0)
		nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

	/* double-word alignment */
	ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

	if (nbytes > 0) {
		i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
		i %= BYTES_PER_DWORD;

		for (j = ndw; j > 0; j--) {
			for (; i < BYTES_PER_DWORD; i++) {
				data <<= 8;
				data |= *src++;
			}

			i = 0;

			if (mode == ASPEED_RSA_EXP_MODE)
				idx = acry_dev->exp_dw_mapping[j - 1];
			else /* mode == ASPEED_RSA_MOD_MODE */
				idx = acry_dev->mod_dw_mapping[j - 1];

			dw_buf[idx] = cpu_to_le32(data);
		}
	}

	return nbits;
}
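
/*
 * Example: a 3-byte exponent { 0x01, 0x00, 0x01 } (65537) gives
 * nbytes = 3, so nbits starts at 24; count_leading_zeros(0x01) on a
 * 64-bit kernel is 63, and 63 - (BITS_PER_LONG - 8) = 7 leading zero
 * bits in the top byte, hence nbits = 24 - 7 = 17.
 */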

static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	u8 *sram_buffer = (u8 *)acry_dev->acry_sram;
	struct scatterlist *out_sg = req->dst;
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int leading_zero = 1;
	int result_nbytes;
	int i = 0, j;
	int data_idx;

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Disable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, 0);

	result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;

	for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[j];
		if (sram_buffer[data_idx] == 0 && leading_zero) {
			result_nbytes--;
		} else {
			leading_zero = 0;
			dram_buffer[i] = sram_buffer[data_idx];
			i++;
		}
	}

	ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
		 result_nbytes, req->dst_len);

	if (result_nbytes <= req->dst_len) {
		scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
					 1);
		req->dst_len = result_nbytes;
	} else {
		dev_err(acry_dev->dev, "RSA engine error!\n");
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	return aspeed_acry_complete(acry_dev, 0);
}

static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	int ne, nm;

	if (!ctx->n || !ctx->n_sz) {
		dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
		return -EINVAL;
	}

	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	/* Copy source data to DMA buffer */
	aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
					  req->src, req->src_len);

	nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
				      ctx->n_sz, ASPEED_RSA_MOD_MODE);
	if (ctx->enc) {
		if (!ctx->e || !ctx->e_sz) {
			dev_err(acry_dev->dev, "%s: key e is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key e to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->e, ctx->e_sz,
					      ASPEED_RSA_EXP_MODE);
	} else {
		if (!ctx->d || !ctx->d_sz) {
			dev_err(acry_dev->dev, "%s: key d is not set\n",
				__func__);
			return -EINVAL;
		}
		/* Copy key d to DMA buffer */
		ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
					      ctx->d, ctx->d_sz,
					      ASPEED_RSA_EXP_MODE);
	}

	ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
		       ASPEED_ACRY_DMA_SRC_BASE);
	ast_acry_write(acry_dev, RSA_E_BITS_LEN(ne) | RSA_M_BITS_LEN(nm),
		       ASPEED_ACRY_RSA_KEY_LEN);
	ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
		       ASPEED_ACRY_DMA_LEN);

	acry_dev->resume = aspeed_acry_rsa_transfer;

	/* Enable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, REGION_ACRYM);

	ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
	ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
		       ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);

	/* Trigger RSA engines */
	ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
		       ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);

	return 0;
}
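
/*
 * Completion path: the ACRY_RSA_ISR interrupt unmasked above fires when
 * the engine finishes, aspeed_acry_irq() stops the engine and schedules
 * done_task, and the tasklet runs the ->resume handler
 * (aspeed_acry_rsa_transfer) to copy the SRAM result back and finalize
 * the request on the crypto engine.
 */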

static int aspeed_acry_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 1;

	return aspeed_acry_handle_queue(acry_dev, req);
}

static int aspeed_acry_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;

	ctx->trigger = aspeed_acry_rsa_trigger;
	ctx->enc = 0;

	return aspeed_acry_handle_queue(acry_dev, req);
}

static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}

static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->n_sz = len;
	ctx->n = aspeed_rsa_key_copy(value, len);
	if (!ctx->n)
		return -ENOMEM;

	return 0;
}

static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->e_sz = len;
	ctx->e = aspeed_rsa_key_copy(value, len);
	if (!ctx->e)
		return -ENOMEM;

	return 0;
}

static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
			    size_t len)
{
	ctx->d_sz = len;
	ctx->d = aspeed_rsa_key_copy(value, len);
	if (!ctx->d)
		return -ENOMEM;

	return 0;
}

static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
{
	kfree_sensitive(ctx->n);
	kfree_sensitive(ctx->e);
	kfree_sensitive(ctx->d);
	ctx->n_sz = 0;
	ctx->e_sz = 0;
	ctx->d_sz = 0;
}

static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
				  unsigned int keylen, int priv)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
	int ret;

	if (priv)
		ret = rsa_parse_priv_key(&ctx->key, key, keylen);
	else
		ret = rsa_parse_pub_key(&ctx->key, key, keylen);

	if (ret) {
		dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
			ret);
		return ret;
	}

	/* The Aspeed engine supports keys up to 4096 bits;
	 * larger keys use the software fallback instead.
	 */
	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return 0;

	ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
	if (ret)
		goto err;

	ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
	if (ret)
		goto err;

	if (priv) {
		ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
		if (ret)
			goto err;
	}

	return 0;

err:
	dev_err(acry_dev->dev, "rsa set key failed\n");
	aspeed_rsa_key_free(ctx);

	return ret;
}
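
/*
 * Keys larger than ASPEED_ACRY_RSA_MAX_KEY_LEN (512 bytes, i.e. 4096
 * bits) are not copied above: aspeed_acry_need_fallback() routes those
 * requests to the software implementation, which received the same key
 * through the set_pub_key/set_priv_key wrappers below.
 */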

static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
				       const void *key,
				       unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
}

static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
					const void *key,
					unsigned int keylen)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
	if (ret)
		return ret;

	return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
}

static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return crypto_akcipher_maxsize(ctx->fallback_tfm);

	return ctx->n_sz;
}

static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct aspeed_acry_alg *acry_alg;

	acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);

	ctx->acry_dev = acry_alg->acry_dev;

	ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->fallback_tfm);
}

static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
	{
		.akcipher = {
			.encrypt = aspeed_acry_rsa_enc,
			.decrypt = aspeed_acry_rsa_dec,
			.sign = aspeed_acry_rsa_dec,
			.verify = aspeed_acry_rsa_enc,
			.set_pub_key = aspeed_acry_rsa_set_pub_key,
			.set_priv_key = aspeed_acry_rsa_set_priv_key,
			.max_size = aspeed_acry_rsa_max_size,
			.init = aspeed_acry_rsa_init_tfm,
			.exit = aspeed_acry_rsa_exit_tfm,
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "aspeed-rsa",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
			},
		},
	},
};
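
/*
 * Raw (unpadded) RSA sign and verify are the same modular
 * exponentiations as decrypt and encrypt, which is why .sign reuses
 * aspeed_acry_rsa_dec (private exponent d) and .verify reuses
 * aspeed_acry_rsa_enc (public exponent e).
 */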

static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
		aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
		rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
		if (rc) {
			ACRY_DBG(acry_dev, "Failed to register %s\n",
				 aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
		}
	}
}

static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
		crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}

/* ACRY interrupt service routine. */
static irqreturn_t aspeed_acry_irq(int irq, void *dev)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
	u32 sts;

	sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
	ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);

	ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);

	if (sts & ACRY_RSA_ISR) {
		/* Stop RSA engine */
		ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);

		if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&acry_dev->done_task);
		else
			dev_err(acry_dev->dev, "RSA no active requests.\n");
	}

	return IRQ_HANDLED;
}

/*
 * ACRY SRAM has its own memory layout.
 * Set the DRAM-to-SRAM indexing for future use.
 */
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
	int i, j = 0;

	for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
		acry_dev->exp_dw_mapping[i] = j;
		acry_dev->mod_dw_mapping[i] = j + 4;
		acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
		acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
		acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
		acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
		j++;
		j = j % 4 ? j : j + 8;
	}
}
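
/*
 * Example: the loop above lays SRAM out in repeating 12-dword groups:
 * exp dwords at offsets 0-3, mod dwords at 4-7 and data dwords at
 * 8-11, then the next group starts at dword 12 (exp 12-15, mod 16-19,
 * data 20-23), and so on.
 */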

static void aspeed_acry_done_task(unsigned long data)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;

	(void)acry_dev->resume(acry_dev);
}

static const struct of_device_id aspeed_acry_of_matches[] = {
	{ .compatible = "aspeed,ast2600-acry", },
	{},
};

static int aspeed_acry_probe(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int rc;

	acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
				GFP_KERNEL);
	if (!acry_dev)
		return -ENOMEM;

	acry_dev->dev = dev;

	platform_set_drvdata(pdev, acry_dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	acry_dev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(acry_dev->regs))
		return PTR_ERR(acry_dev->regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	acry_dev->acry_sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(acry_dev->acry_sram))
		return PTR_ERR(acry_dev->acry_sram);

	/* Get irq number and register it */
	acry_dev->irq = platform_get_irq(pdev, 0);
	if (acry_dev->irq < 0)
		return -ENXIO;

	rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
			      dev_name(dev), acry_dev);
	if (rc) {
		dev_err(dev, "Failed to request irq.\n");
		return rc;
	}

	/* devm_clk_get_enabled() also disables the clock on detach */
	acry_dev->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(acry_dev->clk)) {
		dev_err(dev, "Failed to get acry clk\n");
		return PTR_ERR(acry_dev->clk);
	}

	acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
							 "aspeed,ahbc");
	if (IS_ERR(acry_dev->ahbc)) {
		dev_err(dev, "Failed to get AHBC regmap\n");
		return -ENODEV;
	}

	/* Initialize crypto hardware engine structure for RSA */
	acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
	if (!acry_dev->crypt_engine_rsa)
		return -ENOMEM;

	rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
	if (rc)
		goto err_engine_rsa_start;

	tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
		     (unsigned long)acry_dev);

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Initialize ACRY SRAM index */
	aspeed_acry_sram_mapping(acry_dev);

	acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
						 &acry_dev->buf_dma_addr,
						 GFP_KERNEL);
	if (!acry_dev->buf_addr) {
		rc = -ENOMEM;
		goto err_engine_rsa_start;
	}
	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	aspeed_acry_register(acry_dev);

	dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");

	return 0;

err_engine_rsa_start:
	crypto_engine_exit(acry_dev->crypt_engine_rsa);

	return rc;
}

static int aspeed_acry_remove(struct platform_device *pdev)
{
	struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);

	aspeed_acry_unregister(acry_dev);
	crypto_engine_exit(acry_dev->crypt_engine_rsa);
	tasklet_kill(&acry_dev->done_task);

	return 0;
}

MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);

static struct platform_driver aspeed_acry_driver = {
	.probe = aspeed_acry_probe,
	.remove = aspeed_acry_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = aspeed_acry_of_matches,
	},
};

module_platform_driver(aspeed_acry_driver);

MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
MODULE_LICENSE("GPL");