Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: allwinner - Add sun8i-ss cryptographic offloader

The Security System is a hardware cryptographic offloader present
on Allwinner SoCs A80 and A83T.
It is different from the previous sun4i-ss.

This driver supports AES cipher in CBC and ECB mode.

Acked-by: Maxime Ripard <mripard@kernel.org>
Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Corentin Labbe and committed by
Herbert Xu
f08fcced a1afe274

+1328
+27
drivers/crypto/allwinner/Kconfig
··· 58 58 Say y to enable sun8i-ce debug stats. 59 59 This will create /sys/kernel/debug/sun8i-ce/stats for displaying 60 60 the number of requests per flow and per algorithm. 61 + 62 + config CRYPTO_DEV_SUN8I_SS 63 + tristate "Support for Allwinner Security System cryptographic offloader" 64 + select CRYPTO_BLKCIPHER 65 + select CRYPTO_ENGINE 66 + select CRYPTO_ECB 67 + select CRYPTO_CBC 68 + select CRYPTO_AES 69 + select CRYPTO_DES 70 + depends on CRYPTO_DEV_ALLWINNER 71 + depends on PM 72 + help 73 + Select y here to have support for the Security System available on 74 + Allwinner SoCs A80 and A83T. 75 + The Security System handles AES/3DES ciphers in ECB/CBC mode. 76 + 77 + To compile this driver as a module, choose M here: the module 78 + will be called sun8i-ss. 79 + 80 + config CRYPTO_DEV_SUN8I_SS_DEBUG 81 + bool "Enable sun8i-ss stats" 82 + depends on CRYPTO_DEV_SUN8I_SS 83 + depends on DEBUG_FS 84 + help 85 + Say y to enable sun8i-ss debug stats. 86 + This will create /sys/kernel/debug/sun8i-ss/stats for displaying 87 + the number of requests per flow and per algorithm.
+1
drivers/crypto/allwinner/Makefile
··· 1 1 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss/ 2 2 obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce/ 3 + obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss/
+2
drivers/crypto/allwinner/sun8i-ss/Makefile
··· 1 + obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o 2 + sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
+438
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * sun8i-ss-cipher.c - hardware cryptographic offloader for 4 + * Allwinner A80/A83T SoC 5 + * 6 + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> 7 + * 8 + * This file add support for AES cipher with 128,192,256 bits keysize in 9 + * CBC and ECB mode. 10 + * 11 + * You could find a link for the datasheet in Documentation/arm/sunxi/README 12 + */ 13 + 14 + #include <linux/crypto.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/io.h> 17 + #include <linux/pm_runtime.h> 18 + #include <crypto/scatterwalk.h> 19 + #include <crypto/internal/skcipher.h> 20 + #include "sun8i-ss.h" 21 + 22 + static bool sun8i_ss_need_fallback(struct skcipher_request *areq) 23 + { 24 + struct scatterlist *in_sg = areq->src; 25 + struct scatterlist *out_sg = areq->dst; 26 + struct scatterlist *sg; 27 + 28 + if (areq->cryptlen == 0 || areq->cryptlen % 16) 29 + return true; 30 + 31 + if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8) 32 + return true; 33 + 34 + sg = areq->src; 35 + while (sg) { 36 + if ((sg->length % 16) != 0) 37 + return true; 38 + if ((sg_dma_len(sg) % 16) != 0) 39 + return true; 40 + if (!IS_ALIGNED(sg->offset, 16)) 41 + return true; 42 + sg = sg_next(sg); 43 + } 44 + sg = areq->dst; 45 + while (sg) { 46 + if ((sg->length % 16) != 0) 47 + return true; 48 + if ((sg_dma_len(sg) % 16) != 0) 49 + return true; 50 + if (!IS_ALIGNED(sg->offset, 16)) 51 + return true; 52 + sg = sg_next(sg); 53 + } 54 + 55 + /* SS need same numbers of SG (with same length) for source and destination */ 56 + in_sg = areq->src; 57 + out_sg = areq->dst; 58 + while (in_sg && out_sg) { 59 + if (in_sg->length != out_sg->length) 60 + return true; 61 + in_sg = sg_next(in_sg); 62 + out_sg = sg_next(out_sg); 63 + } 64 + if (in_sg || out_sg) 65 + return true; 66 + return false; 67 + } 68 + 69 + static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) 70 + { 71 + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(areq); 72 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 73 + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 74 + int err; 75 + 76 + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); 77 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 78 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 79 + struct sun8i_ss_alg_template *algt; 80 + 81 + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); 82 + algt->stat_fb++; 83 + #endif 84 + skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); 85 + skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL); 86 + skcipher_request_set_crypt(subreq, areq->src, areq->dst, 87 + areq->cryptlen, areq->iv); 88 + if (rctx->op_dir & SS_DECRYPTION) 89 + err = crypto_skcipher_decrypt(subreq); 90 + else 91 + err = crypto_skcipher_encrypt(subreq); 92 + skcipher_request_zero(subreq); 93 + return err; 94 + } 95 + 96 + static int sun8i_ss_cipher(struct skcipher_request *areq) 97 + { 98 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 99 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 100 + struct sun8i_ss_dev *ss = op->ss; 101 + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 102 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 103 + struct sun8i_ss_alg_template *algt; 104 + struct scatterlist *sg; 105 + unsigned int todo, len, offset, ivsize; 106 + void *backup_iv = NULL; 107 + int nr_sgs = 0; 108 + int nr_sgd = 0; 109 + int err = 0; 110 + int i; 111 + 112 + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); 113 + 114 + dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, 115 + crypto_tfm_alg_name(areq->base.tfm), 116 + areq->cryptlen, 117 + rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), 118 + op->keylen); 119 + 120 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 121 + algt->stat_req++; 122 + #endif 123 + 124 + rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode]; 125 + 
rctx->method = ss->variant->alg_cipher[algt->ss_algo_id]; 126 + rctx->keylen = op->keylen; 127 + 128 + rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE); 129 + if (dma_mapping_error(ss->dev, rctx->p_key)) { 130 + dev_err(ss->dev, "Cannot DMA MAP KEY\n"); 131 + err = -EFAULT; 132 + goto theend; 133 + } 134 + 135 + ivsize = crypto_skcipher_ivsize(tfm); 136 + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { 137 + rctx->ivlen = ivsize; 138 + rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA); 139 + if (!rctx->biv) { 140 + err = -ENOMEM; 141 + goto theend_key; 142 + } 143 + if (rctx->op_dir & SS_DECRYPTION) { 144 + backup_iv = kzalloc(ivsize, GFP_KERNEL); 145 + if (!backup_iv) { 146 + err = -ENOMEM; 147 + goto theend_key; 148 + } 149 + offset = areq->cryptlen - ivsize; 150 + scatterwalk_map_and_copy(backup_iv, areq->src, offset, 151 + ivsize, 0); 152 + } 153 + memcpy(rctx->biv, areq->iv, ivsize); 154 + rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen, 155 + DMA_TO_DEVICE); 156 + if (dma_mapping_error(ss->dev, rctx->p_iv)) { 157 + dev_err(ss->dev, "Cannot DMA MAP IV\n"); 158 + err = -ENOMEM; 159 + goto theend_iv; 160 + } 161 + } 162 + if (areq->src == areq->dst) { 163 + nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), 164 + DMA_BIDIRECTIONAL); 165 + if (nr_sgs <= 0 || nr_sgs > 8) { 166 + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); 167 + err = -EINVAL; 168 + goto theend_iv; 169 + } 170 + nr_sgd = nr_sgs; 171 + } else { 172 + nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), 173 + DMA_TO_DEVICE); 174 + if (nr_sgs <= 0 || nr_sgs > 8) { 175 + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); 176 + err = -EINVAL; 177 + goto theend_iv; 178 + } 179 + nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst), 180 + DMA_FROM_DEVICE); 181 + if (nr_sgd <= 0 || nr_sgd > 8) { 182 + dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); 183 + err = -EINVAL; 184 + goto theend_sgs; 185 + } 186 + } 187 + 188 + len = 
areq->cryptlen; 189 + i = 0; 190 + sg = areq->src; 191 + while (i < nr_sgs && sg && len) { 192 + if (sg_dma_len(sg) == 0) 193 + goto sgs_next; 194 + rctx->t_src[i].addr = sg_dma_address(sg); 195 + todo = min(len, sg_dma_len(sg)); 196 + rctx->t_src[i].len = todo / 4; 197 + dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__, 198 + areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo); 199 + len -= todo; 200 + i++; 201 + sgs_next: 202 + sg = sg_next(sg); 203 + } 204 + if (len > 0) { 205 + dev_err(ss->dev, "remaining len %d\n", len); 206 + err = -EINVAL; 207 + goto theend_sgs; 208 + } 209 + 210 + len = areq->cryptlen; 211 + i = 0; 212 + sg = areq->dst; 213 + while (i < nr_sgd && sg && len) { 214 + if (sg_dma_len(sg) == 0) 215 + goto sgd_next; 216 + rctx->t_dst[i].addr = sg_dma_address(sg); 217 + todo = min(len, sg_dma_len(sg)); 218 + rctx->t_dst[i].len = todo / 4; 219 + dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__, 220 + areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo); 221 + len -= todo; 222 + i++; 223 + sgd_next: 224 + sg = sg_next(sg); 225 + } 226 + if (len > 0) { 227 + dev_err(ss->dev, "remaining len %d\n", len); 228 + err = -EINVAL; 229 + goto theend_sgs; 230 + } 231 + 232 + err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); 233 + 234 + theend_sgs: 235 + if (areq->src == areq->dst) { 236 + dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); 237 + } else { 238 + dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE); 239 + dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); 240 + } 241 + 242 + theend_iv: 243 + if (rctx->p_iv) 244 + dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen, 245 + DMA_TO_DEVICE); 246 + 247 + if (areq->iv && ivsize > 0) { 248 + if (rctx->biv) { 249 + offset = areq->cryptlen - ivsize; 250 + if (rctx->op_dir & SS_DECRYPTION) { 251 + memcpy(areq->iv, backup_iv, ivsize); 252 + memzero_explicit(backup_iv, ivsize); 253 + kzfree(backup_iv); 254 + } else { 255 
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset, 256 + ivsize, 0); 257 + } 258 + kfree(rctx->biv); 259 + } 260 + } 261 + 262 + theend_key: 263 + dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE); 264 + 265 + theend: 266 + 267 + return err; 268 + } 269 + 270 + static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) 271 + { 272 + int err; 273 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); 274 + 275 + err = sun8i_ss_cipher(breq); 276 + crypto_finalize_skcipher_request(engine, breq, err); 277 + 278 + return 0; 279 + } 280 + 281 + int sun8i_ss_skdecrypt(struct skcipher_request *areq) 282 + { 283 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 284 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 285 + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 286 + struct crypto_engine *engine; 287 + int e; 288 + 289 + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); 290 + rctx->op_dir = SS_DECRYPTION; 291 + 292 + if (sun8i_ss_need_fallback(areq)) 293 + return sun8i_ss_cipher_fallback(areq); 294 + 295 + e = sun8i_ss_get_engine_number(op->ss); 296 + engine = op->ss->flows[e].engine; 297 + rctx->flow = e; 298 + 299 + return crypto_transfer_skcipher_request_to_engine(engine, areq); 300 + } 301 + 302 + int sun8i_ss_skencrypt(struct skcipher_request *areq) 303 + { 304 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 305 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 306 + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 307 + struct crypto_engine *engine; 308 + int e; 309 + 310 + memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); 311 + rctx->op_dir = SS_ENCRYPTION; 312 + 313 + if (sun8i_ss_need_fallback(areq)) 314 + return sun8i_ss_cipher_fallback(areq); 315 + 316 + e = sun8i_ss_get_engine_number(op->ss); 317 + engine = op->ss->flows[e].engine; 318 + rctx->flow = e; 319 + 320 + return 
crypto_transfer_skcipher_request_to_engine(engine, areq); 321 + } 322 + 323 + int sun8i_ss_cipher_init(struct crypto_tfm *tfm) 324 + { 325 + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 326 + struct sun8i_ss_alg_template *algt; 327 + const char *name = crypto_tfm_alg_name(tfm); 328 + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); 329 + struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); 330 + int err; 331 + 332 + memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); 333 + 334 + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); 335 + op->ss = algt->ss; 336 + 337 + sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx); 338 + 339 + op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); 340 + if (IS_ERR(op->fallback_tfm)) { 341 + dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 342 + name, PTR_ERR(op->fallback_tfm)); 343 + return PTR_ERR(op->fallback_tfm); 344 + } 345 + 346 + dev_info(op->ss->dev, "Fallback for %s is %s\n", 347 + crypto_tfm_alg_driver_name(&sktfm->base), 348 + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base))); 349 + 350 + op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; 351 + op->enginectx.op.prepare_request = NULL; 352 + op->enginectx.op.unprepare_request = NULL; 353 + 354 + err = pm_runtime_get_sync(op->ss->dev); 355 + if (err < 0) { 356 + dev_err(op->ss->dev, "pm error %d\n", err); 357 + goto error_pm; 358 + } 359 + 360 + return 0; 361 + error_pm: 362 + crypto_free_sync_skcipher(op->fallback_tfm); 363 + return err; 364 + } 365 + 366 + void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) 367 + { 368 + struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 369 + 370 + if (op->key) { 371 + memzero_explicit(op->key, op->keylen); 372 + kfree(op->key); 373 + } 374 + crypto_free_sync_skcipher(op->fallback_tfm); 375 + pm_runtime_put_sync(op->ss->dev); 376 + } 377 + 378 + int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const 
u8 *key, 379 + unsigned int keylen) 380 + { 381 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 382 + struct sun8i_ss_dev *ss = op->ss; 383 + 384 + switch (keylen) { 385 + case 128 / 8: 386 + break; 387 + case 192 / 8: 388 + break; 389 + case 256 / 8: 390 + break; 391 + default: 392 + dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); 393 + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 394 + return -EINVAL; 395 + } 396 + if (op->key) { 397 + memzero_explicit(op->key, op->keylen); 398 + kfree(op->key); 399 + } 400 + op->keylen = keylen; 401 + op->key = kmalloc(keylen, GFP_KERNEL | GFP_DMA); 402 + if (!op->key) 403 + return -ENOMEM; 404 + memcpy(op->key, key, keylen); 405 + 406 + crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); 407 + crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 408 + 409 + return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); 410 + } 411 + 412 + int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, 413 + unsigned int keylen) 414 + { 415 + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 416 + struct sun8i_ss_dev *ss = op->ss; 417 + 418 + if (unlikely(keylen != 3 * DES_KEY_SIZE)) { 419 + dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); 420 + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 421 + return -EINVAL; 422 + } 423 + 424 + if (op->key) { 425 + memzero_explicit(op->key, op->keylen); 426 + kfree(op->key); 427 + } 428 + op->keylen = keylen; 429 + op->key = kmalloc(keylen, GFP_KERNEL | GFP_DMA); 430 + if (!op->key) 431 + return -ENOMEM; 432 + memcpy(op->key, key, keylen); 433 + 434 + crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); 435 + crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 436 + 437 + return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); 438 + }
+642
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * sun8i-ss-core.c - hardware cryptographic offloader for 4 + * Allwinner A80/A83T SoC 5 + * 6 + * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com> 7 + * 8 + * Core file which registers crypto algorithms supported by the SecuritySystem 9 + * 10 + * You could find a link for the datasheet in Documentation/arm/sunxi/README 11 + */ 12 + #include <linux/clk.h> 13 + #include <linux/crypto.h> 14 + #include <linux/delay.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/interrupt.h> 17 + #include <linux/io.h> 18 + #include <linux/irq.h> 19 + #include <linux/module.h> 20 + #include <linux/of.h> 21 + #include <linux/of_device.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/pm_runtime.h> 24 + #include <linux/reset.h> 25 + #include <crypto/internal/skcipher.h> 26 + 27 + #include "sun8i-ss.h" 28 + 29 + static const struct ss_variant ss_a80_variant = { 30 + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, 31 + }, 32 + .op_mode = { SS_OP_ECB, SS_OP_CBC, 33 + }, 34 + .ss_clks = { 35 + { "bus", 0, 300 * 1000 * 1000 }, 36 + { "mod", 0, 300 * 1000 * 1000 }, 37 + } 38 + }; 39 + 40 + static const struct ss_variant ss_a83t_variant = { 41 + .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, 42 + }, 43 + .op_mode = { SS_OP_ECB, SS_OP_CBC, 44 + }, 45 + .ss_clks = { 46 + { "bus", 0, 300 * 1000 * 1000 }, 47 + { "mod", 0, 300 * 1000 * 1000 }, 48 + } 49 + }; 50 + 51 + /* 52 + * sun8i_ss_get_engine_number() get the next channel slot 53 + * This is a simple round-robin way of getting the next channel 54 + */ 55 + int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss) 56 + { 57 + return atomic_inc_return(&ss->flow) % MAXFLOW; 58 + } 59 + 60 + int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, 61 + const char *name) 62 + { 63 + int flow = rctx->flow; 64 + u32 v = 1; 65 + int i; 66 + 67 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 68 + ss->flows[flow].stat_req++; 69 + 
#endif 70 + 71 + /* choose between stream0/stream1 */ 72 + if (flow) 73 + v |= SS_FLOW1; 74 + else 75 + v |= SS_FLOW0; 76 + 77 + v |= rctx->op_mode; 78 + v |= rctx->method; 79 + 80 + if (rctx->op_dir) 81 + v |= SS_DECRYPTION; 82 + 83 + switch (rctx->keylen) { 84 + case 128 / 8: 85 + v |= SS_AES_128BITS << 7; 86 + break; 87 + case 192 / 8: 88 + v |= SS_AES_192BITS << 7; 89 + break; 90 + case 256 / 8: 91 + v |= SS_AES_256BITS << 7; 92 + break; 93 + } 94 + 95 + for (i = 0; i < MAX_SG; i++) { 96 + if (!rctx->t_dst[i].addr) 97 + break; 98 + 99 + mutex_lock(&ss->mlock); 100 + writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); 101 + 102 + if (i == 0) { 103 + if (rctx->p_iv) 104 + writel(rctx->p_iv, ss->base + SS_IV_ADR_REG); 105 + } else { 106 + if (rctx->biv) { 107 + if (rctx->op_dir == SS_ENCRYPTION) 108 + writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); 109 + else 110 + writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); 111 + } 112 + } 113 + 114 + dev_dbg(ss->dev, 115 + "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n", 116 + i, flow, name, v, 117 + rctx->t_src[i].len, rctx->t_dst[i].len, 118 + rctx->method, rctx->op_mode, 119 + rctx->op_dir, rctx->t_src[i].len); 120 + 121 + writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); 122 + writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); 123 + writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); 124 + 125 + reinit_completion(&ss->flows[flow].complete); 126 + ss->flows[flow].status = 0; 127 + wmb(); 128 + 129 + writel(v, ss->base + SS_CTL_REG); 130 + mutex_unlock(&ss->mlock); 131 + wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, 132 + msecs_to_jiffies(2000)); 133 + if (ss->flows[flow].status == 0) { 134 + dev_err(ss->dev, "DMA timeout for %s\n", name); 135 + return -EFAULT; 136 + } 137 + } 138 + 139 + return 0; 140 + } 141 + 142 + static irqreturn_t 
ss_irq_handler(int irq, void *data) 143 + { 144 + struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data; 145 + int flow = 0; 146 + u32 p; 147 + 148 + p = readl(ss->base + SS_INT_STA_REG); 149 + for (flow = 0; flow < MAXFLOW; flow++) { 150 + if (p & (BIT(flow))) { 151 + writel(BIT(flow), ss->base + SS_INT_STA_REG); 152 + ss->flows[flow].status = 1; 153 + complete(&ss->flows[flow].complete); 154 + } 155 + } 156 + 157 + return IRQ_HANDLED; 158 + } 159 + 160 + static struct sun8i_ss_alg_template ss_algs[] = { 161 + { 162 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 163 + .ss_algo_id = SS_ID_CIPHER_AES, 164 + .ss_blockmode = SS_ID_OP_CBC, 165 + .alg.skcipher = { 166 + .base = { 167 + .cra_name = "cbc(aes)", 168 + .cra_driver_name = "cbc-aes-sun8i-ss", 169 + .cra_priority = 400, 170 + .cra_blocksize = AES_BLOCK_SIZE, 171 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 172 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 173 + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 174 + .cra_module = THIS_MODULE, 175 + .cra_alignmask = 0xf, 176 + .cra_init = sun8i_ss_cipher_init, 177 + .cra_exit = sun8i_ss_cipher_exit, 178 + }, 179 + .min_keysize = AES_MIN_KEY_SIZE, 180 + .max_keysize = AES_MAX_KEY_SIZE, 181 + .ivsize = AES_BLOCK_SIZE, 182 + .setkey = sun8i_ss_aes_setkey, 183 + .encrypt = sun8i_ss_skencrypt, 184 + .decrypt = sun8i_ss_skdecrypt, 185 + } 186 + }, 187 + { 188 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 189 + .ss_algo_id = SS_ID_CIPHER_AES, 190 + .ss_blockmode = SS_ID_OP_ECB, 191 + .alg.skcipher = { 192 + .base = { 193 + .cra_name = "ecb(aes)", 194 + .cra_driver_name = "ecb-aes-sun8i-ss", 195 + .cra_priority = 400, 196 + .cra_blocksize = AES_BLOCK_SIZE, 197 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 198 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 199 + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 200 + .cra_module = THIS_MODULE, 201 + .cra_alignmask = 0xf, 202 + .cra_init = sun8i_ss_cipher_init, 203 + .cra_exit = sun8i_ss_cipher_exit, 204 + }, 205 + .min_keysize = 
AES_MIN_KEY_SIZE, 206 + .max_keysize = AES_MAX_KEY_SIZE, 207 + .setkey = sun8i_ss_aes_setkey, 208 + .encrypt = sun8i_ss_skencrypt, 209 + .decrypt = sun8i_ss_skdecrypt, 210 + } 211 + }, 212 + { 213 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 214 + .ss_algo_id = SS_ID_CIPHER_DES3, 215 + .ss_blockmode = SS_ID_OP_CBC, 216 + .alg.skcipher = { 217 + .base = { 218 + .cra_name = "cbc(des3_ede)", 219 + .cra_driver_name = "cbc-des3-sun8i-ss", 220 + .cra_priority = 400, 221 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 222 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 223 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 224 + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 225 + .cra_module = THIS_MODULE, 226 + .cra_alignmask = 0xf, 227 + .cra_init = sun8i_ss_cipher_init, 228 + .cra_exit = sun8i_ss_cipher_exit, 229 + }, 230 + .min_keysize = DES3_EDE_KEY_SIZE, 231 + .max_keysize = DES3_EDE_KEY_SIZE, 232 + .ivsize = DES3_EDE_BLOCK_SIZE, 233 + .setkey = sun8i_ss_des3_setkey, 234 + .encrypt = sun8i_ss_skencrypt, 235 + .decrypt = sun8i_ss_skdecrypt, 236 + } 237 + }, 238 + { 239 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 240 + .ss_algo_id = SS_ID_CIPHER_DES3, 241 + .ss_blockmode = SS_ID_OP_ECB, 242 + .alg.skcipher = { 243 + .base = { 244 + .cra_name = "ecb(des3_ede)", 245 + .cra_driver_name = "ecb-des3-sun8i-ss", 246 + .cra_priority = 400, 247 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 248 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 249 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 250 + .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 251 + .cra_module = THIS_MODULE, 252 + .cra_alignmask = 0xf, 253 + .cra_init = sun8i_ss_cipher_init, 254 + .cra_exit = sun8i_ss_cipher_exit, 255 + }, 256 + .min_keysize = DES3_EDE_KEY_SIZE, 257 + .max_keysize = DES3_EDE_KEY_SIZE, 258 + .setkey = sun8i_ss_des3_setkey, 259 + .encrypt = sun8i_ss_skencrypt, 260 + .decrypt = sun8i_ss_skdecrypt, 261 + } 262 + }, 263 + }; 264 + 265 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 266 + static int sun8i_ss_dbgfs_read(struct seq_file *seq, 
void *v) 267 + { 268 + struct sun8i_ss_dev *ss = seq->private; 269 + int i; 270 + 271 + for (i = 0; i < MAXFLOW; i++) 272 + seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req); 273 + 274 + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 275 + if (!ss_algs[i].ss) 276 + continue; 277 + switch (ss_algs[i].type) { 278 + case CRYPTO_ALG_TYPE_SKCIPHER: 279 + seq_printf(seq, "%s %s %lu %lu\n", 280 + ss_algs[i].alg.skcipher.base.cra_driver_name, 281 + ss_algs[i].alg.skcipher.base.cra_name, 282 + ss_algs[i].stat_req, ss_algs[i].stat_fb); 283 + break; 284 + } 285 + } 286 + return 0; 287 + } 288 + 289 + static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file) 290 + { 291 + return single_open(file, sun8i_ss_dbgfs_read, inode->i_private); 292 + } 293 + 294 + static const struct file_operations sun8i_ss_debugfs_fops = { 295 + .owner = THIS_MODULE, 296 + .open = sun8i_ss_dbgfs_open, 297 + .read = seq_read, 298 + .llseek = seq_lseek, 299 + .release = single_release, 300 + }; 301 + #endif 302 + 303 + static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) 304 + { 305 + while (i >= 0) { 306 + crypto_engine_exit(ss->flows[i].engine); 307 + i--; 308 + } 309 + } 310 + 311 + /* 312 + * Allocate the flow list structure 313 + */ 314 + static int allocate_flows(struct sun8i_ss_dev *ss) 315 + { 316 + int i, err; 317 + 318 + ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), 319 + GFP_KERNEL); 320 + if (!ss->flows) 321 + return -ENOMEM; 322 + 323 + for (i = 0; i < MAXFLOW; i++) { 324 + init_completion(&ss->flows[i].complete); 325 + 326 + ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); 327 + if (!ss->flows[i].engine) { 328 + dev_err(ss->dev, "Cannot allocate engine\n"); 329 + i--; 330 + err = -ENOMEM; 331 + goto error_engine; 332 + } 333 + err = crypto_engine_start(ss->flows[i].engine); 334 + if (err) { 335 + dev_err(ss->dev, "Cannot start engine\n"); 336 + goto error_engine; 337 + } 338 + } 339 + return 0; 340 + 
error_engine: 341 + sun8i_ss_free_flows(ss, i); 342 + return err; 343 + } 344 + 345 + /* 346 + * Power management strategy: The device is suspended unless a TFM exists for 347 + * one of the algorithms proposed by this driver. 348 + */ 349 + static int sun8i_ss_pm_suspend(struct device *dev) 350 + { 351 + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); 352 + int i; 353 + 354 + reset_control_assert(ss->reset); 355 + for (i = 0; i < SS_MAX_CLOCKS; i++) 356 + clk_disable_unprepare(ss->ssclks[i]); 357 + return 0; 358 + } 359 + 360 + static int sun8i_ss_pm_resume(struct device *dev) 361 + { 362 + struct sun8i_ss_dev *ss = dev_get_drvdata(dev); 363 + int err, i; 364 + 365 + for (i = 0; i < SS_MAX_CLOCKS; i++) { 366 + if (!ss->variant->ss_clks[i].name) 367 + continue; 368 + err = clk_prepare_enable(ss->ssclks[i]); 369 + if (err) { 370 + dev_err(ss->dev, "Cannot prepare_enable %s\n", 371 + ss->variant->ss_clks[i].name); 372 + goto error; 373 + } 374 + } 375 + err = reset_control_deassert(ss->reset); 376 + if (err) { 377 + dev_err(ss->dev, "Cannot deassert reset control\n"); 378 + goto error; 379 + } 380 + /* enable interrupts for all flows */ 381 + writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); 382 + 383 + return 0; 384 + error: 385 + sun8i_ss_pm_suspend(dev); 386 + return err; 387 + } 388 + 389 + static const struct dev_pm_ops sun8i_ss_pm_ops = { 390 + SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL) 391 + }; 392 + 393 + static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss) 394 + { 395 + int err; 396 + 397 + pm_runtime_use_autosuspend(ss->dev); 398 + pm_runtime_set_autosuspend_delay(ss->dev, 2000); 399 + 400 + err = pm_runtime_set_suspended(ss->dev); 401 + if (err) 402 + return err; 403 + pm_runtime_enable(ss->dev); 404 + return err; 405 + } 406 + 407 + static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss) 408 + { 409 + pm_runtime_disable(ss->dev); 410 + } 411 + 412 + static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) 413 + { 414 + int 
ss_method, err, id, i; 415 + 416 + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 417 + ss_algs[i].ss = ss; 418 + switch (ss_algs[i].type) { 419 + case CRYPTO_ALG_TYPE_SKCIPHER: 420 + id = ss_algs[i].ss_algo_id; 421 + ss_method = ss->variant->alg_cipher[id]; 422 + if (ss_method == SS_ID_NOTSUPP) { 423 + dev_info(ss->dev, 424 + "DEBUG: Algo of %s not supported\n", 425 + ss_algs[i].alg.skcipher.base.cra_name); 426 + ss_algs[i].ss = NULL; 427 + break; 428 + } 429 + id = ss_algs[i].ss_blockmode; 430 + ss_method = ss->variant->op_mode[id]; 431 + if (ss_method == SS_ID_NOTSUPP) { 432 + dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", 433 + ss_algs[i].alg.skcipher.base.cra_name); 434 + ss_algs[i].ss = NULL; 435 + break; 436 + } 437 + dev_info(ss->dev, "DEBUG: Register %s\n", 438 + ss_algs[i].alg.skcipher.base.cra_name); 439 + err = crypto_register_skcipher(&ss_algs[i].alg.skcipher); 440 + if (err) { 441 + dev_err(ss->dev, "Fail to register %s\n", 442 + ss_algs[i].alg.skcipher.base.cra_name); 443 + ss_algs[i].ss = NULL; 444 + return err; 445 + } 446 + break; 447 + default: 448 + ss_algs[i].ss = NULL; 449 + dev_err(ss->dev, "ERROR: tryed to register an unknown algo\n"); 450 + } 451 + } 452 + return 0; 453 + } 454 + 455 + static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) 456 + { 457 + int i; 458 + 459 + for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 460 + if (!ss_algs[i].ss) 461 + continue; 462 + switch (ss_algs[i].type) { 463 + case CRYPTO_ALG_TYPE_SKCIPHER: 464 + dev_info(ss->dev, "Unregister %d %s\n", i, 465 + ss_algs[i].alg.skcipher.base.cra_name); 466 + crypto_unregister_skcipher(&ss_algs[i].alg.skcipher); 467 + break; 468 + } 469 + } 470 + } 471 + 472 + static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss) 473 + { 474 + unsigned long cr; 475 + int err, i; 476 + 477 + for (i = 0; i < SS_MAX_CLOCKS; i++) { 478 + if (!ss->variant->ss_clks[i].name) 479 + continue; 480 + ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name); 481 + if 
(IS_ERR(ss->ssclks[i])) { 482 + err = PTR_ERR(ss->ssclks[i]); 483 + dev_err(ss->dev, "Cannot get %s SS clock err=%d\n", 484 + ss->variant->ss_clks[i].name, err); 485 + return err; 486 + } 487 + cr = clk_get_rate(ss->ssclks[i]); 488 + if (!cr) 489 + return -EINVAL; 490 + if (ss->variant->ss_clks[i].freq > 0 && 491 + cr != ss->variant->ss_clks[i].freq) { 492 + dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", 493 + ss->variant->ss_clks[i].name, 494 + ss->variant->ss_clks[i].freq, 495 + ss->variant->ss_clks[i].freq / 1000000, 496 + cr, cr / 1000000); 497 + err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq); 498 + if (err) 499 + dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n", 500 + ss->variant->ss_clks[i].name, 501 + ss->variant->ss_clks[i].freq); 502 + } 503 + if (ss->variant->ss_clks[i].max_freq > 0 && 504 + cr > ss->variant->ss_clks[i].max_freq) 505 + dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommandation (%lu hz)", 506 + ss->variant->ss_clks[i].name, cr, 507 + ss->variant->ss_clks[i].max_freq); 508 + } 509 + return 0; 510 + } 511 + 512 + static int sun8i_ss_probe(struct platform_device *pdev) 513 + { 514 + struct sun8i_ss_dev *ss; 515 + int err, irq; 516 + u32 v; 517 + 518 + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); 519 + if (!ss) 520 + return -ENOMEM; 521 + 522 + ss->dev = &pdev->dev; 523 + platform_set_drvdata(pdev, ss); 524 + 525 + ss->variant = of_device_get_match_data(&pdev->dev); 526 + if (!ss->variant) { 527 + dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); 528 + return -EINVAL; 529 + } 530 + 531 + ss->base = devm_platform_ioremap_resource(pdev, 0);; 532 + if (IS_ERR(ss->base)) 533 + return PTR_ERR(ss->base); 534 + 535 + err = sun8i_ss_get_clks(ss); 536 + if (err) 537 + return err; 538 + 539 + irq = platform_get_irq(pdev, 0); 540 + if (irq < 0) { 541 + dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n"); 542 + return irq; 543 + } 544 + 545 + ss->reset = 
devm_reset_control_get(&pdev->dev, NULL); 546 + if (IS_ERR(ss->reset)) { 547 + if (PTR_ERR(ss->reset) == -EPROBE_DEFER) 548 + return PTR_ERR(ss->reset); 549 + dev_err(&pdev->dev, "No reset control found\n"); 550 + return PTR_ERR(ss->reset); 551 + } 552 + 553 + mutex_init(&ss->mlock); 554 + 555 + err = allocate_flows(ss); 556 + if (err) 557 + return err; 558 + 559 + err = sun8i_ss_pm_init(ss); 560 + if (err) 561 + goto error_pm; 562 + 563 + err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss); 564 + if (err) { 565 + dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err); 566 + goto error_irq; 567 + } 568 + 569 + err = sun8i_ss_register_algs(ss); 570 + if (err) 571 + goto error_alg; 572 + 573 + err = pm_runtime_get_sync(ss->dev); 574 + if (err < 0) 575 + goto error_alg; 576 + 577 + v = readl(ss->base + SS_CTL_REG); 578 + v >>= SS_DIE_ID_SHIFT; 579 + v &= SS_DIE_ID_MASK; 580 + dev_info(&pdev->dev, "Security System Die ID %x\n", v); 581 + 582 + pm_runtime_put_sync(ss->dev); 583 + 584 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 585 + /* Ignore error of debugfs */ 586 + ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); 587 + ss->dbgfs_stats = debugfs_create_file("stats", 0444, 588 + ss->dbgfs_dir, ss, 589 + &sun8i_ss_debugfs_fops); 590 + #endif 591 + 592 + return 0; 593 + error_alg: 594 + sun8i_ss_unregister_algs(ss); 595 + error_irq: 596 + sun8i_ss_pm_exit(ss); 597 + error_pm: 598 + sun8i_ss_free_flows(ss, MAXFLOW); 599 + return err; 600 + } 601 + 602 + static int sun8i_ss_remove(struct platform_device *pdev) 603 + { 604 + struct sun8i_ss_dev *ss = platform_get_drvdata(pdev); 605 + 606 + sun8i_ss_unregister_algs(ss); 607 + 608 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 609 + debugfs_remove_recursive(ss->dbgfs_dir); 610 + #endif 611 + 612 + sun8i_ss_free_flows(ss, MAXFLOW); 613 + 614 + sun8i_ss_pm_exit(ss); 615 + 616 + return 0; 617 + } 618 + 619 + static const struct of_device_id sun8i_ss_crypto_of_match_table[] = { 620 + { 
.compatible = "allwinner,sun8i-a83t-crypto", 621 + .data = &ss_a83t_variant }, 622 + { .compatible = "allwinner,sun9i-a80-crypto", 623 + .data = &ss_a80_variant }, 624 + {} 625 + }; 626 + MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table); 627 + 628 + static struct platform_driver sun8i_ss_driver = { 629 + .probe = sun8i_ss_probe, 630 + .remove = sun8i_ss_remove, 631 + .driver = { 632 + .name = "sun8i-ss", 633 + .pm = &sun8i_ss_pm_ops, 634 + .of_match_table = sun8i_ss_crypto_of_match_table, 635 + }, 636 + }; 637 + 638 + module_platform_driver(sun8i_ss_driver); 639 + 640 + MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader"); 641 + MODULE_LICENSE("GPL"); 642 + MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
+218
drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sun8i-ss.h - hardware cryptographic offloader for 4 + * Allwinner A80/A83T SoC 5 + * 6 + * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com> 7 + */ 8 + #include <crypto/aes.h> 9 + #include <crypto/des.h> 10 + #include <crypto/engine.h> 11 + #include <crypto/skcipher.h> 12 + #include <linux/atomic.h> 13 + #include <linux/debugfs.h> 14 + #include <linux/crypto.h> 15 + 16 + #define SS_ENCRYPTION 0 17 + #define SS_DECRYPTION BIT(6) 18 + 19 + #define SS_ALG_AES 0 20 + #define SS_ALG_DES (1 << 2) 21 + #define SS_ALG_3DES (2 << 2) 22 + 23 + #define SS_CTL_REG 0x00 24 + #define SS_INT_CTL_REG 0x04 25 + #define SS_INT_STA_REG 0x08 26 + #define SS_KEY_ADR_REG 0x10 27 + #define SS_IV_ADR_REG 0x18 28 + #define SS_SRC_ADR_REG 0x20 29 + #define SS_DST_ADR_REG 0x28 30 + #define SS_LEN_ADR_REG 0x30 31 + 32 + #define SS_ID_NOTSUPP 0xFF 33 + 34 + #define SS_ID_CIPHER_AES 0 35 + #define SS_ID_CIPHER_DES 1 36 + #define SS_ID_CIPHER_DES3 2 37 + #define SS_ID_CIPHER_MAX 3 38 + 39 + #define SS_ID_OP_ECB 0 40 + #define SS_ID_OP_CBC 1 41 + #define SS_ID_OP_MAX 2 42 + 43 + #define SS_AES_128BITS 0 44 + #define SS_AES_192BITS 1 45 + #define SS_AES_256BITS 2 46 + 47 + #define SS_OP_ECB 0 48 + #define SS_OP_CBC (1 << 13) 49 + 50 + #define SS_FLOW0 BIT(30) 51 + #define SS_FLOW1 BIT(31) 52 + 53 + #define MAX_SG 8 54 + 55 + #define MAXFLOW 2 56 + 57 + #define SS_MAX_CLOCKS 2 58 + 59 + #define SS_DIE_ID_SHIFT 20 60 + #define SS_DIE_ID_MASK 0x07 61 + 62 + /* 63 + * struct ss_clock - Describe clocks used by sun8i-ss 64 + * @name: Name of clock needed by this variant 65 + * @freq: Frequency to set for each clock 66 + * @max_freq: Maximum frequency for each clock 67 + */ 68 + struct ss_clock { 69 + const char *name; 70 + unsigned long freq; 71 + unsigned long max_freq; 72 + }; 73 + 74 + /* 75 + * struct ss_variant - Describe SS capability for each variant hardware 76 + * @alg_cipher: list of supported ciphers. 
for each SS_ID_ this will give the 77 + * coresponding SS_ALG_XXX value 78 + * @op_mode: list of supported block modes 79 + * @ss_clks! list of clock needed by this variant 80 + */ 81 + struct ss_variant { 82 + char alg_cipher[SS_ID_CIPHER_MAX]; 83 + u32 op_mode[SS_ID_OP_MAX]; 84 + struct ss_clock ss_clks[SS_MAX_CLOCKS]; 85 + }; 86 + 87 + struct sginfo { 88 + u32 addr; 89 + u32 len; 90 + }; 91 + 92 + /* 93 + * struct sun8i_ss_flow - Information used by each flow 94 + * @engine: ptr to the crypto_engine for this flow 95 + * @complete: completion for the current task on this flow 96 + * @status: set to 1 by interrupt if task is done 97 + * @stat_req: number of request done by this flow 98 + */ 99 + struct sun8i_ss_flow { 100 + struct crypto_engine *engine; 101 + struct completion complete; 102 + int status; 103 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 104 + unsigned long stat_req; 105 + #endif 106 + }; 107 + 108 + /* 109 + * struct sun8i_ss_dev - main container for all this driver information 110 + * @base: base address of SS 111 + * @ssclks: clocks used by SS 112 + * @reset: pointer to reset controller 113 + * @dev: the platform device 114 + * @mlock: Control access to device registers 115 + * @flows: array of all flow 116 + * @flow: flow to use in next request 117 + * @variant: pointer to variant specific data 118 + * @dbgfs_dir: Debugfs dentry for statistic directory 119 + * @dbgfs_stats: Debugfs dentry for statistic counters 120 + */ 121 + struct sun8i_ss_dev { 122 + void __iomem *base; 123 + struct clk *ssclks[SS_MAX_CLOCKS]; 124 + struct reset_control *reset; 125 + struct device *dev; 126 + struct mutex mlock; 127 + struct sun8i_ss_flow *flows; 128 + atomic_t flow; 129 + const struct ss_variant *variant; 130 + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 131 + struct dentry *dbgfs_dir; 132 + struct dentry *dbgfs_stats; 133 + #endif 134 + }; 135 + 136 + /* 137 + * struct sun8i_cipher_req_ctx - context for a skcipher request 138 + * @t_src: list of mapped SGs with 
their size 139 + * @t_dst: list of mapped SGs with their size 140 + * @p_key: DMA address of the key 141 + * @p_iv: DMA address of the IV 142 + * @method: current algorithm for this request 143 + * @op_mode: op_mode for this request 144 + * @op_dir: direction (encrypt vs decrypt) for this request 145 + * @flow: the flow to use for this request 146 + * @ivlen: size of biv 147 + * @keylen: keylen for this request 148 + * @biv: buffer which contain the IV 149 + */ 150 + struct sun8i_cipher_req_ctx { 151 + struct sginfo t_src[MAX_SG]; 152 + struct sginfo t_dst[MAX_SG]; 153 + u32 p_key; 154 + u32 p_iv; 155 + u32 method; 156 + u32 op_mode; 157 + u32 op_dir; 158 + int flow; 159 + unsigned int ivlen; 160 + unsigned int keylen; 161 + void *biv; 162 + }; 163 + 164 + /* 165 + * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM 166 + * @enginectx: crypto_engine used by this TFM 167 + * @key: pointer to key data 168 + * @keylen: len of the key 169 + * @ss: pointer to the private data of driver handling this TFM 170 + * @fallback_tfm: pointer to the fallback TFM 171 + */ 172 + struct sun8i_cipher_tfm_ctx { 173 + struct crypto_engine_ctx enginectx; 174 + u32 *key; 175 + u32 keylen; 176 + struct sun8i_ss_dev *ss; 177 + struct crypto_sync_skcipher *fallback_tfm; 178 + }; 179 + 180 + /* 181 + * struct sun8i_ss_alg_template - crypto_alg template 182 + * @type: the CRYPTO_ALG_TYPE for this template 183 + * @ss_algo_id: the SS_ID for this template 184 + * @ss_blockmode: the type of block operation SS_ID 185 + * @ss: pointer to the sun8i_ss_dev structure associated with 186 + * this template 187 + * @alg: one of sub struct must be used 188 + * @stat_req: number of request done on this template 189 + * @stat_fb: total of all data len done on this template 190 + */ 191 + struct sun8i_ss_alg_template { 192 + u32 type; 193 + u32 ss_algo_id; 194 + u32 ss_blockmode; 195 + struct sun8i_ss_dev *ss; 196 + union { 197 + struct skcipher_alg skcipher; 198 + } alg; 199 + #ifdef 
CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 200 + unsigned long stat_req; 201 + unsigned long stat_fb; 202 + #endif 203 + }; 204 + 205 + int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type); 206 + 207 + int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 208 + unsigned int keylen); 209 + int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, 210 + unsigned int keylen); 211 + int sun8i_ss_cipher_init(struct crypto_tfm *tfm); 212 + void sun8i_ss_cipher_exit(struct crypto_tfm *tfm); 213 + int sun8i_ss_skdecrypt(struct skcipher_request *areq); 214 + int sun8i_ss_skencrypt(struct skcipher_request *areq); 215 + 216 + int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss); 217 + 218 + int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);