Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: sl3516 - Add sl3516 crypto engine

The Cortina/Gemini SL3516 SoC has a crypto IP that the datasheet names
either "crypto engine" or "crypto acceleration engine".
It supports many algorithms, such as [AES|DES|3DES][ECB|CBC], SHA1, MD5 and
some HMACs.

This patch adds the core files and support for ecb(aes) and the RNG.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Corentin Labbe and committed by
Herbert Xu
46c5338d 124d77c2

+1353
+19
drivers/crypto/Kconfig
··· 266 266 Group, which can perform encryption, decryption, hashing, 267 267 checksumming, and raw copies. 268 268 269 + config CRYPTO_DEV_SL3516 270 + tristate "Stormlink SL3516 crypto offloader" 271 + select CRYPTO_SKCIPHER 272 + select CRYPTO_ENGINE 273 + select CRYPTO_ECB 274 + select CRYPTO_AES 275 + select HW_RANDOM 276 + help 277 + This option allows you to have support for the SL3516 crypto offloader. 278 + 279 + config CRYPTO_DEV_SL3516_DEBUG 280 + bool "Enable SL3516 stats" 281 + depends on CRYPTO_DEV_SL3516 282 + depends on DEBUG_FS 283 + help 284 + Say y to enable SL3516 debug stats. 285 + This will create /sys/kernel/debug/sl3516/stats for displaying 286 + the number of requests per algorithm and other internal stats. 287 + 269 288 config CRYPTO_DEV_HIFN_795X 270 289 tristate "Driver HIFN 795x crypto accelerator chips" 271 290 select CRYPTO_LIB_DES
+1
drivers/crypto/Makefile
··· 38 38 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o 39 39 obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o 40 40 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o 41 + obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/ 41 42 obj-$(CONFIG_ARCH_STM32) += stm32/ 42 43 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 43 44 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+2
drivers/crypto/gemini/Makefile
··· 1 + obj-$(CONFIG_CRYPTO_DEV_SL3516) += sl3516-ce.o 2 + sl3516-ce-y += sl3516-ce-core.o sl3516-ce-cipher.o sl3516-ce-rng.o
+388
drivers/crypto/gemini/sl3516-ce-cipher.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * sl3516-ce-cipher.c - hardware cryptographic offloader for Stormlink SL3516 SoC 4 + * 5 + * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com> 6 + * 7 + * This file adds support for AES cipher with 128,192,256 bits keysize in 8 + * ECB mode. 9 + */ 10 + 11 + #include <linux/crypto.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/delay.h> 14 + #include <linux/io.h> 15 + #include <linux/io.h> 16 + #include <linux/pm_runtime.h> 17 + #include <crypto/scatterwalk.h> 18 + #include <crypto/internal/skcipher.h> 19 + #include "sl3516-ce.h" 20 + 21 + /* sl3516_ce_need_fallback - check if a request can be handled by the CE */ 22 + static bool sl3516_ce_need_fallback(struct skcipher_request *areq) 23 + { 24 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 25 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 26 + struct sl3516_ce_dev *ce = op->ce; 27 + struct scatterlist *in_sg = areq->src; 28 + struct scatterlist *out_sg = areq->dst; 29 + struct scatterlist *sg; 30 + 31 + if (areq->cryptlen == 0 || areq->cryptlen % 16) { 32 + ce->fallback_mod16++; 33 + return true; 34 + } 35 + 36 + /* 37 + * check if we have enough descriptors for TX 38 + * Note: TX need one control desc for each SG 39 + */ 40 + if (sg_nents(areq->src) > MAXDESC / 2) { 41 + ce->fallback_sg_count_tx++; 42 + return true; 43 + } 44 + /* check if we have enough descriptors for RX */ 45 + if (sg_nents(areq->dst) > MAXDESC) { 46 + ce->fallback_sg_count_rx++; 47 + return true; 48 + } 49 + 50 + sg = areq->src; 51 + while (sg) { 52 + if ((sg->length % 16) != 0) { 53 + ce->fallback_mod16++; 54 + return true; 55 + } 56 + if ((sg_dma_len(sg) % 16) != 0) { 57 + ce->fallback_mod16++; 58 + return true; 59 + } 60 + if (!IS_ALIGNED(sg->offset, 16)) { 61 + ce->fallback_align16++; 62 + return true; 63 + } 64 + sg = sg_next(sg); 65 + } 66 + sg = areq->dst; 67 + while (sg) { 68 + if ((sg->length % 16) != 0) { 69 + 
ce->fallback_mod16++; 70 + return true; 71 + } 72 + if ((sg_dma_len(sg) % 16) != 0) { 73 + ce->fallback_mod16++; 74 + return true; 75 + } 76 + if (!IS_ALIGNED(sg->offset, 16)) { 77 + ce->fallback_align16++; 78 + return true; 79 + } 80 + sg = sg_next(sg); 81 + } 82 + 83 + /* need same numbers of SG (with same length) for source and destination */ 84 + in_sg = areq->src; 85 + out_sg = areq->dst; 86 + while (in_sg && out_sg) { 87 + if (in_sg->length != out_sg->length) { 88 + ce->fallback_not_same_len++; 89 + return true; 90 + } 91 + in_sg = sg_next(in_sg); 92 + out_sg = sg_next(out_sg); 93 + } 94 + if (in_sg || out_sg) 95 + return true; 96 + 97 + return false; 98 + } 99 + 100 + static int sl3516_ce_cipher_fallback(struct skcipher_request *areq) 101 + { 102 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 103 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 104 + struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 105 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 106 + struct sl3516_ce_alg_template *algt; 107 + int err; 108 + 109 + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); 110 + algt->stat_fb++; 111 + 112 + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); 113 + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, 114 + areq->base.complete, areq->base.data); 115 + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, 116 + areq->cryptlen, areq->iv); 117 + if (rctx->op_dir == CE_DECRYPTION) 118 + err = crypto_skcipher_decrypt(&rctx->fallback_req); 119 + else 120 + err = crypto_skcipher_encrypt(&rctx->fallback_req); 121 + return err; 122 + } 123 + 124 + static int sl3516_ce_cipher(struct skcipher_request *areq) 125 + { 126 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 127 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 128 + struct sl3516_ce_dev *ce = op->ce; 129 + struct sl3516_ce_cipher_req_ctx *rctx = 
skcipher_request_ctx(areq); 130 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 131 + struct sl3516_ce_alg_template *algt; 132 + struct scatterlist *sg; 133 + unsigned int todo, len; 134 + struct pkt_control_ecb *ecb; 135 + int nr_sgs = 0; 136 + int nr_sgd = 0; 137 + int err = 0; 138 + int i; 139 + 140 + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); 141 + 142 + dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, 143 + crypto_tfm_alg_name(areq->base.tfm), 144 + areq->cryptlen, 145 + rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), 146 + op->keylen); 147 + 148 + algt->stat_req++; 149 + 150 + if (areq->src == areq->dst) { 151 + nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), 152 + DMA_BIDIRECTIONAL); 153 + if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { 154 + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); 155 + err = -EINVAL; 156 + goto theend; 157 + } 158 + nr_sgd = nr_sgs; 159 + } else { 160 + nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), 161 + DMA_TO_DEVICE); 162 + if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { 163 + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); 164 + err = -EINVAL; 165 + goto theend; 166 + } 167 + nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst), 168 + DMA_FROM_DEVICE); 169 + if (nr_sgd <= 0 || nr_sgd > MAXDESC) { 170 + dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); 171 + err = -EINVAL; 172 + goto theend_sgs; 173 + } 174 + } 175 + 176 + len = areq->cryptlen; 177 + i = 0; 178 + sg = areq->src; 179 + while (i < nr_sgs && sg && len) { 180 + if (sg_dma_len(sg) == 0) 181 + goto sgs_next; 182 + rctx->t_src[i].addr = sg_dma_address(sg); 183 + todo = min(len, sg_dma_len(sg)); 184 + rctx->t_src[i].len = todo; 185 + dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__, 186 + areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo); 187 + len -= todo; 188 + i++; 189 + sgs_next: 190 + sg = sg_next(sg); 191 + } 192 + if (len > 0) { 193 + dev_err(ce->dev, 
"remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs); 194 + err = -EINVAL; 195 + goto theend_sgs; 196 + } 197 + 198 + len = areq->cryptlen; 199 + i = 0; 200 + sg = areq->dst; 201 + while (i < nr_sgd && sg && len) { 202 + if (sg_dma_len(sg) == 0) 203 + goto sgd_next; 204 + rctx->t_dst[i].addr = sg_dma_address(sg); 205 + todo = min(len, sg_dma_len(sg)); 206 + rctx->t_dst[i].len = todo; 207 + dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__, 208 + areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo); 209 + len -= todo; 210 + i++; 211 + 212 + sgd_next: 213 + sg = sg_next(sg); 214 + } 215 + if (len > 0) { 216 + dev_err(ce->dev, "remaining len %d\n", len); 217 + err = -EINVAL; 218 + goto theend_sgs; 219 + } 220 + 221 + switch (algt->mode) { 222 + case ECB_AES: 223 + rctx->pctrllen = sizeof(struct pkt_control_ecb); 224 + ecb = (struct pkt_control_ecb *)ce->pctrl; 225 + 226 + rctx->tqflag = TQ0_TYPE_CTRL; 227 + rctx->tqflag |= TQ1_CIPHER; 228 + ecb->control.op_mode = rctx->op_dir; 229 + ecb->control.cipher_algorithm = ECB_AES; 230 + ecb->cipher.header_len = 0; 231 + ecb->cipher.algorithm_len = areq->cryptlen; 232 + cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4); 233 + rctx->h = &ecb->cipher; 234 + 235 + rctx->tqflag |= TQ4_KEY0; 236 + rctx->tqflag |= TQ5_KEY4; 237 + rctx->tqflag |= TQ6_KEY6; 238 + ecb->control.aesnk = op->keylen / 4; 239 + break; 240 + } 241 + 242 + rctx->nr_sgs = nr_sgs; 243 + rctx->nr_sgd = nr_sgd; 244 + err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm)); 245 + 246 + theend_sgs: 247 + if (areq->src == areq->dst) { 248 + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), 249 + DMA_BIDIRECTIONAL); 250 + } else { 251 + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), 252 + DMA_TO_DEVICE); 253 + dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst), 254 + DMA_FROM_DEVICE); 255 + } 256 + 257 + theend: 258 + 259 + return err; 260 + } 261 + 262 + static int 
sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq) 263 + { 264 + int err; 265 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); 266 + 267 + err = sl3516_ce_cipher(breq); 268 + crypto_finalize_skcipher_request(engine, breq, err); 269 + 270 + return 0; 271 + } 272 + 273 + int sl3516_ce_skdecrypt(struct skcipher_request *areq) 274 + { 275 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 276 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 277 + struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 278 + struct crypto_engine *engine; 279 + 280 + memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx)); 281 + rctx->op_dir = CE_DECRYPTION; 282 + 283 + if (sl3516_ce_need_fallback(areq)) 284 + return sl3516_ce_cipher_fallback(areq); 285 + 286 + engine = op->ce->engine; 287 + 288 + return crypto_transfer_skcipher_request_to_engine(engine, areq); 289 + } 290 + 291 + int sl3516_ce_skencrypt(struct skcipher_request *areq) 292 + { 293 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 294 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 295 + struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 296 + struct crypto_engine *engine; 297 + 298 + memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx)); 299 + rctx->op_dir = CE_ENCRYPTION; 300 + 301 + if (sl3516_ce_need_fallback(areq)) 302 + return sl3516_ce_cipher_fallback(areq); 303 + 304 + engine = op->ce->engine; 305 + 306 + return crypto_transfer_skcipher_request_to_engine(engine, areq); 307 + } 308 + 309 + int sl3516_ce_cipher_init(struct crypto_tfm *tfm) 310 + { 311 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 312 + struct sl3516_ce_alg_template *algt; 313 + const char *name = crypto_tfm_alg_name(tfm); 314 + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); 315 + struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); 316 + int err; 317 + 318 + memset(op, 0, 
sizeof(struct sl3516_ce_cipher_tfm_ctx)); 319 + 320 + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); 321 + op->ce = algt->ce; 322 + 323 + op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); 324 + if (IS_ERR(op->fallback_tfm)) { 325 + dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 326 + name, PTR_ERR(op->fallback_tfm)); 327 + return PTR_ERR(op->fallback_tfm); 328 + } 329 + 330 + sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) + 331 + crypto_skcipher_reqsize(op->fallback_tfm); 332 + 333 + dev_info(op->ce->dev, "Fallback for %s is %s\n", 334 + crypto_tfm_alg_driver_name(&sktfm->base), 335 + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); 336 + 337 + op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request; 338 + op->enginectx.op.prepare_request = NULL; 339 + op->enginectx.op.unprepare_request = NULL; 340 + 341 + err = pm_runtime_get_sync(op->ce->dev); 342 + if (err < 0) 343 + goto error_pm; 344 + 345 + return 0; 346 + error_pm: 347 + pm_runtime_put_noidle(op->ce->dev); 348 + crypto_free_skcipher(op->fallback_tfm); 349 + return err; 350 + } 351 + 352 + void sl3516_ce_cipher_exit(struct crypto_tfm *tfm) 353 + { 354 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 355 + 356 + kfree_sensitive(op->key); 357 + crypto_free_skcipher(op->fallback_tfm); 358 + pm_runtime_put_sync_suspend(op->ce->dev); 359 + } 360 + 361 + int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 362 + unsigned int keylen) 363 + { 364 + struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 365 + struct sl3516_ce_dev *ce = op->ce; 366 + 367 + switch (keylen) { 368 + case 128 / 8: 369 + break; 370 + case 192 / 8: 371 + break; 372 + case 256 / 8: 373 + break; 374 + default: 375 + dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen); 376 + return -EINVAL; 377 + } 378 + kfree_sensitive(op->key); 379 + op->keylen = keylen; 380 + op->key = kmemdup(key, keylen, 
GFP_KERNEL | GFP_DMA); 381 + if (!op->key) 382 + return -ENOMEM; 383 + 384 + crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); 385 + crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 386 + 387 + return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); 388 + }
+535
drivers/crypto/gemini/sl3516-ce-core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * sl3516-ce-core.c - hardware cryptographic offloader for Stormlink SL3516 SoC 4 + * 5 + * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com> 6 + * 7 + * Core file which registers crypto algorithms supported by the CryptoEngine 8 + */ 9 + #include <linux/clk.h> 10 + #include <linux/crypto.h> 11 + #include <linux/debugfs.h> 12 + #include <linux/dev_printk.h> 13 + #include <linux/dma-mapping.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/io.h> 16 + #include <linux/irq.h> 17 + #include <linux/module.h> 18 + #include <linux/of.h> 19 + #include <linux/of_device.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/pm_runtime.h> 22 + #include <linux/reset.h> 23 + #include <crypto/internal/rng.h> 24 + #include <crypto/internal/skcipher.h> 25 + 26 + #include "sl3516-ce.h" 27 + 28 + static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce) 29 + { 30 + const size_t sz = sizeof(struct descriptor) * MAXDESC; 31 + int i; 32 + 33 + ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL); 34 + if (!ce->tx) 35 + return -ENOMEM; 36 + ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL); 37 + if (!ce->rx) 38 + goto err_rx; 39 + 40 + for (i = 0; i < MAXDESC; i++) { 41 + ce->tx[i].frame_ctrl.bits.own = CE_CPU; 42 + ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor); 43 + } 44 + ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx; 45 + 46 + for (i = 0; i < MAXDESC; i++) { 47 + ce->rx[i].frame_ctrl.bits.own = CE_CPU; 48 + ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor); 49 + } 50 + ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx; 51 + 52 + ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb), 53 + &ce->dctrl, GFP_KERNEL); 54 + if (!ce->pctrl) 55 + goto err_pctrl; 56 + 57 + return 0; 58 + err_pctrl: 59 + dma_free_coherent(ce->dev, sz, ce->rx, ce->drx); 60 + err_rx: 61 + 
dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx); 62 + return -ENOMEM; 63 + } 64 + 65 + static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce) 66 + { 67 + const size_t sz = sizeof(struct descriptor) * MAXDESC; 68 + 69 + dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx); 70 + dma_free_coherent(ce->dev, sz, ce->rx, ce->drx); 71 + dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl, 72 + ce->dctrl); 73 + } 74 + 75 + static void start_dma_tx(struct sl3516_ce_dev *ce) 76 + { 77 + u32 v; 78 + 79 + v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE | \ 80 + TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK; 81 + 82 + writel(v, ce->base + IPSEC_TXDMA_CTRL); 83 + } 84 + 85 + static void start_dma_rx(struct sl3516_ce_dev *ce) 86 + { 87 + u32 v; 88 + 89 + v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE | \ 90 + RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH | \ 91 + RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR | \ 92 + RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF; 93 + 94 + writel(v, ce->base + IPSEC_RXDMA_CTRL); 95 + } 96 + 97 + static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce) 98 + { 99 + struct descriptor *dd; 100 + 101 + dd = &ce->tx[ce->ctx]; 102 + ce->ctx++; 103 + if (ce->ctx >= MAXDESC) 104 + ce->ctx = 0; 105 + return dd; 106 + } 107 + 108 + static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce) 109 + { 110 + struct descriptor *rdd; 111 + 112 + rdd = &ce->rx[ce->crx]; 113 + ce->crx++; 114 + if (ce->crx >= MAXDESC) 115 + ce->crx = 0; 116 + return rdd; 117 + } 118 + 119 + int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx, 120 + const char *name) 121 + { 122 + struct descriptor *dd, *rdd = NULL; 123 + u32 v; 124 + int i, err = 0; 125 + 126 + ce->stat_req++; 127 + 128 + reinit_completion(&ce->complete); 129 + ce->status = 0; 130 + 131 + for (i = 0; i < rctx->nr_sgd; i++) { 132 + dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__, 133 + i, 
rctx->nr_sgd, rctx->t_dst[i].len); 134 + rdd = get_desc_rx(ce); 135 + rdd->buf_adr = rctx->t_dst[i].addr; 136 + rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len; 137 + rdd->frame_ctrl.bits.own = CE_DMA; 138 + } 139 + rdd->next_desc.bits.eofie = 1; 140 + 141 + for (i = 0; i < rctx->nr_sgs; i++) { 142 + dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__, 143 + i, rctx->nr_sgs, rctx->t_src[i].len); 144 + rctx->h->algorithm_len = rctx->t_src[i].len; 145 + 146 + dd = get_desc_tx(ce); 147 + dd->frame_ctrl.raw = 0; 148 + dd->flag_status.raw = 0; 149 + dd->frame_ctrl.bits.buffer_size = rctx->pctrllen; 150 + dd->buf_adr = ce->dctrl; 151 + dd->flag_status.tx_flag.tqflag = rctx->tqflag; 152 + dd->next_desc.bits.eofie = 0; 153 + dd->next_desc.bits.dec = 0; 154 + dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST; 155 + dd->frame_ctrl.bits.own = CE_DMA; 156 + 157 + dd = get_desc_tx(ce); 158 + dd->frame_ctrl.raw = 0; 159 + dd->flag_status.raw = 0; 160 + dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len; 161 + dd->buf_adr = rctx->t_src[i].addr; 162 + dd->flag_status.tx_flag.tqflag = 0; 163 + dd->next_desc.bits.eofie = 0; 164 + dd->next_desc.bits.dec = 0; 165 + dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST; 166 + dd->frame_ctrl.bits.own = CE_DMA; 167 + start_dma_tx(ce); 168 + start_dma_rx(ce); 169 + } 170 + wait_for_completion_interruptible_timeout(&ce->complete, 171 + msecs_to_jiffies(5000)); 172 + if (ce->status == 0) { 173 + dev_err(ce->dev, "DMA timeout for %s\n", name); 174 + err = -EFAULT; 175 + } 176 + v = readl(ce->base + IPSEC_STATUS_REG); 177 + if (v & 0xFFF) { 178 + dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v); 179 + err = -EFAULT; 180 + } 181 + 182 + return err; 183 + } 184 + 185 + static irqreturn_t ce_irq_handler(int irq, void *data) 186 + { 187 + struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data; 188 + u32 v; 189 + 190 + ce->stat_irq++; 191 + 192 + v = readl(ce->base + IPSEC_DMA_STATUS); 193 + writel(v, ce->base + IPSEC_DMA_STATUS); 
194 + 195 + if (v & DMA_STATUS_TS_DERR) 196 + dev_err(ce->dev, "AHB bus Error While Tx !!!\n"); 197 + if (v & DMA_STATUS_TS_PERR) 198 + dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n"); 199 + if (v & DMA_STATUS_RS_DERR) 200 + dev_err(ce->dev, "AHB bus Error While Rx !!!\n"); 201 + if (v & DMA_STATUS_RS_PERR) 202 + dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n"); 203 + 204 + if (v & DMA_STATUS_TS_EOFI) 205 + ce->stat_irq_tx++; 206 + if (v & DMA_STATUS_RS_EOFI) { 207 + ce->status = 1; 208 + complete(&ce->complete); 209 + ce->stat_irq_rx++; 210 + return IRQ_HANDLED; 211 + } 212 + 213 + return IRQ_HANDLED; 214 + } 215 + 216 + static struct sl3516_ce_alg_template ce_algs[] = { 217 + { 218 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 219 + .mode = ECB_AES, 220 + .alg.skcipher = { 221 + .base = { 222 + .cra_name = "ecb(aes)", 223 + .cra_driver_name = "ecb-aes-sl3516", 224 + .cra_priority = 400, 225 + .cra_blocksize = AES_BLOCK_SIZE, 226 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 227 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 228 + .cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx), 229 + .cra_module = THIS_MODULE, 230 + .cra_alignmask = 0xf, 231 + .cra_init = sl3516_ce_cipher_init, 232 + .cra_exit = sl3516_ce_cipher_exit, 233 + }, 234 + .min_keysize = AES_MIN_KEY_SIZE, 235 + .max_keysize = AES_MAX_KEY_SIZE, 236 + .setkey = sl3516_ce_aes_setkey, 237 + .encrypt = sl3516_ce_skencrypt, 238 + .decrypt = sl3516_ce_skdecrypt, 239 + } 240 + }, 241 + }; 242 + 243 + #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG 244 + static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v) 245 + { 246 + struct sl3516_ce_dev *ce = seq->private; 247 + unsigned int i; 248 + 249 + seq_printf(seq, "HWRNG %lu %lu\n", 250 + ce->hwrng_stat_req, ce->hwrng_stat_bytes); 251 + seq_printf(seq, "IRQ %lu\n", ce->stat_irq); 252 + seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx); 253 + seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx); 254 + seq_printf(seq, "nreq %lu\n", ce->stat_req); 255 + 
seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx); 256 + seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx); 257 + seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16); 258 + seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16); 259 + seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len); 260 + 261 + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { 262 + if (!ce_algs[i].ce) 263 + continue; 264 + switch (ce_algs[i].type) { 265 + case CRYPTO_ALG_TYPE_SKCIPHER: 266 + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", 267 + ce_algs[i].alg.skcipher.base.cra_driver_name, 268 + ce_algs[i].alg.skcipher.base.cra_name, 269 + ce_algs[i].stat_req, ce_algs[i].stat_fb); 270 + break; 271 + } 272 + } 273 + return 0; 274 + } 275 + 276 + DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs); 277 + #endif 278 + 279 + static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce) 280 + { 281 + int err; 282 + unsigned int i; 283 + 284 + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { 285 + ce_algs[i].ce = ce; 286 + switch (ce_algs[i].type) { 287 + case CRYPTO_ALG_TYPE_SKCIPHER: 288 + dev_info(ce->dev, "DEBUG: Register %s\n", 289 + ce_algs[i].alg.skcipher.base.cra_name); 290 + err = crypto_register_skcipher(&ce_algs[i].alg.skcipher); 291 + if (err) { 292 + dev_err(ce->dev, "Fail to register %s\n", 293 + ce_algs[i].alg.skcipher.base.cra_name); 294 + ce_algs[i].ce = NULL; 295 + return err; 296 + } 297 + break; 298 + default: 299 + ce_algs[i].ce = NULL; 300 + dev_err(ce->dev, "ERROR: tried to register an unknown algo\n"); 301 + } 302 + } 303 + return 0; 304 + } 305 + 306 + static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce) 307 + { 308 + unsigned int i; 309 + 310 + for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { 311 + if (!ce_algs[i].ce) 312 + continue; 313 + switch (ce_algs[i].type) { 314 + case CRYPTO_ALG_TYPE_SKCIPHER: 315 + dev_info(ce->dev, "Unregister %d %s\n", i, 316 + ce_algs[i].alg.skcipher.base.cra_name); 317 + 
crypto_unregister_skcipher(&ce_algs[i].alg.skcipher); 318 + break; 319 + } 320 + } 321 + } 322 + 323 + static void sl3516_ce_start(struct sl3516_ce_dev *ce) 324 + { 325 + ce->ctx = 0; 326 + ce->crx = 0; 327 + writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC); 328 + writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC); 329 + writel(0, ce->base + IPSEC_DMA_STATUS); 330 + } 331 + 332 + /* 333 + * Power management strategy: The device is suspended unless a TFM exists for 334 + * one of the algorithms proposed by this driver. 335 + */ 336 + static int sl3516_ce_pm_suspend(struct device *dev) 337 + { 338 + struct sl3516_ce_dev *ce = dev_get_drvdata(dev); 339 + 340 + reset_control_assert(ce->reset); 341 + clk_disable_unprepare(ce->clks); 342 + return 0; 343 + } 344 + 345 + static int sl3516_ce_pm_resume(struct device *dev) 346 + { 347 + struct sl3516_ce_dev *ce = dev_get_drvdata(dev); 348 + int err; 349 + 350 + err = clk_prepare_enable(ce->clks); 351 + if (err) { 352 + dev_err(ce->dev, "Cannot prepare_enable\n"); 353 + goto error; 354 + } 355 + err = reset_control_deassert(ce->reset); 356 + if (err) { 357 + dev_err(ce->dev, "Cannot deassert reset control\n"); 358 + goto error; 359 + } 360 + 361 + sl3516_ce_start(ce); 362 + 363 + return 0; 364 + error: 365 + sl3516_ce_pm_suspend(dev); 366 + return err; 367 + } 368 + 369 + static const struct dev_pm_ops sl3516_ce_pm_ops = { 370 + SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL) 371 + }; 372 + 373 + static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce) 374 + { 375 + int err; 376 + 377 + pm_runtime_use_autosuspend(ce->dev); 378 + pm_runtime_set_autosuspend_delay(ce->dev, 2000); 379 + 380 + err = pm_runtime_set_suspended(ce->dev); 381 + if (err) 382 + return err; 383 + pm_runtime_enable(ce->dev); 384 + return err; 385 + } 386 + 387 + static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce) 388 + { 389 + pm_runtime_disable(ce->dev); 390 + } 391 + 392 + static int sl3516_ce_probe(struct platform_device *pdev) 393 
+ { 394 + struct sl3516_ce_dev *ce; 395 + int err, irq; 396 + u32 v; 397 + 398 + ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL); 399 + if (!ce) 400 + return -ENOMEM; 401 + 402 + ce->dev = &pdev->dev; 403 + platform_set_drvdata(pdev, ce); 404 + 405 + ce->base = devm_platform_ioremap_resource(pdev, 0); 406 + if (IS_ERR(ce->base)) 407 + return PTR_ERR(ce->base); 408 + 409 + irq = platform_get_irq(pdev, 0); 410 + if (irq < 0) 411 + return irq; 412 + 413 + err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce); 414 + if (err) { 415 + dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err); 416 + return err; 417 + } 418 + 419 + ce->reset = devm_reset_control_get(&pdev->dev, NULL); 420 + if (IS_ERR(ce->reset)) 421 + return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset), 422 + "No reset control found\n"); 423 + ce->clks = devm_clk_get(ce->dev, NULL); 424 + if (IS_ERR(ce->clks)) { 425 + err = PTR_ERR(ce->clks); 426 + dev_err(ce->dev, "Cannot get clock err=%d\n", err); 427 + return err; 428 + } 429 + 430 + err = sl3516_ce_desc_init(ce); 431 + if (err) 432 + return err; 433 + 434 + err = sl3516_ce_pm_init(ce); 435 + if (err) 436 + goto error_pm; 437 + 438 + init_completion(&ce->complete); 439 + 440 + ce->engine = crypto_engine_alloc_init(ce->dev, true); 441 + if (!ce->engine) { 442 + dev_err(ce->dev, "Cannot allocate engine\n"); 443 + err = -ENOMEM; 444 + goto error_engine; 445 + } 446 + 447 + err = crypto_engine_start(ce->engine); 448 + if (err) { 449 + dev_err(ce->dev, "Cannot start engine\n"); 450 + goto error_engine; 451 + } 452 + 453 + err = sl3516_ce_register_algs(ce); 454 + if (err) 455 + goto error_alg; 456 + 457 + err = sl3516_ce_rng_register(ce); 458 + if (err) 459 + goto error_rng; 460 + 461 + err = pm_runtime_resume_and_get(ce->dev); 462 + if (err < 0) 463 + goto error_pmuse; 464 + 465 + v = readl(ce->base + IPSEC_ID); 466 + dev_info(ce->dev, "SL3516 dev %lx rev %lx\n", 467 + v & GENMASK(31, 4), 468 + v & GENMASK(3, 0)); 469 + 
v = readl(ce->base + IPSEC_DMA_DEVICE_ID); 470 + dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n", 471 + v & GENMASK(15, 4), 472 + v & GENMASK(3, 0)); 473 + 474 + pm_runtime_put_sync(ce->dev); 475 + 476 + #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG 477 + /* Ignore error of debugfs */ 478 + ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL); 479 + ce->dbgfs_stats = debugfs_create_file("stats", 0444, 480 + ce->dbgfs_dir, ce, 481 + &sl3516_ce_debugfs_fops); 482 + #endif 483 + 484 + return 0; 485 + error_pmuse: 486 + sl3516_ce_rng_unregister(ce); 487 + error_rng: 488 + sl3516_ce_unregister_algs(ce); 489 + error_alg: 490 + crypto_engine_exit(ce->engine); 491 + error_engine: 492 + sl3516_ce_pm_exit(ce); 493 + error_pm: 494 + sl3516_ce_free_descs(ce); 495 + return err; 496 + } 497 + 498 + static int sl3516_ce_remove(struct platform_device *pdev) 499 + { 500 + struct sl3516_ce_dev *ce = platform_get_drvdata(pdev); 501 + 502 + sl3516_ce_rng_unregister(ce); 503 + sl3516_ce_unregister_algs(ce); 504 + crypto_engine_exit(ce->engine); 505 + sl3516_ce_pm_exit(ce); 506 + sl3516_ce_free_descs(ce); 507 + 508 + #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG 509 + debugfs_remove_recursive(ce->dbgfs_dir); 510 + #endif 511 + 512 + return 0; 513 + } 514 + 515 + static const struct of_device_id sl3516_ce_crypto_of_match_table[] = { 516 + { .compatible = "cortina,sl3516-crypto"}, 517 + {} 518 + }; 519 + MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table); 520 + 521 + static struct platform_driver sl3516_ce_driver = { 522 + .probe = sl3516_ce_probe, 523 + .remove = sl3516_ce_remove, 524 + .driver = { 525 + .name = "sl3516-crypto", 526 + .pm = &sl3516_ce_pm_ops, 527 + .of_match_table = sl3516_ce_crypto_of_match_table, 528 + }, 529 + }; 530 + 531 + module_platform_driver(sl3516_ce_driver); 532 + 533 + MODULE_DESCRIPTION("SL3516 cryptographic offloader"); 534 + MODULE_LICENSE("GPL"); 535 + MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
+61
drivers/crypto/gemini/sl3516-ce-rng.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * sl3516-ce-rng.c - hardware cryptographic offloader for SL3516 SoC. 4 + * 5 + * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com> 6 + * 7 + * This file handle the RNG found in the SL3516 crypto engine 8 + */ 9 + #include "sl3516-ce.h" 10 + #include <linux/pm_runtime.h> 11 + #include <linux/hw_random.h> 12 + 13 + static int sl3516_ce_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) 14 + { 15 + struct sl3516_ce_dev *ce; 16 + u32 *data = buf; 17 + size_t read = 0; 18 + int err; 19 + 20 + ce = container_of(rng, struct sl3516_ce_dev, trng); 21 + 22 + #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG 23 + ce->hwrng_stat_req++; 24 + ce->hwrng_stat_bytes += max; 25 + #endif 26 + 27 + err = pm_runtime_get_sync(ce->dev); 28 + if (err < 0) { 29 + pm_runtime_put_noidle(ce->dev); 30 + return err; 31 + } 32 + 33 + while (read < max) { 34 + *data = readl(ce->base + IPSEC_RAND_NUM_REG); 35 + data++; 36 + read += 4; 37 + } 38 + 39 + pm_runtime_put(ce->dev); 40 + 41 + return read; 42 + } 43 + 44 + int sl3516_ce_rng_register(struct sl3516_ce_dev *ce) 45 + { 46 + int ret; 47 + 48 + ce->trng.name = "SL3516 Crypto Engine RNG"; 49 + ce->trng.read = sl3516_ce_rng_read; 50 + ce->trng.quality = 700; 51 + 52 + ret = hwrng_register(&ce->trng); 53 + if (ret) 54 + dev_err(ce->dev, "Fail to register the RNG\n"); 55 + return ret; 56 + } 57 + 58 + void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce) 59 + { 60 + hwrng_unregister(&ce->trng); 61 + }
+347
drivers/crypto/gemini/sl3516-ce.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sl3516-ce.h - hardware cryptographic offloader for cortina/gemini SoC 4 + * 5 + * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com> 6 + * 7 + * General notes on this driver: 8 + * Called either Crypto Acceleration Engine Module, Security Acceleration Engine 9 + * or IPSEC module in the datasheet, it will be called Crypto Engine for short 10 + * in this driver. 11 + * The CE was designed to handle IPSEC and wifi(TKIP WEP) protocol. 12 + * It can handle AES, DES, 3DES, MD5, WEP, TKIP, SHA1, HMAC(MD5), HMAC(SHA1), 13 + * Michael cipher/digest suites. 14 + * It acts the same as a network hw, with both RX and TX chained descriptors. 15 + */ 16 + #include <crypto/aes.h> 17 + #include <crypto/engine.h> 18 + #include <crypto/scatterwalk.h> 19 + #include <crypto/skcipher.h> 20 + #include <linux/crypto.h> 21 + #include <linux/debugfs.h> 22 + #include <linux/hw_random.h> 23 + 24 + #define TQ0_TYPE_DATA 0 25 + #define TQ0_TYPE_CTRL BIT(0) 26 + #define TQ1_CIPHER BIT(1) 27 + #define TQ2_AUTH BIT(2) 28 + #define TQ3_IV BIT(3) 29 + #define TQ4_KEY0 BIT(4) 30 + #define TQ5_KEY4 BIT(5) 31 + #define TQ6_KEY6 BIT(6) 32 + #define TQ7_AKEY0 BIT(7) 33 + #define TQ8_AKEY2 BIT(8) 34 + #define TQ9_AKEY2 BIT(9) 35 + 36 + #define ECB_AES 0x2 37 + 38 + #define DESC_LAST 0x01 39 + #define DESC_FIRST 0x02 40 + 41 + #define IPSEC_ID 0x0000 42 + #define IPSEC_STATUS_REG 0x00a8 43 + #define IPSEC_RAND_NUM_REG 0x00ac 44 + #define IPSEC_DMA_DEVICE_ID 0xff00 45 + #define IPSEC_DMA_STATUS 0xff04 46 + #define IPSEC_TXDMA_CTRL 0xff08 47 + #define IPSEC_TXDMA_FIRST_DESC 0xff0c 48 + #define IPSEC_TXDMA_CURR_DESC 0xff10 49 + #define IPSEC_RXDMA_CTRL 0xff14 50 + #define IPSEC_RXDMA_FIRST_DESC 0xff18 51 + #define IPSEC_RXDMA_CURR_DESC 0xff1c 52 + #define IPSEC_TXDMA_BUF_ADDR 0xff28 53 + #define IPSEC_RXDMA_BUF_ADDR 0xff38 54 + #define IPSEC_RXDMA_BUF_SIZE 0xff30 55 + 56 + #define CE_ENCRYPTION 0x01 57 + #define CE_DECRYPTION 0x03 58 + 59 + 
/* Maximum number of chained TX/RX descriptors handled per request */
#define MAXDESC 6

/* Bits of the IPSEC_DMA_STATUS register (RS = RX state, TS = TX state) */
#define DMA_STATUS_RS_EOFI	BIT(22)
#define DMA_STATUS_RS_PERR	BIT(24)
#define DMA_STATUS_RS_DERR	BIT(25)
#define DMA_STATUS_TS_EOFI	BIT(27)
#define DMA_STATUS_TS_PERR	BIT(29)
#define DMA_STATUS_TS_DERR	BIT(30)

/* Bits of the IPSEC_TXDMA_CTRL register */
#define TXDMA_CTRL_START	BIT(31)
#define TXDMA_CTRL_CONTINUE	BIT(30)
#define TXDMA_CTRL_CHAIN_MODE	BIT(29)
/* the burst value is not documented in the datasheet */
#define TXDMA_CTRL_BURST_UNK	BIT(22)
#define TXDMA_CTRL_INT_FAIL	BIT(17)
#define TXDMA_CTRL_INT_PERR	BIT(16)

/* Bits of the IPSEC_RXDMA_CTRL register */
#define RXDMA_CTRL_START	BIT(31)
#define RXDMA_CTRL_CONTINUE	BIT(30)
#define RXDMA_CTRL_CHAIN_MODE	BIT(29)
/* the burst value is not documented in the datasheet */
#define RXDMA_CTRL_BURST_UNK	BIT(22)
#define RXDMA_CTRL_INT_FINISH	BIT(18)
#define RXDMA_CTRL_INT_FAIL	BIT(17)
#define RXDMA_CTRL_INT_PERR	BIT(16)
#define RXDMA_CTRL_INT_EOD	BIT(15)
#define RXDMA_CTRL_INT_EOF	BIT(14)

/* Descriptor ownership (desc_frame_ctrl.own) */
#define CE_CPU 0
#define CE_DMA 1

/*
 * struct descriptor - descriptor for CE operations
 * @frame_ctrl: Information for the current descriptor
 * @flag_status: For send packet, describe flag of operations.
 * @buf_adr: pointer to a send/recv buffer for data packet
 * @next_desc: control linking to other descriptors
 *
 * NOTE(review): this layout mirrors the hardware DMA descriptor format,
 * so field order and bitfield widths must not be changed.
 */
struct descriptor {
	union {
		u32 raw;
		/*
		 * struct desc_frame_ctrl - Information for the current descriptor
		 * @buffer_size: the size of buffer at buf_adr
		 * @desc_count: Upon completion of a DMA operation, DMA
		 *		write the number of descriptors used
		 *		for the current frame
		 * @checksum: unknown
		 * @authcomp: unknown
		 * @perr: Protocol error during processing this descriptor
		 * @derr: Data error during processing this descriptor
		 * @own: 0 if owned by CPU, 1 for DMA
		 */
		struct desc_frame_ctrl {
			u32 buffer_size	:16;
			u32 desc_count	:6;
			u32 checksum	:6;
			u32 authcomp	:1;
			u32 perr	:1;
			u32 derr	:1;
			u32 own		:1;
		} bits;
	} frame_ctrl;

	union {
		u32 raw;
		/*
		 * struct desc_tx_flag_status - flag for this descriptor
		 * @tqflag: list of TQx_* flags describing the type of operation
		 *	    to be performed.
		 */
		struct desc_tx_flag_status {
			u32 tqflag	:10;
			u32 unused	:22;
		} tx_flag;
	} flag_status;

	u32 buf_adr;

	union {
		u32 next_descriptor;
		/*
		 * struct desc_next - describe chaining of descriptors
		 * @sof_eof: position of the descriptor in the frame:
		 *	     DESC_FIRST if first, DESC_LAST if last,
		 *	     DESC_FIRST | DESC_LAST if it is the only one,
		 *	     0 if in the middle of a chain
		 * @dec: AHB bus address increase (0), decrease (1)
		 * @eofie: End of frame interrupt enable
		 * @ndar: Next descriptor address
		 */
		struct desc_next {
			u32 sof_eof	:2;
			u32 dec		:1;
			u32 eofie	:1;
			u32 ndar	:28;
		} bits;
	} next_desc;
};

/*
 * struct pkt_control_header - The value of this register is used to set the
 *			       operation mode of the IPSec Module.
 * @process_id: Used to identify the process. The number will be copied
 *		to the descriptor status of the received packet.
 * @auth_check_len: Number of 32-bit words to be checked or appended by the
 *		    authentication module
 * @auth_algorithm: which authentication algorithm to use (unused by this
 *		    driver so far)
 * @auth_mode: 0:append 1:Check Authentication Result
 * @fcs_stream_copy: 0:enable 1:disable authentication stream copy
 * @mix_key_sel: 0:use rCipherKey0-3 1:use Key Mixer
 * @aesnk: AES Key Size
 * @cipher_algorithm: choice of CBC/ECB and AES/DES/3DES
 * @op_mode: Operation Mode for the IPSec Module (CE_ENCRYPTION or
 *	     CE_DECRYPTION)
 */
struct pkt_control_header {
	u32 process_id		:8;
	u32 auth_check_len	:3;
	u32 un1			:1;
	u32 auth_algorithm	:3;
	u32 auth_mode		:1;
	u32 fcs_stream_copy	:1;
	u32 un2			:2;
	u32 mix_key_sel		:1;
	u32 aesnk		:4;
	u32 cipher_algorithm	:3;
	u32 un3			:1;
	u32 op_mode		:4;
};

/*
 * struct pkt_control_cipher - cipher length information of a control packet
 * @algorithm_len: length in bytes of the data to be ciphered
 * @header_len: length in bytes of the header preceding the data
 */
struct pkt_control_cipher {
	u32 algorithm_len	:16;
	u32 header_len		:16;
};

/*
 * struct pkt_control_ecb - control packet for ECB
 * @control: generic control header
 * @cipher: length information for the cipher operation
 * @key: the cipher key material
 */
struct pkt_control_ecb {
	struct pkt_control_header control;
	struct pkt_control_cipher cipher;
	unsigned char key[AES_MAX_KEY_SIZE];
};

/*
 * struct sl3516_ce_dev - main container for all this driver information
 * @base:	base address
 * @clks:	clocks used
 * @reset:	pointer to reset controller
 * @dev:	the platform device
 * @engine:	ptr to the crypto/crypto_engine
 * @complete:	completion for the current task on this flow
 * @status:	set to 1 by interrupt if task is done
 * @dtx:	base DMA address for TX descriptors
 * @tx:		base address of TX descriptors
 * @drx:	base DMA address for RX descriptors
 * @rx:		base address of RX descriptors
 * @ctx:	current used TX descriptor
 * @crx:	current used RX descriptor
 * @trng:	hw_random structure for RNG
 * @hwrng_stat_req:	number of HWRNG requests
 * @hwrng_stat_bytes:	total number of bytes generated by RNG
 * @stat_irq:		number of IRQ handled by CE
 * @stat_irq_tx:	number of TX IRQ handled by CE
 * @stat_irq_rx:	number of RX IRQ handled by CE
 * @stat_req:		number of requests handled by CE
 * @fallback_sg_count_tx:	number of fallback due to destination SG count
 * @fallback_sg_count_rx:	number of fallback due to source SG count
 * @fallback_not_same_len:	number of fallback due to difference in SG length
 * @fallback_mod16:	number of fallback due to data length not a
 *			multiple of 16
 * @fallback_align16:	number of fallback due to data not 16-byte aligned
 * @dbgfs_dir:	Debugfs dentry for statistic directory
 * @dbgfs_stats: Debugfs dentry for statistic counters
 * @pctrl:	CPU address of the control packet buffer
 * @dctrl:	DMA address of the control packet buffer
 */
struct sl3516_ce_dev {
	void __iomem *base;
	struct clk *clks;
	struct reset_control *reset;
	struct device *dev;
	struct crypto_engine *engine;
	struct completion complete;
	int status;
	dma_addr_t dtx;
	struct descriptor *tx;
	dma_addr_t drx;
	struct descriptor *rx;
	int ctx;
	int crx;
	struct hwrng trng;
	unsigned long hwrng_stat_req;
	unsigned long hwrng_stat_bytes;
	unsigned long stat_irq;
	unsigned long stat_irq_tx;
	unsigned long stat_irq_rx;
	unsigned long stat_req;
	unsigned long fallback_sg_count_tx;
	unsigned long fallback_sg_count_rx;
	unsigned long fallback_not_same_len;
	unsigned long fallback_mod16;
	unsigned long fallback_align16;
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_stats;
#endif
	void *pctrl;
	dma_addr_t dctrl;
};

/*
 * struct sginfo - DMA address and length of a mapped scatterlist entry
 * @addr: DMA address of the SG entry
 * @len: length in bytes of the SG entry
 */
struct sginfo {
	u32 addr;
	u32 len;
};

/*
 * struct sl3516_ce_cipher_req_ctx - context for a skcipher request
 * @t_src:	list of mapped SGs with their size
 * @t_dst:	list of mapped SGs with their size
 * @op_dir:	direction (encrypt vs decrypt) for this request
 * @pctrllen:	the length of the ctrl packet
 * @tqflag:	the TQflag to set in data packet
 * @h:		pointer to the pkt_control_cipher header
 * @nr_sgs:	number of source SG
 * @nr_sgd:	number of destination SG
 * @fallback_req: request struct for invoking the fallback skcipher TFM
 */
struct sl3516_ce_cipher_req_ctx {
	struct sginfo t_src[MAXDESC];
	struct sginfo t_dst[MAXDESC];
	u32 op_dir;
	unsigned int pctrllen;
	u32 tqflag;
	struct pkt_control_cipher *h;
	int nr_sgs;
	int nr_sgd;
	struct skcipher_request fallback_req; // keep at the end
};

/*
 * struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
 * @enginectx:		crypto_engine used by this TFM
 * @key:		pointer to key data
 * @keylen:		len of the key
 * @ce:			pointer to the private data of driver handling this TFM
 * @fallback_tfm:	pointer to the fallback TFM
 *
 * enginectx must be the first element
 */
struct sl3516_ce_cipher_tfm_ctx {
	struct crypto_engine_ctx enginectx;
	u32 *key;
	u32 keylen;
	struct sl3516_ce_dev *ce;
	struct crypto_skcipher *fallback_tfm;
};

/*
 * struct sl3516_ce_alg_template - crypto_alg template
 * @type:	the CRYPTO_ALG_TYPE for this template
 * @mode:	value to be used in control packet for this algorithm
 * @ce:		pointer to the sl3516_ce_dev structure associated with
 *		this template
 * @alg:	one of sub struct must be used
 * @stat_req:	number of request done on this template
 * @stat_fb:	number of request which has fallbacked
 * @stat_bytes:	total data size done by this template
 */
struct sl3516_ce_alg_template {
	u32 type;
	u32 mode;
	struct sl3516_ce_dev *ce;
	union {
		struct skcipher_alg skcipher;
	} alg;
	unsigned long stat_req;
	unsigned long stat_fb;
	unsigned long stat_bytes;
};

int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type);

int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen);
int sl3516_ce_cipher_init(struct crypto_tfm *tfm);
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm);
int sl3516_ce_skdecrypt(struct skcipher_request *areq);
int sl3516_ce_skencrypt(struct skcipher_request *areq);

int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
		       struct sl3516_ce_cipher_req_ctx *rctx, const char *name);

int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);