Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: amlogic - Add crypto accelerator for amlogic GXL

This patch adds support for the amlogic GXL cryptographic offloader present
on GXL SoCs.

This driver supports AES cipher in CBC/ECB mode.

Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Reviewed-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Corentin Labbe and committed by
Herbert Xu
48fe583f f1fb7ea2

+911
+2
drivers/crypto/Kconfig
··· 808 808 809 809 source "drivers/crypto/hisilicon/Kconfig" 810 810 811 + source "drivers/crypto/amlogic/Kconfig" 812 + 811 813 endif # CRYPTO_HW
+1
drivers/crypto/Makefile
··· 48 48 obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ 49 49 obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ 50 50 obj-y += hisilicon/ 51 + obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
+24
# drivers/crypto/amlogic/Kconfig
config CRYPTO_DEV_AMLOGIC_GXL
	tristate "Support for amlogic cryptographic offloader"
	default y if ARCH_MESON
	select CRYPTO_BLKCIPHER
	select CRYPTO_ENGINE
	select CRYPTO_ECB
	select CRYPTO_CBC
	select CRYPTO_AES
	help
	  Select y here to have support for the cryptographic offloader
	  available on Amlogic GXL SoC.
	  This hardware handles AES ciphers in ECB/CBC mode.

	  To compile this driver as a module, choose M here: the module
	  will be called amlogic-gxl-crypto.

config CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	bool "Enable amlogic stats"
	depends on CRYPTO_DEV_AMLOGIC_GXL
	depends on DEBUG_FS
	help
	  Say y to enable amlogic-crypto debug stats.
	  This will create /sys/kernel/debug/gxl-crypto/stats for displaying
	  the number of requests per flow and per algorithm.
+2
# drivers/crypto/amlogic/Makefile
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic-gxl-crypto.o
amlogic-gxl-crypto-y := amlogic-gxl-core.o amlogic-gxl-cipher.o
+381
drivers/crypto/amlogic/amlogic-gxl-cipher.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC 4 + * 5 + * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com> 6 + * 7 + * This file add support for AES cipher with 128,192,256 bits keysize in 8 + * CBC and ECB mode. 9 + */ 10 + 11 + #include <linux/crypto.h> 12 + #include <linux/delay.h> 13 + #include <linux/io.h> 14 + #include <crypto/scatterwalk.h> 15 + #include <linux/scatterlist.h> 16 + #include <linux/dma-mapping.h> 17 + #include <crypto/internal/skcipher.h> 18 + #include "amlogic-gxl.h" 19 + 20 + static int get_engine_number(struct meson_dev *mc) 21 + { 22 + return atomic_inc_return(&mc->flow) % MAXFLOW; 23 + } 24 + 25 + static bool meson_cipher_need_fallback(struct skcipher_request *areq) 26 + { 27 + struct scatterlist *src_sg = areq->src; 28 + struct scatterlist *dst_sg = areq->dst; 29 + 30 + if (areq->cryptlen == 0) 31 + return true; 32 + 33 + if (sg_nents(src_sg) != sg_nents(dst_sg)) 34 + return true; 35 + 36 + /* KEY/IV descriptors use 3 desc */ 37 + if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3) 38 + return true; 39 + 40 + while (src_sg && dst_sg) { 41 + if ((src_sg->length % 16) != 0) 42 + return true; 43 + if ((dst_sg->length % 16) != 0) 44 + return true; 45 + if (src_sg->length != dst_sg->length) 46 + return true; 47 + if (!IS_ALIGNED(src_sg->offset, sizeof(u32))) 48 + return true; 49 + if (!IS_ALIGNED(dst_sg->offset, sizeof(u32))) 50 + return true; 51 + src_sg = sg_next(src_sg); 52 + dst_sg = sg_next(dst_sg); 53 + } 54 + 55 + return false; 56 + } 57 + 58 + static int meson_cipher_do_fallback(struct skcipher_request *areq) 59 + { 60 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 61 + struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 62 + struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 63 + int err; 64 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 65 + struct skcipher_alg *alg = 
crypto_skcipher_alg(tfm); 66 + struct meson_alg_template *algt; 67 + #endif 68 + SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm); 69 + 70 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 71 + algt = container_of(alg, struct meson_alg_template, alg.skcipher); 72 + algt->stat_fb++; 73 + #endif 74 + skcipher_request_set_sync_tfm(req, op->fallback_tfm); 75 + skcipher_request_set_callback(req, areq->base.flags, NULL, NULL); 76 + skcipher_request_set_crypt(req, areq->src, areq->dst, 77 + areq->cryptlen, areq->iv); 78 + if (rctx->op_dir == MESON_DECRYPT) 79 + err = crypto_skcipher_decrypt(req); 80 + else 81 + err = crypto_skcipher_encrypt(req); 82 + skcipher_request_zero(req); 83 + return err; 84 + } 85 + 86 + static int meson_cipher(struct skcipher_request *areq) 87 + { 88 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 89 + struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 90 + struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 91 + struct meson_dev *mc = op->mc; 92 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 93 + struct meson_alg_template *algt; 94 + int flow = rctx->flow; 95 + unsigned int todo, eat, len; 96 + struct scatterlist *src_sg = areq->src; 97 + struct scatterlist *dst_sg = areq->dst; 98 + struct meson_desc *desc; 99 + int nr_sgs, nr_sgd; 100 + int i, err = 0; 101 + unsigned int keyivlen, ivsize, offset, tloffset; 102 + dma_addr_t phykeyiv; 103 + void *backup_iv = NULL, *bkeyiv; 104 + 105 + algt = container_of(alg, struct meson_alg_template, alg.skcipher); 106 + 107 + dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__, 108 + crypto_tfm_alg_name(areq->base.tfm), 109 + areq->cryptlen, 110 + rctx->op_dir, crypto_skcipher_ivsize(tfm), 111 + op->keylen, flow); 112 + 113 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 114 + algt->stat_req++; 115 + mc->chanlist[flow].stat_req++; 116 + #endif 117 + 118 + /* 119 + * The hardware expect a list of meson_desc structures. 
120 + * The 2 first structures store key 121 + * The third stores IV 122 + */ 123 + bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA); 124 + if (!bkeyiv) 125 + return -ENOMEM; 126 + 127 + memcpy(bkeyiv, op->key, op->keylen); 128 + keyivlen = op->keylen; 129 + 130 + ivsize = crypto_skcipher_ivsize(tfm); 131 + if (areq->iv && ivsize > 0) { 132 + if (ivsize > areq->cryptlen) { 133 + dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen); 134 + return -EINVAL; 135 + } 136 + memcpy(bkeyiv + 32, areq->iv, ivsize); 137 + keyivlen = 48; 138 + if (rctx->op_dir == MESON_DECRYPT) { 139 + backup_iv = kzalloc(ivsize, GFP_KERNEL); 140 + if (!backup_iv) { 141 + err = -ENOMEM; 142 + goto theend; 143 + } 144 + offset = areq->cryptlen - ivsize; 145 + scatterwalk_map_and_copy(backup_iv, areq->src, offset, 146 + ivsize, 0); 147 + } 148 + } 149 + if (keyivlen == 24) 150 + keyivlen = 32; 151 + 152 + phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen, 153 + DMA_TO_DEVICE); 154 + if (dma_mapping_error(mc->dev, phykeyiv)) { 155 + dev_err(mc->dev, "Cannot DMA MAP KEY IV\n"); 156 + return -EFAULT; 157 + } 158 + 159 + tloffset = 0; 160 + eat = 0; 161 + i = 0; 162 + while (keyivlen > eat) { 163 + desc = &mc->chanlist[flow].tl[tloffset]; 164 + memset(desc, 0, sizeof(struct meson_desc)); 165 + todo = min(keyivlen - eat, 16u); 166 + desc->t_src = phykeyiv + i * 16; 167 + desc->t_dst = i * 16; 168 + desc->len = 16; 169 + desc->mode = MODE_KEY; 170 + desc->owner = 1; 171 + eat += todo; 172 + i++; 173 + tloffset++; 174 + } 175 + 176 + if (areq->src == areq->dst) { 177 + nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), 178 + DMA_BIDIRECTIONAL); 179 + if (nr_sgs < 0) { 180 + dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs); 181 + err = -EINVAL; 182 + goto theend; 183 + } 184 + nr_sgd = nr_sgs; 185 + } else { 186 + nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), 187 + DMA_TO_DEVICE); 188 + if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) { 189 + dev_err(mc->dev, "Invalid 
SG count %d\n", nr_sgs); 190 + err = -EINVAL; 191 + goto theend; 192 + } 193 + nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst), 194 + DMA_FROM_DEVICE); 195 + if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) { 196 + dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd); 197 + err = -EINVAL; 198 + goto theend; 199 + } 200 + } 201 + 202 + src_sg = areq->src; 203 + dst_sg = areq->dst; 204 + len = areq->cryptlen; 205 + while (src_sg) { 206 + desc = &mc->chanlist[flow].tl[tloffset]; 207 + memset(desc, 0, sizeof(struct meson_desc)); 208 + 209 + desc->t_src = sg_dma_address(src_sg); 210 + desc->t_dst = sg_dma_address(dst_sg); 211 + todo = min(len, sg_dma_len(src_sg)); 212 + desc->owner = 1; 213 + desc->len = todo; 214 + desc->mode = op->keymode; 215 + desc->op_mode = algt->blockmode; 216 + desc->enc = rctx->op_dir; 217 + len -= todo; 218 + 219 + if (!sg_next(src_sg)) 220 + desc->eoc = 1; 221 + tloffset++; 222 + src_sg = sg_next(src_sg); 223 + dst_sg = sg_next(dst_sg); 224 + } 225 + 226 + reinit_completion(&mc->chanlist[flow].complete); 227 + mc->chanlist[flow].status = 0; 228 + writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2)); 229 + wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete, 230 + msecs_to_jiffies(500)); 231 + if (mc->chanlist[flow].status == 0) { 232 + dev_err(mc->dev, "DMA timeout for flow %d\n", flow); 233 + err = -EINVAL; 234 + } 235 + 236 + dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE); 237 + 238 + if (areq->src == areq->dst) { 239 + dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); 240 + } else { 241 + dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE); 242 + dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); 243 + } 244 + 245 + if (areq->iv && ivsize > 0) { 246 + if (rctx->op_dir == MESON_DECRYPT) { 247 + memcpy(areq->iv, backup_iv, ivsize); 248 + kzfree(backup_iv); 249 + } else { 250 + scatterwalk_map_and_copy(areq->iv, areq->dst, 251 + areq->cryptlen - ivsize, 252 + ivsize, 0); 253 + } 
254 + } 255 + theend: 256 + kzfree(bkeyiv); 257 + 258 + return err; 259 + } 260 + 261 + static int meson_handle_cipher_request(struct crypto_engine *engine, 262 + void *areq) 263 + { 264 + int err; 265 + struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); 266 + 267 + err = meson_cipher(breq); 268 + crypto_finalize_skcipher_request(engine, breq, err); 269 + 270 + return 0; 271 + } 272 + 273 + int meson_skdecrypt(struct skcipher_request *areq) 274 + { 275 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 276 + struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 277 + struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 278 + struct crypto_engine *engine; 279 + int e; 280 + 281 + rctx->op_dir = MESON_DECRYPT; 282 + if (meson_cipher_need_fallback(areq)) 283 + return meson_cipher_do_fallback(areq); 284 + e = get_engine_number(op->mc); 285 + engine = op->mc->chanlist[e].engine; 286 + rctx->flow = e; 287 + 288 + return crypto_transfer_skcipher_request_to_engine(engine, areq); 289 + } 290 + 291 + int meson_skencrypt(struct skcipher_request *areq) 292 + { 293 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); 294 + struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 295 + struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq); 296 + struct crypto_engine *engine; 297 + int e; 298 + 299 + rctx->op_dir = MESON_ENCRYPT; 300 + if (meson_cipher_need_fallback(areq)) 301 + return meson_cipher_do_fallback(areq); 302 + e = get_engine_number(op->mc); 303 + engine = op->mc->chanlist[e].engine; 304 + rctx->flow = e; 305 + 306 + return crypto_transfer_skcipher_request_to_engine(engine, areq); 307 + } 308 + 309 + int meson_cipher_init(struct crypto_tfm *tfm) 310 + { 311 + struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 312 + struct meson_alg_template *algt; 313 + const char *name = crypto_tfm_alg_name(tfm); 314 + struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); 315 + struct 
skcipher_alg *alg = crypto_skcipher_alg(sktfm); 316 + 317 + memset(op, 0, sizeof(struct meson_cipher_tfm_ctx)); 318 + 319 + algt = container_of(alg, struct meson_alg_template, alg.skcipher); 320 + op->mc = algt->mc; 321 + 322 + sktfm->reqsize = sizeof(struct meson_cipher_req_ctx); 323 + 324 + op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); 325 + if (IS_ERR(op->fallback_tfm)) { 326 + dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 327 + name, PTR_ERR(op->fallback_tfm)); 328 + return PTR_ERR(op->fallback_tfm); 329 + } 330 + 331 + op->enginectx.op.do_one_request = meson_handle_cipher_request; 332 + op->enginectx.op.prepare_request = NULL; 333 + op->enginectx.op.unprepare_request = NULL; 334 + 335 + return 0; 336 + } 337 + 338 + void meson_cipher_exit(struct crypto_tfm *tfm) 339 + { 340 + struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); 341 + 342 + if (op->key) { 343 + memzero_explicit(op->key, op->keylen); 344 + kfree(op->key); 345 + } 346 + crypto_free_sync_skcipher(op->fallback_tfm); 347 + } 348 + 349 + int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 350 + unsigned int keylen) 351 + { 352 + struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); 353 + struct meson_dev *mc = op->mc; 354 + 355 + switch (keylen) { 356 + case 128 / 8: 357 + op->keymode = MODE_AES_128; 358 + break; 359 + case 192 / 8: 360 + op->keymode = MODE_AES_192; 361 + break; 362 + case 256 / 8: 363 + op->keymode = MODE_AES_256; 364 + break; 365 + default: 366 + dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen); 367 + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 368 + return -EINVAL; 369 + } 370 + if (op->key) { 371 + memzero_explicit(op->key, op->keylen); 372 + kfree(op->key); 373 + } 374 + op->keylen = keylen; 375 + op->key = kmalloc(keylen, GFP_KERNEL | GFP_DMA); 376 + if (!op->key) 377 + return -ENOMEM; 378 + memcpy(op->key, key, keylen); 379 + 380 + return 
crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); 381 + }
+331
drivers/crypto/amlogic/amlogic-gxl-core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * amlgoic-core.c - hardware cryptographic offloader for Amlogic GXL SoC 4 + * 5 + * Copyright (C) 2018-2019 Corentin Labbe <clabbe@baylibre.com> 6 + * 7 + * Core file which registers crypto algorithms supported by the hardware. 8 + */ 9 + #include <linux/clk.h> 10 + #include <linux/crypto.h> 11 + #include <linux/io.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/irq.h> 14 + #include <linux/module.h> 15 + #include <linux/of.h> 16 + #include <linux/of_device.h> 17 + #include <linux/platform_device.h> 18 + #include <crypto/internal/skcipher.h> 19 + #include <linux/dma-mapping.h> 20 + 21 + #include "amlogic-gxl.h" 22 + 23 + static irqreturn_t meson_irq_handler(int irq, void *data) 24 + { 25 + struct meson_dev *mc = (struct meson_dev *)data; 26 + int flow; 27 + u32 p; 28 + 29 + for (flow = 0; flow < MAXFLOW; flow++) { 30 + if (mc->irqs[flow] == irq) { 31 + p = readl(mc->base + ((0x04 + flow) << 2)); 32 + if (p) { 33 + writel_relaxed(0xF, mc->base + ((0x4 + flow) << 2)); 34 + mc->chanlist[flow].status = 1; 35 + complete(&mc->chanlist[flow].complete); 36 + return IRQ_HANDLED; 37 + } 38 + dev_err(mc->dev, "%s %d Got irq for flow %d but ctrl is empty\n", __func__, irq, flow); 39 + } 40 + } 41 + 42 + dev_err(mc->dev, "%s %d from unknown irq\n", __func__, irq); 43 + return IRQ_HANDLED; 44 + } 45 + 46 + static struct meson_alg_template mc_algs[] = { 47 + { 48 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 49 + .blockmode = MESON_OPMODE_CBC, 50 + .alg.skcipher = { 51 + .base = { 52 + .cra_name = "cbc(aes)", 53 + .cra_driver_name = "cbc-aes-gxl", 54 + .cra_priority = 400, 55 + .cra_blocksize = AES_BLOCK_SIZE, 56 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 57 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 58 + .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), 59 + .cra_module = THIS_MODULE, 60 + .cra_alignmask = 0xf, 61 + .cra_init = meson_cipher_init, 62 + .cra_exit = meson_cipher_exit, 63 + }, 64 + .min_keysize = 
AES_MIN_KEY_SIZE, 65 + .max_keysize = AES_MAX_KEY_SIZE, 66 + .ivsize = AES_BLOCK_SIZE, 67 + .setkey = meson_aes_setkey, 68 + .encrypt = meson_skencrypt, 69 + .decrypt = meson_skdecrypt, 70 + } 71 + }, 72 + { 73 + .type = CRYPTO_ALG_TYPE_SKCIPHER, 74 + .blockmode = MESON_OPMODE_ECB, 75 + .alg.skcipher = { 76 + .base = { 77 + .cra_name = "ecb(aes)", 78 + .cra_driver_name = "ecb-aes-gxl", 79 + .cra_priority = 400, 80 + .cra_blocksize = AES_BLOCK_SIZE, 81 + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 82 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 83 + .cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx), 84 + .cra_module = THIS_MODULE, 85 + .cra_alignmask = 0xf, 86 + .cra_init = meson_cipher_init, 87 + .cra_exit = meson_cipher_exit, 88 + }, 89 + .min_keysize = AES_MIN_KEY_SIZE, 90 + .max_keysize = AES_MAX_KEY_SIZE, 91 + .setkey = meson_aes_setkey, 92 + .encrypt = meson_skencrypt, 93 + .decrypt = meson_skdecrypt, 94 + } 95 + }, 96 + }; 97 + 98 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 99 + static int meson_dbgfs_read(struct seq_file *seq, void *v) 100 + { 101 + struct meson_dev *mc = seq->private; 102 + int i; 103 + 104 + for (i = 0; i < MAXFLOW; i++) 105 + seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req); 106 + 107 + for (i = 0; i < ARRAY_SIZE(mc_algs); i++) { 108 + switch (mc_algs[i].type) { 109 + case CRYPTO_ALG_TYPE_SKCIPHER: 110 + seq_printf(seq, "%s %s %lu %lu\n", 111 + mc_algs[i].alg.skcipher.base.cra_driver_name, 112 + mc_algs[i].alg.skcipher.base.cra_name, 113 + mc_algs[i].stat_req, mc_algs[i].stat_fb); 114 + break; 115 + } 116 + } 117 + return 0; 118 + } 119 + 120 + static int meson_dbgfs_open(struct inode *inode, struct file *file) 121 + { 122 + return single_open(file, meson_dbgfs_read, inode->i_private); 123 + } 124 + 125 + static const struct file_operations meson_debugfs_fops = { 126 + .owner = THIS_MODULE, 127 + .open = meson_dbgfs_open, 128 + .read = seq_read, 129 + .llseek = seq_lseek, 130 + .release = single_release, 131 + }; 132 
+ #endif 133 + 134 + static void meson_free_chanlist(struct meson_dev *mc, int i) 135 + { 136 + while (i >= 0) { 137 + crypto_engine_exit(mc->chanlist[i].engine); 138 + if (mc->chanlist[i].tl) 139 + dma_free_coherent(mc->dev, sizeof(struct meson_desc) * MAXDESC, 140 + mc->chanlist[i].tl, 141 + mc->chanlist[i].t_phy); 142 + i--; 143 + } 144 + } 145 + 146 + /* 147 + * Allocate the channel list structure 148 + */ 149 + static int meson_allocate_chanlist(struct meson_dev *mc) 150 + { 151 + int i, err; 152 + 153 + mc->chanlist = devm_kcalloc(mc->dev, MAXFLOW, 154 + sizeof(struct meson_flow), GFP_KERNEL); 155 + if (!mc->chanlist) 156 + return -ENOMEM; 157 + 158 + for (i = 0; i < MAXFLOW; i++) { 159 + init_completion(&mc->chanlist[i].complete); 160 + 161 + mc->chanlist[i].engine = crypto_engine_alloc_init(mc->dev, true); 162 + if (!mc->chanlist[i].engine) { 163 + dev_err(mc->dev, "Cannot allocate engine\n"); 164 + i--; 165 + goto error_engine; 166 + } 167 + err = crypto_engine_start(mc->chanlist[i].engine); 168 + if (err) { 169 + dev_err(mc->dev, "Cannot start engine\n"); 170 + goto error_engine; 171 + } 172 + mc->chanlist[i].tl = dma_alloc_coherent(mc->dev, 173 + sizeof(struct meson_desc) * MAXDESC, 174 + &mc->chanlist[i].t_phy, 175 + GFP_KERNEL); 176 + if (!mc->chanlist[i].tl) { 177 + err = -ENOMEM; 178 + goto error_engine; 179 + } 180 + } 181 + return 0; 182 + error_engine: 183 + meson_free_chanlist(mc, i); 184 + return err; 185 + } 186 + 187 + static int meson_register_algs(struct meson_dev *mc) 188 + { 189 + int err, i; 190 + 191 + for (i = 0; i < ARRAY_SIZE(mc_algs); i++) { 192 + mc_algs[i].mc = mc; 193 + switch (mc_algs[i].type) { 194 + case CRYPTO_ALG_TYPE_SKCIPHER: 195 + err = crypto_register_skcipher(&mc_algs[i].alg.skcipher); 196 + if (err) { 197 + dev_err(mc->dev, "Fail to register %s\n", 198 + mc_algs[i].alg.skcipher.base.cra_name); 199 + mc_algs[i].mc = NULL; 200 + return err; 201 + } 202 + break; 203 + } 204 + } 205 + 206 + return 0; 207 + } 208 + 209 + 
static void meson_unregister_algs(struct meson_dev *mc) 210 + { 211 + int i; 212 + 213 + for (i = 0; i < ARRAY_SIZE(mc_algs); i++) { 214 + if (!mc_algs[i].mc) 215 + continue; 216 + switch (mc_algs[i].type) { 217 + case CRYPTO_ALG_TYPE_SKCIPHER: 218 + crypto_unregister_skcipher(&mc_algs[i].alg.skcipher); 219 + break; 220 + } 221 + } 222 + } 223 + 224 + static int meson_crypto_probe(struct platform_device *pdev) 225 + { 226 + struct meson_dev *mc; 227 + int err, i; 228 + 229 + if (!pdev->dev.of_node) 230 + return -ENODEV; 231 + 232 + mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); 233 + if (!mc) 234 + return -ENOMEM; 235 + 236 + mc->dev = &pdev->dev; 237 + platform_set_drvdata(pdev, mc); 238 + 239 + mc->base = devm_platform_ioremap_resource(pdev, 0); 240 + if (IS_ERR(mc->base)) { 241 + err = PTR_ERR(mc->base); 242 + dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err); 243 + return err; 244 + } 245 + mc->busclk = devm_clk_get(&pdev->dev, "blkmv"); 246 + if (IS_ERR(mc->busclk)) { 247 + err = PTR_ERR(mc->busclk); 248 + dev_err(&pdev->dev, "Cannot get core clock err=%d\n", err); 249 + return err; 250 + } 251 + 252 + mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); 253 + for (i = 0; i < MAXFLOW; i++) { 254 + mc->irqs[i] = platform_get_irq(pdev, i); 255 + if (mc->irqs[i] < 0) { 256 + dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i); 257 + return mc->irqs[i]; 258 + } 259 + 260 + err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0, 261 + "gxl-crypto", mc); 262 + if (err < 0) { 263 + dev_err(mc->dev, "Cannot request IRQ for flow %d\n", i); 264 + return err; 265 + } 266 + } 267 + 268 + err = clk_prepare_enable(mc->busclk); 269 + if (err != 0) { 270 + dev_err(&pdev->dev, "Cannot prepare_enable busclk\n"); 271 + return err; 272 + } 273 + 274 + err = meson_allocate_chanlist(mc); 275 + if (err) 276 + goto error_flow; 277 + 278 + err = meson_register_algs(mc); 279 + if (err) 280 + goto error_alg; 281 + 282 + #ifdef 
CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 283 + mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL); 284 + debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops); 285 + #endif 286 + 287 + return 0; 288 + error_alg: 289 + meson_unregister_algs(mc); 290 + error_flow: 291 + meson_free_chanlist(mc, MAXFLOW); 292 + clk_disable_unprepare(mc->busclk); 293 + return err; 294 + } 295 + 296 + static int meson_crypto_remove(struct platform_device *pdev) 297 + { 298 + struct meson_dev *mc = platform_get_drvdata(pdev); 299 + 300 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 301 + debugfs_remove_recursive(mc->dbgfs_dir); 302 + #endif 303 + 304 + meson_unregister_algs(mc); 305 + 306 + meson_free_chanlist(mc, MAXFLOW); 307 + 308 + clk_disable_unprepare(mc->busclk); 309 + return 0; 310 + } 311 + 312 + static const struct of_device_id meson_crypto_of_match_table[] = { 313 + { .compatible = "amlogic,gxl-crypto", }, 314 + {} 315 + }; 316 + MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table); 317 + 318 + static struct platform_driver meson_crypto_driver = { 319 + .probe = meson_crypto_probe, 320 + .remove = meson_crypto_remove, 321 + .driver = { 322 + .name = "gxl-crypto", 323 + .of_match_table = meson_crypto_of_match_table, 324 + }, 325 + }; 326 + 327 + module_platform_driver(meson_crypto_driver); 328 + 329 + MODULE_DESCRIPTION("Amlogic GXL cryptographic offloader"); 330 + MODULE_LICENSE("GPL"); 331 + MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");
+170
drivers/crypto/amlogic/amlogic-gxl.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * amlogic.h - hardware cryptographic offloader for Amlogic SoC 4 + * 5 + * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com> 6 + */ 7 + #include <crypto/aes.h> 8 + #include <crypto/engine.h> 9 + #include <crypto/skcipher.h> 10 + #include <linux/debugfs.h> 11 + #include <linux/crypto.h> 12 + #include <linux/scatterlist.h> 13 + 14 + #define MODE_KEY 1 15 + #define MODE_AES_128 0x8 16 + #define MODE_AES_192 0x9 17 + #define MODE_AES_256 0xa 18 + 19 + #define MESON_DECRYPT 0 20 + #define MESON_ENCRYPT 1 21 + 22 + #define MESON_OPMODE_ECB 0 23 + #define MESON_OPMODE_CBC 1 24 + 25 + #define MAXFLOW 2 26 + 27 + #define MAXDESC 64 28 + 29 + /* 30 + * struct meson_desc - Descriptor for DMA operations 31 + * Note that without datasheet, some are unknown 32 + * @len: length of data to operate 33 + * @irq: Ignored by hardware 34 + * @eoc: End of descriptor 35 + * @loop: Unknown 36 + * @mode: Type of algorithm (AES, SHA) 37 + * @begin: Unknown 38 + * @end: Unknown 39 + * @op_mode: Blockmode (CBC, ECB) 40 + * @block: Unknown 41 + * @error: Unknown 42 + * @owner: owner of the descriptor, 1 own by HW 43 + * @t_src: Physical address of data to read 44 + * @t_dst: Physical address of data to write 45 + */ 46 + struct meson_desc { 47 + union { 48 + u32 t_status; 49 + struct { 50 + u32 len:17; 51 + u32 irq:1; 52 + u32 eoc:1; 53 + u32 loop:1; 54 + u32 mode:4; 55 + u32 begin:1; 56 + u32 end:1; 57 + u32 op_mode:2; 58 + u32 enc:1; 59 + u32 block:1; 60 + u32 error:1; 61 + u32 owner:1; 62 + }; 63 + }; 64 + u32 t_src; 65 + u32 t_dst; 66 + }; 67 + 68 + /* 69 + * struct meson_flow - Information used by each flow 70 + * @engine: ptr to the crypto_engine for this flow 71 + * @keylen: keylen for this flow operation 72 + * @complete: completion for the current task on this flow 73 + * @status: set to 1 by interrupt if task is done 74 + * @t_phy: Physical address of task 75 + * @tl: pointer to the current ce_task for this flow 76 
+ * @stat_req: number of request done by this flow 77 + */ 78 + struct meson_flow { 79 + struct crypto_engine *engine; 80 + struct completion complete; 81 + int status; 82 + unsigned int keylen; 83 + dma_addr_t t_phy; 84 + struct meson_desc *tl; 85 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 86 + unsigned long stat_req; 87 + #endif 88 + }; 89 + 90 + /* 91 + * struct meson_dev - main container for all this driver information 92 + * @base: base address of amlogic-crypto 93 + * @busclk: bus clock for amlogic-crypto 94 + * @dev: the platform device 95 + * @chanlist: array of all flow 96 + * @flow: flow to use in next request 97 + * @irqs: IRQ numbers for amlogic-crypto 98 + * @dbgfs_dir: Debugfs dentry for statistic directory 99 + * @dbgfs_stats: Debugfs dentry for statistic counters 100 + */ 101 + struct meson_dev { 102 + void __iomem *base; 103 + struct clk *busclk; 104 + struct device *dev; 105 + struct meson_flow *chanlist; 106 + atomic_t flow; 107 + int *irqs; 108 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 109 + struct dentry *dbgfs_dir; 110 + #endif 111 + }; 112 + 113 + /* 114 + * struct meson_cipher_req_ctx - context for a skcipher request 115 + * @op_dir: direction (encrypt vs decrypt) for this request 116 + * @flow: the flow to use for this request 117 + */ 118 + struct meson_cipher_req_ctx { 119 + u32 op_dir; 120 + int flow; 121 + }; 122 + 123 + /* 124 + * struct meson_cipher_tfm_ctx - context for a skcipher TFM 125 + * @enginectx: crypto_engine used by this TFM 126 + * @key: pointer to key data 127 + * @keylen: len of the key 128 + * @keymode: The keymode(type and size of key) associated with this TFM 129 + * @mc: pointer to the private data of driver handling this TFM 130 + * @fallback_tfm: pointer to the fallback TFM 131 + */ 132 + struct meson_cipher_tfm_ctx { 133 + struct crypto_engine_ctx enginectx; 134 + u32 *key; 135 + u32 keylen; 136 + u32 keymode; 137 + struct meson_dev *mc; 138 + struct crypto_sync_skcipher *fallback_tfm; 139 + }; 140 + 141 + /* 
142 + * struct meson_alg_template - crypto_alg template 143 + * @type: the CRYPTO_ALG_TYPE for this template 144 + * @blockmode: the type of block operation 145 + * @mc: pointer to the meson_dev structure associated with this template 146 + * @alg: one of sub struct must be used 147 + * @stat_req: number of request done on this template 148 + * @stat_fb: total of all data len done on this template 149 + */ 150 + struct meson_alg_template { 151 + u32 type; 152 + u32 blockmode; 153 + union { 154 + struct skcipher_alg skcipher; 155 + } alg; 156 + struct meson_dev *mc; 157 + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG 158 + unsigned long stat_req; 159 + unsigned long stat_fb; 160 + #endif 161 + }; 162 + 163 + int meson_enqueue(struct crypto_async_request *areq, u32 type); 164 + 165 + int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 166 + unsigned int keylen); 167 + int meson_cipher_init(struct crypto_tfm *tfm); 168 + void meson_cipher_exit(struct crypto_tfm *tfm); 169 + int meson_skdecrypt(struct skcipher_request *areq); 170 + int meson_skencrypt(struct skcipher_request *areq);