Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: rockchip - add hash support for crypto engine in rk3288

Add MD5, SHA1 and SHA256 support for the crypto engine in rk3288.

Signed-off-by: Zain Wang <zain.wang@rock-chips.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Zain Wang, committed by Herbert Xu
bfd927ff 49abc0d2
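
Not part of the patch: a minimal sketch of how an in-kernel consumer would reach this driver through the generic ahash API once the algorithms below are registered. The helper name rk_sha1_oneshot() is made up for illustration, the buffer is assumed to be kmalloc'd (sg_init_one() cannot take stack memory), and a real caller must also handle the -EINPROGRESS/-EBUSY return codes that an asynchronous implementation may produce.

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative helper (not in the patch): one-shot SHA1 over a kmalloc'd buffer. */
static int rk_sha1_oneshot(const u8 *buf, unsigned int len, u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int ret;

        /* "sha1" resolves to "rk-sha1" when its priority (300) wins selection. */
        tfm = crypto_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        ahash_request_set_crypt(req, &sg, digest, len);

        /* crypto_ahash_digest() ends up in rk_ahash_digest() for this driver;
         * an async backend may instead return -EINPROGRESS/-EBUSY, which a
         * real caller would wait on via its completion callback.
         */
        ret = crypto_ahash_digest(req);

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}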

6 files changed: +499 -14
drivers/crypto/Kconfig (+4)
···
         depends on OF && ARCH_ROCKCHIP
         select CRYPTO_AES
         select CRYPTO_DES
+        select CRYPTO_MD5
+        select CRYPTO_SHA1
+        select CRYPTO_SHA256
+        select CRYPTO_HASH
         select CRYPTO_BLKCIPHER

         help

drivers/crypto/rockchip/Makefile (+1)
···
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
 rk_crypto-objs := rk3288_crypto.o \
                   rk3288_crypto_ablkcipher.o \
+                  rk3288_crypto_ahash.o

drivers/crypto/rockchip/rk3288_crypto.c (+23 -5)
···

         if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
                 dev->ablk_req = ablkcipher_request_cast(async_req);
+        else
+                dev->ahash_req = ahash_request_cast(async_req);
         err = dev->start(dev);
         if (err)
                 dev->complete(dev, err);
···
         &rk_cbc_des_alg,
         &rk_ecb_des3_ede_alg,
         &rk_cbc_des3_ede_alg,
+        &rk_ahash_sha1,
+        &rk_ahash_sha256,
+        &rk_ahash_md5,
 };

 static int rk_crypto_register(struct rk_crypto_info *crypto_info)
···

         for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
                 rk_cipher_algs[i]->dev = crypto_info;
-                err = crypto_register_alg(&rk_cipher_algs[i]->alg);
+                if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+                        err = crypto_register_alg(
+                                        &rk_cipher_algs[i]->alg.crypto);
+                else
+                        err = crypto_register_ahash(
+                                        &rk_cipher_algs[i]->alg.hash);
                 if (err)
                         goto err_cipher_algs;
         }
         return 0;

 err_cipher_algs:
-        for (k = 0; k < i; k++)
-                crypto_unregister_alg(&rk_cipher_algs[k]->alg);
+        for (k = 0; k < i; k++) {
+                if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+                        crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+                else
+                        crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+        }
         return err;
 }
···
 {
         unsigned int i;

-        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
-                crypto_unregister_alg(&rk_cipher_algs[i]->alg);
+        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+                if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+                        crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+                else
+                        crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+        }
 }

 static void rk_crypto_action(void *data)

drivers/crypto/rockchip/rk3288_crypto.h (+54 -2)
···
 #include <crypto/algapi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <crypto/internal/hash.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha.h>

 #define _SBF(v, f)                      ((v) << (f))

···
 #define RK_CRYPTO_TDES_KEY3_0           0x0130
 #define RK_CRYPTO_TDES_KEY3_1           0x0134

+/* HASH */
+#define RK_CRYPTO_HASH_CTRL             0x0180
+#define RK_CRYPTO_HASH_SWAP_DO          BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI          BIT(2)
+#define RK_CRYPTO_HASH_SHA1             _SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5              _SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256           _SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG             _SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS              0x0184
+#define RK_CRYPTO_HASH_DONE             BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN          0x0188
+#define RK_CRYPTO_HASH_DOUT_0           0x018c
+#define RK_CRYPTO_HASH_DOUT_1           0x0190
+#define RK_CRYPTO_HASH_DOUT_2           0x0194
+#define RK_CRYPTO_HASH_DOUT_3           0x0198
+#define RK_CRYPTO_HASH_DOUT_4           0x019c
+#define RK_CRYPTO_HASH_DOUT_5           0x01a0
+#define RK_CRYPTO_HASH_DOUT_6           0x01a4
+#define RK_CRYPTO_HASH_DOUT_7           0x01a8
+
 #define CRYPTO_READ(dev, offset)        \
                 readl_relaxed(((dev)->reg + (offset)))
 #define CRYPTO_WRITE(dev, offset, val)  \
···
         struct crypto_queue             queue;
         struct tasklet_struct           crypto_tasklet;
         struct ablkcipher_request       *ablk_req;
+        struct ahash_request            *ahash_req;
         /* device lock */
         spinlock_t                      lock;

···
         void (*unload_data)(struct rk_crypto_info *dev);
 };

+/* the private variable of hash */
+struct rk_ahash_ctx {
+        struct rk_crypto_info           *dev;
+        /* for fallback */
+        struct crypto_ahash             *fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+        struct ahash_request            fallback_req;
+};
+
 /* the private variable of cipher */
 struct rk_cipher_ctx {
         struct rk_crypto_info           *dev;
         unsigned int                    keylen;
 };

+enum alg_type {
+        ALG_TYPE_HASH,
+        ALG_TYPE_CIPHER,
+};
+
 struct rk_crypto_tmp {
-        struct rk_crypto_info *dev;
-        struct crypto_alg alg;
+        struct rk_crypto_info           *dev;
+        union {
+                struct crypto_alg       crypto;
+                struct ahash_alg        hash;
+        } alg;
+        enum alg_type                   type;
 };

 extern struct rk_crypto_tmp rk_ecb_aes_alg;
···
 extern struct rk_crypto_tmp rk_cbc_des_alg;
 extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
 extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;

 #endif

drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c (+13 -7)
···
         struct crypto_alg *alg = tfm->__crt_alg;
         struct rk_crypto_tmp *algt;

-        algt = container_of(alg, struct rk_crypto_tmp, alg);
+        algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);

         ctx->dev = algt->dev;
         ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
···
 }

 struct rk_crypto_tmp rk_ecb_aes_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "ecb(aes)",
                 .cra_driver_name        = "ecb-aes-rk",
                 .cra_priority           = 300,
···
 };

 struct rk_crypto_tmp rk_cbc_aes_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "cbc(aes)",
                 .cra_driver_name        = "cbc-aes-rk",
                 .cra_priority           = 300,
···
 };

 struct rk_crypto_tmp rk_ecb_des_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "ecb(des)",
                 .cra_driver_name        = "ecb-des-rk",
                 .cra_priority           = 300,
···
 };

 struct rk_crypto_tmp rk_cbc_des_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "cbc(des)",
                 .cra_driver_name        = "cbc-des-rk",
                 .cra_priority           = 300,
···
 };

 struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "ecb(des3_ede)",
                 .cra_driver_name        = "ecb-des3-ede-rk",
                 .cra_priority           = 300,
···
 };

 struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
-        .alg = {
+        .type = ALG_TYPE_CIPHER,
+        .alg.crypto = {
                 .cra_name               = "cbc(des3_ede)",
                 .cra_driver_name        = "cbc-des3-ede-rk",
                 .cra_priority           = 300,

drivers/crypto/rockchip/rk3288_crypto_ahash.c (new file, +404)

/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */
#include "rk3288_crypto.h"

/*
 * IC can not process zero message hash,
 * so we put the fixed hash out when met zero message.
 */

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int rk_digest_size = crypto_ahash_digestsize(tfm);

        switch (rk_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
                break;
        case MD5_DIGEST_SIZE:
                memcpy(req->result, md5_zero_message_hash, rk_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
{
        if (dev->ahash_req->base.complete)
                dev->ahash_req->base.complete(&dev->ahash_req->base, err);
}

static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
        int reg_status = 0;

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
                     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
        reg_status &= (~RK_CRYPTO_HASH_FLUSH);
        reg_status |= _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
                                            RK_CRYPTO_HRDMA_DONE_ENA);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
                                            RK_CRYPTO_HRDMA_DONE_INT);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
                                               RK_CRYPTO_HASH_SWAP_DO);

        CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
                                          RK_CRYPTO_BYTESWAP_BRFIFO |
                                          RK_CRYPTO_BYTESWAP_BTFIFO);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}

static int rk_ahash_init(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}

static int rk_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct rk_crypto_info *dev = NULL;
        unsigned long flags;
        int ret;

        if (!req->nbytes)
                return zero_message_process(req);

        dev = tctx->dev;
        dev->total = req->nbytes;
        dev->left_bytes = req->nbytes;
        dev->aligned = 0;
        dev->mode = 0;
        dev->align_size = 4;
        dev->sg_dst = NULL;
        dev->sg_src = req->src;
        dev->first = req->src;
        dev->nents = sg_nents(req->src);

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                dev->mode = RK_CRYPTO_HASH_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                dev->mode = RK_CRYPTO_HASH_SHA256;
                break;
        case MD5_DIGEST_SIZE:
                dev->mode = RK_CRYPTO_HASH_MD5;
                break;
        default:
                return -EINVAL;
        }

        rk_ahash_reg_init(dev);

        spin_lock_irqsave(&dev->lock, flags);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        spin_unlock_irqrestore(&dev->lock, flags);

        tasklet_schedule(&dev->crypto_tasklet);

        /*
         * it will take some time to process date after last dma transmission.
         *
         * waiting time is relative with the last date len,
         * so cannot set a fixed time here.
         * 10-50 makes system not call here frequently wasting
         * efficiency, and make it response quickly when dma
         * complete.
         */
        while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
                usleep_range(10, 50);

        memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
                      crypto_ahash_digestsize(tfm));

        return 0;
}

static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
                                          (RK_CRYPTO_HASH_START << 16));
}

static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
        int err;

        err = dev->load_data(dev, dev->sg_src, NULL);
        if (!err)
                crypto_ahash_dma_start(dev);
        return err;
}

static int rk_ahash_start(struct rk_crypto_info *dev)
{
        return rk_ahash_set_data_start(dev);
}

static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
        int err = 0;

        dev->unload_data(dev);
        if (dev->left_bytes) {
                if (dev->aligned) {
                        if (sg_is_last(dev->sg_src)) {
                                dev_warn(dev->dev, "[%s:%d], Lack of data\n",
                                         __func__, __LINE__);
                                err = -ENOMEM;
                                goto out_rx;
                        }
                        dev->sg_src = sg_next(dev->sg_src);
                }
                err = rk_ahash_set_data_start(dev);
        } else {
                dev->complete(dev, 0);
        }

out_rx:
        return err;
}

static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
        struct rk_crypto_tmp *algt;
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

        const char *alg_name = crypto_tfm_alg_name(tfm);

        algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

        tctx->dev = algt->dev;
        tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
        if (!tctx->dev->addr_vir) {
                dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
                return -ENOMEM;
        }
        tctx->dev->start = rk_ahash_start;
        tctx->dev->update = rk_ahash_crypto_rx;
        tctx->dev->complete = rk_ahash_crypto_complete;

        /* for fallback */
        tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback_tfm)) {
                dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
                return PTR_ERR(tctx->fallback_tfm);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct rk_ahash_rctx) +
                                 crypto_ahash_reqsize(tctx->fallback_tfm));

        return tctx->dev->enable_clk(tctx->dev);
}

static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

        free_page((unsigned long)tctx->dev->addr_vir);
        return tctx->dev->disable_clk(tctx->dev);
}

struct rk_crypto_tmp rk_ahash_sha1 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "rk-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                .cra_alignmask = 3,
                                .cra_init = rk_cra_hash_init,
                                .cra_exit = rk_cra_hash_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

struct rk_crypto_tmp rk_ahash_sha256 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "rk-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                .cra_alignmask = 3,
                                .cra_init = rk_cra_hash_init,
                                .cra_exit = rk_cra_hash_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

struct rk_crypto_tmp rk_ahash_md5 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "rk-md5",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                .cra_alignmask = 3,
                                .cra_init = rk_cra_hash_init,
                                .cra_exit = rk_cra_hash_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};
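
As a quick functional check, again not part of the patch, the registered algorithms can also be exercised from userspace over AF_ALG once CONFIG_CRYPTO_USER_API_HASH is enabled. This is a minimal sketch with error handling omitted; the file name af_alg_sha1.c and the hard-coded message are made up for illustration, and "sha1" is only served by "rk-sha1" if this driver wins algorithm selection. The registered names and priorities can also be inspected in /proc/crypto.

/* af_alg_sha1.c -- hypothetical userspace check of the "sha1" ahash via AF_ALG. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha1",  /* resolved to "rk-sha1" if it wins */
        };
        unsigned char digest[20];
        const char msg[] = "hello";
        int tfmfd, opfd, i;

        /* Transform socket: bind selects the hash algorithm by name. */
        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

        /* Operation socket: write the message, read back the digest. */
        opfd = accept(tfmfd, NULL, 0);
        write(opfd, msg, strlen(msg));
        read(opfd, digest, sizeof(digest));

        for (i = 0; i < (int)sizeof(digest); i++)
                printf("%02x", digest[i]);
        printf("\n");

        close(opfd);
        close(tfmfd);
        return 0;
}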