Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: rockchip - Add support for RK3399

The RK3399 has two rk3288-compatible crypto devices, named crypto0 and
crypto1. The only difference is that crypto1 lacks RSA support.

We need to add driver support for the two parallel instances, where only
one of them needs to register the crypto algorithms.
The driver will then round-robin each request across the devices.

To avoid complexity (device bring-up after a TFM is created), PM is
modified to be handled per request.
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Corentin Labbe and committed by
Herbert Xu
9dcd71c8 c5a1e104

+124 -69
+73 -19
drivers/crypto/rockchip/rk3288_crypto.c
··· 19 19 #include <linux/crypto.h> 20 20 #include <linux/reset.h> 21 21 22 + static struct rockchip_ip rocklist = { 23 + .dev_list = LIST_HEAD_INIT(rocklist.dev_list), 24 + .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), 25 + }; 26 + 27 + struct rk_crypto_info *get_rk_crypto(void) 28 + { 29 + struct rk_crypto_info *first; 30 + 31 + spin_lock(&rocklist.lock); 32 + first = list_first_entry_or_null(&rocklist.dev_list, 33 + struct rk_crypto_info, list); 34 + list_rotate_left(&rocklist.dev_list); 35 + spin_unlock(&rocklist.lock); 36 + return first; 37 + } 38 + 22 39 static const struct rk_variant rk3288_variant = { 23 40 .num_clks = 4, 24 41 .rkclks = { ··· 44 27 }; 45 28 46 29 static const struct rk_variant rk3328_variant = { 30 + .num_clks = 3, 31 + }; 32 + 33 + static const struct rk_variant rk3399_variant = { 47 34 .num_clks = 3, 48 35 }; 49 36 ··· 104 83 } 105 84 106 85 /* 107 - * Power management strategy: The device is suspended unless a TFM exists for 108 - * one of the algorithms proposed by this driver. 86 + * Power management strategy: The device is suspended until a request 87 + * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s. 
109 88 */ 110 89 static int rk_crypto_pm_suspend(struct device *dev) 111 90 { ··· 187 166 #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG 188 167 static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) 189 168 { 169 + struct rk_crypto_info *dd; 190 170 unsigned int i; 171 + 172 + spin_lock(&rocklist.lock); 173 + list_for_each_entry(dd, &rocklist.dev_list, list) { 174 + seq_printf(seq, "%s %s requests: %lu\n", 175 + dev_driver_string(dd->dev), dev_name(dd->dev), 176 + dd->nreq); 177 + } 178 + spin_unlock(&rocklist.lock); 191 179 192 180 for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { 193 181 if (!rk_cipher_algs[i]->dev) ··· 227 197 228 198 DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); 229 199 #endif 200 + 201 + static void register_debugfs(struct rk_crypto_info *crypto_info) 202 + { 203 + #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG 204 + /* Ignore error of debugfs */ 205 + rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); 206 + rocklist.dbgfs_stats = debugfs_create_file("stats", 0444, 207 + rocklist.dbgfs_dir, 208 + &rocklist, 209 + &rk_crypto_debugfs_fops); 210 + #endif 211 + } 230 212 231 213 static int rk_crypto_register(struct rk_crypto_info *crypto_info) 232 214 { ··· 297 255 { .compatible = "rockchip,rk3328-crypto", 298 256 .data = &rk3328_variant, 299 257 }, 258 + { .compatible = "rockchip,rk3399-crypto", 259 + .data = &rk3399_variant, 260 + }, 300 261 {} 301 262 }; 302 263 MODULE_DEVICE_TABLE(of, crypto_of_id_table); ··· 307 262 static int rk_crypto_probe(struct platform_device *pdev) 308 263 { 309 264 struct device *dev = &pdev->dev; 310 - struct rk_crypto_info *crypto_info; 265 + struct rk_crypto_info *crypto_info, *first; 311 266 int err = 0; 312 267 313 268 crypto_info = devm_kzalloc(&pdev->dev, ··· 370 325 if (err) 371 326 goto err_pm; 372 327 373 - err = rk_crypto_register(crypto_info); 374 - if (err) { 375 - dev_err(dev, "err in register alg"); 376 - goto err_register_alg; 328 + spin_lock(&rocklist.lock); 329 + first = 
list_first_entry_or_null(&rocklist.dev_list, 330 + struct rk_crypto_info, list); 331 + list_add_tail(&crypto_info->list, &rocklist.dev_list); 332 + spin_unlock(&rocklist.lock); 333 + 334 + if (!first) { 335 + err = rk_crypto_register(crypto_info); 336 + if (err) { 337 + dev_err(dev, "Fail to register crypto algorithms"); 338 + goto err_register_alg; 339 + } 340 + 341 + register_debugfs(crypto_info); 377 342 } 378 343 379 - #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG 380 - /* Ignore error of debugfs */ 381 - crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); 382 - crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444, 383 - crypto_info->dbgfs_dir, 384 - crypto_info, 385 - &rk_crypto_debugfs_fops); 386 - #endif 387 - 388 - dev_info(dev, "Crypto Accelerator successfully registered\n"); 389 344 return 0; 390 345 391 346 err_register_alg: ··· 400 355 static int rk_crypto_remove(struct platform_device *pdev) 401 356 { 402 357 struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); 358 + struct rk_crypto_info *first; 403 359 360 + spin_lock_bh(&rocklist.lock); 361 + list_del(&crypto_tmp->list); 362 + first = list_first_entry_or_null(&rocklist.dev_list, 363 + struct rk_crypto_info, list); 364 + spin_unlock_bh(&rocklist.lock); 365 + 366 + if (!first) { 404 367 #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG 405 - debugfs_remove_recursive(crypto_tmp->dbgfs_dir); 368 + debugfs_remove_recursive(rocklist.dbgfs_dir); 406 369 #endif 407 - rk_crypto_unregister(); 370 + rk_crypto_unregister(); 371 + } 408 372 rk_crypto_pm_exit(crypto_tmp); 409 373 crypto_engine_exit(crypto_tmp->engine); 410 374 return 0;
+18 -7
drivers/crypto/rockchip/rk3288_crypto.h
··· 190 190 191 191 #define RK_MAX_CLKS 4 192 192 193 + /* 194 + * struct rockchip_ip - struct for managing a list of RK crypto instance 195 + * @dev_list: Used for doing a list of rk_crypto_info 196 + * @lock: Control access to dev_list 197 + * @dbgfs_dir: Debugfs dentry for statistic directory 198 + * @dbgfs_stats: Debugfs dentry for statistic counters 199 + */ 200 + struct rockchip_ip { 201 + struct list_head dev_list; 202 + spinlock_t lock; /* Control access to dev_list */ 203 + struct dentry *dbgfs_dir; 204 + struct dentry *dbgfs_stats; 205 + }; 206 + 193 207 struct rk_clks { 194 208 const char *name; 195 209 unsigned long max; ··· 215 201 }; 216 202 217 203 struct rk_crypto_info { 204 + struct list_head list; 218 205 struct device *dev; 219 206 struct clk_bulk_data *clks; 220 207 int num_clks; ··· 223 208 void __iomem *reg; 224 209 int irq; 225 210 const struct rk_variant *variant; 211 + unsigned long nreq; 226 212 struct crypto_engine *engine; 227 213 struct completion complete; 228 214 int status; 229 - #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG 230 - struct dentry *dbgfs_dir; 231 - struct dentry *dbgfs_stats; 232 - #endif 233 215 }; 234 216 235 217 /* the private variable of hash */ 236 218 struct rk_ahash_ctx { 237 219 struct crypto_engine_ctx enginectx; 238 - struct rk_crypto_info *dev; 239 220 /* for fallback */ 240 221 struct crypto_ahash *fallback_tfm; 241 222 }; ··· 247 236 /* the private variable of cipher */ 248 237 struct rk_cipher_ctx { 249 238 struct crypto_engine_ctx enginectx; 250 - struct rk_crypto_info *dev; 251 239 unsigned int keylen; 252 240 u8 key[AES_MAX_KEY_SIZE]; 253 241 u8 iv[AES_BLOCK_SIZE]; ··· 262 252 263 253 struct rk_crypto_tmp { 264 254 u32 type; 265 - struct rk_crypto_info *dev; 255 + struct rk_crypto_info *dev; 266 256 union { 267 257 struct skcipher_alg skcipher; 268 258 struct ahash_alg hash; ··· 286 276 extern struct rk_crypto_tmp rk_ahash_sha256; 287 277 extern struct rk_crypto_tmp rk_ahash_md5; 288 278 279 + struct 
rk_crypto_info *get_rk_crypto(void); 289 280 #endif
+17 -22
drivers/crypto/rockchip/rk3288_crypto_ahash.c
··· 199 199 static int rk_ahash_digest(struct ahash_request *req) 200 200 { 201 201 struct rk_ahash_rctx *rctx = ahash_request_ctx(req); 202 - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 203 - struct rk_crypto_info *dev = tctx->dev; 202 + struct rk_crypto_info *dev; 203 + struct crypto_engine *engine; 204 204 205 205 if (rk_ahash_need_fallback(req)) 206 206 return rk_ahash_digest_fb(req); ··· 208 208 if (!req->nbytes) 209 209 return zero_message_process(req); 210 210 211 - rctx->dev = dev; 211 + dev = get_rk_crypto(); 212 212 213 - return crypto_transfer_hash_request_to_engine(dev->engine, req); 213 + rctx->dev = dev; 214 + engine = dev->engine; 215 + 216 + return crypto_transfer_hash_request_to_engine(engine, req); 214 217 } 215 218 216 219 static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) ··· 263 260 int i; 264 261 u32 v; 265 262 263 + err = pm_runtime_resume_and_get(rkc->dev); 264 + if (err) 265 + return err; 266 + 266 267 rctx->mode = 0; 267 268 268 269 algt->stat_req++; 270 + rkc->nreq++; 269 271 270 272 switch (crypto_ahash_digestsize(tfm)) { 271 273 case SHA1_DIGEST_SIZE: ··· 321 313 } 322 314 323 315 theend: 316 + pm_runtime_put_autosuspend(rkc->dev); 317 + 324 318 local_bh_disable(); 325 319 crypto_finalize_hash_request(engine, breq, err); 326 320 local_bh_enable(); ··· 333 323 static int rk_cra_hash_init(struct crypto_tfm *tfm) 334 324 { 335 325 struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); 336 - struct rk_crypto_tmp *algt; 337 - struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); 338 - 339 326 const char *alg_name = crypto_tfm_alg_name(tfm); 340 - int err; 341 - 342 - algt = container_of(alg, struct rk_crypto_tmp, alg.hash); 343 - 344 - tctx->dev = algt->dev; 327 + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); 328 + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); 345 329 346 330 /* for fallback */ 347 331 tctx->fallback_tfm = 
crypto_alloc_ahash(alg_name, 0, 348 332 CRYPTO_ALG_NEED_FALLBACK); 349 333 if (IS_ERR(tctx->fallback_tfm)) { 350 - dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); 334 + dev_err(algt->dev->dev, "Could not load fallback driver.\n"); 351 335 return PTR_ERR(tctx->fallback_tfm); 352 336 } 353 337 ··· 353 349 tctx->enginectx.op.prepare_request = rk_hash_prepare; 354 350 tctx->enginectx.op.unprepare_request = rk_hash_unprepare; 355 351 356 - err = pm_runtime_resume_and_get(tctx->dev->dev); 357 - if (err < 0) 358 - goto error_pm; 359 - 360 352 return 0; 361 - error_pm: 362 - crypto_free_ahash(tctx->fallback_tfm); 363 - 364 - return err; 365 353 } 366 354 367 355 static void rk_cra_hash_exit(struct crypto_tfm *tfm) ··· 361 365 struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); 362 366 363 367 crypto_free_ahash(tctx->fallback_tfm); 364 - pm_runtime_put_autosuspend(tctx->dev->dev); 365 368 } 366 369 367 370 struct rk_crypto_tmp rk_ahash_sha1 = {
+16 -21
drivers/crypto/rockchip/rk3288_crypto_skcipher.c
··· 17 17 static int rk_cipher_need_fallback(struct skcipher_request *req) 18 18 { 19 19 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 20 - unsigned int bs = crypto_skcipher_blocksize(tfm); 21 20 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 22 21 struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); 23 22 struct scatterlist *sgs, *sgd; 24 23 unsigned int stodo, dtodo, len; 24 + unsigned int bs = crypto_skcipher_blocksize(tfm); 25 25 26 26 if (!req->cryptlen) 27 27 return true; ··· 84 84 85 85 static int rk_cipher_handle_req(struct skcipher_request *req) 86 86 { 87 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 88 - struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm); 89 87 struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); 90 - struct rk_crypto_info *rkc = tctx->dev; 91 - struct crypto_engine *engine = rkc->engine; 88 + struct rk_crypto_info *rkc; 89 + struct crypto_engine *engine; 92 90 93 91 if (rk_cipher_need_fallback(req)) 94 92 return rk_cipher_fallback(req); 95 93 94 + rkc = get_rk_crypto(); 95 + 96 + engine = rkc->engine; 96 97 rctx->dev = rkc; 97 98 98 99 return crypto_transfer_skcipher_request_to_engine(engine, req); ··· 308 307 struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); 309 308 struct rk_crypto_info *rkc = rctx->dev; 310 309 310 + err = pm_runtime_resume_and_get(rkc->dev); 311 + if (err) 312 + return err; 313 + 311 314 algt->stat_req++; 315 + rkc->nreq++; 312 316 313 317 ivsize = crypto_skcipher_ivsize(tfm); 314 318 if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { ··· 407 401 } 408 402 409 403 theend: 404 + pm_runtime_put_autosuspend(rkc->dev); 405 + 410 406 local_bh_disable(); 411 407 crypto_finalize_skcipher_request(engine, areq, err); 412 408 local_bh_enable(); ··· 428 420 static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) 429 421 { 430 422 struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); 431 - struct skcipher_alg *alg = 
crypto_skcipher_alg(tfm); 432 423 const char *name = crypto_tfm_alg_name(&tfm->base); 433 - struct rk_crypto_tmp *algt; 434 - int err; 435 - 436 - algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); 437 - 438 - ctx->dev = algt->dev; 424 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 425 + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); 439 426 440 427 ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); 441 428 if (IS_ERR(ctx->fallback_tfm)) { 442 - dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 429 + dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", 443 430 name, PTR_ERR(ctx->fallback_tfm)); 444 431 return PTR_ERR(ctx->fallback_tfm); 445 432 } ··· 444 441 445 442 ctx->enginectx.op.do_one_request = rk_cipher_run; 446 443 447 - err = pm_runtime_resume_and_get(ctx->dev->dev); 448 - if (err < 0) 449 - goto error_pm; 450 - 451 444 return 0; 452 - error_pm: 453 - crypto_free_skcipher(ctx->fallback_tfm); 454 - return err; 455 445 } 456 446 457 447 static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) ··· 453 457 454 458 memzero_explicit(ctx->key, ctx->keylen); 455 459 crypto_free_skcipher(ctx->fallback_tfm); 456 - pm_runtime_put_autosuspend(ctx->dev->dev); 457 460 } 458 461 459 462 struct rk_crypto_tmp rk_ecb_aes_alg = {