Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: caam - fix return code in completion callbacks

Modify driver to provide a valid errno (and not the HW error ID)
to the user, via completion callbacks.

A "valid errno" is currently not explicitly mentioned in the docs,
however the error code is expected to match the one returned by the
generic SW implementation.

Note: in most error cases caam/qi and caam/qi2 returned -EIO; align all
caam drivers to return -EINVAL.

While here, ratelimit prints triggered by fuzz testing, such that
console is not flooded.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Horia Geantă and committed by
Herbert Xu
1984aaee 51fab3d7

+104 -117
+12 -14
drivers/crypto/caam/caamalg.c
··· 930 930 { 931 931 struct aead_request *req = context; 932 932 struct aead_edesc *edesc; 933 + int ecode = 0; 933 934 934 935 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 935 936 936 937 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 937 938 938 939 if (err) 939 - caam_jr_strstatus(jrdev, err); 940 + ecode = caam_jr_strstatus(jrdev, err); 940 941 941 942 aead_unmap(jrdev, edesc, req); 942 943 943 944 kfree(edesc); 944 945 945 - aead_request_complete(req, err); 946 + aead_request_complete(req, ecode); 946 947 } 947 948 948 949 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ··· 951 950 { 952 951 struct aead_request *req = context; 953 952 struct aead_edesc *edesc; 953 + int ecode = 0; 954 954 955 955 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 956 956 957 957 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 958 958 959 959 if (err) 960 - caam_jr_strstatus(jrdev, err); 960 + ecode = caam_jr_strstatus(jrdev, err); 961 961 962 962 aead_unmap(jrdev, edesc, req); 963 963 964 - /* 965 - * verify hw auth check passed else return -EBADMSG 966 - */ 967 - if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 968 - err = -EBADMSG; 969 - 970 964 kfree(edesc); 971 965 972 - aead_request_complete(req, err); 966 + aead_request_complete(req, ecode); 973 967 } 974 968 975 969 static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, ··· 974 978 struct skcipher_edesc *edesc; 975 979 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 976 980 int ivsize = crypto_skcipher_ivsize(skcipher); 981 + int ecode = 0; 977 982 978 983 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 979 984 980 985 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 981 986 982 987 if (err) 983 - caam_jr_strstatus(jrdev, err); 988 + ecode = caam_jr_strstatus(jrdev, err); 984 989 985 990 skcipher_unmap(jrdev, edesc, req); 986 991 ··· 1005 1008 1006 1009 
kfree(edesc); 1007 1010 1008 - skcipher_request_complete(req, err); 1011 + skcipher_request_complete(req, ecode); 1009 1012 } 1010 1013 1011 1014 static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ··· 1015 1018 struct skcipher_edesc *edesc; 1016 1019 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1017 1020 int ivsize = crypto_skcipher_ivsize(skcipher); 1021 + int ecode = 0; 1018 1022 1019 1023 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1020 1024 1021 1025 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 1022 1026 if (err) 1023 - caam_jr_strstatus(jrdev, err); 1027 + ecode = caam_jr_strstatus(jrdev, err); 1024 1028 1025 1029 skcipher_unmap(jrdev, edesc, req); 1026 1030 ··· 1045 1047 1046 1048 kfree(edesc); 1047 1049 1048 - skcipher_request_complete(req, err); 1050 + skcipher_request_complete(req, ecode); 1049 1051 } 1050 1052 1051 1053 /*
+5 -16
drivers/crypto/caam/caamalg_qi.c
··· 884 884 885 885 qidev = caam_ctx->qidev; 886 886 887 - if (unlikely(status)) { 888 - u32 ssrc = status & JRSTA_SSRC_MASK; 889 - u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; 890 - 891 - caam_jr_strstatus(qidev, status); 892 - /* 893 - * verify hw auth check passed else return -EBADMSG 894 - */ 895 - if (ssrc == JRSTA_SSRC_CCB_ERROR && 896 - err_id == JRSTA_CCBERR_ERRID_ICVCHK) 897 - ecode = -EBADMSG; 898 - else 899 - ecode = -EIO; 900 - } 887 + if (unlikely(status)) 888 + ecode = caam_jr_strstatus(qidev, status); 901 889 902 890 edesc = container_of(drv_req, typeof(*edesc), drv_req); 903 891 aead_unmap(qidev, edesc, aead_req); ··· 1178 1190 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); 1179 1191 struct device *qidev = caam_ctx->qidev; 1180 1192 int ivsize = crypto_skcipher_ivsize(skcipher); 1193 + int ecode = 0; 1181 1194 1182 1195 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1183 1196 1184 1197 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1185 1198 1186 1199 if (status) 1187 - caam_jr_strstatus(qidev, status); 1200 + ecode = caam_jr_strstatus(qidev, status); 1188 1201 1189 1202 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1190 1203 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ··· 1204 1215 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1205 1216 1206 1217 qi_cache_free(edesc); 1207 - skcipher_request_complete(req, status); 1218 + skcipher_request_complete(req, ecode); 1208 1219 } 1209 1220 1210 1221 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+18 -44
drivers/crypto/caam/caamalg_qi2.c
··· 1228 1228 1229 1229 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1230 1230 1231 - if (unlikely(status)) { 1232 - caam_qi2_strstatus(ctx->dev, status); 1233 - ecode = -EIO; 1234 - } 1231 + if (unlikely(status)) 1232 + ecode = caam_qi2_strstatus(ctx->dev, status); 1235 1233 1236 1234 aead_unmap(ctx->dev, edesc, req); 1237 1235 qi_cache_free(edesc); ··· 1249 1251 1250 1252 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1251 1253 1252 - if (unlikely(status)) { 1253 - caam_qi2_strstatus(ctx->dev, status); 1254 - /* 1255 - * verify hw auth check passed else return -EBADMSG 1256 - */ 1257 - if ((status & JRSTA_CCBERR_ERRID_MASK) == 1258 - JRSTA_CCBERR_ERRID_ICVCHK) 1259 - ecode = -EBADMSG; 1260 - else 1261 - ecode = -EIO; 1262 - } 1254 + if (unlikely(status)) 1255 + ecode = caam_qi2_strstatus(ctx->dev, status); 1263 1256 1264 1257 aead_unmap(ctx->dev, edesc, req); 1265 1258 qi_cache_free(edesc); ··· 1342 1353 1343 1354 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1344 1355 1345 - if (unlikely(status)) { 1346 - caam_qi2_strstatus(ctx->dev, status); 1347 - ecode = -EIO; 1348 - } 1356 + if (unlikely(status)) 1357 + ecode = caam_qi2_strstatus(ctx->dev, status); 1349 1358 1350 1359 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1351 1360 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ··· 1378 1391 1379 1392 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1380 1393 1381 - if (unlikely(status)) { 1382 - caam_qi2_strstatus(ctx->dev, status); 1383 - ecode = -EIO; 1384 - } 1394 + if (unlikely(status)) 1395 + ecode = caam_qi2_strstatus(ctx->dev, status); 1385 1396 1386 1397 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1387 1398 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ··· 3080 3095 3081 3096 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 3082 3097 3083 - if (err) 3084 - caam_qi2_strstatus(res->dev, err); 3085 - 3086 - res->err = err; 3098 + res->err = err ? 
caam_qi2_strstatus(res->dev, err) : 0; 3087 3099 complete(&res->completion); 3088 3100 } 3089 3101 ··· 3265 3283 3266 3284 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3267 3285 3268 - if (unlikely(status)) { 3269 - caam_qi2_strstatus(ctx->dev, status); 3270 - ecode = -EIO; 3271 - } 3286 + if (unlikely(status)) 3287 + ecode = caam_qi2_strstatus(ctx->dev, status); 3272 3288 3273 3289 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); 3274 3290 memcpy(req->result, state->caam_ctx, digestsize); ··· 3291 3311 3292 3312 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3293 3313 3294 - if (unlikely(status)) { 3295 - caam_qi2_strstatus(ctx->dev, status); 3296 - ecode = -EIO; 3297 - } 3314 + if (unlikely(status)) 3315 + ecode = caam_qi2_strstatus(ctx->dev, status); 3298 3316 3299 3317 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); 3300 3318 switch_buf(state); ··· 3322 3344 3323 3345 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3324 3346 3325 - if (unlikely(status)) { 3326 - caam_qi2_strstatus(ctx->dev, status); 3327 - ecode = -EIO; 3328 - } 3347 + if (unlikely(status)) 3348 + ecode = caam_qi2_strstatus(ctx->dev, status); 3329 3349 3330 3350 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); 3331 3351 memcpy(req->result, state->caam_ctx, digestsize); ··· 3348 3372 3349 3373 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3350 3374 3351 - if (unlikely(status)) { 3352 - caam_qi2_strstatus(ctx->dev, status); 3353 - ecode = -EIO; 3354 - } 3375 + if (unlikely(status)) 3376 + ecode = caam_qi2_strstatus(ctx->dev, status); 3355 3377 3356 3378 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); 3357 3379 switch_buf(state); ··· 4675 4701 4676 4702 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; 4677 4703 if (unlikely(fd_err)) 4678 - dev_err(priv->dev, "FD error: %08x\n", fd_err); 4704 + dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); 4679 4705 4680 4706 /* 
4681 4707 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+12 -8
drivers/crypto/caam/caamhash.c
··· 584 584 int digestsize = crypto_ahash_digestsize(ahash); 585 585 struct caam_hash_state *state = ahash_request_ctx(req); 586 586 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 587 + int ecode = 0; 587 588 588 589 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 589 590 590 591 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 591 592 if (err) 592 - caam_jr_strstatus(jrdev, err); 593 + ecode = caam_jr_strstatus(jrdev, err); 593 594 594 595 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 595 596 memcpy(req->result, state->caam_ctx, digestsize); ··· 600 599 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 601 600 ctx->ctx_len, 1); 602 601 603 - req->base.complete(&req->base, err); 602 + req->base.complete(&req->base, ecode); 604 603 } 605 604 606 605 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, ··· 612 611 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 613 612 struct caam_hash_state *state = ahash_request_ctx(req); 614 613 int digestsize = crypto_ahash_digestsize(ahash); 614 + int ecode = 0; 615 615 616 616 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 617 617 618 618 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 619 619 if (err) 620 - caam_jr_strstatus(jrdev, err); 620 + ecode = caam_jr_strstatus(jrdev, err); 621 621 622 622 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 623 623 switch_buf(state); ··· 632 630 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 633 631 digestsize, 1); 634 632 635 - req->base.complete(&req->base, err); 633 + req->base.complete(&req->base, ecode); 636 634 } 637 635 638 636 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, ··· 644 642 int digestsize = crypto_ahash_digestsize(ahash); 645 643 struct caam_hash_state *state = ahash_request_ctx(req); 646 644 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 645 + int ecode = 0; 647 646 648 647 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, 
err); 649 648 650 649 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 651 650 if (err) 652 - caam_jr_strstatus(jrdev, err); 651 + ecode = caam_jr_strstatus(jrdev, err); 653 652 654 653 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 655 654 memcpy(req->result, state->caam_ctx, digestsize); ··· 660 657 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 661 658 ctx->ctx_len, 1); 662 659 663 - req->base.complete(&req->base, err); 660 + req->base.complete(&req->base, ecode); 664 661 } 665 662 666 663 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ··· 672 669 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 673 670 struct caam_hash_state *state = ahash_request_ctx(req); 674 671 int digestsize = crypto_ahash_digestsize(ahash); 672 + int ecode = 0; 675 673 676 674 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 677 675 678 676 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 679 677 if (err) 680 - caam_jr_strstatus(jrdev, err); 678 + ecode = caam_jr_strstatus(jrdev, err); 681 679 682 680 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 683 681 switch_buf(state); ··· 692 688 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 693 689 digestsize, 1); 694 690 695 - req->base.complete(&req->base, err); 691 + req->base.complete(&req->base, ecode); 696 692 } 697 693 698 694 /*
+12 -8
drivers/crypto/caam/caampkc.c
··· 107 107 { 108 108 struct akcipher_request *req = context; 109 109 struct rsa_edesc *edesc; 110 + int ecode = 0; 110 111 111 112 if (err) 112 - caam_jr_strstatus(dev, err); 113 + ecode = caam_jr_strstatus(dev, err); 113 114 114 115 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 115 116 ··· 118 117 rsa_io_unmap(dev, edesc, req); 119 118 kfree(edesc); 120 119 121 - akcipher_request_complete(req, err); 120 + akcipher_request_complete(req, ecode); 122 121 } 123 122 124 123 static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, ··· 126 125 { 127 126 struct akcipher_request *req = context; 128 127 struct rsa_edesc *edesc; 128 + int ecode = 0; 129 129 130 130 if (err) 131 - caam_jr_strstatus(dev, err); 131 + ecode = caam_jr_strstatus(dev, err); 132 132 133 133 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 134 134 ··· 137 135 rsa_io_unmap(dev, edesc, req); 138 136 kfree(edesc); 139 137 140 - akcipher_request_complete(req, err); 138 + akcipher_request_complete(req, ecode); 141 139 } 142 140 143 141 static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, ··· 145 143 { 146 144 struct akcipher_request *req = context; 147 145 struct rsa_edesc *edesc; 146 + int ecode = 0; 148 147 149 148 if (err) 150 - caam_jr_strstatus(dev, err); 149 + ecode = caam_jr_strstatus(dev, err); 151 150 152 151 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 153 152 ··· 156 153 rsa_io_unmap(dev, edesc, req); 157 154 kfree(edesc); 158 155 159 - akcipher_request_complete(req, err); 156 + akcipher_request_complete(req, ecode); 160 157 } 161 158 162 159 static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, ··· 164 161 { 165 162 struct akcipher_request *req = context; 166 163 struct rsa_edesc *edesc; 164 + int ecode = 0; 167 165 168 166 if (err) 169 - caam_jr_strstatus(dev, err); 167 + ecode = caam_jr_strstatus(dev, err); 170 168 171 169 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 172 170 ··· 175 171 
rsa_io_unmap(dev, edesc, req); 176 172 kfree(edesc); 177 173 178 - akcipher_request_complete(req, err); 174 + akcipher_request_complete(req, ecode); 179 175 } 180 176 181 177 /**
+38 -22
drivers/crypto/caam/error.c
··· 211 211 "Secure key generation", 212 212 }; 213 213 214 - static void report_ccb_status(struct device *jrdev, const u32 status, 215 - const char *error) 214 + static int report_ccb_status(struct device *jrdev, const u32 status, 215 + const char *error) 216 216 { 217 217 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> 218 218 JRSTA_CCBERR_CHAID_SHIFT; ··· 248 248 * CCB ICV check failures are part of normal operation life; 249 249 * we leave the upper layers to do what they want with them. 250 250 */ 251 - if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) 252 - dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", 253 - status, error, idx_str, idx, 254 - cha_str, cha_err_code, 255 - err_str, err_err_code); 251 + if (err_id == JRSTA_CCBERR_ERRID_ICVCHK) 252 + return -EBADMSG; 253 + 254 + dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status, 255 + error, idx_str, idx, cha_str, cha_err_code, 256 + err_str, err_err_code); 257 + 258 + return -EINVAL; 256 259 } 257 260 258 - static void report_jump_status(struct device *jrdev, const u32 status, 259 - const char *error) 261 + static int report_jump_status(struct device *jrdev, const u32 status, 262 + const char *error) 260 263 { 261 264 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 262 265 status, error, __func__); 266 + 267 + return -EINVAL; 263 268 } 264 269 265 - static void report_deco_status(struct device *jrdev, const u32 status, 266 - const char *error) 270 + static int report_deco_status(struct device *jrdev, const u32 status, 271 + const char *error) 267 272 { 268 273 u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; 269 274 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> ··· 294 289 295 290 dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", 296 291 status, error, idx_str, idx, err_str, err_err_code); 292 + 293 + return -EINVAL; 297 294 } 298 295 299 - static void report_qi_status(struct device *qidev, const u32 status, 300 - const char *error) 296 + static int report_qi_status(struct device *qidev, const u32 status, 
297 + const char *error) 301 298 { 302 299 u8 err_id = status & JRSTA_QIERR_ERROR_MASK; 303 300 const char *err_str = "unidentified error value 0x"; ··· 317 310 318 311 dev_err(qidev, "%08x: %s: %s%s\n", 319 312 status, error, err_str, err_err_code); 313 + 314 + return -EINVAL; 320 315 } 321 316 322 - static void report_jr_status(struct device *jrdev, const u32 status, 323 - const char *error) 317 + static int report_jr_status(struct device *jrdev, const u32 status, 318 + const char *error) 324 319 { 325 320 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 326 321 status, error, __func__); 322 + 323 + return -EINVAL; 327 324 } 328 325 329 - static void report_cond_code_status(struct device *jrdev, const u32 status, 330 - const char *error) 326 + static int report_cond_code_status(struct device *jrdev, const u32 status, 327 + const char *error) 331 328 { 332 329 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 333 330 status, error, __func__); 331 + 332 + return -EINVAL; 334 333 } 335 334 336 - void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) 335 + int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) 337 336 { 338 337 static const struct stat_src { 339 - void (*report_ssed)(struct device *jrdev, const u32 status, 340 - const char *error); 338 + int (*report_ssed)(struct device *jrdev, const u32 status, 339 + const char *error); 341 340 const char *error; 342 341 } status_src[16] = { 343 342 { NULL, "No error" }, ··· 371 358 * Otherwise print the error source name. 372 359 */ 373 360 if (status_src[ssrc].report_ssed) 374 - status_src[ssrc].report_ssed(jrdev, status, error); 375 - else if (error) 361 + return status_src[ssrc].report_ssed(jrdev, status, error); 362 + 363 + if (error) 376 364 dev_err(jrdev, "%d: %s\n", ssrc, error); 377 365 else 378 366 dev_err(jrdev, "%d: unknown error source\n", ssrc); 367 + 368 + return -EINVAL; 379 369 } 380 370 EXPORT_SYMBOL(caam_strstatus); 381 371
+1 -1
drivers/crypto/caam/error.h
··· 12 12 13 13 #define CAAM_ERROR_STR_MAX 302 14 14 15 - void caam_strstatus(struct device *dev, u32 status, bool qi_v2); 15 + int caam_strstatus(struct device *dev, u32 status, bool qi_v2); 16 16 17 17 #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) 18 18 #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
+3 -2
drivers/crypto/caam/key_gen.c
··· 15 15 void *context) 16 16 { 17 17 struct split_key_result *res = context; 18 + int ecode = 0; 18 19 19 20 dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 20 21 21 22 if (err) 22 - caam_jr_strstatus(dev, err); 23 + ecode = caam_jr_strstatus(dev, err); 23 24 24 - res->err = err; 25 + res->err = ecode; 25 26 26 27 complete(&res->completion); 27 28 }
+3 -2
drivers/crypto/caam/qi.c
··· 577 577 578 578 if (ssrc != JRSTA_SSRC_CCB_ERROR || 579 579 err_id != JRSTA_CCBERR_ERRID_ICVCHK) 580 - dev_err(qidev, "Error: %#x in CAAM response FD\n", 581 - status); 580 + dev_err_ratelimited(qidev, 581 + "Error: %#x in CAAM response FD\n", 582 + status); 582 583 } 583 584 584 585 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {