Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: ccree - Don't use %pK through printk

In the past %pK was preferable to %p as it would not leak raw pointer
values into the kernel log.
Since commit ad67b74d2469 ("printk: hash addresses printed with %p")
the regular %p has been improved to avoid this issue.
Furthermore, restricted pointers ("%pK") were never meant to be used
through printk(). They can still unintentionally leak raw pointers or
acquire sleeping locks in atomic contexts.

Switch to the regular pointer formatting, which is safer and
easier to reason about.

Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
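
As a minimal illustration of the behavior the message relies on (a hedged
sketch, assuming an out-of-tree test module; the pk_demo_* names are invented
for illustration): since ad67b74d2469, plain %p logs a per-boot hashed value,
while %px still prints the raw address and should be avoided outside debugging.

#include <linux/module.h>
#include <linux/slab.h>

static int __init pk_demo_init(void)
{
	void *buf = kmalloc(16, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* %p prints a hashed value, e.g. 00000000a1b2c3d4 */
	pr_info("hashed: %p\n", buf);
	/* %px prints the raw address; avoid outside of debugging */
	pr_info("raw:    %px\n", buf);

	kfree(buf);
	return 0;
}

static void __exit pk_demo_exit(void)
{
}

module_init(pk_demo_init);
module_exit(pk_demo_exit);
MODULE_LICENSE("GPL");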

Authored by Thomas Weißschuh, committed by Herbert Xu
c71187c1 e109b8ee

3 files changed, 44 insertions(+), 44 deletions(-)

drivers/crypto/ccree/cc_buffer_mgr.c (+27 -27)
@@ -224,7 +224,7 @@
 	/* Set MLLI size for the bypass operation */
 	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 
-	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+	dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
 		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 		mlli_params->mlli_len);
@@ -239,7 +239,7 @@
 {
 	unsigned int index = sgl_data->num_of_buffers;
 
-	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+	dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
 		index, nents, sgl, data_len, is_last_table);
 	sgl_data->nents[index] = nents;
 	sgl_data->entry[index].sgl = sgl;
@@ -298,7 +298,7 @@
 		dev_err(dev, "dma_map_sg() config buffer failed\n");
 		return -ENOMEM;
 	}
-	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
 		&sg_dma_address(&areq_ctx->ccm_adata_sg),
 		sg_page(&areq_ctx->ccm_adata_sg),
 		sg_virt(&areq_ctx->ccm_adata_sg),
@@ -323,7 +323,7 @@
 		dev_err(dev, "dma_map_sg() src buffer failed\n");
 		return -ENOMEM;
 	}
-	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
 		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 		areq_ctx->buff_sg->length);
@@ -359,10 +359,10 @@
 	if (src != dst) {
 		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
 		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
-		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
-		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+		dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
 	} else {
 		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+		dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
 	}
 }
@@ -391,11 +391,11 @@
 		req_ctx->gen_ctx.iv_dma_addr =
 			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
-			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
 				ivsize, info);
 			return -ENOMEM;
 		}
-		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+		dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
 			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 	} else {
 		req_ctx->gen_ctx.iv_dma_addr = 0;
@@ -506,7 +506,7 @@
 	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 	    (areq_ctx->mlli_params.mlli_virt_addr)) {
-		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
 			&areq_ctx->mlli_params.mlli_dma_addr,
 			areq_ctx->mlli_params.mlli_virt_addr);
 		dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -514,13 +514,13 @@
 			      areq_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+	dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
 
 	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
-		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+		dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
 			sg_virt(req->dst));
 		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
@@ -566,7 +566,7 @@
 		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
-		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
 			hw_iv_size, req->iv);
 		kfree_sensitive(areq_ctx->gen_ctx.iv);
 		areq_ctx->gen_ctx.iv = NULL;
@@ -574,7 +574,7 @@
 		goto chain_iv_exit;
 	}
 
-	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 
 chain_iv_exit:
@@ -977,7 +977,7 @@
 	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
 				  DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
 			MAX_MAC_SIZE, areq_ctx->mac_buf);
 		rc = -ENOMEM;
 		goto aead_map_failure;
@@ -991,7 +991,7 @@
 					  DMA_TO_DEVICE);
 
 		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
 				AES_BLOCK_SIZE, addr);
 			areq_ctx->ccm_iv0_dma_addr = 0;
 			rc = -ENOMEM;
@@ -1009,7 +1009,7 @@
 		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
 					  DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
 				AES_BLOCK_SIZE, areq_ctx->hkey);
 			rc = -ENOMEM;
 			goto aead_map_failure;
@@ -1019,7 +1019,7 @@
 		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
 					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
 				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
 			rc = -ENOMEM;
 			goto aead_map_failure;
@@ -1030,7 +1030,7 @@
 					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
 		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
 			areq_ctx->gcm_iv_inc1_dma_addr = 0;
 			rc = -ENOMEM;
@@ -1042,7 +1042,7 @@
 					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
 		if (dma_mapping_error(dev, dma_addr)) {
-			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
 			areq_ctx->gcm_iv_inc2_dma_addr = 0;
 			rc = -ENOMEM;
@@ -1152,7 +1152,7 @@
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
-	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+	dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 	/* Init the type of the dma buffer */
 	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1236,7 +1236,7 @@
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
-	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+	dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 	/* Init the type of the dma buffer */
 	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1246,7 +1246,7 @@
 	areq_ctx->in_nents = 0;
 
 	if (total_in_len < block_size) {
-		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+		dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
 		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
@@ -1265,7 +1265,7 @@
 
 	/* Copy the new residue to next buffer */
 	if (*next_buff_cnt) {
-		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+		dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
 			next_buff, (update_data_len - *curr_buff_cnt),
 			*next_buff_cnt);
 		cc_copy_sg_portion(dev, next_buff, src,
@@ -1338,7 +1338,7 @@
 	 *allocated and should be released
 	 */
 	if (areq_ctx->mlli_params.curr_pool) {
-		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
 			&areq_ctx->mlli_params.mlli_dma_addr,
 			areq_ctx->mlli_params.mlli_virt_addr);
 		dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1347,14 +1347,14 @@
 	}
 
 	if (src && areq_ctx->in_nents) {
-		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+		dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
 			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
 		dma_unmap_sg(dev, src,
 			     areq_ctx->in_nents, DMA_TO_DEVICE);
 	}
 
 	if (*prev_len) {
-		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
 			sg_virt(areq_ctx->buff_sg),
 			&sg_dma_address(areq_ctx->buff_sg),
 			sg_dma_len(areq_ctx->buff_sg));
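
Every call site touched above follows the same map/check/log idiom around
dma_map_single(). A condensed sketch of that pattern (my_map_buf and its
parameters are illustrative, not ccree code), using only the core DMA API:

/*
 * Hedged sketch of the idiom the diff touches throughout this file.
 * With plain %p the logged virtual address is hashed per boot, so the
 * debug output no longer risks leaking raw kernel pointers.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_map_buf(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, dma)) {
		dev_err(dev, "Mapping %zu B at va=%p for DMA failed\n",
			len, buf);
		return -ENOMEM;
	}

	dev_dbg(dev, "Mapped %zu B at va=%p to dma=%pad\n", len, buf, &dma);
	*out = dma;
	return 0;
}

Note that these dev_dbg() lines only reach the log when enabled through
dynamic debug (e.g. via /sys/kernel/debug/dynamic_debug/control), and even
then the %p values are hashed.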
drivers/crypto/ccree/cc_cipher.c (+2 -2)
@@ -211,11 +211,11 @@
 					     max_key_buf_size,
 					     DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
-		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping Key %u B at va=%p for DMA failed\n",
 			max_key_buf_size, ctx_p->user.key);
 		goto free_key;
 	}
-	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped key %u B at va=%p to dma=%pad\n",
 		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
 
 	return 0;
drivers/crypto/ccree/cc_hash.c (+15 -15)
@@ -125,7 +125,7 @@
 			digestsize);
 		return -ENOMEM;
 	}
-	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
 		digestsize, state->digest_result_buff,
 		&state->digest_result_dma_addr);
 
@@ -184,11 +184,11 @@
 		dma_map_single(dev, state->digest_buff,
 			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
-		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n",
 			ctx->inter_digestsize, state->digest_buff);
 		return -EINVAL;
 	}
-	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
 		ctx->inter_digestsize, state->digest_buff,
 		&state->digest_buff_dma_addr);
 
@@ -197,11 +197,11 @@
 			dma_map_single(dev, state->digest_bytes_len,
 				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
-			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
 				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
 			goto unmap_digest_buf;
 		}
-		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+		dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
 			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
 			&state->digest_bytes_len_dma_addr);
 	}
@@ -212,12 +212,12 @@
 					       ctx->inter_digestsize,
 					       DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
-			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+			dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
 				ctx->inter_digestsize,
 				state->opad_digest_buff);
 			goto unmap_digest_len;
 		}
-		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+		dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
 			ctx->inter_digestsize, state->opad_digest_buff,
 			&state->opad_digest_dma_addr);
 	}
@@ -272,7 +272,7 @@
 	if (state->digest_result_dma_addr) {
 		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
 				 DMA_BIDIRECTIONAL);
-		dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+		dev_dbg(dev, "unmpa digest result buffer va (%p) pa (%pad) len %u\n",
 			state->digest_result_buff,
 			&state->digest_result_dma_addr, digestsize);
 		memcpy(result, state->digest_result_buff, digestsize);
@@ -287,7 +287,7 @@
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 
-	dev_dbg(dev, "req=%pK\n", req);
+	dev_dbg(dev, "req=%p\n", req);
 
 	if (err != -EINPROGRESS) {
 		/* Not a BACKLOG notification */
@@ -306,7 +306,7 @@
 	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 	u32 digestsize = crypto_ahash_digestsize(tfm);
 
-	dev_dbg(dev, "req=%pK\n", req);
+	dev_dbg(dev, "req=%p\n", req);
 
 	if (err != -EINPROGRESS) {
 		/* Not a BACKLOG notification */
@@ -326,7 +326,7 @@
 	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 	u32 digestsize = crypto_ahash_digestsize(tfm);
 
-	dev_dbg(dev, "req=%pK\n", req);
+	dev_dbg(dev, "req=%p\n", req);
 
 	if (err != -EINPROGRESS) {
 		/* Not a BACKLOG notification */
@@ -1077,11 +1077,11 @@
 		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
 			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
-		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n",
 			sizeof(ctx->digest_buff), ctx->digest_buff);
 		goto fail;
 	}
-	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
 		sizeof(ctx->digest_buff), ctx->digest_buff,
 		&ctx->digest_buff_dma_addr);
 
@@ -1090,12 +1090,12 @@
 					sizeof(ctx->opad_tmp_keys_buff),
 					DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
-		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+		dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
 			sizeof(ctx->opad_tmp_keys_buff),
 			ctx->opad_tmp_keys_buff);
 		goto fail;
 	}
-	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
 		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
 		&ctx->opad_tmp_keys_dma_addr);
 