Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blk-crypto-fallback: properly prefix function and struct names

For clarity, avoid using just the "blk_crypto_" prefix for functions and
structs that are specific to blk-crypto-fallback. Instead, use
"blk_crypto_fallback_". Some places already did this, but others
didn't.

This is also a prerequisite for using "struct blk_crypto_keyslot" to
mean a generic blk-crypto keyslot (which is what it sounds like).
Rename the fallback one to "struct blk_crypto_fallback_keyslot".

No change in behavior.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211018180453.40441-2-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Eric Biggers; committed by Jens Axboe.
eebcafae 435c2acb

+30 -29
block/blk-crypto-fallback.c
··· 73 73 static DEFINE_MUTEX(tfms_init_lock); 74 74 static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX]; 75 75 76 - static struct blk_crypto_keyslot { 76 + static struct blk_crypto_fallback_keyslot { 77 77 enum blk_crypto_mode_num crypto_mode; 78 78 struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; 79 79 } *blk_crypto_keyslots; ··· 89 89 */ 90 90 static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE]; 91 91 92 - static void blk_crypto_evict_keyslot(unsigned int slot) 92 + static void blk_crypto_fallback_evict_keyslot(unsigned int slot) 93 93 { 94 - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; 94 + struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot]; 95 95 enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode; 96 96 int err; 97 97 ··· 104 104 slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID; 105 105 } 106 106 107 - static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm, 108 - const struct blk_crypto_key *key, 109 - unsigned int slot) 107 + static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm, 108 + const struct blk_crypto_key *key, 109 + unsigned int slot) 110 110 { 111 - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; 111 + struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot]; 112 112 const enum blk_crypto_mode_num crypto_mode = 113 113 key->crypto_cfg.crypto_mode; 114 114 int err; 115 115 116 116 if (crypto_mode != slotp->crypto_mode && 117 117 slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) 118 - blk_crypto_evict_keyslot(slot); 118 + blk_crypto_fallback_evict_keyslot(slot); 119 119 120 120 slotp->crypto_mode = crypto_mode; 121 121 err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw, 122 122 key->size); 123 123 if (err) { 124 - blk_crypto_evict_keyslot(slot); 124 + blk_crypto_fallback_evict_keyslot(slot); 125 125 return err; 126 126 } 127 127 return 0; 128 128 } 129 129 130 - static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, 
131 - const struct blk_crypto_key *key, 132 - unsigned int slot) 130 + static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm, 131 + const struct blk_crypto_key *key, 132 + unsigned int slot) 133 133 { 134 - blk_crypto_evict_keyslot(slot); 134 + blk_crypto_fallback_evict_keyslot(slot); 135 135 return 0; 136 136 } 137 137 ··· 141 141 * hardware. 142 142 */ 143 143 static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = { 144 - .keyslot_program = blk_crypto_keyslot_program, 145 - .keyslot_evict = blk_crypto_keyslot_evict, 144 + .keyslot_program = blk_crypto_fallback_keyslot_program, 145 + .keyslot_evict = blk_crypto_fallback_keyslot_evict, 146 146 }; 147 147 148 148 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio) ··· 160 160 bio_endio(src_bio); 161 161 } 162 162 163 - static struct bio *blk_crypto_clone_bio(struct bio *bio_src) 163 + static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) 164 164 { 165 165 struct bvec_iter iter; 166 166 struct bio_vec bv; ··· 187 187 return bio; 188 188 } 189 189 190 - static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot, 191 - struct skcipher_request **ciph_req_ret, 192 - struct crypto_wait *wait) 190 + static bool 191 + blk_crypto_fallback_alloc_cipher_req(struct blk_ksm_keyslot *slot, 192 + struct skcipher_request **ciph_req_ret, 193 + struct crypto_wait *wait) 193 194 { 194 195 struct skcipher_request *ciph_req; 195 - const struct blk_crypto_keyslot *slotp; 196 + const struct blk_crypto_fallback_keyslot *slotp; 196 197 int keyslot_idx = blk_ksm_get_slot_idx(slot); 197 198 198 199 slotp = &blk_crypto_keyslots[keyslot_idx]; ··· 211 210 return true; 212 211 } 213 212 214 - static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr) 213 + static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr) 215 214 { 216 215 struct bio *bio = *bio_ptr; 217 216 unsigned int i = 0; ··· 278 277 blk_status_t blk_st; 279 278 280 279 /* Split the bio if it's 
too big for single page bvec */ 281 - if (!blk_crypto_split_bio_if_needed(bio_ptr)) 280 + if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr)) 282 281 return false; 283 282 284 283 src_bio = *bio_ptr; ··· 286 285 data_unit_size = bc->bc_key->crypto_cfg.data_unit_size; 287 286 288 287 /* Allocate bounce bio for encryption */ 289 - enc_bio = blk_crypto_clone_bio(src_bio); 288 + enc_bio = blk_crypto_fallback_clone_bio(src_bio); 290 289 if (!enc_bio) { 291 290 src_bio->bi_status = BLK_STS_RESOURCE; 292 291 return false; ··· 303 302 } 304 303 305 304 /* and then allocate an skcipher_request for it */ 306 - if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) { 305 + if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) { 307 306 src_bio->bi_status = BLK_STS_RESOURCE; 308 307 goto out_release_keyslot; 309 308 } ··· 405 404 } 406 405 407 406 /* and then allocate an skcipher_request for it */ 408 - if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) { 407 + if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) { 409 408 bio->bi_status = BLK_STS_RESOURCE; 410 409 goto out; 411 410 } ··· 475 474 * @bio_ptr: pointer to the bio to prepare 476 475 * 477 476 * If bio is doing a WRITE operation, this splits the bio into two parts if it's 478 - * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio 479 - * for the first part, encrypts it, and update bio_ptr to point to the bounce 480 - * bio. 477 + * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a 478 + * bounce bio for the first part, encrypts it, and updates bio_ptr to point to 479 + * the bounce bio. 481 480 * 482 481 * For a READ operation, we mark the bio for decryption by using bi_private and 483 482 * bi_end_io. 
··· 612 611 int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) 613 612 { 614 613 const char *cipher_str = blk_crypto_modes[mode_num].cipher_str; 615 - struct blk_crypto_keyslot *slotp; 614 + struct blk_crypto_fallback_keyslot *slotp; 616 615 unsigned int i; 617 616 int err = 0; 618 617