// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API. See Documentation/block/inline-encryption.rst. fscrypt
 * still provides the key and IV to use.
 */

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

static struct block_device **fscrypt_get_devices(struct super_block *sb,
                                                 unsigned int *num_devs)
{
        struct block_device **devs;

        if (sb->s_cop->get_devices) {
                devs = sb->s_cop->get_devices(sb, num_devs);
                if (devs)
                        return devs;
        }
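        /*
         * No ->get_devices() method, so the filesystem uses only its main
         * block device.
         */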
        devs = kmalloc(sizeof(*devs), GFP_KERNEL);
        if (!devs)
                return ERR_PTR(-ENOMEM);
        devs[0] = sb->s_bdev;
        *num_devs = 1;
        return devs;
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_inode_info *ci)
{
        const struct super_block *sb = ci->ci_inode->i_sb;
        unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
        int dun_bits;

        if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
                return offsetofend(union fscrypt_iv, nonce);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
                return sizeof(__le64);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
                return sizeof(__le32);

        /* Default case: IVs are just the file data unit index */
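        /*
         * Illustrative arithmetic: if the filesystem's maximum file size
         * implies a 32-bit data unit index, this returns
         * DIV_ROUND_UP(32, 8) == 4 DUN bytes.
         */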
        dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
        return DIV_ROUND_UP(dun_bits, 8);
}

/*
 * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
 * for an encryption mode for the first time. This is the blk-crypto
 * counterpart to the message logged when starting to use the crypto API for the
 * first time. A limitation is that these messages don't convey which specific
 * filesystems or files are using each implementation. However, *usually*
 * systems use just one implementation per mode, which makes these messages
 * helpful for debugging problems where the "wrong" implementation is used.
 */
static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
                                        struct block_device **devs,
                                        unsigned int num_devs,
                                        const struct blk_crypto_config *cfg)
{
        unsigned int i;

        for (i = 0; i < num_devs; i++) {
                if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
                    blk_crypto_config_supported_natively(devs[i], cfg)) {
                        if (!xchg(&mode->logged_blk_crypto_native, 1))
                                pr_info("fscrypt: %s using blk-crypto (native)\n",
                                        mode->friendly_name);
                } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
                        pr_info("fscrypt: %s using blk-crypto-fallback\n",
                                mode->friendly_name);
                }
        }
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
                                   bool is_hw_wrapped_key)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        struct blk_crypto_config crypto_cfg;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;

        /* The file must need contents encryption, not filenames encryption */
        if (!S_ISREG(inode->i_mode))
                return 0;

        /* The crypto mode must have a blk-crypto counterpart */
        if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
                return 0;

        /* The filesystem must be mounted with -o inlinecrypt */
        if (!(sb->s_flags & SB_INLINECRYPT))
                return 0;

        /*
         * When a page contains multiple logically contiguous filesystem blocks,
         * some filesystem code only calls fscrypt_mergeable_bio() for the first
         * block in the page. This is fine for most of fscrypt's IV generation
         * strategies, where contiguous blocks imply contiguous IVs. But it
         * doesn't work with IV_INO_LBLK_32. For now, simply exclude
         * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
         */
        if ((fscrypt_policy_flags(&ci->ci_policy) &
             FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
            sb->s_blocksize != PAGE_SIZE)
                return 0;

        /*
         * On all the filesystem's block devices, blk-crypto must support the
         * crypto configuration that the file would use.
         */
        crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
        crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
        crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
        crypto_cfg.key_type = is_hw_wrapped_key ?
                BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;

        devs = fscrypt_get_devices(sb, &num_devs);
        if (IS_ERR(devs))
                return PTR_ERR(devs);

        for (i = 0; i < num_devs; i++) {
                if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
                        goto out_free_devs;
        }

        fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);

        ci->ci_inlinecrypt = true;
out_free_devs:
        kfree(devs);

        return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
                                     const u8 *key_bytes, size_t key_size,
                                     bool is_hw_wrapped,
                                     const struct fscrypt_inode_info *ci)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
        enum blk_crypto_key_type key_type = is_hw_wrapped ?
                BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
        struct blk_crypto_key *blk_key;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;
        int err;

        blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL);
        if (!blk_key)
                return -ENOMEM;

        err = blk_crypto_init_key(blk_key, key_bytes, key_size, key_type,
                                  crypto_mode, fscrypt_get_dun_bytes(ci),
                                  1U << ci->ci_data_unit_bits);
        if (err) {
                fscrypt_err(inode, "error %d initializing blk-crypto key", err);
                goto fail;
        }

        /* Start using blk-crypto on all the filesystem's block devices. */
        devs = fscrypt_get_devices(sb, &num_devs);
        if (IS_ERR(devs)) {
                err = PTR_ERR(devs);
                goto fail;
        }
        for (i = 0; i < num_devs; i++) {
                err = blk_crypto_start_using_key(devs[i], blk_key);
                if (err)
                        break;
        }
        kfree(devs);
        if (err) {
                fscrypt_err(inode, "error %d starting to use blk-crypto", err);
                goto fail;
        }

        /*
         * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
         * I.e., here we publish ->blk_key with a RELEASE barrier so that
         * concurrent tasks can ACQUIRE it. Note that this concurrency is only
         * possible for per-mode keys, not for per-file keys.
         */
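        /*
         * Illustrative sketch of the reader side (fscrypt_is_key_prepared()
         * in fscrypt_private.h):
         *
         *      if (smp_load_acquire(&prep_key->blk_key))
         *              // ->blk_key and everything initialized before the
         *              // store-release below are visible to this task
         */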
        smp_store_release(&prep_key->blk_key, blk_key);
        return 0;

fail:
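        /* kfree_sensitive() zeroizes the key material before freeing it. */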
        kfree_sensitive(blk_key);
        return err;
}

void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
                                      struct fscrypt_prepared_key *prep_key)
{
        struct blk_crypto_key *blk_key = prep_key->blk_key;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;

        if (!blk_key)
                return;

        /* Evict the key from all the filesystem's block devices. */
        devs = fscrypt_get_devices(sb, &num_devs);
        if (!IS_ERR(devs)) {
                for (i = 0; i < num_devs; i++)
                        blk_crypto_evict_key(devs[i], blk_key);
                kfree(devs);
        }
        kfree_sensitive(blk_key);
}

/*
 * Ask the inline encryption hardware to derive the software secret from a
 * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
 * supported on this filesystem or hardware.
 */
int fscrypt_derive_sw_secret(struct super_block *sb,
                             const u8 *wrapped_key, size_t wrapped_key_size,
                             u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
        int err;

        /* The filesystem must be mounted with -o inlinecrypt. */
        if (!(sb->s_flags & SB_INLINECRYPT)) {
                fscrypt_warn(NULL,
                             "%s: filesystem not mounted with inlinecrypt\n",
                             sb->s_id);
                return -EOPNOTSUPP;
        }

        err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
                                          wrapped_key_size, sw_secret);
        if (err == -EOPNOTSUPP)
                fscrypt_warn(NULL,
                             "%s: block device doesn't support hardware-wrapped keys\n",
                             sb->s_id);
        return err;
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
        return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

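/*
 * Illustrative note: with 4096-byte blocks and 4096-byte data units,
 * ci_data_units_per_block_bits == 0, so the data unit index below is just
 * the logical block number.
 */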
static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
                                 u64 lblk_num,
                                 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
        u64 index = lblk_num << ci->ci_data_units_per_block_bits;
        union fscrypt_iv iv;
        int i;

        fscrypt_generate_iv(&iv, index, ci);

        BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
        memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
        for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
                dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *            bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                               u64 first_lblk, gfp_t gfp_mask)
{
        const struct fscrypt_inode_info *ci;
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return;
        ci = inode->i_crypt_info;

        fscrypt_generate_dun(ci, first_lblk, dun);
        bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
                                      const struct inode **inode_ret,
                                      u64 *lblk_num_ret)
{
        struct folio *folio = bh->b_folio;
        const struct address_space *mapping;
        const struct inode *inode;

        /*
         * The ext4 journal (jbd2) can submit a buffer_head it directly created
         * for a non-pagecache page. fscrypt doesn't care about these.
         */
        mapping = folio_mapping(folio);
        if (!mapping)
                return false;
        inode = mapping->host;

        *inode_ret = inode;
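        /*
         * Illustrative example: with PAGE_SHIFT == 12 and 1024-byte blocks
         * (i_blkbits == 10), each page holds 4 blocks, so the folio index is
         * scaled by 4 and bh_offset() contributes the block within the folio.
         */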
        *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
                        (bh_offset(bh) >> inode->i_blkbits);
        return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *                                  crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
                                  const struct buffer_head *first_bh,
                                  gfp_t gfp_mask)
{
        const struct inode *inode;
        u64 first_lblk;

        if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
                fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function isn't required in cases where crypto-mergeability is ensured in
 * another way, such as I/O targeting only a single file (and thus a single key)
 * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                           u64 next_lblk)
{
        const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
                return false;
        if (!bc)
                return true;

        /*
         * Comparing the key pointers is good enough, as all I/O for each key
         * uses the same pointer. I.e., there's currently no need to support
         * merging requests where the keys are the same but the pointers differ.
         */
        if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key)
                return false;

        fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
        return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
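
/*
 * Illustrative usage sketch (not from the kernel tree; the local variable
 * names are hypothetical): a filesystem building a contents bio might do
 * roughly:
 *
 *      bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_NOIO);
 *      fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOIO);
 *      for each further block:
 *              if (!fscrypt_mergeable_bio(bio, inode, next_lblk))
 *                      submit this bio and start a new one
 */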

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
                              const struct buffer_head *next_bh)
{
        const struct inode *inode;
        u64 next_lblk;

        if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
                return !bio->bi_crypt_context;

        return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
 *                           inode, as far as encryption is concerned
 * @inode: the inode in question
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *         being supported; %false if DIO is unsupported. (Note that in the
 *         %true case, the filesystem might have other, non-encryption-related
 *         constraints that prevent DIO from actually being supported. Also, on
 *         encrypted files the filesystem is still responsible for only allowing
 *         DIO when requests are filesystem-block-aligned.)
 */
bool fscrypt_dio_supported(struct inode *inode)
{
        int err;

        /* If the file is unencrypted, no veto from us. */
        if (!fscrypt_needs_contents_encryption(inode))
                return true;

        /*
         * We only support DIO with inline crypto, not fs-layer crypto.
         *
         * To determine whether the inode is using inline crypto, we have to set
         * up the key if it wasn't already done. This is because in the current
         * design of fscrypt, the decision of whether to use inline crypto or
         * not isn't made until the inode's encryption key is being set up. In
         * the DIO read/write case, the key will always be set up already, since
         * the file will be open. But in the case of statx(), the key might not
         * be set up yet, as the file might not have been opened yet.
         */
        err = fscrypt_require_key(inode);
        if (err) {
                /*
                 * Key unavailable or couldn't be set up. This edge case isn't
                 * worth worrying about; just report that DIO is unsupported.
                 */
                return false;
        }
        return fscrypt_inode_uses_inline_crypto(inode);
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks. (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur. If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
        const struct fscrypt_inode_info *ci;
        u32 dun;

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return nr_blocks;

        if (nr_blocks <= 1)
                return nr_blocks;

        ci = inode->i_crypt_info;
        if (!(fscrypt_policy_flags(&ci->ci_policy) &
              FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
                return nr_blocks;

        /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */

        dun = ci->ci_hashed_ino + lblk;
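        /*
         * Worked example (illustrative): if dun == U32_MAX - 3, then the DUNs
         * U32_MAX - 3 .. U32_MAX are contiguous, so at most
         * (u64)U32_MAX + 1 - dun == 4 blocks may go in this bio.
         */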

        return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);