Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
"The major change this cycle is deleting ext4's copy of the file system
encryption code and switching things over to using the copies in
fs/crypto. I've updated the MAINTAINERS file to add an entry for
fs/crypto listing Jaegeuk Kim and myself as the maintainers.

There are also a number of bug fixes, most notably for some problems
found by American Fuzzy Lop (AFL) courtesy of Vegard Nossum. Also
fixed is a writeback deadlock detected by generic/130, and some
potential races in the metadata checksum code"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (21 commits)
ext4: verify extent header depth
ext4: short-cut orphan cleanup on error
ext4: fix reference counting bug on block allocation error
MAINTAINRES: fs-crypto maintainers update
ext4 crypto: migrate into vfs's crypto engine
ext2: fix filesystem deadlock while reading corrupted xattr block
ext4: fix project quota accounting without quota limits enabled
ext4: validate s_reserved_gdt_blocks on mount
ext4: remove unused page_idx
ext4: don't call ext4_should_journal_data() on the journal inode
ext4: Fix WARN_ON_ONCE in ext4_commit_super()
ext4: fix deadlock during page writeback
ext4: correct error value of function verifying dx checksum
ext4: avoid modifying checksum fields directly during checksum verification
ext4: check for extents that wrap around
jbd2: make journal y2038 safe
jbd2: track more dependencies on transaction commit
jbd2: move lockdep tracking to journal_s
jbd2: move lockdep instrumentation for jbd2 handles
ext4: respect the nobarrier mount option in nojournal mode
...

+544 -2109
+7
MAINTAINERS
··· 4942 4942 F: fs/fscache/ 4943 4943 F: include/linux/fscache*.h 4944 4944 4945 + FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT 4946 + M: Theodore Y. Ts'o <tytso@mit.edu> 4947 + M: Jaegeuk Kim <jaegeuk@kernel.org> 4948 + S: Supported 4949 + F: fs/crypto/ 4950 + F: include/linux/fscrypto.h 4951 + 4945 4952 F2FS FILE SYSTEM 4946 4953 M: Jaegeuk Kim <jaegeuk@kernel.org> 4947 4954 M: Changman Lee <cm224.lee@samsung.com>
+21
fs/ext2/balloc.c
··· 1194 1194 } 1195 1195 1196 1196 /* 1197 + * Returns 1 if the passed-in block region is valid; 0 if some part overlaps 1198 + * with filesystem metadata blocks. 1199 + */ 1200 + int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk, 1201 + unsigned int count) 1202 + { 1203 + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || 1204 + (start_blk + count < start_blk) || 1205 + (start_blk > le32_to_cpu(sbi->s_es->s_blocks_count))) 1206 + return 0; 1207 + 1208 + /* Ensure we do not step over superblock */ 1209 + if ((start_blk <= sbi->s_sb_block) && 1210 + (start_blk + count >= sbi->s_sb_block)) 1211 + return 0; 1212 + 1213 + 1214 + return 1; 1215 + } 1216 + 1217 + /* 1197 1218 * ext4_new_blocks() -- core block(s) allocation function 1198 1219 * @inode: file inode 1199 1220 * @goal: given target block(filesystem wide)
+3
fs/ext2/ext2.h
··· 367 367 */ 368 368 #define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */ 369 369 #define EXT2_ERROR_FS 0x0002 /* Errors detected */ 370 + #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 370 371 371 372 /* 372 373 * Mount flags ··· 740 739 extern ext2_fsblk_t ext2_new_block(struct inode *, unsigned long, int *); 741 740 extern ext2_fsblk_t ext2_new_blocks(struct inode *, unsigned long, 742 741 unsigned long *, int *); 742 + extern int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk, 743 + unsigned int count); 743 744 extern void ext2_free_blocks (struct inode *, unsigned long, 744 745 unsigned long); 745 746 extern unsigned long ext2_count_free_blocks (struct super_block *);
+10
fs/ext2/inode.c
··· 1389 1389 ei->i_frag_size = raw_inode->i_fsize; 1390 1390 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 1391 1391 ei->i_dir_acl = 0; 1392 + 1393 + if (ei->i_file_acl && 1394 + !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) { 1395 + ext2_error(sb, "ext2_iget", "bad extended attribute block %u", 1396 + ei->i_file_acl); 1397 + brelse(bh); 1398 + ret = -EFSCORRUPTED; 1399 + goto bad_inode; 1400 + } 1401 + 1392 1402 if (S_ISREG(inode->i_mode)) 1393 1403 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; 1394 1404 else
+9
fs/ext2/xattr.c
··· 759 759 ext2_xattr_delete_inode(struct inode *inode) 760 760 { 761 761 struct buffer_head *bh = NULL; 762 + struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb); 762 763 763 764 down_write(&EXT2_I(inode)->xattr_sem); 764 765 if (!EXT2_I(inode)->i_file_acl) 765 766 goto cleanup; 767 + 768 + if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 0)) { 769 + ext2_error(inode->i_sb, "ext2_xattr_delete_inode", 770 + "inode %ld: xattr block %d is out of data blocks range", 771 + inode->i_ino, EXT2_I(inode)->i_file_acl); 772 + goto cleanup; 773 + } 774 + 766 775 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl); 767 776 if (!bh) { 768 777 ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
+2 -10
fs/ext4/Kconfig
··· 99 99 extended attributes for file security labels, say N. 100 100 101 101 config EXT4_ENCRYPTION 102 - tristate "Ext4 Encryption" 102 + bool "Ext4 Encryption" 103 103 depends on EXT4_FS 104 - select CRYPTO_AES 105 - select CRYPTO_CBC 106 - select CRYPTO_ECB 107 - select CRYPTO_XTS 108 - select CRYPTO_CTS 109 - select CRYPTO_CTR 110 - select CRYPTO_SHA256 111 - select KEYS 112 - select ENCRYPTED_KEYS 104 + select FS_ENCRYPTION 113 105 help 114 106 Enable encryption of ext4 files and directories. This 115 107 feature is similar to ecryptfs, but it is more memory
-2
fs/ext4/Makefile
··· 12 12 13 13 ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o 14 14 ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o 15 - ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \ 16 - crypto_key.o crypto_fname.o
+6 -1
fs/ext4/balloc.c
··· 208 208 memset(bh->b_data, 0, sb->s_blocksize); 209 209 210 210 bit_max = ext4_num_base_meta_clusters(sb, block_group); 211 + if ((bit_max >> 3) >= bh->b_size) 212 + return -EFSCORRUPTED; 213 + 211 214 for (bit = 0; bit < bit_max; bit++) 212 215 ext4_set_bit(bit, bh->b_data); 213 216 ··· 613 610 614 611 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); 615 612 616 - jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 613 + smp_mb(); 614 + if (EXT4_SB(sb)->s_mb_free_pending) 615 + jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 617 616 return 1; 618 617 } 619 618
-537
fs/ext4/crypto.c
··· 1 - /* 2 - * linux/fs/ext4/crypto.c 3 - * 4 - * Copyright (C) 2015, Google, Inc. 5 - * 6 - * This contains encryption functions for ext4 7 - * 8 - * Written by Michael Halcrow, 2014. 9 - * 10 - * Filename encryption additions 11 - * Uday Savagaonkar, 2014 12 - * Encryption policy handling additions 13 - * Ildar Muslukhov, 2014 14 - * 15 - * This has not yet undergone a rigorous security audit. 16 - * 17 - * The usage of AES-XTS should conform to recommendations in NIST 18 - * Special Publication 800-38E and IEEE P1619/D16. 19 - */ 20 - 21 - #include <crypto/skcipher.h> 22 - #include <keys/user-type.h> 23 - #include <keys/encrypted-type.h> 24 - #include <linux/ecryptfs.h> 25 - #include <linux/gfp.h> 26 - #include <linux/kernel.h> 27 - #include <linux/key.h> 28 - #include <linux/list.h> 29 - #include <linux/mempool.h> 30 - #include <linux/module.h> 31 - #include <linux/mutex.h> 32 - #include <linux/random.h> 33 - #include <linux/scatterlist.h> 34 - #include <linux/spinlock_types.h> 35 - #include <linux/namei.h> 36 - 37 - #include "ext4_extents.h" 38 - #include "xattr.h" 39 - 40 - /* Encryption added and removed here! (L: */ 41 - 42 - static unsigned int num_prealloc_crypto_pages = 32; 43 - static unsigned int num_prealloc_crypto_ctxs = 128; 44 - 45 - module_param(num_prealloc_crypto_pages, uint, 0444); 46 - MODULE_PARM_DESC(num_prealloc_crypto_pages, 47 - "Number of crypto pages to preallocate"); 48 - module_param(num_prealloc_crypto_ctxs, uint, 0444); 49 - MODULE_PARM_DESC(num_prealloc_crypto_ctxs, 50 - "Number of crypto contexts to preallocate"); 51 - 52 - static mempool_t *ext4_bounce_page_pool; 53 - 54 - static LIST_HEAD(ext4_free_crypto_ctxs); 55 - static DEFINE_SPINLOCK(ext4_crypto_ctx_lock); 56 - 57 - static struct kmem_cache *ext4_crypto_ctx_cachep; 58 - struct kmem_cache *ext4_crypt_info_cachep; 59 - 60 - /** 61 - * ext4_release_crypto_ctx() - Releases an encryption context 62 - * @ctx: The encryption context to release. 
63 - * 64 - * If the encryption context was allocated from the pre-allocated pool, returns 65 - * it to that pool. Else, frees it. 66 - * 67 - * If there's a bounce page in the context, this frees that. 68 - */ 69 - void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) 70 - { 71 - unsigned long flags; 72 - 73 - if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) 74 - mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool); 75 - ctx->w.bounce_page = NULL; 76 - ctx->w.control_page = NULL; 77 - if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) { 78 - kmem_cache_free(ext4_crypto_ctx_cachep, ctx); 79 - } else { 80 - spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); 81 - list_add(&ctx->free_list, &ext4_free_crypto_ctxs); 82 - spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); 83 - } 84 - } 85 - 86 - /** 87 - * ext4_get_crypto_ctx() - Gets an encryption context 88 - * @inode: The inode for which we are doing the crypto 89 - * 90 - * Allocates and initializes an encryption context. 91 - * 92 - * Return: An allocated and initialized encryption context on success; error 93 - * value or NULL otherwise. 94 - */ 95 - struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, 96 - gfp_t gfp_flags) 97 - { 98 - struct ext4_crypto_ctx *ctx = NULL; 99 - int res = 0; 100 - unsigned long flags; 101 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 102 - 103 - if (ci == NULL) 104 - return ERR_PTR(-ENOKEY); 105 - 106 - /* 107 - * We first try getting the ctx from a free list because in 108 - * the common case the ctx will have an allocated and 109 - * initialized crypto tfm, so it's probably a worthwhile 110 - * optimization. For the bounce page, we first try getting it 111 - * from the kernel allocator because that's just about as fast 112 - * as getting it from a list and because a cache of free pages 113 - * should generally be a "last resort" option for a filesystem 114 - * to be able to do its job. 
115 - */ 116 - spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); 117 - ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs, 118 - struct ext4_crypto_ctx, free_list); 119 - if (ctx) 120 - list_del(&ctx->free_list); 121 - spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); 122 - if (!ctx) { 123 - ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags); 124 - if (!ctx) { 125 - res = -ENOMEM; 126 - goto out; 127 - } 128 - ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; 129 - } else { 130 - ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; 131 - } 132 - ctx->flags &= ~EXT4_WRITE_PATH_FL; 133 - 134 - out: 135 - if (res) { 136 - if (!IS_ERR_OR_NULL(ctx)) 137 - ext4_release_crypto_ctx(ctx); 138 - ctx = ERR_PTR(res); 139 - } 140 - return ctx; 141 - } 142 - 143 - struct workqueue_struct *ext4_read_workqueue; 144 - static DEFINE_MUTEX(crypto_init); 145 - 146 - /** 147 - * ext4_exit_crypto() - Shutdown the ext4 encryption system 148 - */ 149 - void ext4_exit_crypto(void) 150 - { 151 - struct ext4_crypto_ctx *pos, *n; 152 - 153 - list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) 154 - kmem_cache_free(ext4_crypto_ctx_cachep, pos); 155 - INIT_LIST_HEAD(&ext4_free_crypto_ctxs); 156 - if (ext4_bounce_page_pool) 157 - mempool_destroy(ext4_bounce_page_pool); 158 - ext4_bounce_page_pool = NULL; 159 - if (ext4_read_workqueue) 160 - destroy_workqueue(ext4_read_workqueue); 161 - ext4_read_workqueue = NULL; 162 - if (ext4_crypto_ctx_cachep) 163 - kmem_cache_destroy(ext4_crypto_ctx_cachep); 164 - ext4_crypto_ctx_cachep = NULL; 165 - if (ext4_crypt_info_cachep) 166 - kmem_cache_destroy(ext4_crypt_info_cachep); 167 - ext4_crypt_info_cachep = NULL; 168 - } 169 - 170 - /** 171 - * ext4_init_crypto() - Set up for ext4 encryption. 172 - * 173 - * We only call this when we start accessing encrypted files, since it 174 - * results in memory getting allocated that wouldn't otherwise be used. 175 - * 176 - * Return: Zero on success, non-zero otherwise. 
177 - */ 178 - int ext4_init_crypto(void) 179 - { 180 - int i, res = -ENOMEM; 181 - 182 - mutex_lock(&crypto_init); 183 - if (ext4_read_workqueue) 184 - goto already_initialized; 185 - ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0); 186 - if (!ext4_read_workqueue) 187 - goto fail; 188 - 189 - ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx, 190 - SLAB_RECLAIM_ACCOUNT); 191 - if (!ext4_crypto_ctx_cachep) 192 - goto fail; 193 - 194 - ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info, 195 - SLAB_RECLAIM_ACCOUNT); 196 - if (!ext4_crypt_info_cachep) 197 - goto fail; 198 - 199 - for (i = 0; i < num_prealloc_crypto_ctxs; i++) { 200 - struct ext4_crypto_ctx *ctx; 201 - 202 - ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); 203 - if (!ctx) { 204 - res = -ENOMEM; 205 - goto fail; 206 - } 207 - list_add(&ctx->free_list, &ext4_free_crypto_ctxs); 208 - } 209 - 210 - ext4_bounce_page_pool = 211 - mempool_create_page_pool(num_prealloc_crypto_pages, 0); 212 - if (!ext4_bounce_page_pool) { 213 - res = -ENOMEM; 214 - goto fail; 215 - } 216 - already_initialized: 217 - mutex_unlock(&crypto_init); 218 - return 0; 219 - fail: 220 - ext4_exit_crypto(); 221 - mutex_unlock(&crypto_init); 222 - return res; 223 - } 224 - 225 - void ext4_restore_control_page(struct page *data_page) 226 - { 227 - struct ext4_crypto_ctx *ctx = 228 - (struct ext4_crypto_ctx *)page_private(data_page); 229 - 230 - set_page_private(data_page, (unsigned long)NULL); 231 - ClearPagePrivate(data_page); 232 - unlock_page(data_page); 233 - ext4_release_crypto_ctx(ctx); 234 - } 235 - 236 - /** 237 - * ext4_crypt_complete() - The completion callback for page encryption 238 - * @req: The asynchronous encryption request context 239 - * @res: The result of the encryption operation 240 - */ 241 - static void ext4_crypt_complete(struct crypto_async_request *req, int res) 242 - { 243 - struct ext4_completion_result *ecr = req->data; 244 - 245 - if (res == -EINPROGRESS) 246 - return; 247 - 
ecr->res = res; 248 - complete(&ecr->completion); 249 - } 250 - 251 - typedef enum { 252 - EXT4_DECRYPT = 0, 253 - EXT4_ENCRYPT, 254 - } ext4_direction_t; 255 - 256 - static int ext4_page_crypto(struct inode *inode, 257 - ext4_direction_t rw, 258 - pgoff_t index, 259 - struct page *src_page, 260 - struct page *dest_page, 261 - gfp_t gfp_flags) 262 - 263 - { 264 - u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; 265 - struct skcipher_request *req = NULL; 266 - DECLARE_EXT4_COMPLETION_RESULT(ecr); 267 - struct scatterlist dst, src; 268 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 269 - struct crypto_skcipher *tfm = ci->ci_ctfm; 270 - int res = 0; 271 - 272 - req = skcipher_request_alloc(tfm, gfp_flags); 273 - if (!req) { 274 - printk_ratelimited(KERN_ERR 275 - "%s: crypto_request_alloc() failed\n", 276 - __func__); 277 - return -ENOMEM; 278 - } 279 - skcipher_request_set_callback( 280 - req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 281 - ext4_crypt_complete, &ecr); 282 - 283 - BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index)); 284 - memcpy(xts_tweak, &index, sizeof(index)); 285 - memset(&xts_tweak[sizeof(index)], 0, 286 - EXT4_XTS_TWEAK_SIZE - sizeof(index)); 287 - 288 - sg_init_table(&dst, 1); 289 - sg_set_page(&dst, dest_page, PAGE_SIZE, 0); 290 - sg_init_table(&src, 1); 291 - sg_set_page(&src, src_page, PAGE_SIZE, 0); 292 - skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, 293 - xts_tweak); 294 - if (rw == EXT4_DECRYPT) 295 - res = crypto_skcipher_decrypt(req); 296 - else 297 - res = crypto_skcipher_encrypt(req); 298 - if (res == -EINPROGRESS || res == -EBUSY) { 299 - wait_for_completion(&ecr.completion); 300 - res = ecr.res; 301 - } 302 - skcipher_request_free(req); 303 - if (res) { 304 - printk_ratelimited( 305 - KERN_ERR 306 - "%s: crypto_skcipher_encrypt() returned %d\n", 307 - __func__, res); 308 - return res; 309 - } 310 - return 0; 311 - } 312 - 313 - static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx, 314 - gfp_t 
gfp_flags) 315 - { 316 - ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags); 317 - if (ctx->w.bounce_page == NULL) 318 - return ERR_PTR(-ENOMEM); 319 - ctx->flags |= EXT4_WRITE_PATH_FL; 320 - return ctx->w.bounce_page; 321 - } 322 - 323 - /** 324 - * ext4_encrypt() - Encrypts a page 325 - * @inode: The inode for which the encryption should take place 326 - * @plaintext_page: The page to encrypt. Must be locked. 327 - * 328 - * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx 329 - * encryption context. 330 - * 331 - * Called on the page write path. The caller must call 332 - * ext4_restore_control_page() on the returned ciphertext page to 333 - * release the bounce buffer and the encryption context. 334 - * 335 - * Return: An allocated page with the encrypted content on success. Else, an 336 - * error value or NULL. 337 - */ 338 - struct page *ext4_encrypt(struct inode *inode, 339 - struct page *plaintext_page, 340 - gfp_t gfp_flags) 341 - { 342 - struct ext4_crypto_ctx *ctx; 343 - struct page *ciphertext_page = NULL; 344 - int err; 345 - 346 - BUG_ON(!PageLocked(plaintext_page)); 347 - 348 - ctx = ext4_get_crypto_ctx(inode, gfp_flags); 349 - if (IS_ERR(ctx)) 350 - return (struct page *) ctx; 351 - 352 - /* The encryption operation will require a bounce page. 
*/ 353 - ciphertext_page = alloc_bounce_page(ctx, gfp_flags); 354 - if (IS_ERR(ciphertext_page)) 355 - goto errout; 356 - ctx->w.control_page = plaintext_page; 357 - err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index, 358 - plaintext_page, ciphertext_page, gfp_flags); 359 - if (err) { 360 - ciphertext_page = ERR_PTR(err); 361 - errout: 362 - ext4_release_crypto_ctx(ctx); 363 - return ciphertext_page; 364 - } 365 - SetPagePrivate(ciphertext_page); 366 - set_page_private(ciphertext_page, (unsigned long)ctx); 367 - lock_page(ciphertext_page); 368 - return ciphertext_page; 369 - } 370 - 371 - /** 372 - * ext4_decrypt() - Decrypts a page in-place 373 - * @ctx: The encryption context. 374 - * @page: The page to decrypt. Must be locked. 375 - * 376 - * Decrypts page in-place using the ctx encryption context. 377 - * 378 - * Called from the read completion callback. 379 - * 380 - * Return: Zero on success, non-zero otherwise. 381 - */ 382 - int ext4_decrypt(struct page *page) 383 - { 384 - BUG_ON(!PageLocked(page)); 385 - 386 - return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT, 387 - page->index, page, page, GFP_NOFS); 388 - } 389 - 390 - int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 391 - ext4_fsblk_t pblk, ext4_lblk_t len) 392 - { 393 - struct ext4_crypto_ctx *ctx; 394 - struct page *ciphertext_page = NULL; 395 - struct bio *bio; 396 - int ret, err = 0; 397 - 398 - #if 0 399 - ext4_msg(inode->i_sb, KERN_CRIT, 400 - "ext4_encrypted_zeroout ino %lu lblk %u len %u", 401 - (unsigned long) inode->i_ino, lblk, len); 402 - #endif 403 - 404 - BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); 405 - 406 - ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); 407 - if (IS_ERR(ctx)) 408 - return PTR_ERR(ctx); 409 - 410 - ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT); 411 - if (IS_ERR(ciphertext_page)) { 412 - err = PTR_ERR(ciphertext_page); 413 - goto errout; 414 - } 415 - 416 - while (len--) { 417 - err = ext4_page_crypto(inode, EXT4_ENCRYPT, 
lblk, 418 - ZERO_PAGE(0), ciphertext_page, 419 - GFP_NOFS); 420 - if (err) 421 - goto errout; 422 - 423 - bio = bio_alloc(GFP_NOWAIT, 1); 424 - if (!bio) { 425 - err = -ENOMEM; 426 - goto errout; 427 - } 428 - bio->bi_bdev = inode->i_sb->s_bdev; 429 - bio->bi_iter.bi_sector = 430 - pblk << (inode->i_sb->s_blocksize_bits - 9); 431 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 432 - ret = bio_add_page(bio, ciphertext_page, 433 - inode->i_sb->s_blocksize, 0); 434 - if (ret != inode->i_sb->s_blocksize) { 435 - /* should never happen! */ 436 - ext4_msg(inode->i_sb, KERN_ERR, 437 - "bio_add_page failed: %d", ret); 438 - WARN_ON(1); 439 - bio_put(bio); 440 - err = -EIO; 441 - goto errout; 442 - } 443 - err = submit_bio_wait(bio); 444 - if ((err == 0) && bio->bi_error) 445 - err = -EIO; 446 - bio_put(bio); 447 - if (err) 448 - goto errout; 449 - lblk++; pblk++; 450 - } 451 - err = 0; 452 - errout: 453 - ext4_release_crypto_ctx(ctx); 454 - return err; 455 - } 456 - 457 - bool ext4_valid_contents_enc_mode(uint32_t mode) 458 - { 459 - return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS); 460 - } 461 - 462 - /** 463 - * ext4_validate_encryption_key_size() - Validate the encryption key size 464 - * @mode: The key mode. 465 - * @size: The key size to validate. 466 - * 467 - * Return: The validated key size for @mode. Zero if invalid. 468 - */ 469 - uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size) 470 - { 471 - if (size == ext4_encryption_key_size(mode)) 472 - return size; 473 - return 0; 474 - } 475 - 476 - /* 477 - * Validate dentries for encrypted directories to make sure we aren't 478 - * potentially caching stale data after a key has been added or 479 - * removed. 
480 - */ 481 - static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) 482 - { 483 - struct dentry *dir; 484 - struct ext4_crypt_info *ci; 485 - int dir_has_key, cached_with_key; 486 - 487 - if (flags & LOOKUP_RCU) 488 - return -ECHILD; 489 - 490 - dir = dget_parent(dentry); 491 - if (!ext4_encrypted_inode(d_inode(dir))) { 492 - dput(dir); 493 - return 0; 494 - } 495 - ci = EXT4_I(d_inode(dir))->i_crypt_info; 496 - if (ci && ci->ci_keyring_key && 497 - (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | 498 - (1 << KEY_FLAG_REVOKED) | 499 - (1 << KEY_FLAG_DEAD)))) 500 - ci = NULL; 501 - 502 - /* this should eventually be an flag in d_flags */ 503 - cached_with_key = dentry->d_fsdata != NULL; 504 - dir_has_key = (ci != NULL); 505 - dput(dir); 506 - 507 - /* 508 - * If the dentry was cached without the key, and it is a 509 - * negative dentry, it might be a valid name. We can't check 510 - * if the key has since been made available due to locking 511 - * reasons, so we fail the validation so ext4_lookup() can do 512 - * this check. 513 - * 514 - * We also fail the validation if the dentry was created with 515 - * the key present, but we no longer have the key, or vice versa. 516 - */ 517 - if ((!cached_with_key && d_is_negative(dentry)) || 518 - (!cached_with_key && dir_has_key) || 519 - (cached_with_key && !dir_has_key)) { 520 - #if 0 /* Revalidation debug */ 521 - char buf[80]; 522 - char *cp = simple_dname(dentry, buf, sizeof(buf)); 523 - 524 - if (IS_ERR(cp)) 525 - cp = (char *) "???"; 526 - pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata, 527 - cached_with_key, d_is_negative(dentry), 528 - dir_has_key); 529 - #endif 530 - return 0; 531 - } 532 - return 1; 533 - } 534 - 535 - const struct dentry_operations ext4_encrypted_d_ops = { 536 - .d_revalidate = ext4_d_revalidate, 537 - };
-468
fs/ext4/crypto_fname.c
··· 1 - /* 2 - * linux/fs/ext4/crypto_fname.c 3 - * 4 - * Copyright (C) 2015, Google, Inc. 5 - * 6 - * This contains functions for filename crypto management in ext4 7 - * 8 - * Written by Uday Savagaonkar, 2014. 9 - * 10 - * This has not yet undergone a rigorous security audit. 11 - * 12 - */ 13 - 14 - #include <crypto/skcipher.h> 15 - #include <keys/encrypted-type.h> 16 - #include <keys/user-type.h> 17 - #include <linux/gfp.h> 18 - #include <linux/kernel.h> 19 - #include <linux/key.h> 20 - #include <linux/list.h> 21 - #include <linux/mempool.h> 22 - #include <linux/random.h> 23 - #include <linux/scatterlist.h> 24 - #include <linux/spinlock_types.h> 25 - 26 - #include "ext4.h" 27 - #include "ext4_crypto.h" 28 - #include "xattr.h" 29 - 30 - /** 31 - * ext4_dir_crypt_complete() - 32 - */ 33 - static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res) 34 - { 35 - struct ext4_completion_result *ecr = req->data; 36 - 37 - if (res == -EINPROGRESS) 38 - return; 39 - ecr->res = res; 40 - complete(&ecr->completion); 41 - } 42 - 43 - bool ext4_valid_filenames_enc_mode(uint32_t mode) 44 - { 45 - return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS); 46 - } 47 - 48 - static unsigned max_name_len(struct inode *inode) 49 - { 50 - return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize : 51 - EXT4_NAME_LEN; 52 - } 53 - 54 - /** 55 - * ext4_fname_encrypt() - 56 - * 57 - * This function encrypts the input filename, and returns the length of the 58 - * ciphertext. Errors are returned as negative numbers. We trust the caller to 59 - * allocate sufficient memory to oname string. 
60 - */ 61 - static int ext4_fname_encrypt(struct inode *inode, 62 - const struct qstr *iname, 63 - struct ext4_str *oname) 64 - { 65 - u32 ciphertext_len; 66 - struct skcipher_request *req = NULL; 67 - DECLARE_EXT4_COMPLETION_RESULT(ecr); 68 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 69 - struct crypto_skcipher *tfm = ci->ci_ctfm; 70 - int res = 0; 71 - char iv[EXT4_CRYPTO_BLOCK_SIZE]; 72 - struct scatterlist src_sg, dst_sg; 73 - int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK); 74 - char *workbuf, buf[32], *alloc_buf = NULL; 75 - unsigned lim = max_name_len(inode); 76 - 77 - if (iname->len <= 0 || iname->len > lim) 78 - return -EIO; 79 - 80 - ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? 81 - EXT4_CRYPTO_BLOCK_SIZE : iname->len; 82 - ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding); 83 - ciphertext_len = (ciphertext_len > lim) 84 - ? lim : ciphertext_len; 85 - 86 - if (ciphertext_len <= sizeof(buf)) { 87 - workbuf = buf; 88 - } else { 89 - alloc_buf = kmalloc(ciphertext_len, GFP_NOFS); 90 - if (!alloc_buf) 91 - return -ENOMEM; 92 - workbuf = alloc_buf; 93 - } 94 - 95 - /* Allocate request */ 96 - req = skcipher_request_alloc(tfm, GFP_NOFS); 97 - if (!req) { 98 - printk_ratelimited( 99 - KERN_ERR "%s: crypto_request_alloc() failed\n", __func__); 100 - kfree(alloc_buf); 101 - return -ENOMEM; 102 - } 103 - skcipher_request_set_callback(req, 104 - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 105 - ext4_dir_crypt_complete, &ecr); 106 - 107 - /* Copy the input */ 108 - memcpy(workbuf, iname->name, iname->len); 109 - if (iname->len < ciphertext_len) 110 - memset(workbuf + iname->len, 0, ciphertext_len - iname->len); 111 - 112 - /* Initialize IV */ 113 - memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE); 114 - 115 - /* Create encryption request */ 116 - sg_init_one(&src_sg, workbuf, ciphertext_len); 117 - sg_init_one(&dst_sg, oname->name, ciphertext_len); 118 - skcipher_request_set_crypt(req, &src_sg, &dst_sg, 
ciphertext_len, iv); 119 - res = crypto_skcipher_encrypt(req); 120 - if (res == -EINPROGRESS || res == -EBUSY) { 121 - wait_for_completion(&ecr.completion); 122 - res = ecr.res; 123 - } 124 - kfree(alloc_buf); 125 - skcipher_request_free(req); 126 - if (res < 0) { 127 - printk_ratelimited( 128 - KERN_ERR "%s: Error (error code %d)\n", __func__, res); 129 - } 130 - oname->len = ciphertext_len; 131 - return res; 132 - } 133 - 134 - /* 135 - * ext4_fname_decrypt() 136 - * This function decrypts the input filename, and returns 137 - * the length of the plaintext. 138 - * Errors are returned as negative numbers. 139 - * We trust the caller to allocate sufficient memory to oname string. 140 - */ 141 - static int ext4_fname_decrypt(struct inode *inode, 142 - const struct ext4_str *iname, 143 - struct ext4_str *oname) 144 - { 145 - struct ext4_str tmp_in[2], tmp_out[1]; 146 - struct skcipher_request *req = NULL; 147 - DECLARE_EXT4_COMPLETION_RESULT(ecr); 148 - struct scatterlist src_sg, dst_sg; 149 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 150 - struct crypto_skcipher *tfm = ci->ci_ctfm; 151 - int res = 0; 152 - char iv[EXT4_CRYPTO_BLOCK_SIZE]; 153 - unsigned lim = max_name_len(inode); 154 - 155 - if (iname->len <= 0 || iname->len > lim) 156 - return -EIO; 157 - 158 - tmp_in[0].name = iname->name; 159 - tmp_in[0].len = iname->len; 160 - tmp_out[0].name = oname->name; 161 - 162 - /* Allocate request */ 163 - req = skcipher_request_alloc(tfm, GFP_NOFS); 164 - if (!req) { 165 - printk_ratelimited( 166 - KERN_ERR "%s: crypto_request_alloc() failed\n", __func__); 167 - return -ENOMEM; 168 - } 169 - skcipher_request_set_callback(req, 170 - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 171 - ext4_dir_crypt_complete, &ecr); 172 - 173 - /* Initialize IV */ 174 - memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE); 175 - 176 - /* Create encryption request */ 177 - sg_init_one(&src_sg, iname->name, iname->len); 178 - sg_init_one(&dst_sg, oname->name, oname->len); 179 - 
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); 180 - res = crypto_skcipher_decrypt(req); 181 - if (res == -EINPROGRESS || res == -EBUSY) { 182 - wait_for_completion(&ecr.completion); 183 - res = ecr.res; 184 - } 185 - skcipher_request_free(req); 186 - if (res < 0) { 187 - printk_ratelimited( 188 - KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n", 189 - __func__, res); 190 - return res; 191 - } 192 - 193 - oname->len = strnlen(oname->name, iname->len); 194 - return oname->len; 195 - } 196 - 197 - static const char *lookup_table = 198 - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"; 199 - 200 - /** 201 - * ext4_fname_encode_digest() - 202 - * 203 - * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. 204 - * The encoded string is roughly 4/3 times the size of the input string. 205 - */ 206 - static int digest_encode(const char *src, int len, char *dst) 207 - { 208 - int i = 0, bits = 0, ac = 0; 209 - char *cp = dst; 210 - 211 - while (i < len) { 212 - ac += (((unsigned char) src[i]) << bits); 213 - bits += 8; 214 - do { 215 - *cp++ = lookup_table[ac & 0x3f]; 216 - ac >>= 6; 217 - bits -= 6; 218 - } while (bits >= 6); 219 - i++; 220 - } 221 - if (bits) 222 - *cp++ = lookup_table[ac & 0x3f]; 223 - return cp - dst; 224 - } 225 - 226 - static int digest_decode(const char *src, int len, char *dst) 227 - { 228 - int i = 0, bits = 0, ac = 0; 229 - const char *p; 230 - char *cp = dst; 231 - 232 - while (i < len) { 233 - p = strchr(lookup_table, src[i]); 234 - if (p == NULL || src[i] == 0) 235 - return -2; 236 - ac += (p - lookup_table) << bits; 237 - bits += 6; 238 - if (bits >= 8) { 239 - *cp++ = ac & 0xff; 240 - ac >>= 8; 241 - bits -= 8; 242 - } 243 - i++; 244 - } 245 - if (ac) 246 - return -1; 247 - return cp - dst; 248 - } 249 - 250 - /** 251 - * ext4_fname_crypto_round_up() - 252 - * 253 - * Return: The next multiple of block size 254 - */ 255 - u32 ext4_fname_crypto_round_up(u32 size, u32 blksize) 
256 - { 257 - return ((size+blksize-1)/blksize)*blksize; 258 - } 259 - 260 - unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen) 261 - { 262 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 263 - int padding = 32; 264 - 265 - if (ci) 266 - padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK); 267 - if (ilen < EXT4_CRYPTO_BLOCK_SIZE) 268 - ilen = EXT4_CRYPTO_BLOCK_SIZE; 269 - return ext4_fname_crypto_round_up(ilen, padding); 270 - } 271 - 272 - /* 273 - * ext4_fname_crypto_alloc_buffer() - 274 - * 275 - * Allocates an output buffer that is sufficient for the crypto operation 276 - * specified by the context and the direction. 277 - */ 278 - int ext4_fname_crypto_alloc_buffer(struct inode *inode, 279 - u32 ilen, struct ext4_str *crypto_str) 280 - { 281 - unsigned int olen = ext4_fname_encrypted_size(inode, ilen); 282 - 283 - crypto_str->len = olen; 284 - if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) 285 - olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; 286 - /* Allocated buffer can hold one more character to null-terminate the 287 - * string */ 288 - crypto_str->name = kmalloc(olen+1, GFP_NOFS); 289 - if (!(crypto_str->name)) 290 - return -ENOMEM; 291 - return 0; 292 - } 293 - 294 - /** 295 - * ext4_fname_crypto_free_buffer() - 296 - * 297 - * Frees the buffer allocated for crypto operation. 298 - */ 299 - void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str) 300 - { 301 - if (!crypto_str) 302 - return; 303 - kfree(crypto_str->name); 304 - crypto_str->name = NULL; 305 - } 306 - 307 - /** 308 - * ext4_fname_disk_to_usr() - converts a filename from disk space to user space 309 - */ 310 - int _ext4_fname_disk_to_usr(struct inode *inode, 311 - struct dx_hash_info *hinfo, 312 - const struct ext4_str *iname, 313 - struct ext4_str *oname) 314 - { 315 - char buf[24]; 316 - int ret; 317 - 318 - if (iname->len < 3) { 319 - /*Check for . and .. */ 320 - if (iname->name[0] == '.' 
&& iname->name[iname->len-1] == '.') { 321 - oname->name[0] = '.'; 322 - oname->name[iname->len-1] = '.'; 323 - oname->len = iname->len; 324 - return oname->len; 325 - } 326 - } 327 - if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) { 328 - EXT4_ERROR_INODE(inode, "encrypted inode too small"); 329 - return -EUCLEAN; 330 - } 331 - if (EXT4_I(inode)->i_crypt_info) 332 - return ext4_fname_decrypt(inode, iname, oname); 333 - 334 - if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { 335 - ret = digest_encode(iname->name, iname->len, oname->name); 336 - oname->len = ret; 337 - return ret; 338 - } 339 - if (hinfo) { 340 - memcpy(buf, &hinfo->hash, 4); 341 - memcpy(buf+4, &hinfo->minor_hash, 4); 342 - } else 343 - memset(buf, 0, 8); 344 - memcpy(buf + 8, iname->name + iname->len - 16, 16); 345 - oname->name[0] = '_'; 346 - ret = digest_encode(buf, 24, oname->name+1); 347 - oname->len = ret + 1; 348 - return ret + 1; 349 - } 350 - 351 - int ext4_fname_disk_to_usr(struct inode *inode, 352 - struct dx_hash_info *hinfo, 353 - const struct ext4_dir_entry_2 *de, 354 - struct ext4_str *oname) 355 - { 356 - struct ext4_str iname = {.name = (unsigned char *) de->name, 357 - .len = de->name_len }; 358 - 359 - return _ext4_fname_disk_to_usr(inode, hinfo, &iname, oname); 360 - } 361 - 362 - 363 - /** 364 - * ext4_fname_usr_to_disk() - converts a filename from user space to disk space 365 - */ 366 - int ext4_fname_usr_to_disk(struct inode *inode, 367 - const struct qstr *iname, 368 - struct ext4_str *oname) 369 - { 370 - int res; 371 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 372 - 373 - if (iname->len < 3) { 374 - /*Check for . and .. */ 375 - if (iname->name[0] == '.' 
&& 376 - iname->name[iname->len-1] == '.') { 377 - oname->name[0] = '.'; 378 - oname->name[iname->len-1] = '.'; 379 - oname->len = iname->len; 380 - return oname->len; 381 - } 382 - } 383 - if (ci) { 384 - res = ext4_fname_encrypt(inode, iname, oname); 385 - return res; 386 - } 387 - /* Without a proper key, a user is not allowed to modify the filenames 388 - * in a directory. Consequently, a user space name cannot be mapped to 389 - * a disk-space name */ 390 - return -EACCES; 391 - } 392 - 393 - int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname, 394 - int lookup, struct ext4_filename *fname) 395 - { 396 - struct ext4_crypt_info *ci; 397 - int ret = 0, bigname = 0; 398 - 399 - memset(fname, 0, sizeof(struct ext4_filename)); 400 - fname->usr_fname = iname; 401 - 402 - if (!ext4_encrypted_inode(dir) || 403 - ((iname->name[0] == '.') && 404 - ((iname->len == 1) || 405 - ((iname->name[1] == '.') && (iname->len == 2))))) { 406 - fname->disk_name.name = (unsigned char *) iname->name; 407 - fname->disk_name.len = iname->len; 408 - return 0; 409 - } 410 - ret = ext4_get_encryption_info(dir); 411 - if (ret) 412 - return ret; 413 - ci = EXT4_I(dir)->i_crypt_info; 414 - if (ci) { 415 - ret = ext4_fname_crypto_alloc_buffer(dir, iname->len, 416 - &fname->crypto_buf); 417 - if (ret < 0) 418 - return ret; 419 - ret = ext4_fname_encrypt(dir, iname, &fname->crypto_buf); 420 - if (ret < 0) 421 - goto errout; 422 - fname->disk_name.name = fname->crypto_buf.name; 423 - fname->disk_name.len = fname->crypto_buf.len; 424 - return 0; 425 - } 426 - if (!lookup) 427 - return -EACCES; 428 - 429 - /* We don't have the key and we are doing a lookup; decode the 430 - * user-supplied name 431 - */ 432 - if (iname->name[0] == '_') 433 - bigname = 1; 434 - if ((bigname && (iname->len != 33)) || 435 - (!bigname && (iname->len > 43))) 436 - return -ENOENT; 437 - 438 - fname->crypto_buf.name = kmalloc(32, GFP_KERNEL); 439 - if (fname->crypto_buf.name == NULL) 440 - return 
-ENOMEM; 441 - ret = digest_decode(iname->name + bigname, iname->len - bigname, 442 - fname->crypto_buf.name); 443 - if (ret < 0) { 444 - ret = -ENOENT; 445 - goto errout; 446 - } 447 - fname->crypto_buf.len = ret; 448 - if (bigname) { 449 - memcpy(&fname->hinfo.hash, fname->crypto_buf.name, 4); 450 - memcpy(&fname->hinfo.minor_hash, fname->crypto_buf.name + 4, 4); 451 - } else { 452 - fname->disk_name.name = fname->crypto_buf.name; 453 - fname->disk_name.len = fname->crypto_buf.len; 454 - } 455 - return 0; 456 - errout: 457 - kfree(fname->crypto_buf.name); 458 - fname->crypto_buf.name = NULL; 459 - return ret; 460 - } 461 - 462 - void ext4_fname_free_filename(struct ext4_filename *fname) 463 - { 464 - kfree(fname->crypto_buf.name); 465 - fname->crypto_buf.name = NULL; 466 - fname->usr_fname = NULL; 467 - fname->disk_name.name = NULL; 468 - }
-274
fs/ext4/crypto_key.c
··· 1 - /* 2 - * linux/fs/ext4/crypto_key.c 3 - * 4 - * Copyright (C) 2015, Google, Inc. 5 - * 6 - * This contains encryption key functions for ext4 7 - * 8 - * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015. 9 - */ 10 - 11 - #include <crypto/skcipher.h> 12 - #include <keys/encrypted-type.h> 13 - #include <keys/user-type.h> 14 - #include <linux/random.h> 15 - #include <linux/scatterlist.h> 16 - #include <uapi/linux/keyctl.h> 17 - 18 - #include "ext4.h" 19 - #include "xattr.h" 20 - 21 - static void derive_crypt_complete(struct crypto_async_request *req, int rc) 22 - { 23 - struct ext4_completion_result *ecr = req->data; 24 - 25 - if (rc == -EINPROGRESS) 26 - return; 27 - 28 - ecr->res = rc; 29 - complete(&ecr->completion); 30 - } 31 - 32 - /** 33 - * ext4_derive_key_aes() - Derive a key using AES-128-ECB 34 - * @deriving_key: Encryption key used for derivation. 35 - * @source_key: Source key to which to apply derivation. 36 - * @derived_key: Derived key. 37 - * 38 - * Return: Zero on success; non-zero otherwise. 
39 - */ 40 - static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE], 41 - char source_key[EXT4_AES_256_XTS_KEY_SIZE], 42 - char derived_key[EXT4_AES_256_XTS_KEY_SIZE]) 43 - { 44 - int res = 0; 45 - struct skcipher_request *req = NULL; 46 - DECLARE_EXT4_COMPLETION_RESULT(ecr); 47 - struct scatterlist src_sg, dst_sg; 48 - struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); 49 - 50 - if (IS_ERR(tfm)) { 51 - res = PTR_ERR(tfm); 52 - tfm = NULL; 53 - goto out; 54 - } 55 - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); 56 - req = skcipher_request_alloc(tfm, GFP_NOFS); 57 - if (!req) { 58 - res = -ENOMEM; 59 - goto out; 60 - } 61 - skcipher_request_set_callback(req, 62 - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 63 - derive_crypt_complete, &ecr); 64 - res = crypto_skcipher_setkey(tfm, deriving_key, 65 - EXT4_AES_128_ECB_KEY_SIZE); 66 - if (res < 0) 67 - goto out; 68 - sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE); 69 - sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE); 70 - skcipher_request_set_crypt(req, &src_sg, &dst_sg, 71 - EXT4_AES_256_XTS_KEY_SIZE, NULL); 72 - res = crypto_skcipher_encrypt(req); 73 - if (res == -EINPROGRESS || res == -EBUSY) { 74 - wait_for_completion(&ecr.completion); 75 - res = ecr.res; 76 - } 77 - 78 - out: 79 - skcipher_request_free(req); 80 - crypto_free_skcipher(tfm); 81 - return res; 82 - } 83 - 84 - void ext4_free_crypt_info(struct ext4_crypt_info *ci) 85 - { 86 - if (!ci) 87 - return; 88 - 89 - if (ci->ci_keyring_key) 90 - key_put(ci->ci_keyring_key); 91 - crypto_free_skcipher(ci->ci_ctfm); 92 - kmem_cache_free(ext4_crypt_info_cachep, ci); 93 - } 94 - 95 - void ext4_free_encryption_info(struct inode *inode, 96 - struct ext4_crypt_info *ci) 97 - { 98 - struct ext4_inode_info *ei = EXT4_I(inode); 99 - struct ext4_crypt_info *prev; 100 - 101 - if (ci == NULL) 102 - ci = ACCESS_ONCE(ei->i_crypt_info); 103 - if (ci == NULL) 104 - return; 105 - prev = 
cmpxchg(&ei->i_crypt_info, ci, NULL); 106 - if (prev != ci) 107 - return; 108 - 109 - ext4_free_crypt_info(ci); 110 - } 111 - 112 - int _ext4_get_encryption_info(struct inode *inode) 113 - { 114 - struct ext4_inode_info *ei = EXT4_I(inode); 115 - struct ext4_crypt_info *crypt_info; 116 - char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + 117 - (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1]; 118 - struct key *keyring_key = NULL; 119 - struct ext4_encryption_key *master_key; 120 - struct ext4_encryption_context ctx; 121 - const struct user_key_payload *ukp; 122 - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 123 - struct crypto_skcipher *ctfm; 124 - const char *cipher_str; 125 - char raw_key[EXT4_MAX_KEY_SIZE]; 126 - char mode; 127 - int res; 128 - 129 - if (!ext4_read_workqueue) { 130 - res = ext4_init_crypto(); 131 - if (res) 132 - return res; 133 - } 134 - 135 - retry: 136 - crypt_info = ACCESS_ONCE(ei->i_crypt_info); 137 - if (crypt_info) { 138 - if (!crypt_info->ci_keyring_key || 139 - key_validate(crypt_info->ci_keyring_key) == 0) 140 - return 0; 141 - ext4_free_encryption_info(inode, crypt_info); 142 - goto retry; 143 - } 144 - 145 - res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, 146 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, 147 - &ctx, sizeof(ctx)); 148 - if (res < 0) { 149 - if (!DUMMY_ENCRYPTION_ENABLED(sbi)) 150 - return res; 151 - ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; 152 - ctx.filenames_encryption_mode = 153 - EXT4_ENCRYPTION_MODE_AES_256_CTS; 154 - ctx.flags = 0; 155 - } else if (res != sizeof(ctx)) 156 - return -EINVAL; 157 - res = 0; 158 - 159 - crypt_info = kmem_cache_alloc(ext4_crypt_info_cachep, GFP_KERNEL); 160 - if (!crypt_info) 161 - return -ENOMEM; 162 - 163 - crypt_info->ci_flags = ctx.flags; 164 - crypt_info->ci_data_mode = ctx.contents_encryption_mode; 165 - crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; 166 - crypt_info->ci_ctfm = NULL; 167 - crypt_info->ci_keyring_key = NULL; 168 - 
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, 169 - sizeof(crypt_info->ci_master_key)); 170 - if (S_ISREG(inode->i_mode)) 171 - mode = crypt_info->ci_data_mode; 172 - else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 173 - mode = crypt_info->ci_filename_mode; 174 - else 175 - BUG(); 176 - switch (mode) { 177 - case EXT4_ENCRYPTION_MODE_AES_256_XTS: 178 - cipher_str = "xts(aes)"; 179 - break; 180 - case EXT4_ENCRYPTION_MODE_AES_256_CTS: 181 - cipher_str = "cts(cbc(aes))"; 182 - break; 183 - default: 184 - printk_once(KERN_WARNING 185 - "ext4: unsupported key mode %d (ino %u)\n", 186 - mode, (unsigned) inode->i_ino); 187 - res = -ENOKEY; 188 - goto out; 189 - } 190 - if (DUMMY_ENCRYPTION_ENABLED(sbi)) { 191 - memset(raw_key, 0x42, EXT4_AES_256_XTS_KEY_SIZE); 192 - goto got_key; 193 - } 194 - memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX, 195 - EXT4_KEY_DESC_PREFIX_SIZE); 196 - sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE, 197 - "%*phN", EXT4_KEY_DESCRIPTOR_SIZE, 198 - ctx.master_key_descriptor); 199 - full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE + 200 - (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0'; 201 - keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); 202 - if (IS_ERR(keyring_key)) { 203 - res = PTR_ERR(keyring_key); 204 - keyring_key = NULL; 205 - goto out; 206 - } 207 - crypt_info->ci_keyring_key = keyring_key; 208 - if (keyring_key->type != &key_type_logon) { 209 - printk_once(KERN_WARNING 210 - "ext4: key type must be logon\n"); 211 - res = -ENOKEY; 212 - goto out; 213 - } 214 - down_read(&keyring_key->sem); 215 - ukp = user_key_payload(keyring_key); 216 - if (ukp->datalen != sizeof(struct ext4_encryption_key)) { 217 - res = -EINVAL; 218 - up_read(&keyring_key->sem); 219 - goto out; 220 - } 221 - master_key = (struct ext4_encryption_key *)ukp->data; 222 - BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE != 223 - EXT4_KEY_DERIVATION_NONCE_SIZE); 224 - if (master_key->size != EXT4_AES_256_XTS_KEY_SIZE) { 225 - 
printk_once(KERN_WARNING 226 - "ext4: key size incorrect: %d\n", 227 - master_key->size); 228 - res = -ENOKEY; 229 - up_read(&keyring_key->sem); 230 - goto out; 231 - } 232 - res = ext4_derive_key_aes(ctx.nonce, master_key->raw, 233 - raw_key); 234 - up_read(&keyring_key->sem); 235 - if (res) 236 - goto out; 237 - got_key: 238 - ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); 239 - if (!ctfm || IS_ERR(ctfm)) { 240 - res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; 241 - printk(KERN_DEBUG 242 - "%s: error %d (inode %u) allocating crypto tfm\n", 243 - __func__, res, (unsigned) inode->i_ino); 244 - goto out; 245 - } 246 - crypt_info->ci_ctfm = ctfm; 247 - crypto_skcipher_clear_flags(ctfm, ~0); 248 - crypto_tfm_set_flags(crypto_skcipher_tfm(ctfm), 249 - CRYPTO_TFM_REQ_WEAK_KEY); 250 - res = crypto_skcipher_setkey(ctfm, raw_key, 251 - ext4_encryption_key_size(mode)); 252 - if (res) 253 - goto out; 254 - memzero_explicit(raw_key, sizeof(raw_key)); 255 - if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) != NULL) { 256 - ext4_free_crypt_info(crypt_info); 257 - goto retry; 258 - } 259 - return 0; 260 - 261 - out: 262 - if (res == -ENOKEY) 263 - res = 0; 264 - ext4_free_crypt_info(crypt_info); 265 - memzero_explicit(raw_key, sizeof(raw_key)); 266 - return res; 267 - } 268 - 269 - int ext4_has_encryption_key(struct inode *inode) 270 - { 271 - struct ext4_inode_info *ei = EXT4_I(inode); 272 - 273 - return (ei->i_crypt_info != NULL); 274 - }
-229
fs/ext4/crypto_policy.c
··· 1 - /* 2 - * linux/fs/ext4/crypto_policy.c 3 - * 4 - * Copyright (C) 2015, Google, Inc. 5 - * 6 - * This contains encryption policy functions for ext4 7 - * 8 - * Written by Michael Halcrow, 2015. 9 - */ 10 - 11 - #include <linux/random.h> 12 - #include <linux/string.h> 13 - #include <linux/types.h> 14 - 15 - #include "ext4_jbd2.h" 16 - #include "ext4.h" 17 - #include "xattr.h" 18 - 19 - static int ext4_inode_has_encryption_context(struct inode *inode) 20 - { 21 - int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, 22 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0); 23 - return (res > 0); 24 - } 25 - 26 - /* 27 - * check whether the policy is consistent with the encryption context 28 - * for the inode 29 - */ 30 - static int ext4_is_encryption_context_consistent_with_policy( 31 - struct inode *inode, const struct ext4_encryption_policy *policy) 32 - { 33 - struct ext4_encryption_context ctx; 34 - int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, 35 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 36 - sizeof(ctx)); 37 - if (res != sizeof(ctx)) 38 - return 0; 39 - return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, 40 - EXT4_KEY_DESCRIPTOR_SIZE) == 0 && 41 - (ctx.flags == 42 - policy->flags) && 43 - (ctx.contents_encryption_mode == 44 - policy->contents_encryption_mode) && 45 - (ctx.filenames_encryption_mode == 46 - policy->filenames_encryption_mode)); 47 - } 48 - 49 - static int ext4_create_encryption_context_from_policy( 50 - struct inode *inode, const struct ext4_encryption_policy *policy) 51 - { 52 - struct ext4_encryption_context ctx; 53 - handle_t *handle; 54 - int res, res2; 55 - 56 - res = ext4_convert_inline_data(inode); 57 - if (res) 58 - return res; 59 - 60 - ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; 61 - memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, 62 - EXT4_KEY_DESCRIPTOR_SIZE); 63 - if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) { 64 - printk(KERN_WARNING 65 - "%s: 
Invalid contents encryption mode %d\n", __func__, 66 - policy->contents_encryption_mode); 67 - return -EINVAL; 68 - } 69 - if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { 70 - printk(KERN_WARNING 71 - "%s: Invalid filenames encryption mode %d\n", __func__, 72 - policy->filenames_encryption_mode); 73 - return -EINVAL; 74 - } 75 - if (policy->flags & ~EXT4_POLICY_FLAGS_VALID) 76 - return -EINVAL; 77 - ctx.contents_encryption_mode = policy->contents_encryption_mode; 78 - ctx.filenames_encryption_mode = policy->filenames_encryption_mode; 79 - ctx.flags = policy->flags; 80 - BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); 81 - get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); 82 - 83 - handle = ext4_journal_start(inode, EXT4_HT_MISC, 84 - ext4_jbd2_credits_xattr(inode)); 85 - if (IS_ERR(handle)) 86 - return PTR_ERR(handle); 87 - res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 88 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 89 - sizeof(ctx), 0); 90 - if (!res) { 91 - ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 92 - res = ext4_mark_inode_dirty(handle, inode); 93 - if (res) 94 - EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); 95 - } 96 - res2 = ext4_journal_stop(handle); 97 - if (!res) 98 - res = res2; 99 - return res; 100 - } 101 - 102 - int ext4_process_policy(const struct ext4_encryption_policy *policy, 103 - struct inode *inode) 104 - { 105 - if (policy->version != 0) 106 - return -EINVAL; 107 - 108 - if (!ext4_inode_has_encryption_context(inode)) { 109 - if (!S_ISDIR(inode->i_mode)) 110 - return -EINVAL; 111 - if (!ext4_empty_dir(inode)) 112 - return -ENOTEMPTY; 113 - return ext4_create_encryption_context_from_policy(inode, 114 - policy); 115 - } 116 - 117 - if (ext4_is_encryption_context_consistent_with_policy(inode, policy)) 118 - return 0; 119 - 120 - printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n", 121 - __func__); 122 - return -EINVAL; 123 - } 124 - 125 - int 
ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) 126 - { 127 - struct ext4_encryption_context ctx; 128 - 129 - int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, 130 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, 131 - &ctx, sizeof(ctx)); 132 - if (res != sizeof(ctx)) 133 - return -ENOENT; 134 - if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1) 135 - return -EINVAL; 136 - policy->version = 0; 137 - policy->contents_encryption_mode = ctx.contents_encryption_mode; 138 - policy->filenames_encryption_mode = ctx.filenames_encryption_mode; 139 - policy->flags = ctx.flags; 140 - memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, 141 - EXT4_KEY_DESCRIPTOR_SIZE); 142 - return 0; 143 - } 144 - 145 - int ext4_is_child_context_consistent_with_parent(struct inode *parent, 146 - struct inode *child) 147 - { 148 - struct ext4_crypt_info *parent_ci, *child_ci; 149 - int res; 150 - 151 - if ((parent == NULL) || (child == NULL)) { 152 - pr_err("parent %p child %p\n", parent, child); 153 - WARN_ON(1); /* Should never happen */ 154 - return 0; 155 - } 156 - /* no restrictions if the parent directory is not encrypted */ 157 - if (!ext4_encrypted_inode(parent)) 158 - return 1; 159 - /* if the child directory is not encrypted, this is always a problem */ 160 - if (!ext4_encrypted_inode(child)) 161 - return 0; 162 - res = ext4_get_encryption_info(parent); 163 - if (res) 164 - return 0; 165 - res = ext4_get_encryption_info(child); 166 - if (res) 167 - return 0; 168 - parent_ci = EXT4_I(parent)->i_crypt_info; 169 - child_ci = EXT4_I(child)->i_crypt_info; 170 - if (!parent_ci && !child_ci) 171 - return 1; 172 - if (!parent_ci || !child_ci) 173 - return 0; 174 - 175 - return (memcmp(parent_ci->ci_master_key, 176 - child_ci->ci_master_key, 177 - EXT4_KEY_DESCRIPTOR_SIZE) == 0 && 178 - (parent_ci->ci_data_mode == child_ci->ci_data_mode) && 179 - (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && 180 - (parent_ci->ci_flags == 
child_ci->ci_flags)); 181 - } 182 - 183 - /** 184 - * ext4_inherit_context() - Sets a child context from its parent 185 - * @parent: Parent inode from which the context is inherited. 186 - * @child: Child inode that inherits the context from @parent. 187 - * 188 - * Return: Zero on success, non-zero otherwise 189 - */ 190 - int ext4_inherit_context(struct inode *parent, struct inode *child) 191 - { 192 - struct ext4_encryption_context ctx; 193 - struct ext4_crypt_info *ci; 194 - int res; 195 - 196 - res = ext4_get_encryption_info(parent); 197 - if (res < 0) 198 - return res; 199 - ci = EXT4_I(parent)->i_crypt_info; 200 - if (ci == NULL) 201 - return -ENOKEY; 202 - 203 - ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; 204 - if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) { 205 - ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; 206 - ctx.filenames_encryption_mode = 207 - EXT4_ENCRYPTION_MODE_AES_256_CTS; 208 - ctx.flags = 0; 209 - memset(ctx.master_key_descriptor, 0x42, 210 - EXT4_KEY_DESCRIPTOR_SIZE); 211 - res = 0; 212 - } else { 213 - ctx.contents_encryption_mode = ci->ci_data_mode; 214 - ctx.filenames_encryption_mode = ci->ci_filename_mode; 215 - ctx.flags = ci->ci_flags; 216 - memcpy(ctx.master_key_descriptor, ci->ci_master_key, 217 - EXT4_KEY_DESCRIPTOR_SIZE); 218 - } 219 - get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); 220 - res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION, 221 - EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 222 - sizeof(ctx), 0); 223 - if (!res) { 224 - ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT); 225 - ext4_clear_inode_state(child, EXT4_STATE_MAY_INLINE_DATA); 226 - res = ext4_get_encryption_info(child); 227 - } 228 - return res; 229 - }
+14 -12
fs/ext4/dir.c
··· 109 109 struct super_block *sb = inode->i_sb; 110 110 struct buffer_head *bh = NULL; 111 111 int dir_has_error = 0; 112 - struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; 112 + struct fscrypt_str fstr = FSTR_INIT(NULL, 0); 113 113 114 114 if (ext4_encrypted_inode(inode)) { 115 - err = ext4_get_encryption_info(inode); 115 + err = fscrypt_get_encryption_info(inode); 116 116 if (err && err != -ENOKEY) 117 117 return err; 118 118 } ··· 139 139 } 140 140 141 141 if (ext4_encrypted_inode(inode)) { 142 - err = ext4_fname_crypto_alloc_buffer(inode, EXT4_NAME_LEN, 143 - &fname_crypto_str); 142 + err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr); 144 143 if (err < 0) 145 144 return err; 146 145 } ··· 252 253 get_dtype(sb, de->file_type))) 253 254 goto done; 254 255 } else { 255 - int save_len = fname_crypto_str.len; 256 + int save_len = fstr.len; 257 + struct fscrypt_str de_name = 258 + FSTR_INIT(de->name, 259 + de->name_len); 256 260 257 261 /* Directory is encrypted */ 258 - err = ext4_fname_disk_to_usr(inode, 259 - NULL, de, &fname_crypto_str); 260 - fname_crypto_str.len = save_len; 262 + err = fscrypt_fname_disk_to_usr(inode, 263 + 0, 0, &de_name, &fstr); 264 + fstr.len = save_len; 261 265 if (err < 0) 262 266 goto errout; 263 267 if (!dir_emit(ctx, 264 - fname_crypto_str.name, err, 268 + fstr.name, err, 265 269 le32_to_cpu(de->inode), 266 270 get_dtype(sb, de->file_type))) 267 271 goto done; ··· 283 281 err = 0; 284 282 errout: 285 283 #ifdef CONFIG_EXT4_FS_ENCRYPTION 286 - ext4_fname_crypto_free_buffer(&fname_crypto_str); 284 + fscrypt_fname_free_buffer(&fstr); 287 285 #endif 288 286 brelse(bh); 289 287 return err; ··· 434 432 int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, 435 433 __u32 minor_hash, 436 434 struct ext4_dir_entry_2 *dirent, 437 - struct ext4_str *ent_name) 435 + struct fscrypt_str *ent_name) 438 436 { 439 437 struct rb_node **p, *parent = NULL; 440 438 struct fname *fname, *new_fn; ··· 611 609 static int 
ext4_dir_open(struct inode * inode, struct file * filp) 612 610 { 613 611 if (ext4_encrypted_inode(inode)) 614 - return ext4_get_encryption_info(inode) ? -EACCES : 0; 612 + return fscrypt_get_encryption_info(inode) ? -EACCES : 0; 615 613 return 0; 616 614 } 617 615
+81 -140
fs/ext4/ext4.h
··· 32 32 #include <linux/percpu_counter.h> 33 33 #include <linux/ratelimit.h> 34 34 #include <crypto/hash.h> 35 + #include <linux/fscrypto.h> 35 36 #include <linux/falloc.h> 36 37 #include <linux/percpu-rwsem.h> 37 38 #ifdef __KERNEL__ ··· 609 608 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 610 609 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 611 610 612 - /* Encryption algorithms */ 613 - #define EXT4_ENCRYPTION_MODE_INVALID 0 614 - #define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 615 - #define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 616 - #define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 617 - #define EXT4_ENCRYPTION_MODE_AES_256_CTS 4 618 - 619 - #include "ext4_crypto.h" 620 - 621 611 /* 622 612 * ioctl commands 623 613 */ ··· 630 638 #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) 631 639 #define EXT4_IOC_SWAP_BOOT _IO('f', 17) 632 640 #define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) 633 - #define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy) 634 - #define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) 635 - #define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy) 641 + #define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY 642 + #define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT 643 + #define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY 636 644 637 645 #ifndef FS_IOC_FSGETXATTR 638 646 /* Until the uapi changes get merged for project quota... 
*/ ··· 1074 1082 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ 1075 1083 __u32 i_csum_seed; 1076 1084 1077 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 1078 - /* Encryption params */ 1079 - struct ext4_crypt_info *i_crypt_info; 1080 - #endif 1081 1085 kprojid_t i_projid; 1082 1086 }; 1083 1087 ··· 1332 1344 /* Number of quota types we support */ 1333 1345 #define EXT4_MAXQUOTAS 3 1334 1346 1347 + #ifdef CONFIG_EXT4_FS_ENCRYPTION 1348 + #define EXT4_KEY_DESC_PREFIX "ext4:" 1349 + #define EXT4_KEY_DESC_PREFIX_SIZE 5 1350 + #endif 1351 + 1335 1352 /* 1336 1353 * fourth extended-fs super-block data in memory 1337 1354 */ ··· 1423 1430 unsigned short *s_mb_offsets; 1424 1431 unsigned int *s_mb_maxs; 1425 1432 unsigned int s_group_info_size; 1433 + unsigned int s_mb_free_pending; 1426 1434 1427 1435 /* tunables */ 1428 1436 unsigned long s_stripe; ··· 1506 1512 1507 1513 /* Barrier between changing inodes' journal flags and writepages ops. */ 1508 1514 struct percpu_rw_semaphore s_journal_flag_rwsem; 1515 + 1516 + /* Encryption support */ 1517 + #ifdef CONFIG_EXT4_FS_ENCRYPTION 1518 + u8 key_prefix[EXT4_KEY_DESC_PREFIX_SIZE]; 1519 + u8 key_prefix_size; 1520 + #endif 1509 1521 }; 1510 1522 1511 1523 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) ··· 1610 1610 /* 1611 1611 * Returns true if the inode is inode is encrypted 1612 1612 */ 1613 - static inline int ext4_encrypted_inode(struct inode *inode) 1614 - { 1615 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 1616 - return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT); 1617 - #else 1618 - return 0; 1619 - #endif 1620 - } 1621 - 1622 1613 #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime 1623 1614 1624 1615 /* ··· 2073 2082 2074 2083 struct ext4_filename { 2075 2084 const struct qstr *usr_fname; 2076 - struct ext4_str disk_name; 2085 + struct fscrypt_str disk_name; 2077 2086 struct dx_hash_info hinfo; 2078 2087 #ifdef CONFIG_EXT4_FS_ENCRYPTION 2079 - struct ext4_str crypto_buf; 2088 + struct 
fscrypt_str crypto_buf; 2080 2089 #endif 2081 2090 }; 2082 2091 ··· 2287 2296 struct ext4_group_desc *gdp); 2288 2297 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); 2289 2298 2290 - /* crypto_policy.c */ 2291 - int ext4_is_child_context_consistent_with_parent(struct inode *parent, 2292 - struct inode *child); 2293 - int ext4_inherit_context(struct inode *parent, struct inode *child); 2294 - void ext4_to_hex(char *dst, char *src, size_t src_size); 2295 - int ext4_process_policy(const struct ext4_encryption_policy *policy, 2296 - struct inode *inode); 2297 - int ext4_get_policy(struct inode *inode, 2298 - struct ext4_encryption_policy *policy); 2299 - 2300 - /* crypto.c */ 2301 - extern struct kmem_cache *ext4_crypt_info_cachep; 2302 - bool ext4_valid_contents_enc_mode(uint32_t mode); 2303 - uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); 2304 - extern struct workqueue_struct *ext4_read_workqueue; 2305 - struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, 2306 - gfp_t gfp_flags); 2307 - void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); 2308 - void ext4_restore_control_page(struct page *data_page); 2309 - struct page *ext4_encrypt(struct inode *inode, 2310 - struct page *plaintext_page, 2311 - gfp_t gfp_flags); 2312 - int ext4_decrypt(struct page *page); 2313 - int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2314 - ext4_fsblk_t pblk, ext4_lblk_t len); 2315 - extern const struct dentry_operations ext4_encrypted_d_ops; 2316 - 2317 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 2318 - int ext4_init_crypto(void); 2319 - void ext4_exit_crypto(void); 2320 2299 static inline int ext4_sb_has_crypto(struct super_block *sb) 2321 2300 { 2322 2301 return ext4_has_feature_encrypt(sb); 2323 2302 } 2324 - #else 2325 - static inline int ext4_init_crypto(void) { return 0; } 2326 - static inline void ext4_exit_crypto(void) { } 2327 - static inline int ext4_sb_has_crypto(struct super_block *sb) 2328 - { 2329 - return 0; 2330 
- } 2331 - #endif 2332 2303 2333 - /* crypto_fname.c */ 2334 - bool ext4_valid_filenames_enc_mode(uint32_t mode); 2335 - u32 ext4_fname_crypto_round_up(u32 size, u32 blksize); 2336 - unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen); 2337 - int ext4_fname_crypto_alloc_buffer(struct inode *inode, 2338 - u32 ilen, struct ext4_str *crypto_str); 2339 - int _ext4_fname_disk_to_usr(struct inode *inode, 2340 - struct dx_hash_info *hinfo, 2341 - const struct ext4_str *iname, 2342 - struct ext4_str *oname); 2343 - int ext4_fname_disk_to_usr(struct inode *inode, 2344 - struct dx_hash_info *hinfo, 2345 - const struct ext4_dir_entry_2 *de, 2346 - struct ext4_str *oname); 2347 - int ext4_fname_usr_to_disk(struct inode *inode, 2348 - const struct qstr *iname, 2349 - struct ext4_str *oname); 2350 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 2351 - void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str); 2352 - int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname, 2353 - int lookup, struct ext4_filename *fname); 2354 - void ext4_fname_free_filename(struct ext4_filename *fname); 2355 - #else 2356 - static inline 2357 - int ext4_setup_fname_crypto(struct inode *inode) 2304 + static inline bool ext4_encrypted_inode(struct inode *inode) 2358 2305 { 2359 - return 0; 2306 + return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT); 2360 2307 } 2361 - static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { } 2308 + 2309 + #ifdef CONFIG_EXT4_FS_ENCRYPTION 2362 2310 static inline int ext4_fname_setup_filename(struct inode *dir, 2363 - const struct qstr *iname, 2364 - int lookup, struct ext4_filename *fname) 2311 + const struct qstr *iname, 2312 + int lookup, struct ext4_filename *fname) 2313 + { 2314 + struct fscrypt_name name; 2315 + int err; 2316 + 2317 + memset(fname, 0, sizeof(struct ext4_filename)); 2318 + 2319 + err = fscrypt_setup_filename(dir, iname, lookup, &name); 2320 + 2321 + fname->usr_fname = name.usr_fname; 2322 + 
fname->disk_name = name.disk_name; 2323 + fname->hinfo.hash = name.hash; 2324 + fname->hinfo.minor_hash = name.minor_hash; 2325 + fname->crypto_buf = name.crypto_buf; 2326 + return err; 2327 + } 2328 + 2329 + static inline void ext4_fname_free_filename(struct ext4_filename *fname) 2330 + { 2331 + struct fscrypt_name name; 2332 + 2333 + name.crypto_buf = fname->crypto_buf; 2334 + fscrypt_free_filename(&name); 2335 + 2336 + fname->crypto_buf.name = NULL; 2337 + fname->usr_fname = NULL; 2338 + fname->disk_name.name = NULL; 2339 + } 2340 + #else 2341 + static inline int ext4_fname_setup_filename(struct inode *dir, 2342 + const struct qstr *iname, 2343 + int lookup, struct ext4_filename *fname) 2365 2344 { 2366 2345 fname->usr_fname = iname; 2367 2346 fname->disk_name.name = (unsigned char *) iname->name; ··· 2339 2378 return 0; 2340 2379 } 2341 2380 static inline void ext4_fname_free_filename(struct ext4_filename *fname) { } 2381 + 2382 + #define fscrypt_set_d_op(i) 2383 + #define fscrypt_get_ctx fscrypt_notsupp_get_ctx 2384 + #define fscrypt_release_ctx fscrypt_notsupp_release_ctx 2385 + #define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page 2386 + #define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page 2387 + #define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages 2388 + #define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page 2389 + #define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page 2390 + #define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range 2391 + #define fscrypt_process_policy fscrypt_notsupp_process_policy 2392 + #define fscrypt_get_policy fscrypt_notsupp_get_policy 2393 + #define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context 2394 + #define fscrypt_inherit_context fscrypt_notsupp_inherit_context 2395 + #define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info 2396 + #define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info 2397 + #define 
fscrypt_setup_filename fscrypt_notsupp_setup_filename 2398 + #define fscrypt_free_filename fscrypt_notsupp_free_filename 2399 + #define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size 2400 + #define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer 2401 + #define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer 2402 + #define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr 2403 + #define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk 2342 2404 #endif 2343 - 2344 - 2345 - /* crypto_key.c */ 2346 - void ext4_free_crypt_info(struct ext4_crypt_info *ci); 2347 - void ext4_free_encryption_info(struct inode *inode, struct ext4_crypt_info *ci); 2348 - int _ext4_get_encryption_info(struct inode *inode); 2349 - 2350 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 2351 - int ext4_has_encryption_key(struct inode *inode); 2352 - 2353 - static inline int ext4_get_encryption_info(struct inode *inode) 2354 - { 2355 - struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info; 2356 - 2357 - if (!ci || 2358 - (ci->ci_keyring_key && 2359 - (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | 2360 - (1 << KEY_FLAG_REVOKED) | 2361 - (1 << KEY_FLAG_DEAD))))) 2362 - return _ext4_get_encryption_info(inode); 2363 - return 0; 2364 - } 2365 - 2366 - static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode) 2367 - { 2368 - return EXT4_I(inode)->i_crypt_info; 2369 - } 2370 - 2371 - #else 2372 - static inline int ext4_has_encryption_key(struct inode *inode) 2373 - { 2374 - return 0; 2375 - } 2376 - static inline int ext4_get_encryption_info(struct inode *inode) 2377 - { 2378 - return 0; 2379 - } 2380 - static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode) 2381 - { 2382 - return NULL; 2383 - } 2384 - #endif 2385 - 2386 2405 2387 2406 /* dir.c */ 2388 2407 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, ··· 2376 2435 extern int ext4_htree_store_dirent(struct file 
*dir_file, __u32 hash, 2377 2436 __u32 minor_hash, 2378 2437 struct ext4_dir_entry_2 *dirent, 2379 - struct ext4_str *ent_name); 2438 + struct fscrypt_str *ent_name); 2380 2439 extern void ext4_htree_free_dir_info(struct dir_private_info *p); 2381 2440 extern int ext4_find_dest_de(struct inode *dir, struct inode *inode, 2382 2441 struct buffer_head *bh, ··· 2564 2623 void *entry_buf, 2565 2624 int buf_size, 2566 2625 int csum_size); 2567 - extern int ext4_empty_dir(struct inode *inode); 2626 + extern bool ext4_empty_dir(struct inode *inode); 2568 2627 2569 2628 /* resize.c */ 2570 2629 extern int ext4_group_add(struct super_block *sb, ··· 3046 3105 struct ext4_dir_entry_2 *de_del, 3047 3106 struct buffer_head *bh, 3048 3107 int *has_inline_data); 3049 - extern int empty_inline_dir(struct inode *dir, int *has_inline_data); 3108 + extern bool empty_inline_dir(struct inode *dir, int *has_inline_data); 3050 3109 extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode, 3051 3110 struct ext4_dir_entry_2 **parent_de, 3052 3111 int *retval);
-159
fs/ext4/ext4_crypto.h
··· 1 - /* 2 - * linux/fs/ext4/ext4_crypto.h 3 - * 4 - * Copyright (C) 2015, Google, Inc. 5 - * 6 - * This contains encryption header content for ext4 7 - * 8 - * Written by Michael Halcrow, 2015. 9 - */ 10 - 11 - #ifndef _EXT4_CRYPTO_H 12 - #define _EXT4_CRYPTO_H 13 - 14 - #include <linux/fs.h> 15 - 16 - #define EXT4_KEY_DESCRIPTOR_SIZE 8 17 - 18 - /* Policy provided via an ioctl on the topmost directory */ 19 - struct ext4_encryption_policy { 20 - char version; 21 - char contents_encryption_mode; 22 - char filenames_encryption_mode; 23 - char flags; 24 - char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 25 - } __attribute__((__packed__)); 26 - 27 - #define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 28 - #define EXT4_KEY_DERIVATION_NONCE_SIZE 16 29 - 30 - #define EXT4_POLICY_FLAGS_PAD_4 0x00 31 - #define EXT4_POLICY_FLAGS_PAD_8 0x01 32 - #define EXT4_POLICY_FLAGS_PAD_16 0x02 33 - #define EXT4_POLICY_FLAGS_PAD_32 0x03 34 - #define EXT4_POLICY_FLAGS_PAD_MASK 0x03 35 - #define EXT4_POLICY_FLAGS_VALID 0x03 36 - 37 - /** 38 - * Encryption context for inode 39 - * 40 - * Protector format: 41 - * 1 byte: Protector format (1 = this version) 42 - * 1 byte: File contents encryption mode 43 - * 1 byte: File names encryption mode 44 - * 1 byte: Reserved 45 - * 8 bytes: Master Key descriptor 46 - * 16 bytes: Encryption Key derivation nonce 47 - */ 48 - struct ext4_encryption_context { 49 - char format; 50 - char contents_encryption_mode; 51 - char filenames_encryption_mode; 52 - char flags; 53 - char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 54 - char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; 55 - } __attribute__((__packed__)); 56 - 57 - /* Encryption parameters */ 58 - #define EXT4_XTS_TWEAK_SIZE 16 59 - #define EXT4_AES_128_ECB_KEY_SIZE 16 60 - #define EXT4_AES_256_GCM_KEY_SIZE 32 61 - #define EXT4_AES_256_CBC_KEY_SIZE 32 62 - #define EXT4_AES_256_CTS_KEY_SIZE 32 63 - #define EXT4_AES_256_XTS_KEY_SIZE 64 64 - #define EXT4_MAX_KEY_SIZE 64 65 - 66 - #define 
EXT4_KEY_DESC_PREFIX "ext4:" 67 - #define EXT4_KEY_DESC_PREFIX_SIZE 5 68 - 69 - /* This is passed in from userspace into the kernel keyring */ 70 - struct ext4_encryption_key { 71 - __u32 mode; 72 - char raw[EXT4_MAX_KEY_SIZE]; 73 - __u32 size; 74 - } __attribute__((__packed__)); 75 - 76 - struct ext4_crypt_info { 77 - char ci_data_mode; 78 - char ci_filename_mode; 79 - char ci_flags; 80 - struct crypto_skcipher *ci_ctfm; 81 - struct key *ci_keyring_key; 82 - char ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE]; 83 - }; 84 - 85 - #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 86 - #define EXT4_WRITE_PATH_FL 0x00000002 87 - 88 - struct ext4_crypto_ctx { 89 - union { 90 - struct { 91 - struct page *bounce_page; /* Ciphertext page */ 92 - struct page *control_page; /* Original page */ 93 - } w; 94 - struct { 95 - struct bio *bio; 96 - struct work_struct work; 97 - } r; 98 - struct list_head free_list; /* Free list */ 99 - }; 100 - char flags; /* Flags */ 101 - char mode; /* Encryption mode for tfm */ 102 - }; 103 - 104 - struct ext4_completion_result { 105 - struct completion completion; 106 - int res; 107 - }; 108 - 109 - #define DECLARE_EXT4_COMPLETION_RESULT(ecr) \ 110 - struct ext4_completion_result ecr = { \ 111 - COMPLETION_INITIALIZER((ecr).completion), 0 } 112 - 113 - static inline int ext4_encryption_key_size(int mode) 114 - { 115 - switch (mode) { 116 - case EXT4_ENCRYPTION_MODE_AES_256_XTS: 117 - return EXT4_AES_256_XTS_KEY_SIZE; 118 - case EXT4_ENCRYPTION_MODE_AES_256_GCM: 119 - return EXT4_AES_256_GCM_KEY_SIZE; 120 - case EXT4_ENCRYPTION_MODE_AES_256_CBC: 121 - return EXT4_AES_256_CBC_KEY_SIZE; 122 - case EXT4_ENCRYPTION_MODE_AES_256_CTS: 123 - return EXT4_AES_256_CTS_KEY_SIZE; 124 - default: 125 - BUG(); 126 - } 127 - return 0; 128 - } 129 - 130 - #define EXT4_FNAME_NUM_SCATTER_ENTRIES 4 131 - #define EXT4_CRYPTO_BLOCK_SIZE 16 132 - #define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32 133 - 134 - struct ext4_str { 135 - unsigned char *name; 136 - u32 len; 137 - }; 138 
- 139 - /** 140 - * For encrypted symlinks, the ciphertext length is stored at the beginning 141 - * of the string in little-endian format. 142 - */ 143 - struct ext4_encrypted_symlink_data { 144 - __le16 len; 145 - char encrypted_path[1]; 146 - } __attribute__((__packed__)); 147 - 148 - /** 149 - * This function is used to calculate the disk space required to 150 - * store a filename of length l in encrypted symlink format. 151 - */ 152 - static inline u32 encrypted_symlink_data_len(u32 l) 153 - { 154 - if (l < EXT4_CRYPTO_BLOCK_SIZE) 155 - l = EXT4_CRYPTO_BLOCK_SIZE; 156 - return (l + sizeof(struct ext4_encrypted_symlink_data) - 1); 157 - } 158 - 159 - #endif /* _EXT4_CRYPTO_H */
+9 -1
fs/ext4/ext4_jbd2.h
··· 175 175 * There is no guaranteed calling order of multiple registered callbacks on 176 176 * the same transaction. 177 177 */ 178 + static inline void _ext4_journal_callback_add(handle_t *handle, 179 + struct ext4_journal_cb_entry *jce) 180 + { 181 + /* Add the jce to transaction's private list */ 182 + list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list); 183 + } 184 + 178 185 static inline void ext4_journal_callback_add(handle_t *handle, 179 186 void (*func)(struct super_block *sb, 180 187 struct ext4_journal_cb_entry *jce, ··· 194 187 /* Add the jce to transaction's private list */ 195 188 jce->jce_func = func; 196 189 spin_lock(&sbi->s_md_lock); 197 - list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list); 190 + _ext4_journal_callback_add(handle, jce); 198 191 spin_unlock(&sbi->s_md_lock); 199 192 } 193 + 200 194 201 195 /** 202 196 * ext4_journal_callback_del: delete a registered callback
+10 -2
fs/ext4/extents.c
··· 381 381 ext4_fsblk_t block = ext4_ext_pblock(ext); 382 382 int len = ext4_ext_get_actual_len(ext); 383 383 ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); 384 - ext4_lblk_t last = lblock + len - 1; 385 384 386 - if (len == 0 || lblock > last) 385 + /* 386 + * We allow neither: 387 + * - zero length 388 + * - overflow/wrap-around 389 + */ 390 + if (lblock + len <= lblock) 387 391 return 0; 388 392 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 389 393 } ··· 476 472 } 477 473 if (!ext4_valid_extent_entries(inode, eh, depth)) { 478 474 error_msg = "invalid extent entries"; 475 + goto corrupted; 476 + } 477 + if (unlikely(depth > 32)) { 478 + error_msg = "too large eh_depth"; 479 479 goto corrupted; 480 480 } 481 481 /* Verify checksum on non-root extent tree nodes */
+5 -5
fs/ext4/file.c
··· 303 303 struct inode *inode = file->f_mapping->host; 304 304 305 305 if (ext4_encrypted_inode(inode)) { 306 - int err = ext4_get_encryption_info(inode); 306 + int err = fscrypt_get_encryption_info(inode); 307 307 if (err) 308 308 return 0; 309 - if (ext4_encryption_info(inode) == NULL) 309 + if (!fscrypt_has_encryption_key(inode)) 310 310 return -ENOKEY; 311 311 } 312 312 file_accessed(file); ··· 362 362 } 363 363 } 364 364 if (ext4_encrypted_inode(inode)) { 365 - ret = ext4_get_encryption_info(inode); 365 + ret = fscrypt_get_encryption_info(inode); 366 366 if (ret) 367 367 return -EACCES; 368 - if (ext4_encryption_info(inode) == NULL) 368 + if (!fscrypt_has_encryption_key(inode)) 369 369 return -ENOKEY; 370 370 } 371 371 372 372 dir = dget_parent(file_dentry(filp)); 373 373 if (ext4_encrypted_inode(d_inode(dir)) && 374 - !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) { 374 + !fscrypt_has_permitted_context(d_inode(dir), inode)) { 375 375 ext4_warning(inode->i_sb, 376 376 "Inconsistent encryption contexts: %lu/%lu", 377 377 (unsigned long) d_inode(dir)->i_ino,
+4 -1
fs/ext4/fsync.c
··· 106 106 } 107 107 108 108 if (!journal) { 109 - ret = generic_file_fsync(file, start, end, datasync); 109 + ret = __generic_file_fsync(file, start, end, datasync); 110 110 if (!ret && !hlist_empty(&inode->i_dentry)) 111 111 ret = ext4_sync_parent(inode); 112 + if (test_opt(inode->i_sb, BARRIER)) 113 + goto issue_flush; 112 114 goto out; 113 115 } 114 116 ··· 142 140 needs_barrier = true; 143 141 ret = jbd2_complete_transaction(journal, commit_tid); 144 142 if (needs_barrier) { 143 + issue_flush: 145 144 err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 146 145 if (!ret) 147 146 ret = err;
+4 -3
fs/ext4/ialloc.c
··· 767 767 if ((ext4_encrypted_inode(dir) || 768 768 DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) && 769 769 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 770 - err = ext4_get_encryption_info(dir); 770 + err = fscrypt_get_encryption_info(dir); 771 771 if (err) 772 772 return ERR_PTR(err); 773 - if (ext4_encryption_info(dir) == NULL) 773 + if (!fscrypt_has_encryption_key(dir)) 774 774 return ERR_PTR(-EPERM); 775 775 if (!handle) 776 776 nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb); ··· 1115 1115 } 1116 1116 1117 1117 if (encrypt) { 1118 - err = ext4_inherit_context(dir, inode); 1118 + /* give pointer to avoid set_context with journal ops. */ 1119 + err = fscrypt_inherit_context(dir, inode, &encrypt, true); 1119 1120 if (err) 1120 1121 goto fail_free_drop; 1121 1122 }
+7 -7
fs/ext4/inline.c
··· 1326 1326 struct ext4_iloc iloc; 1327 1327 void *dir_buf = NULL; 1328 1328 struct ext4_dir_entry_2 fake; 1329 - struct ext4_str tmp_str; 1329 + struct fscrypt_str tmp_str; 1330 1330 1331 1331 ret = ext4_get_inode_loc(inode, &iloc); 1332 1332 if (ret) ··· 1739 1739 return (struct ext4_dir_entry_2 *)(inline_pos + offset); 1740 1740 } 1741 1741 1742 - int empty_inline_dir(struct inode *dir, int *has_inline_data) 1742 + bool empty_inline_dir(struct inode *dir, int *has_inline_data) 1743 1743 { 1744 1744 int err, inline_size; 1745 1745 struct ext4_iloc iloc; 1746 1746 void *inline_pos; 1747 1747 unsigned int offset; 1748 1748 struct ext4_dir_entry_2 *de; 1749 - int ret = 1; 1749 + bool ret = true; 1750 1750 1751 1751 err = ext4_get_inode_loc(dir, &iloc); 1752 1752 if (err) { 1753 1753 EXT4_ERROR_INODE(dir, "error %d getting inode %lu block", 1754 1754 err, dir->i_ino); 1755 - return 1; 1755 + return true; 1756 1756 } 1757 1757 1758 1758 down_read(&EXT4_I(dir)->xattr_sem); ··· 1766 1766 ext4_warning(dir->i_sb, 1767 1767 "bad inline directory (dir #%lu) - no `..'", 1768 1768 dir->i_ino); 1769 - ret = 1; 1769 + ret = true; 1770 1770 goto out; 1771 1771 } 1772 1772 ··· 1784 1784 dir->i_ino, le32_to_cpu(de->inode), 1785 1785 le16_to_cpu(de->rec_len), de->name_len, 1786 1786 inline_size); 1787 - ret = 1; 1787 + ret = true; 1788 1788 goto out; 1789 1789 } 1790 1790 if (le32_to_cpu(de->inode)) { 1791 - ret = 0; 1791 + ret = false; 1792 1792 goto out; 1793 1793 } 1794 1794 offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
+55 -26
fs/ext4/inode.c
··· 51 51 struct ext4_inode_info *ei) 52 52 { 53 53 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 54 - __u16 csum_lo; 55 - __u16 csum_hi = 0; 56 54 __u32 csum; 55 + __u16 dummy_csum = 0; 56 + int offset = offsetof(struct ext4_inode, i_checksum_lo); 57 + unsigned int csum_size = sizeof(dummy_csum); 57 58 58 - csum_lo = le16_to_cpu(raw->i_checksum_lo); 59 - raw->i_checksum_lo = 0; 60 - if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 61 - EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { 62 - csum_hi = le16_to_cpu(raw->i_checksum_hi); 63 - raw->i_checksum_hi = 0; 59 + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset); 60 + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size); 61 + offset += csum_size; 62 + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, 63 + EXT4_GOOD_OLD_INODE_SIZE - offset); 64 + 65 + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 66 + offset = offsetof(struct ext4_inode, i_checksum_hi); 67 + csum = ext4_chksum(sbi, csum, (__u8 *)raw + 68 + EXT4_GOOD_OLD_INODE_SIZE, 69 + offset - EXT4_GOOD_OLD_INODE_SIZE); 70 + if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { 71 + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, 72 + csum_size); 73 + offset += csum_size; 74 + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, 75 + EXT4_INODE_SIZE(inode->i_sb) - 76 + offset); 77 + } 64 78 } 65 - 66 - csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, 67 - EXT4_INODE_SIZE(inode->i_sb)); 68 - 69 - raw->i_checksum_lo = cpu_to_le16(csum_lo); 70 - if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 71 - EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 72 - raw->i_checksum_hi = cpu_to_le16(csum_hi); 73 79 74 80 return csum; 75 81 } ··· 211 205 * Note that directories do not have this problem because they 212 206 * don't use page cache. 
213 207 */ 214 - if (ext4_should_journal_data(inode) && 215 - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && 216 - inode->i_ino != EXT4_JOURNAL_INO) { 208 + if (inode->i_ino != EXT4_JOURNAL_INO && 209 + ext4_should_journal_data(inode) && 210 + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { 217 211 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 218 212 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; 219 213 ··· 392 386 int ret; 393 387 394 388 if (ext4_encrypted_inode(inode)) 395 - return ext4_encrypted_zeroout(inode, lblk, pblk, len); 389 + return fscrypt_zeroout_range(inode, lblk, pblk, len); 396 390 397 391 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS); 398 392 if (ret > 0) ··· 1158 1152 if (unlikely(err)) 1159 1153 page_zero_new_buffers(page, from, to); 1160 1154 else if (decrypt) 1161 - err = ext4_decrypt(page); 1155 + err = fscrypt_decrypt_page(page); 1162 1156 return err; 1163 1157 } 1164 1158 #endif ··· 2754 2748 done = true; 2755 2749 } 2756 2750 } 2757 - ext4_journal_stop(handle); 2751 + /* 2752 + * Caution: If the handle is synchronous, 2753 + * ext4_journal_stop() can wait for transaction commit 2754 + * to finish which may depend on writeback of pages to 2755 + * complete or on page lock to be released. In that 2756 + * case, we have to wait until after after we have 2757 + * submitted all the IO, released page locks we hold, 2758 + * and dropped io_end reference (for extent conversion 2759 + * to be able to complete) before stopping the handle. 
2760 + */ 2761 + if (!ext4_handle_valid(handle) || handle->h_sync == 0) { 2762 + ext4_journal_stop(handle); 2763 + handle = NULL; 2764 + } 2758 2765 /* Submit prepared bio */ 2759 2766 ext4_io_submit(&mpd.io_submit); 2760 2767 /* Unlock pages we didn't use */ 2761 2768 mpage_release_unused_pages(&mpd, give_up_on_write); 2762 - /* Drop our io_end reference we got from init */ 2763 - ext4_put_io_end(mpd.io_submit.io_end); 2769 + /* 2770 + * Drop our io_end reference we got from init. We have 2771 + * to be careful and use deferred io_end finishing if 2772 + * we are still holding the transaction as we can 2773 + * release the last reference to io_end which may end 2774 + * up doing unwritten extent conversion. 2775 + */ 2776 + if (handle) { 2777 + ext4_put_io_end_defer(mpd.io_submit.io_end); 2778 + ext4_journal_stop(handle); 2779 + } else 2780 + ext4_put_io_end(mpd.io_submit.io_end); 2764 2781 2765 2782 if (ret == -ENOSPC && sbi->s_journal) { 2766 2783 /* ··· 3735 3706 if (S_ISREG(inode->i_mode) && 3736 3707 ext4_encrypted_inode(inode)) { 3737 3708 /* We expect the key to be set. */ 3738 - BUG_ON(!ext4_has_encryption_key(inode)); 3709 + BUG_ON(!fscrypt_has_encryption_key(inode)); 3739 3710 BUG_ON(blocksize != PAGE_SIZE); 3740 - WARN_ON_ONCE(ext4_decrypt(page)); 3711 + WARN_ON_ONCE(fscrypt_decrypt_page(page)); 3741 3712 } 3742 3713 } 3743 3714 if (ext4_should_journal_data(inode)) {
+15 -23
fs/ext4/ioctl.c
··· 308 308 kprojid_t kprojid; 309 309 struct ext4_iloc iloc; 310 310 struct ext4_inode *raw_inode; 311 + struct dquot *transfer_to[MAXQUOTAS] = { }; 311 312 312 313 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 313 314 EXT4_FEATURE_RO_COMPAT_PROJECT)) { ··· 362 361 if (err) 363 362 goto out_stop; 364 363 365 - if (sb_has_quota_limits_enabled(sb, PRJQUOTA)) { 366 - struct dquot *transfer_to[MAXQUOTAS] = { }; 367 - 368 - transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); 369 - if (!IS_ERR(transfer_to[PRJQUOTA])) { 370 - err = __dquot_transfer(inode, transfer_to); 371 - dqput(transfer_to[PRJQUOTA]); 372 - if (err) 373 - goto out_dirty; 374 - } 364 + transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); 365 + if (!IS_ERR(transfer_to[PRJQUOTA])) { 366 + err = __dquot_transfer(inode, transfer_to); 367 + dqput(transfer_to[PRJQUOTA]); 368 + if (err) 369 + goto out_dirty; 375 370 } 371 + 376 372 EXT4_I(inode)->i_projid = kprojid; 377 373 inode->i_ctime = ext4_current_time(inode); 378 374 out_dirty: ··· 770 772 return ext4_ext_precache(inode); 771 773 case EXT4_IOC_SET_ENCRYPTION_POLICY: { 772 774 #ifdef CONFIG_EXT4_FS_ENCRYPTION 773 - struct ext4_encryption_policy policy; 774 - int err = 0; 775 + struct fscrypt_policy policy; 775 776 776 777 if (copy_from_user(&policy, 777 - (struct ext4_encryption_policy __user *)arg, 778 - sizeof(policy))) { 779 - err = -EFAULT; 780 - goto encryption_policy_out; 781 - } 782 - 783 - err = ext4_process_policy(&policy, inode); 784 - encryption_policy_out: 785 - return err; 778 + (struct fscrypt_policy __user *)arg, 779 + sizeof(policy))) 780 + return -EFAULT; 781 + return fscrypt_process_policy(inode, &policy); 786 782 #else 787 783 return -EOPNOTSUPP; 788 784 #endif ··· 819 827 } 820 828 case EXT4_IOC_GET_ENCRYPTION_POLICY: { 821 829 #ifdef CONFIG_EXT4_FS_ENCRYPTION 822 - struct ext4_encryption_policy policy; 830 + struct fscrypt_policy policy; 823 831 int err = 0; 824 832 825 833 if (!ext4_encrypted_inode(inode)) 826 834 return 
-ENOENT; 827 - err = ext4_get_policy(inode, &policy); 835 + err = fscrypt_get_policy(inode, &policy); 828 836 if (err) 829 837 return err; 830 838 if (copy_to_user((void __user *)arg, &policy, sizeof(policy)))
+13 -16
fs/ext4/mballoc.c
··· 2627 2627 2628 2628 spin_lock_init(&sbi->s_md_lock); 2629 2629 spin_lock_init(&sbi->s_bal_lock); 2630 + sbi->s_mb_free_pending = 0; 2630 2631 2631 2632 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2632 2633 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; ··· 2815 2814 /* we expect to find existing buddy because it's pinned */ 2816 2815 BUG_ON(err != 0); 2817 2816 2817 + spin_lock(&EXT4_SB(sb)->s_md_lock); 2818 + EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 2819 + spin_unlock(&EXT4_SB(sb)->s_md_lock); 2818 2820 2819 2821 db = e4b.bd_info; 2820 2822 /* there are blocks to put in buddy to make them really free */ ··· 2943 2939 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2944 2940 "fs metadata", block, block+len); 2945 2941 /* File system mounted not to panic on error 2946 - * Fix the bitmap and repeat the block allocation 2942 + * Fix the bitmap and return EFSCORRUPTED 2947 2943 * We leak some of the blocks here. 2948 2944 */ 2949 2945 ext4_lock_group(sb, ac->ac_b_ex.fe_group); ··· 2952 2948 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2953 2949 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2954 2950 if (!err) 2955 - err = -EAGAIN; 2951 + err = -EFSCORRUPTED; 2956 2952 goto out_err; 2957 2953 } 2958 2954 ··· 4517 4513 } 4518 4514 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4519 4515 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 4520 - if (*errp == -EAGAIN) { 4521 - /* 4522 - * drop the reference that we took 4523 - * in ext4_mb_use_best_found 4524 - */ 4525 - ext4_mb_release_context(ac); 4526 - ac->ac_b_ex.fe_group = 0; 4527 - ac->ac_b_ex.fe_start = 0; 4528 - ac->ac_b_ex.fe_len = 0; 4529 - ac->ac_status = AC_STATUS_CONTINUE; 4530 - goto repeat; 4531 - } else if (*errp) { 4516 + if (*errp) { 4532 4517 ext4_discard_allocated_blocks(ac); 4533 4518 goto errout; 4534 4519 } else { ··· 4576 4583 { 4577 4584 ext4_group_t group = e4b->bd_group; 4578 4585 ext4_grpblk_t cluster; 4586 + ext4_grpblk_t clusters = 
new_entry->efd_count; 4579 4587 struct ext4_free_data *entry; 4580 4588 struct ext4_group_info *db = e4b->bd_info; 4581 4589 struct super_block *sb = e4b->bd_sb; ··· 4643 4649 } 4644 4650 } 4645 4651 /* Add the extent to transaction's private list */ 4646 - ext4_journal_callback_add(handle, ext4_free_data_callback, 4647 - &new_entry->efd_jce); 4652 + new_entry->efd_jce.jce_func = ext4_free_data_callback; 4653 + spin_lock(&sbi->s_md_lock); 4654 + _ext4_journal_callback_add(handle, &new_entry->efd_jce); 4655 + sbi->s_mb_free_pending += clusters; 4656 + spin_unlock(&sbi->s_md_lock); 4648 4657 return 0; 4649 4658 } 4650 4659
+69 -75
fs/ext4/namei.c
··· 420 420 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 421 421 struct ext4_inode_info *ei = EXT4_I(inode); 422 422 __u32 csum; 423 - __le32 save_csum; 424 423 int size; 424 + __u32 dummy_csum = 0; 425 + int offset = offsetof(struct dx_tail, dt_checksum); 425 426 426 427 size = count_offset + (count * sizeof(struct dx_entry)); 427 - save_csum = t->dt_checksum; 428 - t->dt_checksum = 0; 429 428 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); 430 - csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); 431 - t->dt_checksum = save_csum; 429 + csum = ext4_chksum(sbi, csum, (__u8 *)t, offset); 430 + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); 432 431 433 432 return cpu_to_le32(csum); 434 433 } ··· 445 446 c = get_dx_countlimit(inode, dirent, &count_offset); 446 447 if (!c) { 447 448 EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); 448 - return 1; 449 + return 0; 449 450 } 450 451 limit = le16_to_cpu(c->limit); 451 452 count = le16_to_cpu(c->count); 452 453 if (count_offset + (limit * sizeof(struct dx_entry)) > 453 454 EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { 454 455 warn_no_space_for_csum(inode); 455 - return 1; 456 + return 0; 456 457 } 457 458 t = (struct dx_tail *)(((struct dx_entry *)c) + limit); 458 459 ··· 611 612 #ifdef CONFIG_EXT4_FS_ENCRYPTION 612 613 int len; 613 614 char *name; 614 - struct ext4_str fname_crypto_str 615 - = {.name = NULL, .len = 0}; 615 + struct fscrypt_str fname_crypto_str = 616 + FSTR_INIT(NULL, 0); 616 617 int res = 0; 617 618 618 619 name = de->name; 619 620 len = de->name_len; 620 - if (ext4_encrypted_inode(inode)) 621 - res = ext4_get_encryption_info(dir); 621 + if (ext4_encrypted_inode(dir)) 622 + res = fscrypt_get_encryption_info(dir); 622 623 if (res) { 623 624 printk(KERN_WARNING "Error setting up" 624 625 " fname crypto: %d\n", res); 625 626 } 626 - if (ctx == NULL) { 627 + if (!fscrypt_has_encryption_key(dir)) { 627 628 /* Directory is not 
encrypted */ 628 629 ext4fs_dirhash(de->name, 629 630 de->name_len, &h); ··· 632 633 (unsigned) ((char *) de 633 634 - base)); 634 635 } else { 636 + struct fscrypt_str de_name = 637 + FSTR_INIT(name, len); 638 + 635 639 /* Directory is encrypted */ 636 - res = ext4_fname_crypto_alloc_buffer( 637 - ctx, de->name_len, 640 + res = fscrypt_fname_alloc_buffer( 641 + dir, len, 638 642 &fname_crypto_str); 639 - if (res < 0) { 643 + if (res < 0) 640 644 printk(KERN_WARNING "Error " 641 645 "allocating crypto " 642 646 "buffer--skipping " 643 647 "crypto\n"); 644 - ctx = NULL; 645 - } 646 - res = ext4_fname_disk_to_usr(ctx, NULL, de, 647 - &fname_crypto_str); 648 + res = fscrypt_fname_disk_to_usr(dir, 649 + 0, 0, &de_name, 650 + &fname_crypto_str); 648 651 if (res < 0) { 649 652 printk(KERN_WARNING "Error " 650 653 "converting filename " ··· 663 662 printk("%*.s:(E)%x.%u ", len, name, 664 663 h.hash, (unsigned) ((char *) de 665 664 - base)); 666 - ext4_fname_crypto_free_buffer( 667 - &fname_crypto_str); 665 + fscrypt_fname_free_buffer( 666 + &fname_crypto_str); 668 667 } 669 668 #else 670 669 int len = de->name_len; ··· 953 952 struct buffer_head *bh; 954 953 struct ext4_dir_entry_2 *de, *top; 955 954 int err = 0, count = 0; 956 - struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str; 955 + struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str; 957 956 958 957 dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", 959 958 (unsigned long)block)); ··· 968 967 #ifdef CONFIG_EXT4_FS_ENCRYPTION 969 968 /* Check if the directory is encrypted */ 970 969 if (ext4_encrypted_inode(dir)) { 971 - err = ext4_get_encryption_info(dir); 970 + err = fscrypt_get_encryption_info(dir); 972 971 if (err < 0) { 973 972 brelse(bh); 974 973 return err; 975 974 } 976 - err = ext4_fname_crypto_alloc_buffer(dir, EXT4_NAME_LEN, 975 + err = fscrypt_fname_alloc_buffer(dir, EXT4_NAME_LEN, 977 976 &fname_crypto_str); 978 977 if (err < 0) { 979 978 brelse(bh); ··· 
1004 1003 &tmp_str); 1005 1004 } else { 1006 1005 int save_len = fname_crypto_str.len; 1006 + struct fscrypt_str de_name = FSTR_INIT(de->name, 1007 + de->name_len); 1007 1008 1008 1009 /* Directory is encrypted */ 1009 - err = ext4_fname_disk_to_usr(dir, hinfo, de, 1010 - &fname_crypto_str); 1010 + err = fscrypt_fname_disk_to_usr(dir, hinfo->hash, 1011 + hinfo->minor_hash, &de_name, 1012 + &fname_crypto_str); 1011 1013 if (err < 0) { 1012 1014 count = err; 1013 1015 goto errout; ··· 1029 1025 errout: 1030 1026 brelse(bh); 1031 1027 #ifdef CONFIG_EXT4_FS_ENCRYPTION 1032 - ext4_fname_crypto_free_buffer(&fname_crypto_str); 1028 + fscrypt_fname_free_buffer(&fname_crypto_str); 1033 1029 #endif 1034 1030 return count; 1035 1031 } ··· 1054 1050 int count = 0; 1055 1051 int ret, err; 1056 1052 __u32 hashval; 1057 - struct ext4_str tmp_str; 1053 + struct fscrypt_str tmp_str; 1058 1054 1059 1055 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", 1060 1056 start_hash, start_minor_hash)); ··· 1568 1564 struct ext4_dir_entry_2 *de; 1569 1565 struct buffer_head *bh; 1570 1566 1571 - if (ext4_encrypted_inode(dir)) { 1572 - int res = ext4_get_encryption_info(dir); 1567 + if (ext4_encrypted_inode(dir)) { 1568 + int res = fscrypt_get_encryption_info(dir); 1573 1569 1574 1570 /* 1575 - * This should be a properly defined flag for 1576 - * dentry->d_flags when we uplift this to the VFS. 1577 - * d_fsdata is set to (void *) 1 if if the dentry is 1571 + * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is 1578 1572 * created while the directory was encrypted and we 1579 - * don't have access to the key. 1573 + * have access to the key. 
1580 1574 */ 1581 - dentry->d_fsdata = NULL; 1582 - if (ext4_encryption_info(dir)) 1583 - dentry->d_fsdata = (void *) 1; 1584 - d_set_d_op(dentry, &ext4_encrypted_d_ops); 1585 - if (res && res != -ENOKEY) 1586 - return ERR_PTR(res); 1587 - } 1575 + if (fscrypt_has_encryption_key(dir)) 1576 + fscrypt_set_encrypted_dentry(dentry); 1577 + fscrypt_set_d_op(dentry); 1578 + if (res && res != -ENOKEY) 1579 + return ERR_PTR(res); 1580 + } 1588 1581 1589 - if (dentry->d_name.len > EXT4_NAME_LEN) 1590 - return ERR_PTR(-ENAMETOOLONG); 1582 + if (dentry->d_name.len > EXT4_NAME_LEN) 1583 + return ERR_PTR(-ENAMETOOLONG); 1591 1584 1592 1585 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); 1593 1586 if (IS_ERR(bh)) ··· 1611 1610 } 1612 1611 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && 1613 1612 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && 1614 - !ext4_is_child_context_consistent_with_parent(dir, 1615 - inode)) { 1613 + !fscrypt_has_permitted_context(dir, inode)) { 1616 1614 int nokey = ext4_encrypted_inode(inode) && 1617 - !ext4_encryption_info(inode); 1618 - 1615 + !fscrypt_has_encryption_key(inode); 1619 1616 iput(inode); 1620 1617 if (nokey) 1621 1618 return ERR_PTR(-ENOKEY); ··· 2690 2691 /* 2691 2692 * routine to check that the specified directory is empty (for rmdir) 2692 2693 */ 2693 - int ext4_empty_dir(struct inode *inode) 2694 + bool ext4_empty_dir(struct inode *inode) 2694 2695 { 2695 2696 unsigned int offset; 2696 2697 struct buffer_head *bh; 2697 2698 struct ext4_dir_entry_2 *de, *de1; 2698 2699 struct super_block *sb; 2699 - int err = 0; 2700 2700 2701 2701 if (ext4_has_inline_data(inode)) { 2702 2702 int has_inline_data = 1; 2703 + int ret; 2703 2704 2704 - err = empty_inline_dir(inode, &has_inline_data); 2705 + ret = empty_inline_dir(inode, &has_inline_data); 2705 2706 if (has_inline_data) 2706 - return err; 2707 + return ret; 2707 2708 } 2708 2709 2709 2710 sb = inode->i_sb; 2710 2711 if (inode->i_size < EXT4_DIR_REC_LEN(1) + 
EXT4_DIR_REC_LEN(2)) { 2711 2712 EXT4_ERROR_INODE(inode, "invalid size"); 2712 - return 1; 2713 + return true; 2713 2714 } 2714 2715 bh = ext4_read_dirblock(inode, 0, EITHER); 2715 2716 if (IS_ERR(bh)) 2716 - return 1; 2717 + return true; 2717 2718 2718 2719 de = (struct ext4_dir_entry_2 *) bh->b_data; 2719 2720 de1 = ext4_next_entry(de, sb->s_blocksize); ··· 2722 2723 strcmp(".", de->name) || strcmp("..", de1->name)) { 2723 2724 ext4_warning_inode(inode, "directory missing '.' and/or '..'"); 2724 2725 brelse(bh); 2725 - return 1; 2726 + return true; 2726 2727 } 2727 2728 offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) + 2728 2729 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize); ··· 2730 2731 while (offset < inode->i_size) { 2731 2732 if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { 2732 2733 unsigned int lblock; 2733 - err = 0; 2734 2734 brelse(bh); 2735 2735 lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); 2736 2736 bh = ext4_read_dirblock(inode, lblock, EITHER); 2737 2737 if (IS_ERR(bh)) 2738 - return 1; 2738 + return true; 2739 2739 de = (struct ext4_dir_entry_2 *) bh->b_data; 2740 2740 } 2741 2741 if (ext4_check_dir_entry(inode, NULL, de, bh, ··· 2746 2748 } 2747 2749 if (le32_to_cpu(de->inode)) { 2748 2750 brelse(bh); 2749 - return 0; 2751 + return false; 2750 2752 } 2751 2753 offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); 2752 2754 de = ext4_next_entry(de, sb->s_blocksize); 2753 2755 } 2754 2756 brelse(bh); 2755 - return 1; 2757 + return true; 2756 2758 } 2757 2759 2758 2760 /* ··· 3075 3077 int err, len = strlen(symname); 3076 3078 int credits; 3077 3079 bool encryption_required; 3078 - struct ext4_str disk_link; 3079 - struct ext4_encrypted_symlink_data *sd = NULL; 3080 + struct fscrypt_str disk_link; 3081 + struct fscrypt_symlink_data *sd = NULL; 3080 3082 3081 3083 disk_link.len = len + 1; 3082 3084 disk_link.name = (char *) symname; ··· 3084 3086 encryption_required = (ext4_encrypted_inode(dir) || 3085 3087 
DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))); 3086 3088 if (encryption_required) { 3087 - err = ext4_get_encryption_info(dir); 3089 + err = fscrypt_get_encryption_info(dir); 3088 3090 if (err) 3089 3091 return err; 3090 - if (ext4_encryption_info(dir) == NULL) 3092 + if (!fscrypt_has_encryption_key(dir)) 3091 3093 return -EPERM; 3092 - disk_link.len = (ext4_fname_encrypted_size(dir, len) + 3093 - sizeof(struct ext4_encrypted_symlink_data)); 3094 + disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + 3095 + sizeof(struct fscrypt_symlink_data)); 3094 3096 sd = kzalloc(disk_link.len, GFP_KERNEL); 3095 3097 if (!sd) 3096 3098 return -ENOMEM; ··· 3138 3140 3139 3141 if (encryption_required) { 3140 3142 struct qstr istr; 3141 - struct ext4_str ostr; 3143 + struct fscrypt_str ostr = 3144 + FSTR_INIT(sd->encrypted_path, disk_link.len); 3142 3145 3143 3146 istr.name = (const unsigned char *) symname; 3144 3147 istr.len = len; 3145 - ostr.name = sd->encrypted_path; 3146 - ostr.len = disk_link.len; 3147 - err = ext4_fname_usr_to_disk(inode, &istr, &ostr); 3148 + err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); 3148 3149 if (err < 0) 3149 3150 goto err_drop_inode; 3150 3151 sd->len = cpu_to_le16(ostr.len); ··· 3232 3235 if (inode->i_nlink >= EXT4_LINK_MAX) 3233 3236 return -EMLINK; 3234 3237 if (ext4_encrypted_inode(dir) && 3235 - !ext4_is_child_context_consistent_with_parent(dir, inode)) 3238 + !fscrypt_has_permitted_context(dir, inode)) 3236 3239 return -EPERM; 3237 3240 3238 3241 if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && ··· 3555 3558 3556 3559 if ((old.dir != new.dir) && 3557 3560 ext4_encrypted_inode(new.dir) && 3558 - !ext4_is_child_context_consistent_with_parent(new.dir, 3559 - old.inode)) { 3561 + !fscrypt_has_permitted_context(new.dir, old.inode)) { 3560 3562 retval = -EPERM; 3561 3563 goto end_rename; 3562 3564 } ··· 3727 3731 if ((ext4_encrypted_inode(old_dir) || 3728 3732 ext4_encrypted_inode(new_dir)) && 3729 3733 (old_dir != new_dir) 
&& 3730 - (!ext4_is_child_context_consistent_with_parent(new_dir, 3731 - old.inode) || 3732 - !ext4_is_child_context_consistent_with_parent(old_dir, 3733 - new.inode))) 3734 + (!fscrypt_has_permitted_context(new_dir, old.inode) || 3735 + !fscrypt_has_permitted_context(old_dir, new.inode))) 3734 3736 return -EPERM; 3735 3737 3736 3738 if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
+6 -7
fs/ext4/page-io.c
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/mm.h> 26 26 #include <linux/backing-dev.h> 27 + #include <linux/fscrypto.h> 27 28 28 29 #include "ext4_jbd2.h" 29 30 #include "xattr.h" ··· 68 67 struct page *page = bvec->bv_page; 69 68 #ifdef CONFIG_EXT4_FS_ENCRYPTION 70 69 struct page *data_page = NULL; 71 - struct ext4_crypto_ctx *ctx = NULL; 72 70 #endif 73 71 struct buffer_head *bh, *head; 74 72 unsigned bio_start = bvec->bv_offset; ··· 82 82 if (!page->mapping) { 83 83 /* The bounce data pages are unmapped. */ 84 84 data_page = page; 85 - ctx = (struct ext4_crypto_ctx *)page_private(data_page); 86 - page = ctx->w.control_page; 85 + fscrypt_pullback_bio_page(&page, false); 87 86 } 88 87 #endif 89 88 ··· 112 113 local_irq_restore(flags); 113 114 if (!under_io) { 114 115 #ifdef CONFIG_EXT4_FS_ENCRYPTION 115 - if (ctx) 116 - ext4_restore_control_page(data_page); 116 + if (data_page) 117 + fscrypt_restore_control_page(data_page); 117 118 #endif 118 119 end_page_writeback(page); 119 120 } ··· 472 473 gfp_t gfp_flags = GFP_NOFS; 473 474 474 475 retry_encrypt: 475 - data_page = ext4_encrypt(inode, page, gfp_flags); 476 + data_page = fscrypt_encrypt_page(inode, page, gfp_flags); 476 477 if (IS_ERR(data_page)) { 477 478 ret = PTR_ERR(data_page); 478 479 if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { ··· 510 511 if (ret) { 511 512 out: 512 513 if (data_page) 513 - ext4_restore_control_page(data_page); 514 + fscrypt_restore_control_page(data_page); 514 515 printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret); 515 516 redirty_page_for_writepage(wbc, page); 516 517 do {
+6 -42
fs/ext4/readpage.c
··· 46 46 47 47 #include "ext4.h" 48 48 49 - /* 50 - * Call ext4_decrypt on every single page, reusing the encryption 51 - * context. 52 - */ 53 - static void completion_pages(struct work_struct *work) 54 - { 55 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 56 - struct ext4_crypto_ctx *ctx = 57 - container_of(work, struct ext4_crypto_ctx, r.work); 58 - struct bio *bio = ctx->r.bio; 59 - struct bio_vec *bv; 60 - int i; 61 - 62 - bio_for_each_segment_all(bv, bio, i) { 63 - struct page *page = bv->bv_page; 64 - 65 - int ret = ext4_decrypt(page); 66 - if (ret) { 67 - WARN_ON_ONCE(1); 68 - SetPageError(page); 69 - } else 70 - SetPageUptodate(page); 71 - unlock_page(page); 72 - } 73 - ext4_release_crypto_ctx(ctx); 74 - bio_put(bio); 75 - #else 76 - BUG(); 77 - #endif 78 - } 79 - 80 49 static inline bool ext4_bio_encrypted(struct bio *bio) 81 50 { 82 51 #ifdef CONFIG_EXT4_FS_ENCRYPTION ··· 73 104 int i; 74 105 75 106 if (ext4_bio_encrypted(bio)) { 76 - struct ext4_crypto_ctx *ctx = bio->bi_private; 77 - 78 107 if (bio->bi_error) { 79 - ext4_release_crypto_ctx(ctx); 108 + fscrypt_release_ctx(bio->bi_private); 80 109 } else { 81 - INIT_WORK(&ctx->r.work, completion_pages); 82 - ctx->r.bio = bio; 83 - queue_work(ext4_read_workqueue, &ctx->r.work); 110 + fscrypt_decrypt_bio_pages(bio->bi_private, bio); 84 111 return; 85 112 } 86 113 } ··· 100 135 unsigned nr_pages) 101 136 { 102 137 struct bio *bio = NULL; 103 - unsigned page_idx; 104 138 sector_t last_block_in_bio = 0; 105 139 106 140 struct inode *inode = mapping->host; ··· 121 157 map.m_len = 0; 122 158 map.m_flags = 0; 123 159 124 - for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { 160 + for (; nr_pages; nr_pages--) { 125 161 int fully_mapped = 1; 126 162 unsigned first_hole = blocks_per_page; 127 163 ··· 239 275 bio = NULL; 240 276 } 241 277 if (bio == NULL) { 242 - struct ext4_crypto_ctx *ctx = NULL; 278 + struct fscrypt_ctx *ctx = NULL; 243 279 244 280 if (ext4_encrypted_inode(inode) && 245 281 S_ISREG(inode->i_mode)) { 246 
- ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); 282 + ctx = fscrypt_get_ctx(inode, GFP_NOFS); 247 283 if (IS_ERR(ctx)) 248 284 goto set_error_page; 249 285 } ··· 251 287 min_t(int, nr_pages, BIO_MAX_PAGES)); 252 288 if (!bio) { 253 289 if (ctx) 254 - ext4_release_crypto_ctx(ctx); 290 + fscrypt_release_ctx(ctx); 255 291 goto set_error_page; 256 292 } 257 293 bio->bi_bdev = bdev;
+133 -29
fs/ext4/super.c
··· 945 945 ei->i_datasync_tid = 0; 946 946 atomic_set(&ei->i_unwritten, 0); 947 947 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); 948 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 949 - ei->i_crypt_info = NULL; 950 - #endif 951 948 return &ei->vfs_inode; 952 949 } 953 950 ··· 1023 1026 EXT4_I(inode)->jinode = NULL; 1024 1027 } 1025 1028 #ifdef CONFIG_EXT4_FS_ENCRYPTION 1026 - if (EXT4_I(inode)->i_crypt_info) 1027 - ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info); 1029 + fscrypt_put_encryption_info(inode, NULL); 1028 1030 #endif 1029 1031 } 1030 1032 ··· 1089 1093 wait & ~__GFP_DIRECT_RECLAIM); 1090 1094 return try_to_free_buffers(page); 1091 1095 } 1096 + 1097 + #ifdef CONFIG_EXT4_FS_ENCRYPTION 1098 + static int ext4_get_context(struct inode *inode, void *ctx, size_t len) 1099 + { 1100 + return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION, 1101 + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); 1102 + } 1103 + 1104 + static int ext4_key_prefix(struct inode *inode, u8 **key) 1105 + { 1106 + *key = EXT4_SB(inode->i_sb)->key_prefix; 1107 + return EXT4_SB(inode->i_sb)->key_prefix_size; 1108 + } 1109 + 1110 + static int ext4_prepare_context(struct inode *inode) 1111 + { 1112 + return ext4_convert_inline_data(inode); 1113 + } 1114 + 1115 + static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, 1116 + void *fs_data) 1117 + { 1118 + handle_t *handle; 1119 + int res, res2; 1120 + 1121 + /* fs_data is null when internally used. 
*/ 1122 + if (fs_data) { 1123 + res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 1124 + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, 1125 + len, 0); 1126 + if (!res) { 1127 + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 1128 + ext4_clear_inode_state(inode, 1129 + EXT4_STATE_MAY_INLINE_DATA); 1130 + } 1131 + return res; 1132 + } 1133 + 1134 + handle = ext4_journal_start(inode, EXT4_HT_MISC, 1135 + ext4_jbd2_credits_xattr(inode)); 1136 + if (IS_ERR(handle)) 1137 + return PTR_ERR(handle); 1138 + 1139 + res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 1140 + EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, 1141 + len, 0); 1142 + if (!res) { 1143 + ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 1144 + res = ext4_mark_inode_dirty(handle, inode); 1145 + if (res) 1146 + EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); 1147 + } 1148 + res2 = ext4_journal_stop(handle); 1149 + if (!res) 1150 + res = res2; 1151 + return res; 1152 + } 1153 + 1154 + static int ext4_dummy_context(struct inode *inode) 1155 + { 1156 + return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb)); 1157 + } 1158 + 1159 + static unsigned ext4_max_namelen(struct inode *inode) 1160 + { 1161 + return S_ISLNK(inode->i_mode) ? 
inode->i_sb->s_blocksize : 1162 + EXT4_NAME_LEN; 1163 + } 1164 + 1165 + static struct fscrypt_operations ext4_cryptops = { 1166 + .get_context = ext4_get_context, 1167 + .key_prefix = ext4_key_prefix, 1168 + .prepare_context = ext4_prepare_context, 1169 + .set_context = ext4_set_context, 1170 + .dummy_context = ext4_dummy_context, 1171 + .is_encrypted = ext4_encrypted_inode, 1172 + .empty_dir = ext4_empty_dir, 1173 + .max_namelen = ext4_max_namelen, 1174 + }; 1175 + #else 1176 + static struct fscrypt_operations ext4_cryptops = { 1177 + .is_encrypted = ext4_encrypted_inode, 1178 + }; 1179 + #endif 1092 1180 1093 1181 #ifdef CONFIG_QUOTA 1094 1182 static char *quotatypes[] = INITQFNAMES; ··· 2148 2068 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group, 2149 2069 struct ext4_group_desc *gdp) 2150 2070 { 2151 - int offset; 2071 + int offset = offsetof(struct ext4_group_desc, bg_checksum); 2152 2072 __u16 crc = 0; 2153 2073 __le32 le_group = cpu_to_le32(block_group); 2154 2074 struct ext4_sb_info *sbi = EXT4_SB(sb); 2155 2075 2156 2076 if (ext4_has_metadata_csum(sbi->s_sb)) { 2157 2077 /* Use new metadata_csum algorithm */ 2158 - __le16 save_csum; 2159 2078 __u32 csum32; 2079 + __u16 dummy_csum = 0; 2160 2080 2161 - save_csum = gdp->bg_checksum; 2162 - gdp->bg_checksum = 0; 2163 2081 csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, 2164 2082 sizeof(le_group)); 2165 - csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, 2166 - sbi->s_desc_size); 2167 - gdp->bg_checksum = save_csum; 2083 + csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset); 2084 + csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum, 2085 + sizeof(dummy_csum)); 2086 + offset += sizeof(dummy_csum); 2087 + if (offset < sbi->s_desc_size) 2088 + csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset, 2089 + sbi->s_desc_size - offset); 2168 2090 2169 2091 crc = csum32 & 0xFFFF; 2170 2092 goto out; ··· 2175 2093 /* old crc16 code */ 2176 2094 if 
(!ext4_has_feature_gdt_csum(sb)) 2177 2095 return 0; 2178 - 2179 - offset = offsetof(struct ext4_group_desc, bg_checksum); 2180 2096 2181 2097 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 2182 2098 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); ··· 2357 2277 2358 2278 while (es->s_last_orphan) { 2359 2279 struct inode *inode; 2280 + 2281 + /* 2282 + * We may have encountered an error during cleanup; if 2283 + * so, skip the rest. 2284 + */ 2285 + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 2286 + jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); 2287 + es->s_last_orphan = 0; 2288 + break; 2289 + } 2360 2290 2361 2291 inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); 2362 2292 if (IS_ERR(inode)) { ··· 3506 3416 goto failed_mount; 3507 3417 } 3508 3418 3419 + if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { 3420 + ext4_msg(sb, KERN_ERR, 3421 + "Number of reserved GDT blocks insanely large: %d", 3422 + le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 3423 + goto failed_mount; 3424 + } 3425 + 3509 3426 if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { 3510 3427 err = bdev_dax_supported(sb, blocksize); 3511 3428 if (err) ··· 3783 3686 sb->s_op = &ext4_sops; 3784 3687 sb->s_export_op = &ext4_export_ops; 3785 3688 sb->s_xattr = ext4_xattr_handlers; 3689 + sb->s_cop = &ext4_cryptops; 3786 3690 #ifdef CONFIG_QUOTA 3787 3691 sb->dq_op = &ext4_quota_operations; 3788 3692 if (ext4_has_feature_quota(sb)) ··· 4094 3996 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 4095 3997 4096 3998 kfree(orig_data); 3999 + #ifdef CONFIG_EXT4_FS_ENCRYPTION 4000 + memcpy(sbi->key_prefix, EXT4_KEY_DESC_PREFIX, 4001 + EXT4_KEY_DESC_PREFIX_SIZE); 4002 + sbi->key_prefix_size = EXT4_KEY_DESC_PREFIX_SIZE; 4003 + #endif 4097 4004 return 0; 4098 4005 4099 4006 cantfind_ext4: ··· 4430 4327 4431 4328 if (!sbh || block_device_ejected(sb)) 4432 4329 return error; 4433 - if (buffer_write_io_error(sbh)) { 4434 - /* 4435 - * 
Oh, dear. A previous attempt to write the 4436 - * superblock failed. This could happen because the 4437 - * USB device was yanked out. Or it could happen to 4438 - * be a transient write error and maybe the block will 4439 - * be remapped. Nothing we can do but to retry the 4440 - * write and hope for the best. 4441 - */ 4442 - ext4_msg(sb, KERN_ERR, "previous I/O error to " 4443 - "superblock detected"); 4444 - clear_buffer_write_io_error(sbh); 4445 - set_buffer_uptodate(sbh); 4446 - } 4447 4330 /* 4448 4331 * If the file system is mounted read-only, don't update the 4449 4332 * superblock write time. This avoids updating the superblock ··· 4460 4371 &EXT4_SB(sb)->s_freeinodes_counter)); 4461 4372 BUFFER_TRACE(sbh, "marking dirty"); 4462 4373 ext4_superblock_csum_set(sb); 4374 + lock_buffer(sbh); 4375 + if (buffer_write_io_error(sbh)) { 4376 + /* 4377 + * Oh, dear. A previous attempt to write the 4378 + * superblock failed. This could happen because the 4379 + * USB device was yanked out. Or it could happen to 4380 + * be a transient write error and maybe the block will 4381 + * be remapped. Nothing we can do but to retry the 4382 + * write and hope for the best. 4383 + */ 4384 + ext4_msg(sb, KERN_ERR, "previous I/O error to " 4385 + "superblock detected"); 4386 + clear_buffer_write_io_error(sbh); 4387 + set_buffer_uptodate(sbh); 4388 + } 4463 4389 mark_buffer_dirty(sbh); 4390 + unlock_buffer(sbh); 4464 4391 if (sync) { 4465 4392 error = __sync_dirty_buffer(sbh, 4466 4393 test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC); ··· 5527 5422 5528 5423 static void __exit ext4_exit_fs(void) 5529 5424 { 5530 - ext4_exit_crypto(); 5531 5425 ext4_destroy_lazyinit_thread(); 5532 5426 unregister_as_ext2(); 5533 5427 unregister_as_ext3();
+15 -20
fs/ext4/symlink.c
··· 22 22 #include "ext4.h" 23 23 #include "xattr.h" 24 24 25 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 26 25 static const char *ext4_encrypted_get_link(struct dentry *dentry, 27 26 struct inode *inode, 28 27 struct delayed_call *done) 29 28 { 30 29 struct page *cpage = NULL; 31 30 char *caddr, *paddr = NULL; 32 - struct ext4_str cstr, pstr; 33 - struct ext4_encrypted_symlink_data *sd; 31 + struct fscrypt_str cstr, pstr; 32 + struct fscrypt_symlink_data *sd; 34 33 loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1); 35 34 int res; 36 - u32 plen, max_size = inode->i_sb->s_blocksize; 35 + u32 max_size = inode->i_sb->s_blocksize; 37 36 38 37 if (!dentry) 39 38 return ERR_PTR(-ECHILD); 40 39 41 - res = ext4_get_encryption_info(inode); 40 + res = fscrypt_get_encryption_info(inode); 42 41 if (res) 43 42 return ERR_PTR(res); 44 43 ··· 53 54 } 54 55 55 56 /* Symlink is encrypted */ 56 - sd = (struct ext4_encrypted_symlink_data *)caddr; 57 + sd = (struct fscrypt_symlink_data *)caddr; 57 58 cstr.name = sd->encrypted_path; 58 59 cstr.len = le16_to_cpu(sd->len); 59 - if ((cstr.len + 60 - sizeof(struct ext4_encrypted_symlink_data) - 1) > 61 - max_size) { 60 + if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) { 62 61 /* Symlink data on the disk is corrupted */ 63 62 res = -EFSCORRUPTED; 64 63 goto errout; 65 64 } 66 - plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ? 
67 - EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len; 68 - paddr = kmalloc(plen + 1, GFP_NOFS); 69 - if (!paddr) { 70 - res = -ENOMEM; 65 + 66 + res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr); 67 + if (res) 71 68 goto errout; 72 - } 73 - pstr.name = paddr; 74 - pstr.len = plen; 75 - res = _ext4_fname_disk_to_usr(inode, NULL, &cstr, &pstr); 69 + 70 + res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); 76 71 if (res < 0) 77 72 goto errout; 73 + 74 + paddr = pstr.name; 75 + 78 76 /* Null-terminate the name */ 79 - if (res <= plen) 77 + if (res <= pstr.len) 80 78 paddr[res] = '\0'; 81 79 if (cpage) 82 80 put_page(cpage); ··· 95 99 .listxattr = ext4_listxattr, 96 100 .removexattr = generic_removexattr, 97 101 }; 98 - #endif 99 102 100 103 const struct inode_operations ext4_symlink_inode_operations = { 101 104 .readlink = generic_readlink,
+7 -6
fs/ext4/xattr.c
··· 121 121 { 122 122 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 123 123 __u32 csum; 124 - __le32 save_csum; 125 124 __le64 dsk_block_nr = cpu_to_le64(block_nr); 125 + __u32 dummy_csum = 0; 126 + int offset = offsetof(struct ext4_xattr_header, h_checksum); 126 127 127 - save_csum = hdr->h_checksum; 128 - hdr->h_checksum = 0; 129 128 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr, 130 129 sizeof(dsk_block_nr)); 131 - csum = ext4_chksum(sbi, csum, (__u8 *)hdr, 132 - EXT4_BLOCK_SIZE(inode->i_sb)); 130 + csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset); 131 + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); 132 + offset += sizeof(dummy_csum); 133 + csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset, 134 + EXT4_BLOCK_SIZE(inode->i_sb) - offset); 133 135 134 - hdr->h_checksum = save_csum; 135 136 return cpu_to_le32(csum); 136 137 } 137 138
+1 -1
fs/jbd2/commit.c
··· 124 124 struct commit_header *tmp; 125 125 struct buffer_head *bh; 126 126 int ret; 127 - struct timespec now = current_kernel_time(); 127 + struct timespec64 now = current_kernel_time64(); 128 128 129 129 *cbh = NULL; 130 130
+5
fs/jbd2/journal.c
··· 691 691 { 692 692 int err = 0; 693 693 694 + jbd2_might_wait_for_commit(journal); 694 695 read_lock(&journal->j_state_lock); 695 696 #ifdef CONFIG_JBD2_DEBUG 696 697 if (!tid_geq(journal->j_commit_request, tid)) { ··· 1092 1091 1093 1092 static journal_t * journal_init_common (void) 1094 1093 { 1094 + static struct lock_class_key jbd2_trans_commit_key; 1095 1095 journal_t *journal; 1096 1096 int err; 1097 1097 ··· 1127 1125 } 1128 1126 1129 1127 spin_lock_init(&journal->j_history_lock); 1128 + 1129 + lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", 1130 + &jbd2_trans_commit_key, 0); 1130 1131 1131 1132 return journal; 1132 1133 }
+8 -9
fs/jbd2/transaction.c
··· 182 182 int needed; 183 183 int total = blocks + rsv_blocks; 184 184 185 + jbd2_might_wait_for_commit(journal); 186 + 185 187 /* 186 188 * If the current transaction is locked down for commit, wait 187 189 * for the lock to be released. ··· 384 382 read_unlock(&journal->j_state_lock); 385 383 current->journal_info = handle; 386 384 387 - lock_map_acquire(&handle->h_lockdep_map); 385 + rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_); 388 386 jbd2_journal_free_transaction(new_transaction); 389 387 return 0; 390 388 } 391 - 392 - static struct lock_class_key jbd2_handle_key; 393 389 394 390 /* Allocate a new handle. This should probably be in a slab... */ 395 391 static handle_t *new_handle(int nblocks) ··· 397 397 return NULL; 398 398 handle->h_buffer_credits = nblocks; 399 399 handle->h_ref = 1; 400 - 401 - lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle", 402 - &jbd2_handle_key, 0); 403 400 404 401 return handle; 405 402 } ··· 669 672 if (need_to_start) 670 673 jbd2_log_start_commit(journal, tid); 671 674 672 - lock_map_release(&handle->h_lockdep_map); 675 + rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 673 676 handle->h_buffer_credits = nblocks; 674 677 ret = start_this_handle(journal, handle, gfp_mask); 675 678 return ret; ··· 696 699 void jbd2_journal_lock_updates(journal_t *journal) 697 700 { 698 701 DEFINE_WAIT(wait); 702 + 703 + jbd2_might_wait_for_commit(journal); 699 704 700 705 write_lock(&journal->j_state_lock); 701 706 ++journal->j_barrier_count; ··· 1749 1750 wake_up(&journal->j_wait_transaction_locked); 1750 1751 } 1751 1752 1753 + rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 1754 + 1752 1755 if (wait_for_commit) 1753 1756 err = jbd2_log_wait_commit(journal, tid); 1754 - 1755 - lock_map_release(&handle->h_lockdep_map); 1756 1757 1757 1758 if (handle->h_rsv_handle) 1758 1759 jbd2_journal_free_reserved(handle->h_rsv_handle);
+19 -4
include/linux/jbd2.h
··· 491 491 492 492 unsigned long h_start_jiffies; 493 493 unsigned int h_requested_credits; 494 - 495 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 496 - struct lockdep_map h_lockdep_map; 497 - #endif 498 494 }; 499 495 500 496 ··· 789 793 * @j_proc_entry: procfs entry for the jbd statistics directory 790 794 * @j_stats: Overall statistics 791 795 * @j_private: An opaque pointer to fs-private information. 796 + * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies 792 797 */ 793 798 794 799 struct journal_s ··· 1032 1035 1033 1036 /* Precomputed journal UUID checksum for seeding other checksums */ 1034 1037 __u32 j_csum_seed; 1038 + 1039 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 1040 + /* 1041 + * Lockdep entity to track transaction commit dependencies. Handles 1042 + * hold this "lock" for read, when we wait for commit, we acquire the 1043 + * "lock" for writing. This matches the properties of jbd2 journalling 1044 + * where the running transaction has to wait for all handles to be 1045 + * dropped to commit that transaction and also acquiring a handle may 1046 + * require transaction commit to finish. 1047 + */ 1048 + struct lockdep_map j_trans_commit_map; 1049 + #endif 1035 1050 }; 1051 + 1052 + #define jbd2_might_wait_for_commit(j) \ 1053 + do { \ 1054 + rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ 1055 + rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ 1056 + } while (0) 1036 1057 1037 1058 /* journal feature predicate functions */ 1038 1059 #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \