Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs/fscrypto fixes from Jaegeuk Kim:
"In addition to f2fs/fscrypto fixes, I've added one patch which
prevents RCU mode lookup in d_revalidate, as Al mentioned.

These patches fix f2fs and fscrypto based on -rc3 bug fixes in ext4
crypto, which have not yet been fully propagated as follows.

- use of dget_parent and file_dentry to avoid crashes
- disallow RCU-mode lookup in d_revalidate
- disallow -ENOMEM in the core data encryption path"

* tag 'for-linus-4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs:
ext4/fscrypto: avoid RCU lookup in d_revalidate
fscrypto: don't let data integrity writebacks fail with ENOMEM
f2fs: use dget_parent and file_dentry in f2fs_file_open
fscrypto: use dget_parent() in fscrypt_d_revalidate()

+62 -30
+33 -20
fs/crypto/crypto.c
··· 26 26 #include <linux/ratelimit.h> 27 27 #include <linux/bio.h> 28 28 #include <linux/dcache.h> 29 + #include <linux/namei.h> 29 30 #include <linux/fscrypto.h> 30 31 #include <linux/ecryptfs.h> 31 32 ··· 82 81 /** 83 82 * fscrypt_get_ctx() - Gets an encryption context 84 83 * @inode: The inode for which we are doing the crypto 84 + * @gfp_flags: The gfp flag for memory allocation 85 85 * 86 86 * Allocates and initializes an encryption context. 87 87 * 88 88 * Return: An allocated and initialized encryption context on success; error 89 89 * value or NULL otherwise. 90 90 */ 91 - struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode) 91 + struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags) 92 92 { 93 93 struct fscrypt_ctx *ctx = NULL; 94 94 struct fscrypt_info *ci = inode->i_crypt_info; ··· 115 113 list_del(&ctx->free_list); 116 114 spin_unlock_irqrestore(&fscrypt_ctx_lock, flags); 117 115 if (!ctx) { 118 - ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); 116 + ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags); 119 117 if (!ctx) 120 118 return ERR_PTR(-ENOMEM); 121 119 ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL; ··· 149 147 150 148 static int do_page_crypto(struct inode *inode, 151 149 fscrypt_direction_t rw, pgoff_t index, 152 - struct page *src_page, struct page *dest_page) 150 + struct page *src_page, struct page *dest_page, 151 + gfp_t gfp_flags) 153 152 { 154 153 u8 xts_tweak[FS_XTS_TWEAK_SIZE]; 155 154 struct skcipher_request *req = NULL; ··· 160 157 struct crypto_skcipher *tfm = ci->ci_ctfm; 161 158 int res = 0; 162 159 163 - req = skcipher_request_alloc(tfm, GFP_NOFS); 160 + req = skcipher_request_alloc(tfm, gfp_flags); 164 161 if (!req) { 165 162 printk_ratelimited(KERN_ERR 166 163 "%s: crypto_request_alloc() failed\n", ··· 202 199 return 0; 203 200 } 204 201 205 - static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx) 202 + static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags) 206 
203 { 207 - ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, 208 - GFP_NOWAIT); 204 + ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); 209 205 if (ctx->w.bounce_page == NULL) 210 206 return ERR_PTR(-ENOMEM); 211 207 ctx->flags |= FS_WRITE_PATH_FL; ··· 215 213 * fscypt_encrypt_page() - Encrypts a page 216 214 * @inode: The inode for which the encryption should take place 217 215 * @plaintext_page: The page to encrypt. Must be locked. 216 + * @gfp_flags: The gfp flag for memory allocation 218 217 * 219 218 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx 220 219 * encryption context. ··· 228 225 * error value or NULL. 229 226 */ 230 227 struct page *fscrypt_encrypt_page(struct inode *inode, 231 - struct page *plaintext_page) 228 + struct page *plaintext_page, gfp_t gfp_flags) 232 229 { 233 230 struct fscrypt_ctx *ctx; 234 231 struct page *ciphertext_page = NULL; ··· 236 233 237 234 BUG_ON(!PageLocked(plaintext_page)); 238 235 239 - ctx = fscrypt_get_ctx(inode); 236 + ctx = fscrypt_get_ctx(inode, gfp_flags); 240 237 if (IS_ERR(ctx)) 241 238 return (struct page *)ctx; 242 239 243 240 /* The encryption operation will require a bounce page. 
*/ 244 - ciphertext_page = alloc_bounce_page(ctx); 241 + ciphertext_page = alloc_bounce_page(ctx, gfp_flags); 245 242 if (IS_ERR(ciphertext_page)) 246 243 goto errout; 247 244 248 245 ctx->w.control_page = plaintext_page; 249 246 err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index, 250 - plaintext_page, ciphertext_page); 247 + plaintext_page, ciphertext_page, 248 + gfp_flags); 251 249 if (err) { 252 250 ciphertext_page = ERR_PTR(err); 253 251 goto errout; ··· 279 275 BUG_ON(!PageLocked(page)); 280 276 281 277 return do_page_crypto(page->mapping->host, 282 - FS_DECRYPT, page->index, page, page); 278 + FS_DECRYPT, page->index, page, page, GFP_NOFS); 283 279 } 284 280 EXPORT_SYMBOL(fscrypt_decrypt_page); 285 281 ··· 293 289 294 290 BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); 295 291 296 - ctx = fscrypt_get_ctx(inode); 292 + ctx = fscrypt_get_ctx(inode, GFP_NOFS); 297 293 if (IS_ERR(ctx)) 298 294 return PTR_ERR(ctx); 299 295 300 - ciphertext_page = alloc_bounce_page(ctx); 296 + ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT); 301 297 if (IS_ERR(ciphertext_page)) { 302 298 err = PTR_ERR(ciphertext_page); 303 299 goto errout; ··· 305 301 306 302 while (len--) { 307 303 err = do_page_crypto(inode, FS_ENCRYPT, lblk, 308 - ZERO_PAGE(0), ciphertext_page); 304 + ZERO_PAGE(0), ciphertext_page, 305 + GFP_NOFS); 309 306 if (err) 310 307 goto errout; 311 308 312 - bio = bio_alloc(GFP_KERNEL, 1); 309 + bio = bio_alloc(GFP_NOWAIT, 1); 313 310 if (!bio) { 314 311 err = -ENOMEM; 315 312 goto errout; ··· 350 345 */ 351 346 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) 352 347 { 353 - struct inode *dir = d_inode(dentry->d_parent); 354 - struct fscrypt_info *ci = dir->i_crypt_info; 348 + struct dentry *dir; 349 + struct fscrypt_info *ci; 355 350 int dir_has_key, cached_with_key; 356 351 357 - if (!dir->i_sb->s_cop->is_encrypted(dir)) 358 - return 0; 352 + if (flags & LOOKUP_RCU) 353 + return -ECHILD; 359 354 355 + dir = dget_parent(dentry); 
356 + if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) { 357 + dput(dir); 358 + return 0; 359 + } 360 + 361 + ci = d_inode(dir)->i_crypt_info; 360 362 if (ci && ci->ci_keyring_key && 361 363 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | 362 364 (1 << KEY_FLAG_REVOKED) | ··· 375 363 cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; 376 364 spin_unlock(&dentry->d_lock); 377 365 dir_has_key = (ci != NULL); 366 + dput(dir); 378 367 379 368 /* 380 369 * If the dentry was cached without the key, and it is a
+4
fs/ext4/crypto.c
··· 32 32 #include <linux/random.h> 33 33 #include <linux/scatterlist.h> 34 34 #include <linux/spinlock_types.h> 35 + #include <linux/namei.h> 35 36 36 37 #include "ext4_extents.h" 37 38 #include "xattr.h" ··· 482 481 struct dentry *dir; 483 482 struct ext4_crypt_info *ci; 484 483 int dir_has_key, cached_with_key; 484 + 485 + if (flags & LOOKUP_RCU) 486 + return -ECHILD; 485 487 486 488 dir = dget_parent(dentry); 487 489 if (!ext4_encrypted_inode(d_inode(dir))) {
+13 -3
fs/f2fs/data.c
··· 992 992 if (f2fs_encrypted_inode(inode) && 993 993 S_ISREG(inode->i_mode)) { 994 994 995 - ctx = fscrypt_get_ctx(inode); 995 + ctx = fscrypt_get_ctx(inode, GFP_NOFS); 996 996 if (IS_ERR(ctx)) 997 997 goto set_error_page; 998 998 ··· 1092 1092 } 1093 1093 1094 1094 if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { 1095 + gfp_t gfp_flags = GFP_NOFS; 1095 1096 1096 1097 /* wait for GCed encrypted page writeback */ 1097 1098 f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode), 1098 1099 fio->old_blkaddr); 1099 - 1100 - fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page); 1100 + retry_encrypt: 1101 + fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page, 1102 + gfp_flags); 1101 1103 if (IS_ERR(fio->encrypted_page)) { 1102 1104 err = PTR_ERR(fio->encrypted_page); 1105 + if (err == -ENOMEM) { 1106 + /* flush pending ios and wait for a while */ 1107 + f2fs_flush_merged_bios(F2FS_I_SB(inode)); 1108 + congestion_wait(BLK_RW_ASYNC, HZ/50); 1109 + gfp_flags |= __GFP_NOFAIL; 1110 + err = 0; 1111 + goto retry_encrypt; 1112 + } 1103 1113 goto out_writepage; 1104 1114 } 1105 1115 }
+7 -3
fs/f2fs/file.c
··· 441 441 static int f2fs_file_open(struct inode *inode, struct file *filp) 442 442 { 443 443 int ret = generic_file_open(inode, filp); 444 - struct inode *dir = filp->f_path.dentry->d_parent->d_inode; 444 + struct dentry *dir; 445 445 446 446 if (!ret && f2fs_encrypted_inode(inode)) { 447 447 ret = fscrypt_get_encryption_info(inode); ··· 450 450 if (!fscrypt_has_encryption_key(inode)) 451 451 return -ENOKEY; 452 452 } 453 - if (f2fs_encrypted_inode(dir) && 454 - !fscrypt_has_permitted_context(dir, inode)) 453 + dir = dget_parent(file_dentry(filp)); 454 + if (f2fs_encrypted_inode(d_inode(dir)) && 455 + !fscrypt_has_permitted_context(d_inode(dir), inode)) { 456 + dput(dir); 455 457 return -EPERM; 458 + } 459 + dput(dir); 456 460 return ret; 457 461 } 458 462
+5 -4
include/linux/fscrypto.h
··· 263 263 extern struct kmem_cache *fscrypt_info_cachep; 264 264 int fscrypt_initialize(void); 265 265 266 - extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *); 266 + extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); 267 267 extern void fscrypt_release_ctx(struct fscrypt_ctx *); 268 - extern struct page *fscrypt_encrypt_page(struct inode *, struct page *); 268 + extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); 269 269 extern int fscrypt_decrypt_page(struct page *); 270 270 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); 271 271 extern void fscrypt_pullback_bio_page(struct page **, bool); ··· 299 299 #endif 300 300 301 301 /* crypto.c */ 302 - static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i) 302 + static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, 303 + gfp_t f) 303 304 { 304 305 return ERR_PTR(-EOPNOTSUPP); 305 306 } ··· 311 310 } 312 311 313 312 static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, 314 - struct page *p) 313 + struct page *p, gfp_t f) 315 314 { 316 315 return ERR_PTR(-EOPNOTSUPP); 317 316 }