Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs update from Al Viro:

- misc stable fixes

- trivial kernel-doc and comment fixups

- remove never-used block_page_mkwrite() wrapper function, and rename
the function that is _actually_ used to not have double underscores.

* 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
fs: 9p: cache.h: Add #define of include guard
vfs: remove stale comment in inode_operations
vfs: remove unused wrapper block_page_mkwrite()
binfmt_elf: Correct `arch_check_elf's description
fs: fix writeback.c kernel-doc warnings
fs: fix inode.c kernel-doc warning
fs/pipe.c: return error code rather than 0 in pipe_write()
fs/pipe.c: preserve alloc_file() error code
binfmt_elf: Don't clobber passed executable's file header
FS-Cache: Handle a write to the page immediately beyond the EOF marker
cachefiles: perform test on s_blocksize when opening cache file.
FS-Cache: Don't override netfs's primary_index if registering failed
FS-Cache: Increase reference of parent after registering, netfs success
debugfs: fix refcount imbalance in start_creating

+85 -101
+1
fs/9p/cache.h
··· 21 21 */ 22 22 23 23 #ifndef _9P_CACHE_H 24 + #define _9P_CACHE_H 24 25 #ifdef CONFIG_9P_FSCACHE 25 26 #include <linux/fscache.h> 26 27 #include <linux/spinlock.h>
+6 -6
fs/binfmt_elf.c
··· 488 488 } 489 489 490 490 /** 491 - * arch_check_elf() - check a PT_LOPROC..PT_HIPROC ELF program header 491 + * arch_check_elf() - check an ELF executable 492 492 * @ehdr: The main ELF header 493 493 * @has_interp: True if the ELF has an interpreter, else false. 494 494 * @state: Architecture-specific state preserved throughout the process ··· 760 760 */ 761 761 would_dump(bprm, interpreter); 762 762 763 - retval = kernel_read(interpreter, 0, bprm->buf, 764 - BINPRM_BUF_SIZE); 765 - if (retval != BINPRM_BUF_SIZE) { 763 + /* Get the exec headers */ 764 + retval = kernel_read(interpreter, 0, 765 + (void *)&loc->interp_elf_ex, 766 + sizeof(loc->interp_elf_ex)); 767 + if (retval != sizeof(loc->interp_elf_ex)) { 766 768 if (retval >= 0) 767 769 retval = -EIO; 768 770 goto out_free_dentry; 769 771 } 770 772 771 - /* Get the exec headers */ 772 - loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); 773 773 break; 774 774 } 775 775 elf_ppnt++;
+2 -22
fs/buffer.c
··· 2420 2420 * unlock the page. 2421 2421 * 2422 2422 * Direct callers of this function should protect against filesystem freezing 2423 - * using sb_start_write() - sb_end_write() functions. 2423 + * using sb_start_pagefault() - sb_end_pagefault() functions. 2424 2424 */ 2425 - int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2425 + int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2426 2426 get_block_t get_block) 2427 2427 { 2428 2428 struct page *page = vmf->page; ··· 2458 2458 out_unlock: 2459 2459 unlock_page(page); 2460 2460 return ret; 2461 - } 2462 - EXPORT_SYMBOL(__block_page_mkwrite); 2463 - 2464 - int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2465 - get_block_t get_block) 2466 - { 2467 - int ret; 2468 - struct super_block *sb = file_inode(vma->vm_file)->i_sb; 2469 - 2470 - sb_start_pagefault(sb); 2471 - 2472 - /* 2473 - * Update file times before taking page lock. We may end up failing the 2474 - * fault so this update may be superfluous but who really cares... 2475 - */ 2476 - file_update_time(vma->vm_file); 2477 - 2478 - ret = __block_page_mkwrite(vma, vmf, get_block); 2479 - sb_end_pagefault(sb); 2480 - return block_page_mkwrite_return(ret); 2481 2461 } 2482 2462 EXPORT_SYMBOL(block_page_mkwrite); 2483 2463
+2
fs/cachefiles/namei.c
··· 655 655 aops = d_backing_inode(object->dentry)->i_mapping->a_ops; 656 656 if (!aops->bmap) 657 657 goto check_error; 658 + if (object->dentry->d_sb->s_blocksize > PAGE_SIZE) 659 + goto check_error; 658 660 659 661 object->backer = object->dentry; 660 662 } else {
+36 -35
fs/cachefiles/rdwr.c
··· 414 414 ASSERT(inode->i_mapping->a_ops->readpages); 415 415 416 416 /* calculate the shift required to use bmap */ 417 - if (inode->i_sb->s_blocksize > PAGE_SIZE) 418 - goto enobufs; 419 - 420 417 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 421 418 422 419 op->op.flags &= FSCACHE_OP_KEEP_FLAGS; ··· 708 711 ASSERT(inode->i_mapping->a_ops->readpages); 709 712 710 713 /* calculate the shift required to use bmap */ 711 - if (inode->i_sb->s_blocksize > PAGE_SIZE) 712 - goto all_enobufs; 713 - 714 714 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 715 715 716 716 pagevec_init(&pagevec, 0); ··· 899 905 cache = container_of(object->fscache.cache, 900 906 struct cachefiles_cache, cache); 901 907 908 + pos = (loff_t)page->index << PAGE_SHIFT; 909 + 910 + /* We mustn't write more data than we have, so we have to beware of a 911 + * partial page at EOF. 912 + */ 913 + eof = object->fscache.store_limit_l; 914 + if (pos >= eof) 915 + goto error; 916 + 902 917 /* write the page to the backing filesystem and let it store it in its 903 918 * own time */ 904 919 path.mnt = cache->mnt; ··· 915 912 file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred); 916 913 if (IS_ERR(file)) { 917 914 ret = PTR_ERR(file); 918 - } else { 919 - pos = (loff_t) page->index << PAGE_SHIFT; 915 + goto error_2; 916 + } 920 917 921 - /* we mustn't write more data than we have, so we have 922 - * to beware of a partial page at EOF */ 923 - eof = object->fscache.store_limit_l; 924 - len = PAGE_SIZE; 925 - if (eof & ~PAGE_MASK) { 926 - ASSERTCMP(pos, <, eof); 927 - if (eof - pos < PAGE_SIZE) { 928 - _debug("cut short %llx to %llx", 929 - pos, eof); 930 - len = eof - pos; 931 - ASSERTCMP(pos + len, ==, eof); 932 - } 918 + len = PAGE_SIZE; 919 + if (eof & ~PAGE_MASK) { 920 + if (eof - pos < PAGE_SIZE) { 921 + _debug("cut short %llx to %llx", 922 + pos, eof); 923 + len = eof - pos; 924 + ASSERTCMP(pos + len, ==, eof); 933 925 } 934 - 935 - data = kmap(page); 936 - ret = __kernel_write(file, data, len, &pos); 937 - kunmap(page); 938 - if (ret != len) 939 - ret = -EIO; 940 - fput(file); 941 926 } 942 927 943 - if (ret < 0) { 944 - if (ret == -EIO) 945 - cachefiles_io_error_obj( 946 - object, "Write page to backing file failed"); 947 - ret = -ENOBUFS; 948 - } 928 + data = kmap(page); 929 + ret = __kernel_write(file, data, len, &pos); 930 + kunmap(page); 931 + fput(file); 932 + if (ret != len) 933 + goto error_eio; 949 934 950 - _leave(" = %d", ret); 951 - return ret; 935 + _leave(" = 0"); 936 + return 0; 937 + 938 + error_eio: 939 + ret = -EIO; 940 + error_2: 941 + if (ret == -EIO) 942 + cachefiles_io_error_obj(object, 943 + "Write page to backing file failed"); 944 + error: 945 + _leave(" = -ENOBUFS [%d]", ret); 946 + return -ENOBUFS; 952 947 } 953 948 954 949 /*
+5 -1
fs/debugfs/inode.c
··· 271 271 dput(dentry); 272 272 dentry = ERR_PTR(-EEXIST); 273 273 } 274 - if (IS_ERR(dentry)) 274 + 275 + if (IS_ERR(dentry)) { 275 276 mutex_unlock(&d_inode(parent)->i_mutex); 277 + simple_release_fs(&debugfs_mount, &debugfs_mount_count); 278 + } 279 + 276 280 return dentry; 277 281 } 278 282
+2 -2
fs/ext4/inode.c
··· 5283 5283 !ext4_should_journal_data(inode) && 5284 5284 !ext4_nonda_switch(inode->i_sb)) { 5285 5285 do { 5286 - ret = __block_page_mkwrite(vma, vmf, 5286 + ret = block_page_mkwrite(vma, vmf, 5287 5287 ext4_da_get_block_prep); 5288 5288 } while (ret == -ENOSPC && 5289 5289 ext4_should_retry_alloc(inode->i_sb, &retries)); ··· 5330 5330 ret = VM_FAULT_SIGBUS; 5331 5331 goto out; 5332 5332 } 5333 - ret = __block_page_mkwrite(vma, vmf, get_block); 5333 + ret = block_page_mkwrite(vma, vmf, get_block); 5334 5334 if (!ret && ext4_should_journal_data(inode)) { 5335 5335 if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 5336 5336 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+18 -20
fs/fscache/netfs.c
··· 22 22 int __fscache_register_netfs(struct fscache_netfs *netfs) 23 23 { 24 24 struct fscache_netfs *ptr; 25 + struct fscache_cookie *cookie; 25 26 int ret; 26 27 27 28 _enter("{%s}", netfs->name); ··· 30 29 INIT_LIST_HEAD(&netfs->link); 31 30 32 31 /* allocate a cookie for the primary index */ 33 - netfs->primary_index = 34 - kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); 32 + cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); 35 33 36 - if (!netfs->primary_index) { 34 + if (!cookie) { 37 35 _leave(" = -ENOMEM"); 38 36 return -ENOMEM; 39 37 } 40 38 41 39 /* initialise the primary index cookie */ 42 - atomic_set(&netfs->primary_index->usage, 1); 43 - atomic_set(&netfs->primary_index->n_children, 0); 44 - atomic_set(&netfs->primary_index->n_active, 1); 40 + atomic_set(&cookie->usage, 1); 41 + atomic_set(&cookie->n_children, 0); 42 + atomic_set(&cookie->n_active, 1); 45 43 46 - netfs->primary_index->def = &fscache_fsdef_netfs_def; 47 - netfs->primary_index->parent = &fscache_fsdef_index; 48 - netfs->primary_index->netfs_data = netfs; 49 - netfs->primary_index->flags = 1 << FSCACHE_COOKIE_ENABLED; 44 + cookie->def = &fscache_fsdef_netfs_def; 45 + cookie->parent = &fscache_fsdef_index; 46 + cookie->netfs_data = netfs; 47 + cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; 50 48 51 - atomic_inc(&netfs->primary_index->parent->usage); 52 - atomic_inc(&netfs->primary_index->parent->n_children); 53 - 54 - spin_lock_init(&netfs->primary_index->lock); 55 - INIT_HLIST_HEAD(&netfs->primary_index->backing_objects); 49 + spin_lock_init(&cookie->lock); 50 + INIT_HLIST_HEAD(&cookie->backing_objects); 56 51 57 52 /* check the netfs type is not already present */ 58 53 down_write(&fscache_addremove_sem); ··· 59 62 goto already_registered; 60 63 } 61 64 65 + atomic_inc(&cookie->parent->usage); 66 + atomic_inc(&cookie->parent->n_children); 67 + 68 + netfs->primary_index = cookie; 62 69 list_add(&netfs->link, &fscache_netfs_list); 63 70 ret = 0; 64 71 ··· 71 70 already_registered: 72 71 up_write(&fscache_addremove_sem); 73 72 74 - if (ret < 0) { 75 - netfs->primary_index->parent = NULL; 76 - __fscache_cookie_put(netfs->primary_index); 77 - netfs->primary_index = NULL; 78 - } 73 + if (ret < 0) 74 + kmem_cache_free(fscache_cookie_jar, cookie); 79 75 80 76 _leave(" = %d", ret); 81 77 return ret;
+1 -1
fs/fscache/page.c
··· 816 816 goto superseded; 817 817 page = results[0]; 818 818 _debug("gang %d [%lx]", n, page->index); 819 - if (page->index > op->store_limit) { 819 + if (page->index >= op->store_limit) { 820 820 fscache_stat(&fscache_n_store_pages_over_limit); 821 821 goto superseded; 822 822 }
+1 -1
fs/nilfs2/file.c
··· 109 109 goto out; 110 110 111 111 file_update_time(vma->vm_file); 112 - ret = __block_page_mkwrite(vma, vmf, nilfs_get_block); 112 + ret = block_page_mkwrite(vma, vmf, nilfs_get_block); 113 113 if (ret) { 114 114 nilfs_transaction_abort(inode->i_sb); 115 115 goto out;
+10 -8
fs/pipe.c
··· 366 366 int offset = buf->offset + buf->len; 367 367 368 368 if (ops->can_merge && offset + chars <= PAGE_SIZE) { 369 - int error = ops->confirm(pipe, buf); 370 - if (error) 369 + ret = ops->confirm(pipe, buf); 370 + if (ret) 371 371 goto out; 372 372 373 373 ret = copy_page_from_iter(buf->page, offset, chars, from); 374 374 if (unlikely(ret < chars)) { 375 - error = -EFAULT; 375 + ret = -EFAULT; 376 376 goto out; 377 377 } 378 378 do_wakeup = 1; 379 - buf->len += chars; 380 - ret = chars; 379 + buf->len += ret; 381 380 if (!iov_iter_count(from)) 382 381 goto out; 383 382 } ··· 692 693 693 694 d_instantiate(path.dentry, inode); 694 695 695 - err = -ENFILE; 696 696 f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops); 697 - if (IS_ERR(f)) 697 + if (IS_ERR(f)) { 698 + err = PTR_ERR(f); 698 699 goto err_dentry; 700 + } 699 701 700 702 f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)); 701 703 f->private_data = inode->i_pipe; 702 704 703 705 res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops); 704 - if (IS_ERR(res[0])) 706 + if (IS_ERR(res[0])) { 707 + err = PTR_ERR(res[0]); 705 708 goto err_file; 709 + } 706 710 707 711 path_get(&path); 708 712 res[0]->private_data = inode->i_pipe;
+1 -1
fs/xfs/xfs_file.c
··· 1506 1506 ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_direct, 1507 1507 xfs_end_io_dax_write); 1508 1508 } else { 1509 - ret = __block_page_mkwrite(vma, vmf, xfs_get_blocks); 1509 + ret = block_page_mkwrite(vma, vmf, xfs_get_blocks); 1510 1510 ret = block_page_mkwrite_return(ret); 1511 1511 } 1512 1512
-2
include/linux/buffer_head.h
··· 227 227 get_block_t *, loff_t *); 228 228 int generic_cont_expand_simple(struct inode *inode, loff_t size); 229 229 int block_commit_write(struct page *page, unsigned from, unsigned to); 230 - int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 231 - get_block_t get_block); 232 230 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 233 231 get_block_t get_block); 234 232 /* Convert errno to return value from ->page_mkwrite() call */
-2
include/linux/fs.h
··· 1665 1665 umode_t create_mode, int *opened); 1666 1666 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 1667 1667 int (*set_acl)(struct inode *, struct posix_acl *, int); 1668 - 1669 - /* WARNING: probably going away soon, do not use! */ 1670 1668 } ____cacheline_aligned; 1671 1669 1672 1670 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,