Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6

* 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6:
NFS: Fix panic after nfs_umount()
nfs: remove extraneous and problematic calls to nfs_clear_request
nfs: kernel should return EPROTONOSUPPORT when it does not support NFSv4
NFS: Fix fcntl F_GETLK not reporting some conflicts
nfs: Discard ACL cache on mode update
NFS: Readdir cleanups
NFS: nfs_readdir_search_for_cookie() don't mark as eof if cookie not found
NFS: Fix a memory leak in nfs_readdir
Call the filesystem back whenever a page is removed from the page cache
NFS: Ensure we use the correct cookie in nfs_readdir_xdr_filler

+85 -52
+6 -1
Documentation/filesystems/Locking
··· 173 173 sector_t (*bmap)(struct address_space *, sector_t); 174 174 int (*invalidatepage) (struct page *, unsigned long); 175 175 int (*releasepage) (struct page *, int); 176 + void (*freepage)(struct page *); 176 177 int (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 177 178 loff_t offset, unsigned long nr_segs); 178 179 int (*launder_page) (struct page *); 179 180 180 181 locking rules: 181 - All except set_page_dirty may block 182 + All except set_page_dirty and freepage may block 182 183 183 184 BKL PageLocked(page) i_mutex 184 185 writepage: no yes, unlocks (see below) ··· 194 193 bmap: no 195 194 invalidatepage: no yes 196 195 releasepage: no yes 196 + freepage: no yes 197 197 direct_IO: no 198 198 launder_page: no yes 199 199 ··· 289 287 buffers from the page in preparation for freeing it. It returns zero to 290 288 indicate that the buffers are (or may be) freeable. If ->releasepage is zero, 291 289 the kernel assumes that the fs has no private interest in the buffers. 290 + 291 + ->freepage() is called when the kernel is done dropping the page 292 + from the page cache. 292 293 293 294 ->launder_page() may be called prior to releasing a page if 294 295 it is still found to be dirty. It returns zero if the page was successfully
+7
Documentation/filesystems/vfs.txt
··· 534 534 sector_t (*bmap)(struct address_space *, sector_t); 535 535 int (*invalidatepage) (struct page *, unsigned long); 536 536 int (*releasepage) (struct page *, int); 537 + void (*freepage)(struct page *); 537 538 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 538 539 loff_t offset, unsigned long nr_segs); 539 540 struct page* (*get_xip_page)(struct address_space *, sector_t, ··· 678 677 that all pages are invalidated, then its releasepage will 679 678 need to ensure this. Possibly it can clear the PageUptodate 680 679 bit if it cannot free private data yet. 680 + 681 + freepage: freepage is called once the page is no longer visible in 682 + the page cache in order to allow the cleanup of any private 683 + data. Since it may be called by the memory reclaimer, it 684 + should not assume that the original address_space mapping still 685 + exists, and it should not block. 681 686 682 687 direct_IO: called by the generic read/write routines to perform 683 688 direct_IO - that is IO requests which bypass the page cache
+35 -41
fs/nfs/dir.c
··· 57 57 struct inode *, struct dentry *); 58 58 static int nfs_fsync_dir(struct file *, int); 59 59 static loff_t nfs_llseek_dir(struct file *, loff_t, int); 60 - static int nfs_readdir_clear_array(struct page*, gfp_t); 60 + static void nfs_readdir_clear_array(struct page*); 61 61 62 62 const struct file_operations nfs_dir_operations = { 63 63 .llseek = nfs_llseek_dir, ··· 83 83 .setattr = nfs_setattr, 84 84 }; 85 85 86 - const struct address_space_operations nfs_dir_addr_space_ops = { 87 - .releasepage = nfs_readdir_clear_array, 86 + const struct address_space_operations nfs_dir_aops = { 87 + .freepage = nfs_readdir_clear_array, 88 88 }; 89 89 90 90 #ifdef CONFIG_NFS_V3 ··· 178 178 struct page *page; 179 179 unsigned long page_index; 180 180 u64 *dir_cookie; 181 + u64 last_cookie; 181 182 loff_t current_index; 182 183 decode_dirent_t decode; 183 184 ··· 214 213 * we are freeing strings created by nfs_add_to_readdir_array() 215 214 */ 216 215 static 217 - int nfs_readdir_clear_array(struct page *page, gfp_t mask) 216 + void nfs_readdir_clear_array(struct page *page) 218 217 { 219 - struct nfs_cache_array *array = nfs_readdir_get_array(page); 218 + struct nfs_cache_array *array; 220 219 int i; 221 220 222 - if (IS_ERR(array)) 223 - return PTR_ERR(array); 221 + array = kmap_atomic(page, KM_USER0); 224 222 for (i = 0; i < array->size; i++) 225 223 kfree(array->array[i].string.name); 226 - nfs_readdir_release_array(page); 227 - return 0; 224 + kunmap_atomic(array, KM_USER0); 228 225 } 229 226 230 227 /* ··· 271 272 goto out; 272 273 array->last_cookie = entry->cookie; 273 274 array->size++; 274 - if (entry->eof == 1) 275 + if (entry->eof != 0) 275 276 array->eof_index = array->size; 276 277 out: 277 278 nfs_readdir_release_array(page); ··· 311 312 for (i = 0; i < array->size; i++) { 312 313 if (array->array[i].cookie == *desc->dir_cookie) { 313 314 desc->cache_entry_index = i; 314 - status = 0; 315 - goto out; 315 + return 0; 316 316 } 317 317 } 318 - if (i == 
array->eof_index) { 319 - desc->eof = 1; 318 + if (array->eof_index >= 0) { 320 319 status = -EBADCOOKIE; 320 + if (*desc->dir_cookie == array->last_cookie) 321 + desc->eof = 1; 321 322 } 322 - out: 323 323 return status; 324 324 } 325 325 ··· 326 328 int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc) 327 329 { 328 330 struct nfs_cache_array *array; 329 - int status = -EBADCOOKIE; 330 - 331 - if (desc->dir_cookie == NULL) 332 - goto out; 331 + int status; 333 332 334 333 array = nfs_readdir_get_array(desc->page); 335 334 if (IS_ERR(array)) { ··· 339 344 else 340 345 status = nfs_readdir_search_for_cookie(array, desc); 341 346 347 + if (status == -EAGAIN) { 348 + desc->last_cookie = array->last_cookie; 349 + desc->page_index++; 350 + } 342 351 nfs_readdir_release_array(desc->page); 343 352 out: 344 353 return status; ··· 489 490 490 491 count++; 491 492 492 - if (desc->plus == 1) 493 + if (desc->plus != 0) 493 494 nfs_prime_dcache(desc->file->f_path.dentry, entry); 494 495 495 496 status = nfs_readdir_add_to_array(entry, page); ··· 497 498 break; 498 499 } while (!entry->eof); 499 500 500 - if (count == 0 || (status == -EBADCOOKIE && entry->eof == 1)) { 501 + if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) { 501 502 array = nfs_readdir_get_array(page); 502 503 if (!IS_ERR(array)) { 503 504 array->eof_index = array->size; ··· 562 563 unsigned int array_size = ARRAY_SIZE(pages); 563 564 564 565 entry.prev_cookie = 0; 565 - entry.cookie = *desc->dir_cookie; 566 + entry.cookie = desc->last_cookie; 566 567 entry.eof = 0; 567 568 entry.fh = nfs_alloc_fhandle(); 568 569 entry.fattr = nfs_alloc_fattr(); ··· 635 636 static 636 637 void cache_page_release(nfs_readdir_descriptor_t *desc) 637 638 { 639 + if (!desc->page->mapping) 640 + nfs_readdir_clear_array(desc->page); 638 641 page_cache_release(desc->page); 639 642 desc->page = NULL; 640 643 } ··· 661 660 return PTR_ERR(desc->page); 662 661 663 662 res = nfs_readdir_search_array(desc); 664 - if (res 
== 0) 665 - return 0; 666 - cache_page_release(desc); 663 + if (res != 0) 664 + cache_page_release(desc); 667 665 return res; 668 666 } 669 667 ··· 672 672 { 673 673 int res; 674 674 675 - if (desc->page_index == 0) 675 + if (desc->page_index == 0) { 676 676 desc->current_index = 0; 677 - while (1) { 678 - res = find_cache_page(desc); 679 - if (res != -EAGAIN) 680 - break; 681 - desc->page_index++; 677 + desc->last_cookie = 0; 682 678 } 679 + do { 680 + res = find_cache_page(desc); 681 + } while (res == -EAGAIN); 683 682 return res; 684 - } 685 - 686 - static inline unsigned int dt_type(struct inode *inode) 687 - { 688 - return (inode->i_mode >> 12) & 15; 689 683 } 690 684 691 685 /* ··· 711 717 break; 712 718 } 713 719 file->f_pos++; 714 - desc->cache_entry_index = i; 715 720 if (i < (array->size-1)) 716 721 *desc->dir_cookie = array->array[i+1].cookie; 717 722 else 718 723 *desc->dir_cookie = array->last_cookie; 719 724 } 720 - if (i == array->eof_index) 725 + if (array->eof_index >= 0) 721 726 desc->eof = 1; 722 727 723 728 nfs_readdir_release_array(desc->page); ··· 757 764 } 758 765 759 766 desc->page_index = 0; 767 + desc->last_cookie = *desc->dir_cookie; 760 768 desc->page = page; 761 769 762 770 status = nfs_readdir_xdr_to_array(desc, page, inode); ··· 785 791 struct inode *inode = dentry->d_inode; 786 792 nfs_readdir_descriptor_t my_desc, 787 793 *desc = &my_desc; 788 - int res = -ENOMEM; 794 + int res; 789 795 790 796 dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n", 791 797 dentry->d_parent->d_name.name, dentry->d_name.name, ··· 810 816 if (res < 0) 811 817 goto out; 812 818 813 - while (desc->eof != 1) { 819 + do { 814 820 res = readdir_search_pagecache(desc); 815 821 816 822 if (res == -EBADCOOKIE) { ··· 838 844 res = nfs_do_filldir(desc, dirent, filldir); 839 845 if (res < 0) 840 846 break; 841 - } 847 + } while (!desc->eof); 842 848 out: 843 849 nfs_unblock_sillyrename(dentry); 844 850 if (res > 0)
+2
fs/nfs/file.c
··· 693 693 { 694 694 struct inode *inode = filp->f_mapping->host; 695 695 int status = 0; 696 + unsigned int saved_type = fl->fl_type; 696 697 697 698 /* Try local locking first */ 698 699 posix_test_lock(filp, fl); ··· 701 700 /* found a conflict */ 702 701 goto out; 703 702 } 703 + fl->fl_type = saved_type; 704 704 705 705 if (nfs_have_delegation(inode, FMODE_READ)) 706 706 goto out_noconflict;
+1
fs/nfs/inode.c
··· 289 289 } else if (S_ISDIR(inode->i_mode)) { 290 290 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops; 291 291 inode->i_fop = &nfs_dir_operations; 292 + inode->i_data.a_ops = &nfs_dir_aops; 292 293 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)) 293 294 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); 294 295 /* Deal with crossing mountpoints */
+2 -2
fs/nfs/mount_clnt.c
··· 505 505 506 506 static struct rpc_version mnt_version1 = { 507 507 .number = 1, 508 - .nrprocs = 2, 508 + .nrprocs = ARRAY_SIZE(mnt_procedures), 509 509 .procs = mnt_procedures, 510 510 }; 511 511 512 512 static struct rpc_version mnt_version3 = { 513 513 .number = 3, 514 - .nrprocs = 2, 514 + .nrprocs = ARRAY_SIZE(mnt3_procedures), 515 515 .procs = mnt3_procedures, 516 516 }; 517 517
+9
fs/nfs/nfs4proc.c
··· 3361 3361 ret = nfs_revalidate_inode(server, inode); 3362 3362 if (ret < 0) 3363 3363 return ret; 3364 + if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3365 + nfs_zap_acl_cache(inode); 3364 3366 ret = nfs4_read_cached_acl(inode, buf, buflen); 3365 3367 if (ret != -ENOENT) 3366 3368 return ret; ··· 3391 3389 nfs_inode_return_delegation(inode); 3392 3390 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3393 3391 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3392 + /* 3393 + * Acl update can result in inode attribute update. 3394 + * so mark the attribute cache invalid. 3395 + */ 3396 + spin_lock(&inode->i_lock); 3397 + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 3398 + spin_unlock(&inode->i_lock); 3394 3399 nfs_access_zap_cache(inode); 3395 3400 nfs_zap_acl_cache(inode); 3396 3401 return ret;
+2 -2
fs/nfs/pagelist.c
··· 115 115 { 116 116 if (!nfs_lock_request_dontget(req)) 117 117 return 0; 118 - if (req->wb_page != NULL) 118 + if (test_bit(PG_MAPPED, &req->wb_flags)) 119 119 radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 120 120 return 1; 121 121 } ··· 125 125 */ 126 126 void nfs_clear_page_tag_locked(struct nfs_page *req) 127 127 { 128 - if (req->wb_page != NULL) { 128 + if (test_bit(PG_MAPPED, &req->wb_flags)) { 129 129 struct inode *inode = req->wb_context->path.dentry->d_inode; 130 130 struct nfs_inode *nfsi = NFS_I(inode); 131 131
-1
fs/nfs/read.c
··· 152 152 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 153 153 req->wb_bytes, 154 154 (long long)req_offset(req)); 155 - nfs_clear_request(req); 156 155 nfs_release_request(req); 157 156 } 158 157
-4
fs/nfs/super.c
··· 1069 1069 mnt->flags |= NFS_MOUNT_VER3; 1070 1070 mnt->version = 3; 1071 1071 break; 1072 - #ifdef CONFIG_NFS_V4 1073 1072 case Opt_v4: 1074 1073 mnt->flags &= ~NFS_MOUNT_VER3; 1075 1074 mnt->version = 4; 1076 1075 break; 1077 - #endif 1078 1076 case Opt_udp: 1079 1077 mnt->flags &= ~NFS_MOUNT_TCP; 1080 1078 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; ··· 1284 1286 mnt->flags |= NFS_MOUNT_VER3; 1285 1287 mnt->version = 3; 1286 1288 break; 1287 - #ifdef CONFIG_NFS_V4 1288 1289 case NFS4_VERSION: 1289 1290 mnt->flags &= ~NFS_MOUNT_VER3; 1290 1291 mnt->version = 4; 1291 1292 break; 1292 - #endif 1293 1293 default: 1294 1294 goto out_invalid_value; 1295 1295 }
+2 -1
fs/nfs/write.c
··· 390 390 if (nfs_have_delegation(inode, FMODE_WRITE)) 391 391 nfsi->change_attr++; 392 392 } 393 + set_bit(PG_MAPPED, &req->wb_flags); 393 394 SetPagePrivate(req->wb_page); 394 395 set_page_private(req->wb_page, (unsigned long)req); 395 396 nfsi->npages++; ··· 416 415 spin_lock(&inode->i_lock); 417 416 set_page_private(req->wb_page, 0); 418 417 ClearPagePrivate(req->wb_page); 418 + clear_bit(PG_MAPPED, &req->wb_flags); 419 419 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); 420 420 nfsi->npages--; 421 421 if (!nfsi->npages) { ··· 424 422 iput(inode); 425 423 } else 426 424 spin_unlock(&inode->i_lock); 427 - nfs_clear_request(req); 428 425 nfs_release_request(req); 429 426 } 430 427
+1
include/linux/fs.h
··· 602 602 sector_t (*bmap)(struct address_space *, sector_t); 603 603 void (*invalidatepage) (struct page *, unsigned long); 604 604 int (*releasepage) (struct page *, gfp_t); 605 + void (*freepage)(struct page *); 605 606 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 606 607 loff_t offset, unsigned long nr_segs); 607 608 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+1
include/linux/nfs_fs.h
··· 401 401 #endif /* CONFIG_NFS_V3 */ 402 402 extern const struct file_operations nfs_file_operations; 403 403 extern const struct address_space_operations nfs_file_aops; 404 + extern const struct address_space_operations nfs_dir_aops; 404 405 405 406 static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) 406 407 {
+1
include/linux/nfs_page.h
··· 29 29 */ 30 30 enum { 31 31 PG_BUSY = 0, 32 + PG_MAPPED, 32 33 PG_CLEAN, 33 34 PG_NEED_COMMIT, 34 35 PG_NEED_RESCHED,
+5
mm/filemap.c
··· 143 143 void remove_from_page_cache(struct page *page) 144 144 { 145 145 struct address_space *mapping = page->mapping; 146 + void (*freepage)(struct page *); 146 147 147 148 BUG_ON(!PageLocked(page)); 148 149 150 + freepage = mapping->a_ops->freepage; 149 151 spin_lock_irq(&mapping->tree_lock); 150 152 __remove_from_page_cache(page); 151 153 spin_unlock_irq(&mapping->tree_lock); 152 154 mem_cgroup_uncharge_cache_page(page); 155 + 156 + if (freepage) 157 + freepage(page); 153 158 } 154 159 EXPORT_SYMBOL(remove_from_page_cache); 155 160
+4
mm/truncate.c
··· 390 390 __remove_from_page_cache(page); 391 391 spin_unlock_irq(&mapping->tree_lock); 392 392 mem_cgroup_uncharge_cache_page(page); 393 + 394 + if (mapping->a_ops->freepage) 395 + mapping->a_ops->freepage(page); 396 + 393 397 page_cache_release(page); /* pagecache ref */ 394 398 return 1; 395 399 failed:
+7
mm/vmscan.c
··· 494 494 spin_unlock_irq(&mapping->tree_lock); 495 495 swapcache_free(swap, page); 496 496 } else { 497 + void (*freepage)(struct page *); 498 + 499 + freepage = mapping->a_ops->freepage; 500 + 497 501 __remove_from_page_cache(page); 498 502 spin_unlock_irq(&mapping->tree_lock); 499 503 mem_cgroup_uncharge_cache_page(page); 504 + 505 + if (freepage != NULL) 506 + freepage(page); 500 507 } 501 508 502 509 return 1;