Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

NFS: Flesh out nfs_invalidate_page()

In the case of a call to truncate_inode_pages(), we should really try to
cancel any pending writes on the page.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

Total (5 files changed): +60 -25
+5 -1
fs/nfs/file.c
···
 303  303
 304  304  static void nfs_invalidate_page(struct page *page, unsigned long offset)
 305  305  {
 306       -	/* FIXME: we really should cancel any unstarted writes on this page */
      306  +	struct inode *inode = page->mapping->host;
      307  +
      308  +	/* Cancel any unstarted writes on this page */
      309  +	if (offset == 0)
      310  +		nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
 307  311  }
 308  312
 309  313  static int nfs_release_page(struct page *page, gfp_t gfp)
+28 -19
fs/nfs/pagelist.c
···
 325  325
 326  326  /**
 327  327   * nfs_scan_list - Scan a list for matching requests
      328  + * @nfsi: NFS inode
 328  329   * @head: One of the NFS inode request lists
 329  330   * @dst: Destination list
 330  331   * @idx_start: lower bound of page->index to scan
···
 337  336   * The requests are *not* checked to ensure that they form a contiguous set.
 338  337   * You must be holding the inode's req_lock when calling this function
 339  338   */
 340       -int
 341       -nfs_scan_list(struct list_head *head, struct list_head *dst,
 342       -		unsigned long idx_start, unsigned int npages)
      339  +int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
      340  +		struct list_head *dst, unsigned long idx_start,
      341  +		unsigned int npages)
 343  342  {
 344       -	struct list_head *pos, *tmp;
 345       -	struct nfs_page *req;
 346       -	unsigned long idx_end;
 347       -	int res;
      343  +	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
      344  +	struct nfs_page *req;
      345  +	unsigned long idx_end;
      346  +	int found, i;
      347  +	int res;
 348  348
 349  349  	res = 0;
 350  350  	if (npages == 0)
···
 353  351  	else
 354  352  		idx_end = idx_start + npages - 1;
 355  353
 356       -	list_for_each_safe(pos, tmp, head) {
 357       -
 358       -		req = nfs_list_entry(pos);
 359       -
 360       -		if (req->wb_index < idx_start)
 361       -			continue;
 362       -		if (req->wb_index > idx_end)
      354  +	for (;;) {
      355  +		found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
      356  +				(void **)&pgvec[0], idx_start,
      357  +				NFS_SCAN_MAXENTRIES);
      358  +		if (found <= 0)
 363  359  			break;
      360  +		for (i = 0; i < found; i++) {
      361  +			req = pgvec[i];
      362  +			if (req->wb_index > idx_end)
      363  +				goto out;
      364  +			idx_start = req->wb_index + 1;
      365  +			if (req->wb_list_head != head)
      366  +				continue;
      367  +			if (nfs_set_page_writeback_locked(req)) {
      368  +				nfs_list_remove_request(req);
      369  +				nfs_list_add_request(req, dst);
      370  +				res++;
      371  +			}
      372  +		}
 364  373
 365       -		if (!nfs_set_page_writeback_locked(req))
 366       -			continue;
 367       -		nfs_list_remove_request(req);
 368       -		nfs_list_add_request(req, dst);
 369       -		res++;
 370  374  	}
      375  +out:
 371  376  	return res;
 372  377  }
 373  378
+24 -3
fs/nfs/write.c
···
 579  579  		return ret;
 580  580  }
 581  581
      582  +static void nfs_cancel_requests(struct list_head *head)
      583  +{
      584  +	struct nfs_page *req;
      585  +	while(!list_empty(head)) {
      586  +		req = nfs_list_entry(head->next);
      587  +		nfs_list_remove_request(req);
      588  +		nfs_inode_remove_request(req);
      589  +		nfs_clear_page_writeback(req);
      590  +	}
      591  +}
      592  +
 582  593  /*
 583  594   * nfs_scan_dirty - Scan an inode for dirty requests
 584  595   * @inode: NFS inode to scan
···
 634  623  	int res = 0;
 635  624
 636  625  	if (nfsi->ncommit != 0) {
 637       -		res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
      626  +		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
 638  627  		nfsi->ncommit -= res;
 639  628  		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
 640  629  			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
···
1502 1491  		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
1503 1492  		if (pages != 0) {
1504 1493  			spin_unlock(&nfsi->req_lock);
1505       -			ret = nfs_flush_list(inode, &head, pages, how);
     1494  +			if (how & FLUSH_INVALIDATE)
     1495  +				nfs_cancel_requests(&head);
     1496  +			else
     1497  +				ret = nfs_flush_list(inode, &head, pages, how);
1506 1498  			spin_lock(&nfsi->req_lock);
1507 1499  			continue;
1508 1500  		}
1509 1501  		if (nocommit)
1510 1502  			break;
1511       -		pages = nfs_scan_commit(inode, &head, 0, 0);
     1503  +		pages = nfs_scan_commit(inode, &head, idx_start, npages);
1512 1504  		if (pages == 0)
1513 1505  			break;
     1506  +		if (how & FLUSH_INVALIDATE) {
     1507  +			spin_unlock(&nfsi->req_lock);
     1508  +			nfs_cancel_requests(&head);
     1509  +			spin_lock(&nfsi->req_lock);
     1510  +			continue;
     1511  +		}
     1512  +		pages += nfs_scan_commit(inode, &head, 0, 0);
1514 1513  		spin_unlock(&nfsi->req_lock);
1515 1514  		ret = nfs_commit_list(inode, &head, how);
1516 1515  		spin_lock(&nfsi->req_lock);
+1
include/linux/nfs_fs.h
···
 61  61  #define FLUSH_LOWPRI		8	/* low priority background flush */
 62  62  #define FLUSH_HIGHPRI		16	/* high priority memory reclaim flush */
 63  63  #define FLUSH_NOCOMMIT		32	/* Don't send the NFSv3/v4 COMMIT */
     64  +#define FLUSH_INVALIDATE	64	/* Invalidate the page cache */
 64  65
 65  66  #ifdef __KERNEL__
 66  67
+2 -2
include/linux/nfs_page.h
···
 63  63
 64  64  extern	int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
 65  65  			unsigned long idx_start, unsigned int npages);
 66       -extern	int nfs_scan_list(struct list_head *, struct list_head *,
 67       -			unsigned long, unsigned int);
     66  +extern	int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst,
     67  +			unsigned long idx_start, unsigned int npages);
 68  68  extern	int nfs_coalesce_requests(struct list_head *, struct list_head *,
 69  69  			unsigned int);
 70  70  extern	int nfs_wait_on_request(struct nfs_page *);