NFS: Fix race in nfs_set_page_dirty

Protect nfs_set_page_dirty() against races with nfs_inode_add_request.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Trond Myklebust and committed by Linus Torvalds 2b82f190 612c9384

+14 -3
fs/nfs/write.c
@@ -388,6 +388,8 @@
 	}
 	SetPagePrivate(req->wb_page);
 	set_page_private(req->wb_page, (unsigned long)req);
+	if (PageDirty(req->wb_page))
+		set_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfsi->npages++;
 	atomic_inc(&req->wb_count);
 	return 0;
@@ -409,6 +407,8 @@
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&nfsi->req_lock);
@@ -1531,13 +1527,20 @@
 
 int nfs_set_page_dirty(struct page *page)
 {
+	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 	struct nfs_page *req;
+	int ret;
 
-	req = nfs_page_find_request(page);
+	spin_lock(req_lock);
+	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
-		set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		spin_unlock(req_lock);
 		nfs_release_request(req);
+		return ret;
 	}
-	return __set_page_dirty_nobuffers(page);
+	ret = __set_page_dirty_nobuffers(page);
+	spin_unlock(req_lock);
+	return ret;
 }