fs/nfs/file.c | +11 -2

--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/aio.h>
 #include <linux/gfp.h>
+#include <linux/swap.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -494,11 +495,19 @@
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
+	struct address_space *mapping = page->mapping;
+
 	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
 	/* Only do I/O if gfp is a superset of GFP_KERNEL */
-	if ((gfp & GFP_KERNEL) == GFP_KERNEL)
-		nfs_wb_page(page->mapping->host, page);
+	if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+		int how = FLUSH_SYNC;
+
+		/* Don't let kswapd deadlock waiting for OOM RPC calls */
+		if (current_is_kswapd())
+			how = 0;
+		nfs_commit_inode(mapping->host, how);
+	}
 	/* If PagePrivate() is set, then the page is not freeable */
 	if (PagePrivate(page))
 		return 0;
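The one new include, <linux/swap.h>, is what supplies current_is_kswapd(). For reference (paraphrased from that header; not part of this patch), the helper is just a task-flag test:

    static inline int current_is_kswapd(void)
    {
            return current->flags & PF_KSWAPD;
    }

When it returns true, nfs_release_page() drops FLUSH_SYNC, so kswapd merely queues the COMMIT RPC instead of sleeping on its completion; blocking there could deadlock reclaim against RPC allocations that themselves wait on kswapd to free memory.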
fs/nfs/nfsroot.c | +1 -1
fs/nfs/write.c | +19 -8

--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -222,7 +222,7 @@
 	clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
 	struct inode *inode = page->mapping->host;
 	struct nfs_page *req;
@@ -241,7 +241,10 @@
 	 * request as dirty (in which case we don't care).
 	 */
 	spin_unlock(&inode->i_lock);
-	ret = nfs_wait_on_request(req);
+	if (!nonblock)
+		ret = nfs_wait_on_request(req);
+	else
+		ret = -EAGAIN;
 	nfs_release_request(req);
 	if (ret != 0)
 		return ERR_PTR(ret);
@@ -259,12 +262,12 @@
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-		struct page *page)
+		struct page *page, bool nonblock)
 {
 	struct nfs_page *req;
 	int ret = 0;
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, nonblock);
 	if (!req)
 		goto out;
 	ret = PTR_ERR(req);
@@ -286,12 +289,20 @@
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
 	struct inode *inode = page->mapping->host;
+	int ret;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
 	nfs_pageio_cond_complete(pgio, page->index);
-	return nfs_page_async_flush(pgio, page);
+	ret = nfs_page_async_flush(pgio, page,
+			wbc->sync_mode == WB_SYNC_NONE ||
+			wbc->nonblocking != 0);
+	if (ret == -EAGAIN) {
+		redirty_page_for_writepage(wbc, page);
+		ret = 0;
+	}
+	return ret;
 }
 
 /*
@@ -1390,7 +1401,7 @@
 	.rpc_release = nfs_commit_release,
 };
 
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int may_wait = how & FLUSH_SYNC;
@@ -1454,7 +1465,7 @@
 	return ret;
 }
 #else
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	return 0;
 }
@@ -1557,7 +1568,7 @@
 
 	nfs_fscache_release_page(page, GFP_KERNEL);
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, false);
 	ret = PTR_ERR(req);
 	if (IS_ERR(req))
 		goto out;
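Taken together, the write.c hunks thread a single nonblock flag from the writepage entry point down to the request-locking helper, giving it a try-lock contract: blocking callers wait for an in-flight request, nonblocking callers fail fast with -EAGAIN. An illustrative recap of how the two classes of callers now behave (this repeats the hunks above in condensed form; it is not additional patch content):

    /* WB_SYNC_NONE / nonblocking writeback: must not stall on a busy request */
    ret = nfs_page_async_flush(pgio, page, true);
    if (ret == -EAGAIN) {
            redirty_page_for_writepage(wbc, page);  /* retry in a later pass */
            ret = 0;
    }

    /* Integrity writeback and the final caller above (page migration,
     * judging by its context) pass false and keep the old blocking path */
    ret = nfs_page_async_flush(pgio, page, false);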
include/linux/nfs_fs.h | +1

--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -493,6 +493,7 @@
 extern int nfs_wb_page(struct inode *inode, struct page* page);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_write_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_write_data *wdata);
 #endif
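With this extern in place, nfs_commit_inode() is callable from the rest of fs/nfs; nfs_release_page() above is its first such user. The how argument is a flag mask: as the write.c hunk shows, the body computes may_wait = how & FLUSH_SYNC, so there are exactly two ways to drive it (illustrative only):

    nfs_commit_inode(inode, FLUSH_SYNC);   /* issue COMMIT, wait for completion */
    nfs_commit_inode(inode, 0);            /* issue COMMIT, return immediately */

The #else stub that returns 0 covers NFSv2-only builds, where writes are already stable and there is no UNSTABLE-write/COMMIT model to flush.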