fs/Kconfig
···
 	depends on INET
 	select LOCKD
 	select SUNRPC
+	select NFS_ACL_SUPPORT if NFS_V3_ACL
 	help
 	  If you are connected to some other (usually local) Unix computer
 	  (using SLIP, PLIP, PPP or Ethernet) and want to mount files residing
···
 	  3 of the NFS protocol.

 	  If unsure, say Y.

+config NFS_V3_ACL
+	bool "Provide client support for the NFSv3 ACL protocol extension"
+	depends on NFS_V3
+	help
+	  Implement the NFSv3 ACL protocol extension for manipulating POSIX
+	  Access Control Lists.  The server should also be compiled with
+	  the NFSv3 ACL protocol extension; see the CONFIG_NFSD_V3_ACL option.
+
+	  If unsure, say N.
+
 config NFS_V4
 	bool "Provide NFSv4 client support (EXPERIMENTAL)"
···
 	select LOCKD
 	select SUNRPC
 	select EXPORTFS
+	select NFS_ACL_SUPPORT if NFSD_V3_ACL || NFSD_V2_ACL
 	help
 	  If you want your Linux box to act as an NFS *server*, so that other
 	  computers on your local network which support NFS can access certain
···
 	  To compile the NFS server support as a module, choose M here: the
 	  module will be called nfsd.  If unsure, say N.

+config NFSD_V2_ACL
+	bool
+	depends on NFSD
+
 config NFSD_V3
 	bool "Provide NFSv3 server support"
 	depends on NFSD
 	help
 	  If you would like to include the NFSv3 server as well as the NFSv2
 	  server, say Y here.  If unsure, say Y.
+
+config NFSD_V3_ACL
+	bool "Provide server support for the NFSv3 ACL protocol extension"
+	depends on NFSD_V3
+	select NFSD_V2_ACL
+	help
+	  Implement the NFSv3 ACL protocol extension for manipulating POSIX
+	  Access Control Lists on exported file systems.  NFS clients should
+	  be compiled with the NFSv3 ACL protocol extension; see the
+	  CONFIG_NFS_V3_ACL option.  If unsure, say N.

 config NFSD_V4
 	bool "Provide NFSv4 server support (EXPERIMENTAL)"
···

 config EXPORTFS
 	tristate
+
+config NFS_ACL_SUPPORT
+	tristate
+	select FS_POSIX_ACL
+
+config NFS_COMMON
+	bool
+	depends on NFSD || NFS_FS
+	default y

 config SUNRPC
 	tristate
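Taken together, NFS_V3_ACL on the client and NFSD_V3_ACL on the server let POSIX Access Control Lists travel over the NFSv3 ACL sideband protocol. From userspace the ACLs then appear through the standard xattr name used by the acl(5) tooling. A minimal sketch, assuming a hypothetical NFSv3 mount at /mnt/nfs:

/* Sketch: probe for a POSIX access ACL on an NFSv3 mount via the
 * "system.posix_acl_access" xattr, which is how libacl's getfacl
 * reads ACLs.  /mnt/nfs/file is a hypothetical path.
 */
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[1024];
	ssize_t len = getxattr("/mnt/nfs/file", "system.posix_acl_access",
			       buf, sizeof(buf));
	if (len < 0)
		perror("getxattr");	/* ENODATA: no ACL; EOPNOTSUPP: no ACL support */
	else
		printf("got %zd bytes of packed ACL data\n", len);
	return 0;
}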
fs/lockd/clntlock.c
···
  * This is the representation of a blocked client lock.
  */
 struct nlm_wait {
-	struct nlm_wait *	b_next;		/* linked list */
+	struct list_head	b_list;		/* linked list */
 	wait_queue_head_t	b_wait;		/* where to wait on */
 	struct nlm_host *	b_host;
 	struct file_lock *	b_lock;		/* local file lock */
···
 	u32			b_status;	/* grant callback status */
 };

-static struct nlm_wait *	nlm_blocked;
+static LIST_HEAD(nlm_blocked);
+
+/*
+ * Queue up a lock for blocking so that the GRANTED request can see it
+ */
+int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl)
+{
+	struct nlm_wait *block;
+
+	BUG_ON(req->a_block != NULL);
+	block = kmalloc(sizeof(*block), GFP_KERNEL);
+	if (block == NULL)
+		return -ENOMEM;
+	block->b_host = host;
+	block->b_lock = fl;
+	init_waitqueue_head(&block->b_wait);
+	block->b_status = NLM_LCK_BLOCKED;
+
+	list_add(&block->b_list, &nlm_blocked);
+	req->a_block = block;
+
+	return 0;
+}
+
+void nlmclnt_finish_block(struct nlm_rqst *req)
+{
+	struct nlm_wait *block = req->a_block;
+
+	if (block == NULL)
+		return;
+	req->a_block = NULL;
+	list_del(&block->b_list);
+	kfree(block);
+}

 /*
  * Block on a lock
  */
-int
-nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
+long nlmclnt_block(struct nlm_rqst *req, long timeout)
 {
-	struct nlm_wait	block, **head;
-	int		err;
-	u32		pstate;
+	struct nlm_wait *block = req->a_block;
+	long ret;

-	block.b_host = host;
-	block.b_lock = fl;
-	init_waitqueue_head(&block.b_wait);
-	block.b_status = NLM_LCK_BLOCKED;
-	block.b_next = nlm_blocked;
-	nlm_blocked = &block;
-
-	/* Remember pseudo nsm state */
-	pstate = host->h_state;
+	/* A borken server might ask us to block even if we didn't
+	 * request it. Just say no!
+	 */
+	if (!req->a_args.block)
+		return -EAGAIN;

 	/* Go to sleep waiting for GRANT callback. Some servers seem
 	 * to lose callbacks, however, so we're going to poll from
···
 	 * a 1 minute timeout would do. See the comment before
 	 * nlmclnt_lock for an explanation.
 	 */
-	sleep_on_timeout(&block.b_wait, 30*HZ);
+	ret = wait_event_interruptible_timeout(block->b_wait,
+			block->b_status != NLM_LCK_BLOCKED,
+			timeout);

-	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
-		if (*head == &block) {
-			*head = block.b_next;
-			break;
-		}
+	if (block->b_status != NLM_LCK_BLOCKED) {
+		req->a_res.status = block->b_status;
+		block->b_status = NLM_LCK_BLOCKED;
 	}

-	if (!signalled()) {
-		*statp = block.b_status;
-		return 0;
-	}
-
-	/* Okay, we were interrupted. Cancel the pending request
-	 * unless the server has rebooted.
-	 */
-	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
-		printk(KERN_NOTICE
-			"lockd: CANCEL call failed (errno %d)\n", -err);
-
-	return -ERESTARTSYS;
+	return ret;
 }

 /*
···
 nlmclnt_grant(struct nlm_lock *lock)
 {
 	struct nlm_wait	*block;
+	u32 res = nlm_lck_denied;

 	/*
 	 * Look up blocked request based on arguments.
 	 * Warning: must not use cookie to match it!
 	 */
-	for (block = nlm_blocked; block; block = block->b_next) {
-		if (nlm_compare_locks(block->b_lock, &lock->fl))
-			break;
+	list_for_each_entry(block, &nlm_blocked, b_list) {
+		if (nlm_compare_locks(block->b_lock, &lock->fl)) {
+			/* Alright, we found a lock. Set the return status
+			 * and wake up the caller
+			 */
+			block->b_status = NLM_LCK_GRANTED;
+			wake_up(&block->b_wait);
+			res = nlm_granted;
+		}
 	}
-
-	/* Ooops, no blocked request found. */
-	if (block == NULL)
-		return nlm_lck_denied;
-
-	/* Alright, we found the lock. Set the return status and
-	 * wake up the caller.
-	 */
-	block->b_status = NLM_LCK_GRANTED;
-	wake_up(&block->b_wait);
-
-	return nlm_granted;
+	return res;
 }

 /*
···
 	host->h_reclaiming = 0;

 	/* Now, wake up all processes that sleep on a blocked lock */
-	for (block = nlm_blocked; block; block = block->b_next) {
+	list_for_each_entry(block, &nlm_blocked, b_list) {
 		if (block->b_host == host) {
 			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
 			wake_up(&block->b_wait);
fs/lockd/clntproc.c (+32 -8)
···

 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 #define NLMCLNT_GRACE_WAIT	(5*HZ)
+#define NLMCLNT_POLL_TIMEOUT	(30*HZ)

 static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
···
 {
 	struct nlm_host	*host = req->a_host;
 	struct nlm_res	*resp = &req->a_res;
-	int		status;
+	long timeout;
+	int status;

 	if (!host->h_monitored && nsm_monitor(host) < 0) {
 		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
···
 		goto out;
 	}

-	do {
-		if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
-			if (resp->status != NLM_LCK_BLOCKED)
-				break;
-			status = nlmclnt_block(host, fl, &resp->status);
-		}
+	if (req->a_args.block) {
+		status = nlmclnt_prepare_block(req, host, fl);
 		if (status < 0)
 			goto out;
-	} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);
+	}
+	for(;;) {
+		status = nlmclnt_call(req, NLMPROC_LOCK);
+		if (status < 0)
+			goto out_unblock;
+		if (resp->status != NLM_LCK_BLOCKED)
+			break;
+		/* Wait on an NLM blocking lock */
+		timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
+		/* Did a reclaimer thread notify us of a server reboot? */
+		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
+			continue;
+		if (resp->status != NLM_LCK_BLOCKED)
+			break;
+		if (timeout >= 0)
+			continue;
+		/* We were interrupted. Send a CANCEL request to the server
+		 * and exit
+		 */
+		status = (int)timeout;
+		goto out_unblock;
+	}

 	if (resp->status == NLM_LCK_GRANTED) {
 		fl->fl_u.nfs_fl.state = host->h_state;
···
 		do_vfs_lock(fl);
 	}
 	status = nlm_stat_to_errno(resp->status);
+out_unblock:
+	nlmclnt_finish_block(req);
+	/* Cancel the blocked request if it is still pending */
+	if (resp->status == NLM_LCK_BLOCKED)
+		nlmclnt_cancel(host, fl);
 out:
 	nlmclnt_release_lockargs(req);
 	return status;
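The loop above leans on the return-value convention of wait_event_interruptible_timeout(), which nlmclnt_block() passes straight through. A summary of how nlmclnt_lock() decodes it, stated as a sketch rather than quoted from the patch:

/* Sketch of how nlmclnt_lock() interprets the nlmclnt_block() result,
 * i.e. the raw wait_event_interruptible_timeout() return value:
 *
 *   ret < 0   interrupted by a signal -> fall through to out_unblock,
 *             where a CANCEL request is sent to the server
 *   ret == 0  timed out with the lock still blocked -> resubmit
 *             NLMPROC_LOCK (this is the 30 second NLMCLNT_POLL_TIMEOUT poll)
 *   ret > 0   woken before the timeout -> b_status carries the result
 *             posted by nlmclnt_grant() or by a reclaimer thread
 */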
fs/lockd/host.c (+3 -5)
···
 			goto forgetit;

 		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
+		xprt->nocong = 1;	/* No congestion control for NLM */
+		xprt->resvport = 1;	/* NLM requires a reserved port */

 		/* Existing NLM servers accept AUTH_UNIX only */
 		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
 					host->h_version, RPC_AUTH_UNIX);
-		if (IS_ERR(clnt)) {
-			xprt_destroy(xprt);
+		if (IS_ERR(clnt))
 			goto forgetit;
-		}
 		clnt->cl_autobind = 1;	/* turn on pmap queries */
-		xprt->nocong = 1;	/* No congestion control for NLM */
-		xprt->resvport = 1;	/* NLM requires a reserved port */

 		host->h_rpcclnt = clnt;
 	}
fs/nfs/dir.c
···
 #include <linux/smp_lock.h>
 #include <linux/namei.h>

+#include "nfs4_fs.h"
 #include "delegation.h"

 #define NFS_PARANOIA 1
···
 static int nfs_rename(struct inode *, struct dentry *,
 		      struct inode *, struct dentry *);
 static int nfs_fsync_dir(struct file *, struct dentry *, int);
+static loff_t nfs_llseek_dir(struct file *, loff_t, int);

 struct file_operations nfs_dir_operations = {
+	.llseek		= nfs_llseek_dir,
 	.read		= generic_read_dir,
 	.readdir	= nfs_readdir,
 	.open		= nfs_opendir,
···
 	.setattr	= nfs_setattr,
 };

+#ifdef CONFIG_NFS_V3
+struct inode_operations nfs3_dir_inode_operations = {
+	.create		= nfs_create,
+	.lookup		= nfs_lookup,
+	.link		= nfs_link,
+	.unlink		= nfs_unlink,
+	.symlink	= nfs_symlink,
+	.mkdir		= nfs_mkdir,
+	.rmdir		= nfs_rmdir,
+	.mknod		= nfs_mknod,
+	.rename		= nfs_rename,
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+	.listxattr	= nfs3_listxattr,
+	.getxattr	= nfs3_getxattr,
+	.setxattr	= nfs3_setxattr,
+	.removexattr	= nfs3_removexattr,
+};
+#endif  /* CONFIG_NFS_V3 */
+
 #ifdef CONFIG_NFS_V4

 static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
···
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
+	.getxattr	= nfs4_getxattr,
+	.setxattr	= nfs4_setxattr,
+	.listxattr	= nfs4_listxattr,
 };

 #endif /* CONFIG_NFS_V4 */
···
 	struct page	*page;
 	unsigned long	page_index;
 	u32		*ptr;
-	u64		target;
+	u64		*dir_cookie;
+	loff_t		current_index;
 	struct nfs_entry *entry;
 	decode_dirent_t	decode;
 	int		plus;
···
 	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
 	/* Ensure consistent page alignment of the data.
 	 * Note: assumes we have exclusive access to this mapping either
-	 * throught inode->i_sem or some other mechanism.
+	 * through inode->i_sem or some other mechanism.
 	 */
-	if (page->index == 0) {
-		invalidate_inode_pages(inode->i_mapping);
-		NFS_I(inode)->readdir_timestamp = timestamp;
-	}
+	if (page->index == 0)
+		invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
 	unlock_page(page);
 	return 0;
  error:
···

 /*
  * Given a pointer to a buffer that has already been filled by a call
- * to readdir, find the next entry.
+ * to readdir, find the next entry with cookie '*desc->dir_cookie'.
  *
  * If the end of the buffer has been reached, return -EAGAIN, if not,
  * return the offset within the buffer of the next entry to be
  * read.
  */
 static inline
-int find_dirent(nfs_readdir_descriptor_t *desc, struct page *page)
+int find_dirent(nfs_readdir_descriptor_t *desc)
 {
 	struct nfs_entry *entry = desc->entry;
 	int		loop_count = 0,
 			status;

 	while((status = dir_decode(desc)) == 0) {
-		dfprintk(VFS, "NFS: found cookie %Lu\n", (long long)entry->cookie);
-		if (entry->prev_cookie == desc->target)
+		dfprintk(VFS, "NFS: found cookie %Lu\n", (unsigned long long)entry->cookie);
+		if (entry->prev_cookie == *desc->dir_cookie)
 			break;
 		if (loop_count++ > 200) {
 			loop_count = 0;
···
 }

 /*
- * Find the given page, and call find_dirent() in order to try to
- * return the next entry.
+ * Given a pointer to a buffer that has already been filled by a call
+ * to readdir, find the entry at offset 'desc->file->f_pos'.
+ *
+ * If the end of the buffer has been reached, return -EAGAIN, if not,
+ * return the offset within the buffer of the next entry to be
+ * read.
+ */
+static inline
+int find_dirent_index(nfs_readdir_descriptor_t *desc)
+{
+	struct nfs_entry *entry = desc->entry;
+	int		loop_count = 0,
+			status;
+
+	for(;;) {
+		status = dir_decode(desc);
+		if (status)
+			break;
+
+		dfprintk(VFS, "NFS: found cookie %Lu at index %Ld\n", (unsigned long long)entry->cookie, desc->current_index);
+
+		if (desc->file->f_pos == desc->current_index) {
+			*desc->dir_cookie = entry->cookie;
+			break;
+		}
+		desc->current_index++;
+		if (loop_count++ > 200) {
+			loop_count = 0;
+			schedule();
+		}
+	}
+	dfprintk(VFS, "NFS: find_dirent_index() returns %d\n", status);
+	return status;
+}
+
+/*
+ * Find the given page, and call find_dirent() or find_dirent_index in
+ * order to try to return the next entry.
  */
 static inline
 int find_dirent_page(nfs_readdir_descriptor_t *desc)
···
 	/* NOTE: Someone else may have changed the READDIRPLUS flag */
 	desc->page = page;
 	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
-	status = find_dirent(desc, page);
+	if (*desc->dir_cookie != 0)
+		status = find_dirent(desc);
+	else
+		status = find_dirent_index(desc);
 	if (status < 0)
 		dir_page_release(desc);
  out:
···
  * Recurse through the page cache pages, and return a
  * filled nfs_entry structure of the next directory entry if possible.
  *
- * The target for the search is 'desc->target'.
+ * The target for the search is '*desc->dir_cookie' if non-0,
+ * 'desc->file->f_pos' otherwise
  */
 static inline
 int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
···
 	int		loop_count = 0;
 	int		res;

-	dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (long long)desc->target);
+	/* Always search-by-index from the beginning of the cache */
+	if (*desc->dir_cookie == 0) {
+		dfprintk(VFS, "NFS: readdir_search_pagecache() searching for offset %Ld\n", (long long)desc->file->f_pos);
+		desc->page_index = 0;
+		desc->entry->cookie = desc->entry->prev_cookie = 0;
+		desc->entry->eof = 0;
+		desc->current_index = 0;
+	} else
+		dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie);
+
 	for (;;) {
 		res = find_dirent_page(desc);
 		if (res != -EAGAIN)
···
 	int		loop_count = 0,
 			res;

-	dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target);
+	dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)entry->cookie);

 	for(;;) {
 		unsigned d_type = DT_UNKNOWN;
···
 	}

 	res = filldir(dirent, entry->name, entry->len,
-			entry->prev_cookie, fileid, d_type);
+			file->f_pos, fileid, d_type);
 	if (res < 0)
 		break;
-	file->f_pos = desc->target = entry->cookie;
+	file->f_pos++;
+	*desc->dir_cookie = entry->cookie;
 	if (dir_decode(desc) != 0) {
 		desc->page_index ++;
 		break;
···
 	dir_page_release(desc);
 	if (dentry != NULL)
 		dput(dentry);
-	dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (long long)desc->target, res);
+	dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (unsigned long long)*desc->dir_cookie, res);
 	return res;
 }
···
 	struct page	*page = NULL;
 	int		status;

-	dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (long long)desc->target);
+	dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie);

 	page = alloc_page(GFP_HIGHUSER);
 	if (!page) {
 		status = -ENOMEM;
 		goto out;
 	}
-	desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->target,
+	desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, *desc->dir_cookie,
 					page,
 					NFS_SERVER(inode)->dtsize,
 					desc->plus);
···
 	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
 	if (desc->error >= 0) {
 		if ((status = dir_decode(desc)) == 0)
-			desc->entry->prev_cookie = desc->target;
+			desc->entry->prev_cookie = *desc->dir_cookie;
 	} else
 		status = -EIO;
 	if (status < 0)
···
 	goto out;
 }

-/* The file offset position is now represented as a true offset into the
- * page cache as is the case in most of the other filesystems.
+/* The file offset position represents the dirent entry number.  A
+   last cookie cache takes care of the common case of reading the
+   whole directory.
  */
 static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
···
 	}

 	/*
-	 * filp->f_pos points to the file offset in the page cache.
-	 * but if the cache has meanwhile been zapped, we need to
-	 * read from the last dirent to revalidate f_pos
-	 * itself.
+	 * filp->f_pos points to the dirent entry number.
+	 * *desc->dir_cookie has the cookie for the next entry. We have
+	 * to either find the entry with the appropriate number or
+	 * revalidate the cookie.
 	 */
 	memset(desc, 0, sizeof(*desc));

 	desc->file = filp;
-	desc->target = filp->f_pos;
+	desc->dir_cookie = &((struct nfs_open_context *)filp->private_data)->dir_cookie;
 	desc->decode = NFS_PROTO(inode)->decode_dirent;
 	desc->plus = NFS_USE_READDIRPLUS(inode);
···

 	while(!desc->entry->eof) {
 		res = readdir_search_pagecache(desc);
+
 		if (res == -EBADCOOKIE) {
 			/* This means either end of directory */
-			if (desc->entry->cookie != desc->target) {
+			if (*desc->dir_cookie && desc->entry->cookie != *desc->dir_cookie) {
 				/* Or that the server has 'lost' a cookie */
 				res = uncached_readdir(desc, dirent, filldir);
 				if (res >= 0)
···
 	if (res < 0)
 		return res;
 	return 0;
+}
+
+loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+{
+	down(&filp->f_dentry->d_inode->i_sem);
+	switch (origin) {
+		case 1:
+			offset += filp->f_pos;
+		case 0:
+			if (offset >= 0)
+				break;
+		default:
+			offset = -EINVAL;
+			goto out;
+	}
+	if (offset != filp->f_pos) {
+		filp->f_pos = offset;
+		((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
+	}
+out:
+	up(&filp->f_dentry->d_inode->i_sem);
+	return offset;
 }

 /*
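With this change, an NFS directory offset is an ordinal entry number rather than an opaque server cookie, and any seek clears the per-open-context dir_cookie so the next readdir re-finds its place through find_dirent_index(). The ordinary telldir()/seekdir() pattern exercises exactly this path; a userspace sketch, with a hypothetical mount point:

/* Sketch: telldir()/seekdir() against an NFS directory.  The offset
 * returned by telldir() is now an entry index rather than a server
 * cookie; seeking resets the cached dir_cookie so the client re-walks
 * the readdir cache to locate the entry.  "/mnt/nfs" is hypothetical.
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir("/mnt/nfs");
	if (dir == NULL)
		return 1;
	readdir(dir);			/* consume one entry */
	long mark = telldir(dir);	/* remember position (entry index) */
	readdir(dir);			/* read further */
	seekdir(dir, mark);		/* rewind: goes through nfs_llseek_dir() */
	struct dirent *d = readdir(dir);
	if (d != NULL)
		printf("entry at mark: %s\n", d->d_name);
	closedir(dir);
	return 0;
}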
fs/nfs/direct.c (+1 -1)
···
 	result = tot_bytes;

 out:
-	nfs_end_data_update_defer(inode);
+	nfs_end_data_update(inode);
 	nfs_writedata_free(wdata);
 	return result;
fs/nfs/file.c (+41 -7)
···
 	.setattr	= nfs_setattr,
 };

+#ifdef CONFIG_NFS_V3
+struct inode_operations nfs3_file_inode_operations = {
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+	.listxattr	= nfs3_listxattr,
+	.getxattr	= nfs3_getxattr,
+	.setxattr	= nfs3_setxattr,
+	.removexattr	= nfs3_removexattr,
+};
+#endif  /* CONFIG_NFS_v3 */
+
 /* Hack for future NFS swap support */
 #ifndef IS_SWAPFILE
 # define IS_SWAPFILE(inode)	(0)
···
 }

 /**
+ * nfs_revalidate_file - Revalidate the page cache & related metadata
+ * @inode - pointer to inode struct
+ * @file - pointer to file
+ */
+static int nfs_revalidate_file(struct inode *inode, struct file *filp)
+{
+	int retval = 0;
+
+	if ((NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
+		retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	nfs_revalidate_mapping(inode, filp->f_mapping);
+	return 0;
+}
+
+/**
  * nfs_revalidate_size - Revalidate the file size
  * @inode - pointer to inode struct
  * @file - pointer to struct file
···
 		goto force_reval;
 	if (nfsi->npages != 0)
 		return 0;
-	return nfs_revalidate_inode(server, inode);
+	if (!(NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
+		return 0;
 force_reval:
 	return __nfs_revalidate_inode(server, inode);
 }
···
 		dentry->d_parent->d_name.name, dentry->d_name.name,
 		(unsigned long) count, (unsigned long) pos);

-	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	result = nfs_revalidate_file(inode, iocb->ki_filp);
 	if (!result)
 		result = generic_file_aio_read(iocb, buf, count, pos);
 	return result;
···
 		dentry->d_parent->d_name.name, dentry->d_name.name,
 		(unsigned long) count, (unsigned long long) *ppos);

-	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	res = nfs_revalidate_file(inode, filp);
 	if (!res)
 		res = generic_file_sendfile(filp, ppos, count, actor, target);
 	return res;
···
 	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name);

-	status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	status = nfs_revalidate_file(inode, file);
 	if (!status)
 		status = generic_file_mmap(file, vma);
 	return status;
···
 	result = -EBUSY;
 	if (IS_SWAPFILE(inode))
 		goto out_swapfile;
-	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
-	if (result)
-		goto out;
+	/*
+	 * O_APPEND implies that we must revalidate the file length.
+	 */
+	if (iocb->ki_filp->f_flags & O_APPEND) {
+		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+		if (result)
+			goto out;
+	}
+	nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);

 	result = count;
 	if (!count)
fs/nfs/nfs4_fs.h (new file)
+/*
+ *  linux/fs/nfs/nfs4_fs.h
+ *
+ *  Copyright (C) 2005 Trond Myklebust
+ *
+ *  NFSv4-specific filesystem definitions and declarations
+ */
+
+#ifndef __LINUX_FS_NFS_NFS4_FS_H
+#define __LINUX_FS_NFS_NFS4_FS_H
+
+#ifdef CONFIG_NFS_V4
+
+struct idmap;
+
+/*
+ * In a seqid-mutating op, this macro controls which error return
+ * values trigger incrementation of the seqid.
+ *
+ * from rfc 3010:
+ * The client MUST monotonically increment the sequence number for the
+ * CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
+ * operations.  This is true even in the event that the previous
+ * operation that used the sequence number received an error.  The only
+ * exception to this rule is if the previous operation received one of
+ * the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
+ * NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
+ * NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
+ *
+ */
+#define seqid_mutating_err(err)       \
+(((err) != NFSERR_STALE_CLIENTID) &&  \
+ ((err) != NFSERR_STALE_STATEID) &&   \
+ ((err) != NFSERR_BAD_STATEID) &&     \
+ ((err) != NFSERR_BAD_SEQID) &&       \
+ ((err) != NFSERR_BAD_XDR) &&         \
+ ((err) != NFSERR_RESOURCE) &&        \
+ ((err) != NFSERR_NOFILEHANDLE))
+
+enum nfs4_client_state {
+	NFS4CLNT_OK  = 0,
+};
+
+/*
+ * The nfs4_client identifies our client state to the server.
+ */
+struct nfs4_client {
+	struct list_head	cl_servers;	/* Global list of servers */
+	struct in_addr		cl_addr;	/* Server identifier */
+	u64			cl_clientid;	/* constant */
+	nfs4_verifier		cl_confirm;
+	unsigned long		cl_state;
+
+	u32			cl_lockowner_id;
+
+	/*
+	 * The following rwsem ensures exclusive access to the server
+	 * while we recover the state following a lease expiration.
+	 */
+	struct rw_semaphore	cl_sem;
+
+	struct list_head	cl_delegations;
+	struct list_head	cl_state_owners;
+	struct list_head	cl_unused;
+	int			cl_nunused;
+	spinlock_t		cl_lock;
+	atomic_t		cl_count;
+
+	struct rpc_clnt *	cl_rpcclient;
+	struct rpc_cred *	cl_cred;
+
+	struct list_head	cl_superblocks;	/* List of nfs_server structs */
+
+	unsigned long		cl_lease_time;
+	unsigned long		cl_last_renewal;
+	struct work_struct	cl_renewd;
+	struct work_struct	cl_recoverd;
+
+	wait_queue_head_t	cl_waitq;
+	struct rpc_wait_queue	cl_rpcwaitq;
+
+	/* used for the setclientid verifier */
+	struct timespec		cl_boot_time;
+
+	/* idmapper */
+	struct idmap *		cl_idmap;
+
+	/* Our own IP address, as a null-terminated string.
+	 * This is used to generate the clientid, and the callback address.
+	 */
+	char			cl_ipaddr[16];
+	unsigned char		cl_id_uniquifier;
+};
+
+/*
+ * NFS4 state_owners and lock_owners are simply labels for ordered
+ * sequences of RPC calls. Their sole purpose is to provide once-only
+ * semantics by allowing the server to identify replayed requests.
+ *
+ * The ->so_sema is held during all state_owner seqid-mutating operations:
+ * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
+ * so_seqid.
+ */
+struct nfs4_state_owner {
+	struct list_head     so_list;	 /* per-clientid list of state_owners */
+	struct nfs4_client   *so_client;
+	u32                  so_id;      /* 32-bit identifier, unique */
+	struct semaphore     so_sema;
+	u32                  so_seqid;   /* protected by so_sema */
+	atomic_t	     so_count;
+
+	struct rpc_cred	     *so_cred;	 /* Associated cred */
+	struct list_head     so_states;
+	struct list_head     so_delegations;
+};
+
+/*
+ * struct nfs4_state maintains the client-side state for a given
+ * (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
+ *
+ * OPEN:
+ * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
+ * we need to know how many files are open for reading or writing on a
+ * given inode. This information too is stored here.
+ *
+ * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
+ */
+
+struct nfs4_lock_state {
+	struct list_head	ls_locks;	/* Other lock stateids */
+	struct nfs4_state *	ls_state;	/* Pointer to open state */
+	fl_owner_t		ls_owner;	/* POSIX lock owner */
+#define NFS_LOCK_INITIALIZED 1
+	int			ls_flags;
+	u32			ls_seqid;
+	u32			ls_id;
+	nfs4_stateid		ls_stateid;
+	atomic_t		ls_count;
+};
+
+/* bits for nfs4_state->flags */
+enum {
+	LK_STATE_IN_USE,
+	NFS_DELEGATED_STATE,
+};
+
+struct nfs4_state {
+	struct list_head open_states;	/* List of states for the same state_owner */
+	struct list_head inode_states;	/* List of states for the same inode */
+	struct list_head lock_states;	/* List of subservient lock stateids */
+
+	struct nfs4_state_owner *owner;	/* Pointer to the open owner */
+	struct inode *inode;		/* Pointer to the inode */
+
+	unsigned long flags;		/* Do we hold any locks? */
+	struct semaphore lock_sema;	/* Serializes file locking operations */
+	spinlock_t state_lock;		/* Protects the lock_states list */
+
+	nfs4_stateid stateid;
+
+	unsigned int nreaders;
+	unsigned int nwriters;
+	int state;			/* State on the server (R,W, or RW) */
+	atomic_t count;
+};
+
+
+struct nfs4_exception {
+	long timeout;
+	int retry;
+};
+
+struct nfs4_state_recovery_ops {
+	int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
+	int (*recover_lock)(struct nfs4_state *, struct file_lock *);
+};
+
+extern struct dentry_operations nfs4_dentry_operations;
+extern struct inode_operations nfs4_dir_inode_operations;
+
+/* inode.c */
+extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
+extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
+extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
+
+
+/* nfs4proc.c */
+extern int nfs4_map_errors(int err);
+extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
+extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
+extern int nfs4_proc_async_renew(struct nfs4_client *);
+extern int nfs4_proc_renew(struct nfs4_client *);
+extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
+extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
+extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
+
+extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
+extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
+
+extern const u32 nfs4_fattr_bitmap[2];
+extern const u32 nfs4_statfs_bitmap[2];
+extern const u32 nfs4_pathconf_bitmap[2];
+extern const u32 nfs4_fsinfo_bitmap[2];
+
+/* nfs4renewd.c */
+extern void nfs4_schedule_state_renewal(struct nfs4_client *);
+extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
+extern void nfs4_kill_renewd(struct nfs4_client *);
+extern void nfs4_renew_state(void *);
+
+/* nfs4state.c */
+extern void init_nfsv4_state(struct nfs_server *);
+extern void destroy_nfsv4_state(struct nfs_server *);
+extern struct nfs4_client *nfs4_get_client(struct in_addr *);
+extern void nfs4_put_client(struct nfs4_client *clp);
+extern int nfs4_init_client(struct nfs4_client *clp);
+extern struct nfs4_client *nfs4_find_client(struct in_addr *);
+extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
+
+extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
+extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
+extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
+extern void nfs4_put_open_state(struct nfs4_state *);
+extern void nfs4_close_state(struct nfs4_state *, mode_t);
+extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
+extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
+extern void nfs4_schedule_state_recovery(struct nfs4_client *);
+extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
+extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
+extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+
+extern const nfs4_stateid zero_stateid;
+
+/* nfs4xdr.c */
+extern uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus);
+extern struct rpc_procinfo nfs4_procedures[];
+
+struct nfs4_mount_data;
+
+/* callback_xdr.c */
+extern struct svc_version nfs4_callback_version1;
+
+#else
+
+#define init_nfsv4_state(server)  do { } while (0)
+#define destroy_nfsv4_state(server)       do { } while (0)
+#define nfs4_put_state_owner(inode, owner) do { } while (0)
+#define nfs4_put_open_state(state) do { } while (0)
+#define nfs4_close_state(a, b) do { } while (0)
+
+#endif /* CONFIG_NFS_V4 */
+#endif /* __LINUX_FS_NFS_NFS4_FS.H */
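For context, seqid_mutating_err() is consumed by the seqid incrementers declared above (nfs4_increment_seqid() and nfs4_increment_lock_seqid(), defined in nfs4state.c). A hedged sketch of the intended use, illustrative rather than quoted from this patch:

/* Illustrative only: bump the open-owner seqid whenever the server is
 * deemed to have seen the request, per the RFC 3010 rule encoded in
 * seqid_mutating_err().  status is 0 or a negative NFSv4 error.
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		sp->so_seqid++;
}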
fs/nfs/nfs4proc.c (+339 -90)
···48#include <linux/smp_lock.h>49#include <linux/namei.h>50051#include "delegation.h"5253#define NFSDBG_FACILITY NFSDBG_PROC···62static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);63extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);64extern struct rpc_procinfo nfs4_procedures[];65-66-extern nfs4_stateid zero_stateid;6768/* Prevent leaks of NFSv4 errors into userland */69int nfs4_map_errors(int err)···103 | FATTR4_WORD1_SPACE_TOTAL104};105106-u32 nfs4_pathconf_bitmap[2] = {107 FATTR4_WORD0_MAXLINK108 | FATTR4_WORD0_MAXNAME,109 0···123124 BUG_ON(readdir->count < 80);125 if (cookie > 2) {126- readdir->cookie = (cookie > 2) ? cookie : 0;127 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));128 return;129 }···269 int err;270 do {271 err = _nfs4_open_reclaim(sp, state);272- switch (err) {273- case 0:274- case -NFS4ERR_STALE_CLIENTID:275- case -NFS4ERR_STALE_STATEID:276- case -NFS4ERR_EXPIRED:277- return err;278- }279- err = nfs4_handle_exception(server, err, &exception);280 } while (exception.retry);281 return err;282}···503 goto out_nodeleg;504}50500000000000000506static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)507{508 struct nfs_inode *nfsi = NFS_I(state->inode);···529 continue;530 get_nfs_open_context(ctx);531 spin_unlock(&state->inode->i_lock);532- status = _nfs4_open_expired(sp, state, ctx->dentry);533 put_nfs_open_context(ctx);534 return status;535 }···756757 fattr->valid = 0;758759- if (state != NULL)760 msg.rpc_cred = state->owner->so_cred;761- if (sattr->ia_valid & ATTR_SIZE)762- nfs4_copy_stateid(&arg.stateid, state, NULL);763- else764 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));765766 return rpc_call_sync(server->client, &msg, 0);···1123nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,1124 struct iattr *sattr)1125{1126- struct inode * inode = dentry->d_inode;1127- int size_change = sattr->ia_valid & ATTR_SIZE;1128- struct nfs4_state *state = NULL;1129- int need_iput = 0;1130 int status;11311132 fattr->valid = 0;11331134- if (size_change) {1135- struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);1136- if (IS_ERR(cred))1137- return PTR_ERR(cred);0000001138 state = nfs4_find_state(inode, cred, FMODE_WRITE);1139- if (state == NULL) {1140- state = nfs4_open_delegated(dentry->d_inode,1141- FMODE_WRITE, cred);1142- if (IS_ERR(state))1143- state = nfs4_do_open(dentry->d_parent->d_inode,1144- dentry, FMODE_WRITE,1145- NULL, cred);1146- need_iput = 1;1147- }1148- put_rpccred(cred);1149- if (IS_ERR(state))1150- return PTR_ERR(state);1151-1152- if (state->inode != inode) {1153- printk(KERN_WARNING "nfs: raced in setattr (%p != %p), returning -EIO\n", inode, state->inode);1154- status = -EIO;1155- goto out;1156- }1157 }01158 status = nfs4_do_setattr(NFS_SERVER(inode), fattr,1159 NFS_FH(inode), sattr, state);1160-out:1161- if (state) {1162- inode = state->inode;1163 nfs4_close_state(state, FMODE_WRITE);1164- if (need_iput)1165- iput(inode);1166- }1167 return status;1168}1169···1722 };1723 int status;172400001725 lock_kernel();1726 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);1727 res.pgbase = args.pgbase;···1733 if (status == 0)1734 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);1735 unlock_kernel();01736 return status;1737}1738···2159 return 
0;2160}216100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002162static int2163nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)2164{···2631 down_read(&clp->cl_sem);2632 nlo.clientid = clp->cl_clientid;2633 down(&state->lock_sema);2634- lsp = nfs4_find_lock_state(state, request->fl_owner);2635- if (lsp)2636- nlo.id = lsp->ls_id; 2637- else {2638- spin_lock(&clp->cl_lock);2639- nlo.id = nfs4_alloc_lockowner_id(clp);2640- spin_unlock(&clp->cl_lock);2641- }2642 arg.u.lockt = &nlo;2643 status = rpc_call_sync(server->client, &msg, 0);2644 if (!status) {···2656 request->fl_pid = 0;2657 status = 0;2658 }2659- if (lsp)2660- nfs4_put_lock_state(lsp);2661 up(&state->lock_sema);2662 up_read(&clp->cl_sem);2663 return status;···2716 };2717 struct nfs4_lock_state *lsp;2718 struct nfs_locku_opargs luargs;2719- int status = 0;27202721 down_read(&clp->cl_sem);2722 down(&state->lock_sema);2723- lsp = nfs4_find_lock_state(state, request->fl_owner);2724- if (!lsp)2725 goto out;02726 /* We might have lost the locks! */2727- if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {2728- luargs.seqid = lsp->ls_seqid;2729- memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));2730- arg.u.locku = &luargs;2731- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);2732- nfs4_increment_lock_seqid(status, lsp);2733- }27342735- if (status == 0) {2736 memcpy(&lsp->ls_stateid, &res.u.stateid, 2737 sizeof(lsp->ls_stateid));2738- nfs4_notify_unlck(state, request, lsp);2739- }2740- nfs4_put_lock_state(lsp);2741out:2742 up(&state->lock_sema);2743 if (status == 0)···2761{2762 struct inode *inode = state->inode;2763 struct nfs_server *server = NFS_SERVER(inode);2764- struct nfs4_lock_state *lsp;2765 struct nfs_lockargs arg = {2766 .fh = NFS_FH(inode),2767 .type = nfs4_lck_type(cmd, request),···2783 };2784 int status;27852786- lsp = nfs4_get_lock_state(state, request->fl_owner);2787- if (lsp == NULL)2788- return -ENOMEM;2789 if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {2790 struct nfs4_state_owner *owner = state->owner;2791 struct nfs_open_to_lock otl = {···2804 * seqid mutating errors */2805 nfs4_increment_seqid(status, owner);2806 up(&owner->so_sema);00002807 } else {2808 struct nfs_exist_lock el = {2809 .seqid = lsp->ls_seqid,2810 };2811 memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));2812 largs.u.exist_lock = ⪙2813- largs.new_lock_owner = 0;2814 arg.u.lock = &largs;2815 status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);002816 }2817- /* increment seqid on success, and * seqid mutating errors*/2818- nfs4_increment_lock_seqid(status, lsp);2819 /* save the returned stateid. 
*/2820- if (status == 0) {2821 memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));2822- lsp->ls_flags |= NFS_LOCK_INITIALIZED;2823- if (!reclaim)2824- nfs4_notify_setlk(state, request, lsp);2825- } else if (status == -NFS4ERR_DENIED)2826 status = -EAGAIN;2827- nfs4_put_lock_state(lsp);2828 return status;2829}28302831static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)2832{2833- return _nfs4_do_setlk(state, F_SETLK, request, 1);00000000002834}28352836static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)2837{2838- return _nfs4_do_setlk(state, F_SETLK, request, 0);00000000002839}28402841static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)···28642865 down_read(&clp->cl_sem);2866 down(&state->lock_sema);2867- status = _nfs4_do_setlk(state, cmd, request, 0);002868 up(&state->lock_sema);2869 if (status == 0) {2870 /* Note: we always want to sleep here! */···2924 if (signalled())2925 break;2926 } while(status < 0);2927-2928 return status;000000000000000000000000000000000000000000002929}29302931struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {···2981 .recover_lock = nfs4_lock_expired,2982};29830000000002984struct nfs_rpc_ops nfs_v4_clientops = {2985 .version = 4, /* protocol version */2986 .dentry_ops = &nfs4_dentry_operations,2987 .dir_inode_ops = &nfs4_dir_inode_operations,02988 .getroot = nfs4_proc_get_root,2989 .getattr = nfs4_proc_getattr,2990 .setattr = nfs4_proc_setattr,···3025 .file_open = nfs4_proc_file_open,3026 .file_release = nfs4_proc_file_release,3027 .lock = nfs4_proc_lock,03028};30293030/*
···48#include <linux/smp_lock.h>49#include <linux/namei.h>5051+#include "nfs4_fs.h"52#include "delegation.h"5354#define NFSDBG_FACILITY NFSDBG_PROC···61static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);62extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);63extern struct rpc_procinfo nfs4_procedures[];006465/* Prevent leaks of NFSv4 errors into userland */66int nfs4_map_errors(int err)···104 | FATTR4_WORD1_SPACE_TOTAL105};106107+const u32 nfs4_pathconf_bitmap[2] = {108 FATTR4_WORD0_MAXLINK109 | FATTR4_WORD0_MAXNAME,110 0···124125 BUG_ON(readdir->count < 80);126 if (cookie > 2) {127+ readdir->cookie = cookie;128 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));129 return;130 }···270 int err;271 do {272 err = _nfs4_open_reclaim(sp, state);273+ if (err != -NFS4ERR_DELAY)274+ break;275+ nfs4_handle_exception(server, err, &exception);00000276 } while (exception.retry);277 return err;278}···509 goto out_nodeleg;510}511512+static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)513+{514+ struct nfs_server *server = NFS_SERVER(dentry->d_inode);515+ struct nfs4_exception exception = { };516+ int err;517+518+ do {519+ err = _nfs4_open_expired(sp, state, dentry);520+ if (err == -NFS4ERR_DELAY)521+ nfs4_handle_exception(server, err, &exception);522+ } while (exception.retry);523+ return err;524+}525+526static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)527{528 struct nfs_inode *nfsi = NFS_I(state->inode);···521 continue;522 get_nfs_open_context(ctx);523 spin_unlock(&state->inode->i_lock);524+ status = nfs4_do_open_expired(sp, state, ctx->dentry);525 put_nfs_open_context(ctx);526 return status;527 }···748749 fattr->valid = 0;750751+ if (state != NULL) {752 msg.rpc_cred = state->owner->so_cred;753+ nfs4_copy_stateid(&arg.stateid, state, current->files);754+ } else0755 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));756757 return rpc_call_sync(server->client, &msg, 0);···1116nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,1117 struct iattr *sattr)1118{1119+ struct rpc_cred *cred;1120+ struct inode *inode = dentry->d_inode;1121+ struct nfs4_state *state;01122 int status;11231124 fattr->valid = 0;11251126+ cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);1127+ if (IS_ERR(cred))1128+ return PTR_ERR(cred);1129+ /* Search for an existing WRITE delegation first */1130+ state = nfs4_open_delegated(inode, FMODE_WRITE, cred);1131+ if (!IS_ERR(state)) {1132+ /* NB: nfs4_open_delegated() bumps the inode->i_count */1133+ iput(inode);1134+ } else {1135+ /* Search for an existing open(O_WRITE) stateid */1136 state = nfs4_find_state(inode, cred, FMODE_WRITE);0000000000000000001137 }1138+1139 status = nfs4_do_setattr(NFS_SERVER(inode), fattr,1140 NFS_FH(inode), sattr, state);1141+ if (state != NULL)001142 nfs4_close_state(state, FMODE_WRITE);1143+ put_rpccred(cred);001144 return status;1145}1146···1731 };1732 int status;17331734+ dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __FUNCTION__,1735+ dentry->d_parent->d_name.name,1736+ dentry->d_name.name,1737+ (unsigned long long)cookie);1738 lock_kernel();1739 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);1740 res.pgbase = args.pgbase;···1738 if (status == 0)1739 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);1740 unlock_kernel();1741+ dprintk("%s: returns %d\n", __FUNCTION__, status);1742 return 
status;1743}1744···2163 return 0;2164}21652166+static inline int nfs4_server_supports_acls(struct nfs_server *server)2167+{2168+ return (server->caps & NFS_CAP_ACLS)2169+ && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)2170+ && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);2171+}2172+2173+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that2174+ * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on2175+ * the stack.2176+ */2177+#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)2178+2179+static void buf_to_pages(const void *buf, size_t buflen,2180+ struct page **pages, unsigned int *pgbase)2181+{2182+ const void *p = buf;2183+2184+ *pgbase = offset_in_page(buf);2185+ p -= *pgbase;2186+ while (p < buf + buflen) {2187+ *(pages++) = virt_to_page(p);2188+ p += PAGE_CACHE_SIZE;2189+ }2190+}2191+2192+struct nfs4_cached_acl {2193+ int cached;2194+ size_t len;2195+ char data[0];2196+};2197+2198+static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)2199+{2200+ struct nfs_inode *nfsi = NFS_I(inode);2201+2202+ spin_lock(&inode->i_lock);2203+ kfree(nfsi->nfs4_acl);2204+ nfsi->nfs4_acl = acl;2205+ spin_unlock(&inode->i_lock);2206+}2207+2208+static void nfs4_zap_acl_attr(struct inode *inode)2209+{2210+ nfs4_set_cached_acl(inode, NULL);2211+}2212+2213+static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)2214+{2215+ struct nfs_inode *nfsi = NFS_I(inode);2216+ struct nfs4_cached_acl *acl;2217+ int ret = -ENOENT;2218+2219+ spin_lock(&inode->i_lock);2220+ acl = nfsi->nfs4_acl;2221+ if (acl == NULL)2222+ goto out;2223+ if (buf == NULL) /* user is just asking for length */2224+ goto out_len;2225+ if (acl->cached == 0)2226+ goto out;2227+ ret = -ERANGE; /* see getxattr(2) man page */2228+ if (acl->len > buflen)2229+ goto out;2230+ memcpy(buf, acl->data, acl->len);2231+out_len:2232+ ret = acl->len;2233+out:2234+ spin_unlock(&inode->i_lock);2235+ return ret;2236+}2237+2238+static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)2239+{2240+ struct nfs4_cached_acl *acl;2241+2242+ if (buf && acl_len <= PAGE_SIZE) {2243+ acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);2244+ if (acl == NULL)2245+ goto out;2246+ acl->cached = 1;2247+ memcpy(acl->data, buf, acl_len);2248+ } else {2249+ acl = kmalloc(sizeof(*acl), GFP_KERNEL);2250+ if (acl == NULL)2251+ goto out;2252+ acl->cached = 0;2253+ }2254+ acl->len = acl_len;2255+out:2256+ nfs4_set_cached_acl(inode, acl);2257+}2258+2259+static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)2260+{2261+ struct page *pages[NFS4ACL_MAXPAGES];2262+ struct nfs_getaclargs args = {2263+ .fh = NFS_FH(inode),2264+ .acl_pages = pages,2265+ .acl_len = buflen,2266+ };2267+ size_t resp_len = buflen;2268+ void *resp_buf;2269+ struct rpc_message msg = {2270+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],2271+ .rpc_argp = &args,2272+ .rpc_resp = &resp_len,2273+ };2274+ struct page *localpage = NULL;2275+ int ret;2276+2277+ if (buflen < PAGE_SIZE) {2278+ /* As long as we're doing a round trip to the server anyway,2279+ * let's be prepared for a page of acl data. 
	 */
+		localpage = alloc_page(GFP_KERNEL);
+		if (localpage == NULL)
+			return -ENOMEM;
+		resp_buf = page_address(localpage);
+		args.acl_pages[0] = localpage;
+		args.acl_pgbase = 0;
+		args.acl_len = PAGE_SIZE;
+	} else {
+		resp_buf = buf;
+		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+	}
+	ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	if (ret)
+		goto out_free;
+	if (resp_len > args.acl_len)
+		nfs4_write_cached_acl(inode, NULL, resp_len);
+	else
+		nfs4_write_cached_acl(inode, resp_buf, resp_len);
+	if (buf) {
+		ret = -ERANGE;
+		if (resp_len > buflen)
+			goto out_free;
+		if (localpage)
+			memcpy(buf, resp_buf, resp_len);
+	}
+	ret = resp_len;
+out_free:
+	if (localpage)
+		__free_page(localpage);
+	return ret;
+}
+
+static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	int ret;
+
+	if (!nfs4_server_supports_acls(server))
+		return -EOPNOTSUPP;
+	ret = nfs_revalidate_inode(server, inode);
+	if (ret < 0)
+		return ret;
+	ret = nfs4_read_cached_acl(inode, buf, buflen);
+	if (ret != -ENOENT)
+		return ret;
+	return nfs4_get_acl_uncached(inode, buf, buflen);
+}
+
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct page *pages[NFS4ACL_MAXPAGES];
+	struct nfs_setaclargs arg = {
+		.fh = NFS_FH(inode),
+		.acl_pages = pages,
+		.acl_len = buflen,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
+		.rpc_argp = &arg,
+		.rpc_resp = NULL,
+	};
+	int ret;
+
+	if (!nfs4_server_supports_acls(server))
+		return -EOPNOTSUPP;
+	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
+	ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
+	if (ret == 0)
+		nfs4_write_cached_acl(inode, buf, buflen);
+	return ret;
+}
+
 static int
 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
 {
···
 	down_read(&clp->cl_sem);
 	nlo.clientid = clp->cl_clientid;
 	down(&state->lock_sema);
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
+		goto out;
+	lsp = request->fl_u.nfs4_fl.owner;
+	nlo.id = lsp->ls_id;
 	arg.u.lockt = &nlo;
 	status = rpc_call_sync(server->client, &msg, 0);
 	if (!status) {
···
 		request->fl_pid = 0;
 		status = 0;
 	}
+out:
 	up(&state->lock_sema);
 	up_read(&clp->cl_sem);
 	return status;
···
 	};
 	struct nfs4_lock_state *lsp;
 	struct nfs_locku_opargs luargs;
+	int status;

 	down_read(&clp->cl_sem);
 	down(&state->lock_sema);
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
 		goto out;
+	lsp = request->fl_u.nfs4_fl.owner;
 	/* We might have lost the locks! */
+	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
+		goto out;
+	luargs.seqid = lsp->ls_seqid;
+	memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
+	arg.u.locku = &luargs;
+	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_lock_seqid(status, lsp);

+	if (status == 0)
 		memcpy(&lsp->ls_stateid, &res.u.stateid,
 				sizeof(lsp->ls_stateid));
 out:
 	up(&state->lock_sema);
 	if (status == 0)
···
 {
 	struct inode *inode = state->inode;
 	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
 	struct nfs_lockargs arg = {
 		.fh = NFS_FH(inode),
 		.type = nfs4_lck_type(cmd, request),
···
 	};
 	int status;

 	if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
 		struct nfs4_state_owner *owner = state->owner;
 		struct nfs_open_to_lock otl = {
···
 		 * seqid mutating errors */
 		nfs4_increment_seqid(status, owner);
 		up(&owner->so_sema);
+		if (status == 0) {
+			lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+			lsp->ls_seqid++;
+		}
 	} else {
 		struct nfs_exist_lock el = {
 			.seqid = lsp->ls_seqid,
 		};
 		memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
 		largs.u.exist_lock = &el;
 		arg.u.lock = &largs;
 		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+		/* increment seqid on success, and seqid mutating errors */
+		nfs4_increment_lock_seqid(status, lsp);
 	}
 	/* save the returned stateid. */
+	if (status == 0)
 		memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
+	else if (status == -NFS4ERR_DENIED)
 		status = -EAGAIN;
 	return status;
 }

 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
 {
+	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = _nfs4_do_setlk(state, F_SETLK, request, 1);
+		if (err != -NFS4ERR_DELAY)
+			break;
+		nfs4_handle_exception(server, err, &exception);
+	} while (exception.retry);
+	return err;
 }

 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
 {
+	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs4_exception exception = { };
+	int err;
+
+	do {
+		err = _nfs4_do_setlk(state, F_SETLK, request, 0);
+		if (err != -NFS4ERR_DELAY)
+			break;
+		nfs4_handle_exception(server, err, &exception);
+	} while (exception.retry);
+	return err;
 }

 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
···

 	down_read(&clp->cl_sem);
 	down(&state->lock_sema);
+	status = nfs4_set_lock_state(state, request);
+	if (status == 0)
+		status = _nfs4_do_setlk(state, cmd, request, 0);
 	up(&state->lock_sema);
 	if (status == 0) {
 		/* Note: we always want to sleep here! */
···
 		if (signalled())
 			break;
 	} while(status < 0);
 	return status;
+}
+
+
+#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
+
+int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
+		size_t buflen, int flags)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
+		return -EOPNOTSUPP;
+
+	if (!S_ISREG(inode->i_mode) &&
+	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
+		return -EPERM;
+
+	return nfs4_proc_set_acl(inode, buf, buflen);
+}
+
+/* The getxattr man page suggests returning -ENODATA for unknown attributes,
+ * and that's what we'll do for e.g. user attributes that haven't been set.
+ * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
+ * attributes in kernel-managed attribute namespaces. */
+ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
+		size_t buflen)
+{
+	struct inode *inode = dentry->d_inode;
+
+	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
+		return -EOPNOTSUPP;
+
+	return nfs4_proc_get_acl(inode, buf, buflen);
+}
+
+ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
+{
+	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
+
+	if (buf && buflen < len)
+		return -ERANGE;
+	if (buf)
+		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
+	return len;
 }

 struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
···
 	.recover_lock	= nfs4_lock_expired,
 };

+static struct inode_operations nfs4_file_inode_operations = {
+	.permission	= nfs_permission,
+	.getattr	= nfs_getattr,
+	.setattr	= nfs_setattr,
+	.getxattr	= nfs4_getxattr,
+	.setxattr	= nfs4_setxattr,
+	.listxattr	= nfs4_listxattr,
+};
+
 struct nfs_rpc_ops nfs_v4_clientops = {
 	.version	= 4,			/* protocol version */
 	.dentry_ops	= &nfs4_dentry_operations,
 	.dir_inode_ops	= &nfs4_dir_inode_operations,
+	.file_inode_ops	= &nfs4_file_inode_operations,
 	.getroot	= nfs4_proc_get_root,
 	.getattr	= nfs4_proc_getattr,
 	.setattr	= nfs4_proc_setattr,
···
 	.file_open	= nfs4_proc_file_open,
 	.file_release	= nfs4_proc_file_release,
 	.lock		= nfs4_proc_lock,
+	.clear_acl_cache = nfs4_zap_acl_attr,
 };

 /*
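
The xattr entry points above expose the raw, XDR-encoded NFSv4 ACL to userspace under the name "system.nfs4_acl". As an illustration only (not part of the patch), a userspace program can size and fetch it with the standard xattr syscalls; the two-step getxattr() call mirrors the -ERANGE handling in nfs4_proc_get_acl():

	/* Illustrative userspace fetch of a file's NFSv4 ACL. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/xattr.h>

	int main(int argc, char **argv)
	{
		ssize_t len;
		void *acl;

		if (argc != 2)
			return 1;
		len = getxattr(argv[1], "system.nfs4_acl", NULL, 0);	/* size query */
		if (len < 0) {
			perror("getxattr");
			return 1;
		}
		acl = malloc(len);
		if (acl == NULL)
			return 1;
		len = getxattr(argv[1], "system.nfs4_acl", acl, len);	/* real fetch */
		if (len < 0) {
			perror("getxattr");
			return 1;
		}
		printf("%zd bytes of XDR-encoded NFSv4 ACL\n", len);
		free(acl);
		return 0;
	}
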
···
 	smp_mb__before_clear_bit();
 	clear_bit(PG_BUSY, &req->wb_flags);
 	smp_mb__after_clear_bit();
-	wake_up_all(&req->wb_context->waitq);
 	nfs_release_request(req);
 }

 /**
···
 	nfs_page_free(req);
 }

-/**
- * nfs_list_add_request - Insert a request into a sorted list
- * @req: request
- * @head: head of list into which to insert the request.
- *
- * Note that the wb_list is sorted by page index in order to facilitate
- * coalescing of requests.
- * We use an insertion sort that is optimized for the case of appended
- * writes.
- */
-void
-nfs_list_add_request(struct nfs_page *req, struct list_head *head)
 {
-	struct list_head *pos;

-#ifdef NFS_PARANOIA
-	if (!list_empty(&req->wb_list)) {
-		printk(KERN_ERR "NFS: Add to list failed!\n");
-		BUG();
-	}
-#endif
-	list_for_each_prev(pos, head) {
-		struct nfs_page *p = nfs_list_entry(pos);
-		if (p->wb_index < req->wb_index)
-			break;
-	}
-	list_add(&req->wb_list, pos);
-	req->wb_list_head = head;
 }

 /**
···
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);

-	if (!NFS_WBACK_BUSY(req))
-		return 0;
-	return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
 }

 /**
···
 	return npages;
 }

 /**
  * nfs_scan_list - Scan a list for matching requests
  * @head: One of the NFS inode request lists
···
 		if (req->wb_index > idx_end)
 			break;

-		if (!nfs_lock_request(req))
 			continue;
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, dst);
···
 	smp_mb__before_clear_bit();
 	clear_bit(PG_BUSY, &req->wb_flags);
 	smp_mb__after_clear_bit();
+	wake_up_bit(&req->wb_flags, PG_BUSY);
 	nfs_release_request(req);
+}
+
+/**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req: request
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	if (!nfs_lock_request(req))
+		return 0;
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	spin_lock(&nfsi->req_lock);
+	radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	spin_unlock(&nfsi->req_lock);
+	nfs_unlock_request(req);
 }

 /**
···
 	nfs_page_free(req);
 }

+static int nfs_wait_bit_interruptible(void *word)
 {
+	int ret = 0;

+	if (signal_pending(current))
+		ret = -ERESTARTSYS;
+	else
+		schedule();
+	return ret;
 }

 /**
···
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
+	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
+	sigset_t oldmask;
+	int ret = 0;

+	if (!test_bit(PG_BUSY, &req->wb_flags))
+		goto out;
+	/*
+	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
+	 *	 are not interrupted if intr flag is not set
+	 */
+	rpc_clnt_sigmask(clnt, &oldmask);
+	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
+			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
+	rpc_clnt_sigunmask(clnt, &oldmask);
+out:
+	return ret;
 }

 /**
···
 	return npages;
 }

+#define NFS_SCAN_MAXENTRIES 16
+/**
+ * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
+ * @nfsi: NFS inode
+ * @dst: Destination list
+ * @idx_start: lower bound of page->index to scan
+ * @npages: idx_start + npages sets the upper bound to scan.
+ *
+ * Moves elements from one of the inode request lists.
+ * If the number of requests is set to 0, the entire address_space
+ * starting at index idx_start, is scanned.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ * You must be holding the inode's req_lock when calling this function
+ */
+int
+nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
+	      unsigned long idx_start, unsigned int npages)
+{
+	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
+	struct nfs_page *req;
+	unsigned long idx_end;
+	int found, i;
+	int res;
+
+	res = 0;
+	if (npages == 0)
+		idx_end = ~0;
+	else
+		idx_end = idx_start + npages - 1;
+
+	for (;;) {
+		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
+				(void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
+				NFS_PAGE_TAG_DIRTY);
+		if (found <= 0)
+			break;
+		for (i = 0; i < found; i++) {
+			req = pgvec[i];
+			if (req->wb_index > idx_end)
+				goto out;
+
+			idx_start = req->wb_index + 1;
+
+			if (nfs_set_page_writeback_locked(req)) {
+				radix_tree_tag_clear(&nfsi->nfs_page_tree,
+						req->wb_index, NFS_PAGE_TAG_DIRTY);
+				nfs_list_remove_request(req);
+				nfs_list_add_request(req, dst);
+				res++;
+			}
+		}
+	}
+out:
+	return res;
+}
+
 /**
  * nfs_scan_list - Scan a list for matching requests
  * @head: One of the NFS inode request lists
···
 		if (req->wb_index > idx_end)
 			break;

+		if (!nfs_set_page_writeback_locked(req))
 			continue;
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, dst);
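
For reference, the conversion above relies on the standard bit-waitqueue pairing: a waker must clear the bit before calling wake_up_bit() on the same word/bit that the waiter passed to the wait_on_bit machinery. A minimal sketch of that pairing, with "flags" and MY_BIT as placeholder names rather than identifiers from the patch:

	unsigned long flags;

	/* waiter (cf. nfs_wait_on_request): sleeps until MY_BIT is clear */
	ret = out_of_line_wait_on_bit(&flags, MY_BIT,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);

	/* waker (cf. nfs_unlock_request): the barriers order the bit clear
	 * against the waitqueue check performed inside wake_up_bit() */
	smp_mb__before_clear_bit();
	clear_bit(MY_BIT, &flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&flags, MY_BIT);
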
···
 	ClearPageError(page);

 io_error:
-	nfs_end_data_update_defer(inode);
 	nfs_writedata_free(wdata);
 	return written ? written : result;
 }
···
 		if (err < 0)
 			goto out;
 	}
-	err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc));
 	if (err > 0) {
 		wbc->nr_to_write -= err;
 		err = 0;
···
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&nfsi->req_lock);
-		nfs_end_data_update_defer(inode);
 		iput(inode);
 	} else
 		spin_unlock(&nfsi->req_lock);
···
 	struct nfs_inode *nfsi = NFS_I(inode);

 	spin_lock(&nfsi->req_lock);
 	nfs_list_add_request(req, &nfsi->dirty);
 	nfsi->ndirty++;
 	spin_unlock(&nfsi->req_lock);
···

 	spin_lock(&nfsi->req_lock);
 	next = idx_start;
-	while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
 		if (req->wb_index > idx_end)
 			break;

 		next = req->wb_index + 1;
-		if (!NFS_WBACK_BUSY(req))
-			continue;

 		atomic_inc(&req->wb_count);
 		spin_unlock(&nfsi->req_lock);
···
 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
-	int res;
-	res = nfs_scan_list(&nfsi->dirty, dst, idx_start, npages);
-	nfsi->ndirty -= res;
-	sub_page_state(nr_dirty,res);
-	if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
-		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
 	return res;
 }
···
 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
-	int res;
-	res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
-	nfsi->ncommit -= res;
-	if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
-		printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
 	return res;
 }
 #endif
···
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
-	if (PageUptodate(page) && inode->i_flock == NULL) {
 		loff_t end_offs = i_size_read(inode) - 1;
 		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

···
 #else
 	nfs_inode_remove_request(req);
 #endif
-	nfs_unlock_request(req);
 }

 static inline int flush_task_priority(int how)
···
 		nfs_writedata_free(data);
 	}
 	nfs_mark_request_dirty(req);
-	nfs_unlock_request(req);
 	return -ENOMEM;
 }
···
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
 	}
 	return -ENOMEM;
 }
···
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
 	}
 	return error;
 }
···
 		nfs_inode_remove_request(req);
 #endif
 	next:
-		nfs_unlock_request(req);
 	}
 }
···
 		struct nfs_write_data *data, int how)
 {
 	struct rpc_task *task = &data->task;
-	struct nfs_page *first, *last;
 	struct inode *inode;
-	loff_t start, end, len;

 	/* Set up the RPC argument and reply structs
 	 * NB: take care not to mess about with data->commit et al. */

 	list_splice_init(head, &data->pages);
 	first = nfs_list_entry(data->pages.next);
-	last = nfs_list_entry(data->pages.prev);
 	inode = first->wb_context->dentry->d_inode;
-
-	/*
-	 * Determine the offset range of requests in the COMMIT call.
-	 * We rely on the fact that data->pages is an ordered list...
-	 */
-	start = req_offset(first);
-	end = req_offset(last) + last->wb_bytes;
-	len = end - start;
-	/* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
-	if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1))
-		len = 0;

 	data->inode = inode;
 	data->cred = first->wb_context->cred;

 	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = start;
-	data->args.count = len;
-	data->res.count = len;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;

···
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
-		nfs_unlock_request(req);
 	}
 	return -ENOMEM;
 }
···
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_unlock_request(req);
 		res++;
 	}
 	sub_page_state(nr_unstable,res);
···
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
-	if (res)
-		error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
 	if (error < 0)
 		return error;
 	return res;
 }

 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
-		unsigned int npages, int how)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
···
 	error = 0;

 	spin_lock(&nfsi->req_lock);
-	res = nfs_scan_commit(inode, &head, idx_start, npages);
 	if (res) {
-		res += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&nfsi->req_lock);
 		error = nfs_commit_list(&head, how);
-	} else
-		spin_unlock(&nfsi->req_lock);
-	if (error < 0)
-		return error;
 	return res;
 }
 #endif
···
 		error = nfs_flush_inode(inode, idx_start, npages, how);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 		if (error == 0)
-			error = nfs_commit_inode(inode, idx_start, npages, how);
 #endif
 	} while (error > 0);
 	return error;
···
 	ClearPageError(page);

 io_error:
+	nfs_end_data_update(inode);
 	nfs_writedata_free(wdata);
 	return written ? written : result;
 }
···
 		if (err < 0)
 			goto out;
 	}
+	err = nfs_commit_inode(inode, wb_priority(wbc));
 	if (err > 0) {
 		wbc->nr_to_write -= err;
 		err = 0;
···
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&nfsi->req_lock);
+		nfs_end_data_update(inode);
 		iput(inode);
 	} else
 		spin_unlock(&nfsi->req_lock);
···
 	struct nfs_inode *nfsi = NFS_I(inode);

 	spin_lock(&nfsi->req_lock);
+	radix_tree_tag_set(&nfsi->nfs_page_tree,
+			req->wb_index, NFS_PAGE_TAG_DIRTY);
 	nfs_list_add_request(req, &nfsi->dirty);
 	nfsi->ndirty++;
 	spin_unlock(&nfsi->req_lock);
···

 	spin_lock(&nfsi->req_lock);
 	next = idx_start;
+	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
 		if (req->wb_index > idx_end)
 			break;

 		next = req->wb_index + 1;
+		BUG_ON(!NFS_WBACK_BUSY(req));

 		atomic_inc(&req->wb_count);
 		spin_unlock(&nfsi->req_lock);
···
 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int res = 0;
+
+	if (nfsi->ndirty != 0) {
+		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
+		nfsi->ndirty -= res;
+		sub_page_state(nr_dirty, res);
+		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
+			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
+	}
 	return res;
 }
···
 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int res = 0;
+
+	if (nfsi->ncommit != 0) {
+		res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
+		nfsi->ncommit -= res;
+		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
+			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
+	}
 	return res;
 }
 #endif
···
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
+	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
 		loff_t end_offs = i_size_read(inode) - 1;
 		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

···
 #else
 	nfs_inode_remove_request(req);
 #endif
+	nfs_clear_page_writeback(req);
 }

 static inline int flush_task_priority(int how)
···
 		nfs_writedata_free(data);
 	}
 	nfs_mark_request_dirty(req);
+	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
···
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
···
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
+		nfs_clear_page_writeback(req);
 	}
 	return error;
 }
···
 		nfs_inode_remove_request(req);
 #endif
 	next:
+		nfs_clear_page_writeback(req);
 	}
 }
···
 		struct nfs_write_data *data, int how)
 {
 	struct rpc_task *task = &data->task;
+	struct nfs_page *first;
 	struct inode *inode;

 	/* Set up the RPC argument and reply structs
 	 * NB: take care not to mess about with data->commit et al. */

 	list_splice_init(head, &data->pages);
 	first = nfs_list_entry(data->pages.next);
 	inode = first->wb_context->dentry->d_inode;

 	data->inode = inode;
 	data->cred = first->wb_context->cred;

 	data->args.fh = NFS_FH(data->inode);
+	/* Note: we always request a commit of the entire inode */
+	data->args.offset = 0;
+	data->args.count = 0;
+	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;

···
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
···
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
+		nfs_clear_page_writeback(req);
 		res++;
 	}
 	sub_page_state(nr_unstable,res);
···
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
+	if (res) {
+		struct nfs_server *server = NFS_SERVER(inode);
+
+		/* For single writes, FLUSH_STABLE is more efficient */
+		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
+			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
+				how |= FLUSH_STABLE;
+		}
+		error = nfs_flush_list(&head, server->wpages, how);
+	}
 	if (error < 0)
 		return error;
 	return res;
 }

 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
···
 	error = 0;

 	spin_lock(&nfsi->req_lock);
+	res = nfs_scan_commit(inode, &head, 0, 0);
+	spin_unlock(&nfsi->req_lock);
 	if (res) {
 		error = nfs_commit_list(&head, how);
+		if (error < 0)
+			return error;
+	}
 	return res;
 }
 #endif
···
 		error = nfs_flush_inode(inode, idx_start, npages, how);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 		if (error == 0)
+			error = nfs_commit_inode(inode, how);
 #endif
 	} while (error > 0);
 	return error;
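
Passing offset 0 and count 0 in the COMMIT arguments means "commit the whole file" in NFSv3/v4, which is why the range computation could be dropped above. From an application's point of view nothing changes: an fsync() on an NFS file still runs the flush-then-commit sequence and only returns once data is on stable server storage. An illustrative userspace-side view (not part of the patch):

	#include <unistd.h>
	#include <sys/types.h>

	/* Write, then force the data to stable storage; on NFS the fsync()
	 * drives the nfs_flush_inode() + nfs_commit_inode() path above. */
	int write_stable(int fd, const void *buf, size_t len)
	{
		if (write(fd, buf, len) != (ssize_t)len)
			return -1;
		return fsync(fd);
	}
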
+7
fs/nfs_common/Makefile
···
+#
+# Makefile for Linux filesystem routines that are shared by client and server.
+#
+
+obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
+
+nfs_acl-objs := nfsacl.o
···
 	return vec->iov_len <= PAGE_SIZE;
 }

 static inline int svc_take_page(struct svc_rqst *rqstp)
 {
 	if (rqstp->rq_arghi <= rqstp->rq_argused)
···
 };

 /*
- * RPC program
  */
 struct svc_program {
 	u32			pg_prog;	/* program number */
 	unsigned int		pg_lovers;	/* lowest version */
 	unsigned int		pg_hivers;	/* highest version */
···
 	return vec->iov_len <= PAGE_SIZE;
 }

+static inline struct page *
+svc_take_res_page(struct svc_rqst *rqstp)
+{
+	if (rqstp->rq_arghi <= rqstp->rq_argused)
+		return NULL;
+	rqstp->rq_arghi--;
+	rqstp->rq_respages[rqstp->rq_resused] =
+		rqstp->rq_argpages[rqstp->rq_arghi];
+	return rqstp->rq_respages[rqstp->rq_resused++];
+}
+
 static inline int svc_take_page(struct svc_rqst *rqstp)
 {
 	if (rqstp->rq_arghi <= rqstp->rq_argused)
···
 };

 /*
+ * List of RPC programs on the same transport endpoint
  */
 struct svc_program {
+	struct svc_program *	pg_next;	/* other programs (same xprt) */
 	u32			pg_prog;	/* program number */
 	unsigned int		pg_lovers;	/* lowest version */
 	unsigned int		pg_hivers;	/* highest version */
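
The new pg_next member is what lets a side-protocol share the transport of a main program. A hypothetical chaining sketch (the field values beyond pg_next/pg_prog are elided; the program numbers are the standard NFS and NFS_ACL assignments, as also seen in the old svc.c paranoia check below):

	/* Hypothetical: chain an ACL side-protocol behind the main program
	 * so a single svc_serv dispatches both; svc_create() and the
	 * request dispatcher walk pg_next when computing version ranges
	 * and matching the incoming rq_prog. */
	static struct svc_program acl_program = {
		.pg_prog	= 100227,	/* NFS_ACL program number */
		/* ... pg_nvers, pg_vers, pg_name, pg_stats ... */
	};

	static struct svc_program main_program = {
		.pg_next	= &acl_program,	/* same transport endpoint */
		.pg_prog	= 100003,	/* NFS program number */
		/* ... pg_nvers, pg_vers, pg_name, pg_stats ... */
	};
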
+19-2
include/linux/sunrpc/xdr.h
···
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
 extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
-extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len);

 /*
  * Helper structure for copying from an sk_buff.
···

 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);

-extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
 		skb_reader_t *, skb_read_actor_t);

 struct socket;
 struct sockaddr;
 extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
 		struct xdr_buf *, unsigned int, int);

 /*
  * Provide some simple tools for XDR buffer overflow-checking etc.
···
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
 extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
+extern int read_bytes_from_xdr_buf(struct xdr_buf *, int, void *, int);
+extern int write_bytes_to_xdr_buf(struct xdr_buf *, int, void *, int);

 /*
  * Helper structure for copying from an sk_buff.
···

 typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);

+extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
 		skb_reader_t *, skb_read_actor_t);

 struct socket;
 struct sockaddr;
 extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
 		struct xdr_buf *, unsigned int, int);
+
+extern int xdr_encode_word(struct xdr_buf *, int, u32);
+extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
+
+struct xdr_array2_desc;
+typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
+struct xdr_array2_desc {
+	unsigned int elem_size;
+	unsigned int array_len;
+	xdr_xcode_elem_t xcode;
+};
+
+extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+			     struct xdr_array2_desc *desc);
+extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+			     struct xdr_array2_desc *desc);

 /*
  * Provide some simple tools for XDR buffer overflow-checking etc.
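
The descriptor drives a callback-based walk of an XDR array: elem_size is the fixed on-the-wire size of one element, array_len is the element count (filled in on decode, supplied on encode), and xcode is invoked once per element with a pointer to the raw element bytes. A hedged sketch of a decoder for an array of 32-bit IDs (the struct and function names here are illustrative, not from the patch):

	/* Hypothetical decode callback: the descriptor is embedded first so
	 * the callback can recover its enclosing context. */
	struct id_array {
		struct xdr_array2_desc	desc;	/* must come first */
		u32			*ids;
		unsigned int		next;
	};

	static int id_decode(struct xdr_array2_desc *desc, void *elem)
	{
		struct id_array *a = container_of(desc, struct id_array, desc);

		a->ids[a->next++] = ntohl(*(u32 *)elem);	/* raw XDR bytes */
		return 0;					/* non-zero aborts the walk */
	}

	/* usage sketch:
	 *	a.desc.elem_size = 4;
	 *	a.desc.xcode = id_decode;
	 *	err = xdr_decode_array2(buf, base, &a.desc);
	 */
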
+3-3
net/sunrpc/auth.c
···
 	u32 flavor = pseudoflavor_to_flavor(pseudoflavor);

 	if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor]))
-		return NULL;
 	auth = ops->create(clnt, pseudoflavor);
-	if (!auth)
-		return NULL;
 	if (clnt->cl_auth)
 		rpcauth_destroy(clnt->cl_auth);
 	clnt->cl_auth = auth;
···
 	u32 flavor = pseudoflavor_to_flavor(pseudoflavor);

 	if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor]))
+		return ERR_PTR(-EINVAL);
 	auth = ops->create(clnt, pseudoflavor);
+	if (IS_ERR(auth))
+		return auth;
 	if (clnt->cl_auth)
 		rpcauth_destroy(clnt->cl_auth);
 	clnt->cl_auth = auth;
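
With the change above, rpcauth_create() reports failures through the ERR_PTR() convention rather than a bare NULL, so callers can propagate the precise errno. Sketch of the calling convention this implies:

	struct rpc_auth *auth;

	auth = rpcauth_create(pseudoflavor, clnt);
	if (IS_ERR(auth))
		return PTR_ERR(auth);	/* e.g. -EINVAL for an unknown flavor */
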
+11-7
net/sunrpc/auth_gss/auth_gss.c
···
 {
 	struct gss_auth *gss_auth;
 	struct rpc_auth * auth;

 	dprintk("RPC: creating GSS authenticator for client %p\n",clnt);

 	if (!try_module_get(THIS_MODULE))
-		return NULL;
 	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
 		goto out_dec;
 	gss_auth->client = clnt;
 	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
 	if (!gss_auth->mech) {
 		printk(KERN_WARNING "%s: Pseudoflavor %d not found!",
···
 		goto err_free;
 	}
 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
-	/* FIXME: Will go away once privacy support is merged in */
-	if (gss_auth->service == RPC_GSS_SVC_PRIVACY)
-		gss_auth->service = RPC_GSS_SVC_INTEGRITY;
 	INIT_LIST_HEAD(&gss_auth->upcalls);
 	spin_lock_init(&gss_auth->lock);
 	auth = &gss_auth->rpc_auth;
···
 	auth->au_flavor = flavor;
 	atomic_set(&auth->au_count, 1);

-	if (rpcauth_init_credcache(auth, GSS_CRED_EXPIRE) < 0)
 		goto err_put_mech;

 	snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s",
 			clnt->cl_pathname,
 			gss_auth->mech->gm_name);
 	gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
-	if (IS_ERR(gss_auth->dentry))
 		goto err_put_mech;

 	return auth;
 err_put_mech:
···
 	kfree(gss_auth);
 out_dec:
 	module_put(THIS_MODULE);
-	return NULL;
 }

 static void
···
 		return;
 	}
 	} else
-		wake_up(&task->u.tk_wait.waitq);
 }

 /*
···
 }

 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
 static int __rpc_execute(struct rpc_task *task)
···

 	BUG_ON(RPC_IS_QUEUED(task));

-restarted:
-	while (1) {
 		/*
 		 * Garbage collection of pending timers...
 		 */
···
 		 * by someone else.
 		 */
 		if (!RPC_IS_QUEUED(task)) {
-			if (!task->tk_action)
 				break;
-			lock_kernel();
-			task->tk_action(task);
-			unlock_kernel();
 		}

 		/*
···

 		/* sync task: sleep here */
 		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
-		if (RPC_TASK_UNINTERRUPTIBLE(task)) {
-			__wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
-		} else {
-			__wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
-			if (status == -ERESTARTSYS) {
-				dprintk("RPC: %4d got signal\n", task->tk_pid);
-				task->tk_flags |= RPC_TASK_KILLED;
-				rpc_exit(task, -ERESTARTSYS);
-				rpc_wake_up_task(task);
-			}
 		}
 		rpc_set_running(task);
 		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
-	}
-
-	if (task->tk_exit) {
-		lock_kernel();
-		task->tk_exit(task);
-		unlock_kernel();
-		/* If tk_action is non-null, the user wants us to restart */
-		if (task->tk_action) {
-			if (!RPC_ASSASSINATED(task)) {
-				/* Release RPC slot and buffer memory */
-				if (task->tk_rqstp)
-					xprt_release(task);
-				rpc_free(task);
-				goto restarted;
-			}
-			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
-		}
 	}

 	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
···

 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = rpciod_workqueue;
-	if (!RPC_IS_ASYNC(task))
-		init_waitqueue_head(&task->u.tk_wait.waitq);

 	if (clnt) {
 		atomic_inc(&clnt->cl_users);
···
 		return;
 	}
 	} else
+		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 }

 /*
···
 }

 /*
+ * Helper that calls task->tk_exit if it exists and then returns
+ * true if we should exit __rpc_execute.
+ */
+static inline int __rpc_do_exit(struct rpc_task *task)
+{
+	if (task->tk_exit != NULL) {
+		lock_kernel();
+		task->tk_exit(task);
+		unlock_kernel();
+		/* If tk_action is non-null, we should restart the call */
+		if (task->tk_action != NULL) {
+			if (!RPC_ASSASSINATED(task)) {
+				/* Release RPC slot and buffer memory */
+				xprt_release(task);
+				rpc_free(task);
+				return 0;
+			}
+			printk(KERN_ERR "RPC: dead task tried to walk away.\n");
+		}
+	}
+	return 1;
+}
+
+static int rpc_wait_bit_interruptible(void *word)
+{
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+	schedule();
+	return 0;
+}
+
+/*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
 static int __rpc_execute(struct rpc_task *task)
···

 	BUG_ON(RPC_IS_QUEUED(task));

+	for (;;) {
 		/*
 		 * Garbage collection of pending timers...
 		 */
···
 		 * by someone else.
 		 */
 		if (!RPC_IS_QUEUED(task)) {
+			if (task->tk_action != NULL) {
+				lock_kernel();
+				task->tk_action(task);
+				unlock_kernel();
+			} else if (__rpc_do_exit(task))
 				break;
 		}

 		/*
···

 		/* sync task: sleep here */
 		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
+		/* Note: Caller should be using rpc_clnt_sigmask() */
+		status = out_of_line_wait_on_bit(&task->tk_runstate,
+				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
+				TASK_INTERRUPTIBLE);
+		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that
 			 * clean up after sleeping on some queue, we don't
 			 * break the loop here, but go around once more.
 			 */
+			dprintk("RPC: %4d got signal\n", task->tk_pid);
+			task->tk_flags |= RPC_TASK_KILLED;
+			rpc_exit(task, -ERESTARTSYS);
+			rpc_wake_up_task(task);
 		}
 		rpc_set_running(task);
 		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
 	}

 	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
···

 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = rpciod_workqueue;

 	if (clnt) {
 		atomic_inc(&clnt->cl_users);
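
The action callback handed to out_of_line_wait_on_bit() defines how each wait iteration sleeps: it runs with the task state already set, returns 0 after sleeping so the bit is re-tested, or a negative errno to abort the wait (which is how rpc_wait_bit_interruptible() above surfaces signals). A generic sketch of that contract, with a hypothetical timed variant:

	/* Hypothetical action: sleep at most a second per iteration, abort
	 * on signals; 0 means "slept, re-check the bit", negative means
	 * "give up and return this value to the waiter". */
	static int my_timed_wait_action(void *word)
	{
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout(HZ);
		return 0;
	}
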
+5-1
net/sunrpc/sunrpc_syms.c
···
 /* RPC client functions */
 EXPORT_SYMBOL(rpc_create_client);
 EXPORT_SYMBOL(rpc_clone_client);
 EXPORT_SYMBOL(rpc_destroy_client);
 EXPORT_SYMBOL(rpc_shutdown_client);
 EXPORT_SYMBOL(rpc_release_client);
···

 /* Client transport */
 EXPORT_SYMBOL(xprt_create_proto);
-EXPORT_SYMBOL(xprt_destroy);
 EXPORT_SYMBOL(xprt_set_timeout);
 EXPORT_SYMBOL(xprt_udp_slot_table_entries);
 EXPORT_SYMBOL(xprt_tcp_slot_table_entries);
···
 EXPORT_SYMBOL(xdr_encode_pages);
 EXPORT_SYMBOL(xdr_inline_pages);
 EXPORT_SYMBOL(xdr_shift_buf);
 EXPORT_SYMBOL(xdr_buf_from_iov);
 EXPORT_SYMBOL(xdr_buf_subsegment);
 EXPORT_SYMBOL(xdr_buf_read_netobj);
···
 /* RPC client functions */
 EXPORT_SYMBOL(rpc_create_client);
 EXPORT_SYMBOL(rpc_clone_client);
+EXPORT_SYMBOL(rpc_bind_new_program);
 EXPORT_SYMBOL(rpc_destroy_client);
 EXPORT_SYMBOL(rpc_shutdown_client);
 EXPORT_SYMBOL(rpc_release_client);
···

 /* Client transport */
 EXPORT_SYMBOL(xprt_create_proto);
 EXPORT_SYMBOL(xprt_set_timeout);
 EXPORT_SYMBOL(xprt_udp_slot_table_entries);
 EXPORT_SYMBOL(xprt_tcp_slot_table_entries);
···
 EXPORT_SYMBOL(xdr_encode_pages);
 EXPORT_SYMBOL(xdr_inline_pages);
 EXPORT_SYMBOL(xdr_shift_buf);
+EXPORT_SYMBOL(xdr_encode_word);
+EXPORT_SYMBOL(xdr_decode_word);
+EXPORT_SYMBOL(xdr_encode_array2);
+EXPORT_SYMBOL(xdr_decode_array2);
 EXPORT_SYMBOL(xdr_buf_from_iov);
 EXPORT_SYMBOL(xdr_buf_subsegment);
 EXPORT_SYMBOL(xdr_buf_read_netobj);
+19-17
net/sunrpc/svc.c
···
 	if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL)))
 		return NULL;
 	memset(serv, 0, sizeof(*serv));
 	serv->sv_program = prog;
 	serv->sv_nrthreads = 1;
 	serv->sv_stats = prog->pg_stats;
 	serv->sv_bufsz = bufsize? bufsize : 4096;
-	prog->pg_lovers = prog->pg_nvers-1;
 	xdrsize = 0;
-	for (vers=0; vers<prog->pg_nvers ; vers++)
-		if (prog->pg_vers[vers]) {
-			prog->pg_hivers = vers;
-			if (prog->pg_lovers > vers)
-				prog->pg_lovers = vers;
-			if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
-				xdrsize = prog->pg_vers[vers]->vs_xdrsize;
-		}
 	serv->sv_xdrsize = xdrsize;
 	INIT_LIST_HEAD(&serv->sv_threads);
 	INIT_LIST_HEAD(&serv->sv_sockets);
 	INIT_LIST_HEAD(&serv->sv_tempsocks);
 	INIT_LIST_HEAD(&serv->sv_permsocks);
 	spin_lock_init(&serv->sv_lock);
-
-	serv->sv_name = prog->pg_name;

 	/* Remove any stale portmap registrations */
 	svc_register(serv, 0, 0);
···
 	rqstp->rq_res.len = 0;
 	rqstp->rq_res.page_base = 0;
 	rqstp->rq_res.page_len = 0;
 	rqstp->rq_res.tail[0].iov_len = 0;
 	/* tcp needs a space for the record length... */
 	if (rqstp->rq_prot == IPPROTO_TCP)
···
 		goto sendit;
 	}

-	if (prog != progp->pg_prog)
 		goto err_bad_prog;

 	if (vers >= progp->pg_nvers ||
···
 	goto sendit;

 err_bad_prog:
-#ifdef RPC_PARANOIA
-	if (prog != 100227 || progp->pg_prog != 100003)
-		printk("svc: unknown program %d (me %d)\n", prog, progp->pg_prog);
-	/* else it is just a Solaris client seeing if ACLs are supported */
-#endif
 	serv->sv_stats->rpcbadfmt++;
 	svc_putu32(resv, rpc_prog_unavail);
 	goto sendit;
···
 	if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL)))
 		return NULL;
 	memset(serv, 0, sizeof(*serv));
+	serv->sv_name = prog->pg_name;
 	serv->sv_program = prog;
 	serv->sv_nrthreads = 1;
 	serv->sv_stats = prog->pg_stats;
 	serv->sv_bufsz = bufsize? bufsize : 4096;
 	xdrsize = 0;
+	while (prog) {
+		prog->pg_lovers = prog->pg_nvers-1;
+		for (vers=0; vers<prog->pg_nvers ; vers++)
+			if (prog->pg_vers[vers]) {
+				prog->pg_hivers = vers;
+				if (prog->pg_lovers > vers)
+					prog->pg_lovers = vers;
+				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
+					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
+			}
+		prog = prog->pg_next;
+	}
 	serv->sv_xdrsize = xdrsize;
 	INIT_LIST_HEAD(&serv->sv_threads);
 	INIT_LIST_HEAD(&serv->sv_sockets);
 	INIT_LIST_HEAD(&serv->sv_tempsocks);
 	INIT_LIST_HEAD(&serv->sv_permsocks);
 	spin_lock_init(&serv->sv_lock);

 	/* Remove any stale portmap registrations */
 	svc_register(serv, 0, 0);
···
 	rqstp->rq_res.len = 0;
 	rqstp->rq_res.page_base = 0;
 	rqstp->rq_res.page_len = 0;
+	rqstp->rq_res.buflen = PAGE_SIZE;
 	rqstp->rq_res.tail[0].iov_len = 0;
 	/* tcp needs a space for the record length... */
 	if (rqstp->rq_prot == IPPROTO_TCP)
···
 		goto sendit;
 	}

+	for (progp = serv->sv_program; progp; progp = progp->pg_next)
+		if (prog == progp->pg_prog)
+			break;
+	if (progp == NULL)
 		goto err_bad_prog;

 	if (vers >= progp->pg_nvers ||
···
 	goto sendit;

 err_bad_prog:
+	dprintk("svc: unknown program %d\n", prog);
 	serv->sv_stats->rpcbadfmt++;
 	svc_putu32(resv, rpc_prog_unavail);
 	goto sendit;
+288-10
net/sunrpc/xdr.c
···
 	xdr->buflen += len;
 }

-void
 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 		skb_reader_t *desc,
 		skb_read_actor_t copy_actor)
 {
 	struct page **ppage = xdr->pages;
 	unsigned int len, pglen = xdr->page_len;
 	int ret;

 	len = xdr->head[0].iov_len;
 	if (base < len) {
 		len -= base;
 		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
 		if (ret != len || !desc->count)
-			return;
 		base = 0;
 	} else
 		base -= len;
···
 	do {
 		char *kaddr;

 		len = PAGE_CACHE_SIZE;
 		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
 		if (base) {
···
 		}
 		flush_dcache_page(*ppage);
 		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
 		if (ret != len || !desc->count)
-			return;
 		ppage++;
 	} while ((pglen -= len) != 0);
 copy_tail:
 	len = xdr->tail[0].iov_len;
 	if (base < len)
-		copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
 }

···
 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
 {
 	struct kvec *iov = buf->head;

 	xdr->buf = buf;
 	xdr->iov = iov;
-	xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
-	buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base;
-	xdr->p = p;
 }
 EXPORT_SYMBOL(xdr_init_encode);
···
 	return status;
 }

-static int
-read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
 {
 	u32 raw;
 	int status;
···
 		return status;
 	*obj = ntohl(raw);
 	return 0;
 }

 /* If the netobj starting offset bytes from the start of xdr_buf is contained
···
 	u32 tail_offset = buf->head[0].iov_len + buf->page_len;
 	u32 obj_end_offset;

-	if (read_u32_from_xdr_buf(buf, offset, &obj->len))
 		goto out;
 	obj_end_offset = offset + 4 + obj->len;
···
 	return 0;
 out:
 	return -1;
 }
···
 	xdr->buflen += len;
 }

+ssize_t
 xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
 		skb_reader_t *desc,
 		skb_read_actor_t copy_actor)
 {
 	struct page **ppage = xdr->pages;
 	unsigned int len, pglen = xdr->page_len;
+	ssize_t copied = 0;
 	int ret;

 	len = xdr->head[0].iov_len;
 	if (base < len) {
 		len -= base;
 		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
+		copied += ret;
 		if (ret != len || !desc->count)
+			goto out;
 		base = 0;
 	} else
 		base -= len;
···
 	do {
 		char *kaddr;

+		/* ACL likes to be lazy in allocating pages - ACLs
+		 * are small by default but can get huge. */
+		if (unlikely(*ppage == NULL)) {
+			*ppage = alloc_page(GFP_ATOMIC);
+			if (unlikely(*ppage == NULL)) {
+				if (copied == 0)
+					copied = -ENOMEM;
+				goto out;
+			}
+		}
+
 		len = PAGE_CACHE_SIZE;
 		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
 		if (base) {
···
 		}
 		flush_dcache_page(*ppage);
 		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+		copied += ret;
 		if (ret != len || !desc->count)
+			goto out;
 		ppage++;
 	} while ((pglen -= len) != 0);
 copy_tail:
 	len = xdr->tail[0].iov_len;
 	if (base < len)
+		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+out:
+	return copied;
 }

···
 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
 {
 	struct kvec *iov = buf->head;
+	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

+	BUG_ON(scratch_len < 0);
 	xdr->buf = buf;
 	xdr->iov = iov;
+	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
+	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
+	BUG_ON(iov->iov_len > scratch_len);
+
+	if (p != xdr->p && p != NULL) {
+		size_t len;
+
+		BUG_ON(p < xdr->p || p > xdr->end);
+		len = (char *)p - (char *)xdr->p;
+		xdr->p = p;
+		buf->len += len;
+		iov->iov_len += len;
+	}
 }
 EXPORT_SYMBOL(xdr_init_encode);
···
 	return status;
 }

+/* obj is assumed to point to allocated memory of size at least len: */
+int
+write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+{
+	struct xdr_buf subbuf;
+	int this_len;
+	int status;
+
+	status = xdr_buf_subsegment(buf, &subbuf, base, len);
+	if (status)
+		goto out;
+	this_len = min(len, (int)subbuf.head[0].iov_len);
+	memcpy(subbuf.head[0].iov_base, obj, this_len);
+	len -= this_len;
+	obj += this_len;
+	this_len = min(len, (int)subbuf.page_len);
+	if (this_len)
+		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
+	len -= this_len;
+	obj += this_len;
+	this_len = min(len, (int)subbuf.tail[0].iov_len);
+	memcpy(subbuf.tail[0].iov_base, obj, this_len);
+out:
+	return status;
+}
+
+int
+xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
 {
 	u32 raw;
 	int status;
···
 		return status;
 	*obj = ntohl(raw);
 	return 0;
+}
+
+int
+xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
+{
+	u32 raw = htonl(obj);
+
+	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
 }

 /* If the netobj starting offset bytes from the start of xdr_buf is contained
···
 	u32 tail_offset = buf->head[0].iov_len + buf->page_len;
 	u32 obj_end_offset;

+	if (xdr_decode_word(buf, offset, &obj->len))
 		goto out;
 	obj_end_offset = offset + 4 + obj->len;
···
 	return 0;
 out:
 	return -1;
+}
+
+/* Returns 0 on success, or else a negative error code. */
+static int
+xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
+		 struct xdr_array2_desc *desc, int encode)
+{
+	char *elem = NULL, *c;
+	unsigned int copied = 0, todo, avail_here;
+	struct page **ppages = NULL;
+	int err;
+
+	if (encode) {
+		if (xdr_encode_word(buf, base, desc->array_len) != 0)
+			return -EINVAL;
+	} else {
+		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+		    (unsigned long) base + 4 + desc->array_len *
+				    desc->elem_size > buf->len)
+			return -EINVAL;
+	}
+	base += 4;
+
+	if (!desc->xcode)
+		return 0;
+
+	todo = desc->array_len * desc->elem_size;
+
+	/* process head */
+	if (todo && base < buf->head->iov_len) {
+		c = buf->head->iov_base + base;
+		avail_here = min_t(unsigned int, todo,
+				   buf->head->iov_len - base);
+		todo -= avail_here;
+
+		while (avail_here >= desc->elem_size) {
+			err = desc->xcode(desc, c);
+			if (err)
+				goto out;
+			c += desc->elem_size;
+			avail_here -= desc->elem_size;
+		}
+		if (avail_here) {
+			if (!elem) {
+				elem = kmalloc(desc->elem_size, GFP_KERNEL);
+				err = -ENOMEM;
+				if (!elem)
+					goto out;
+			}
+			if (encode) {
+				err = desc->xcode(desc, elem);
+				if (err)
+					goto out;
+				memcpy(c, elem, avail_here);
+			} else
+				memcpy(elem, c, avail_here);
+			copied = avail_here;
+		}
+		base = buf->head->iov_len;  /* align to start of pages */
+	}
+
+	/* process pages array */
+	base -= buf->head->iov_len;
+	if (todo && base < buf->page_len) {
+		unsigned int avail_page;
+
+		avail_here = min(todo, buf->page_len - base);
+		todo -= avail_here;
+
+		base += buf->page_base;
+		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
+		base &= ~PAGE_CACHE_MASK;
+		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+					avail_here);
+		c = kmap(*ppages) + base;
+
+		while (avail_here) {
+			avail_here -= avail_page;
+			if (copied || avail_page < desc->elem_size) {
+				unsigned int l = min(avail_page,
+					desc->elem_size - copied);
+				if (!elem) {
+					elem = kmalloc(desc->elem_size,
+						       GFP_KERNEL);
+					err = -ENOMEM;
+					if (!elem)
+						goto out;
+				}
+				if (encode) {
+					if (!copied) {
+						err = desc->xcode(desc, elem);
+						if (err)
+							goto out;
+					}
+					memcpy(c, elem + copied, l);
+					copied += l;
+					if (copied == desc->elem_size)
+						copied = 0;
+				} else {
+					memcpy(elem + copied, c, l);
+					copied += l;
+					if (copied == desc->elem_size) {
+						err = desc->xcode(desc, elem);
+						if (err)
+							goto out;
+						copied = 0;
+					}
+				}
+				avail_page -= l;
+				c += l;
+			}
+			while (avail_page >= desc->elem_size) {
+				err = desc->xcode(desc, c);
+				if (err)
+					goto out;
+				c += desc->elem_size;
+				avail_page -= desc->elem_size;
+			}
+			if (avail_page) {
+				unsigned int l = min(avail_page,
+					desc->elem_size - copied);
+				if (!elem) {
+					elem = kmalloc(desc->elem_size,
+						       GFP_KERNEL);
+					err = -ENOMEM;
+					if (!elem)
+						goto out;
+				}
+				if (encode) {
+					if (!copied) {
+						err = desc->xcode(desc, elem);
+						if (err)
+							goto out;
+					}
+					memcpy(c, elem + copied, l);
+					copied += l;
+					if (copied == desc->elem_size)
+						copied = 0;
+				} else {
+					memcpy(elem + copied, c, l);
+					copied += l;
+					if (copied == desc->elem_size) {
+						err = desc->xcode(desc, elem);
+						if (err)
+							goto out;
+						copied = 0;
+					}
+				}
+			}
+			if (avail_here) {
+				kunmap(*ppages);
+				ppages++;
+				c = kmap(*ppages);
+			}
+
+			avail_page = min(avail_here,
+				 (unsigned int) PAGE_CACHE_SIZE);
+		}
+		base = buf->page_len;  /* align to start of tail */
+	}
+
+	/* process tail */
+	base -= buf->page_len;
+	if (todo) {
+		c = buf->tail->iov_base + base;
+		if (copied) {
+			unsigned int l = desc->elem_size - copied;
+
+			if (encode)
+				memcpy(c, elem + copied, l);
+			else {
+				memcpy(elem + copied, c, l);
+				err = desc->xcode(desc, elem);
+				if (err)
+					goto out;
+			}
+			todo -= l;
+			c += l;
+		}
+		while (todo) {
+			err = desc->xcode(desc, c);
+			if (err)
+				goto out;
+			c += desc->elem_size;
+			todo -= desc->elem_size;
+		}
+	}
+	err = 0;
+
+out:
+	if (elem)
+		kfree(elem);
+	if (ppages)
+		kunmap(*ppages);
+	return err;
+}
+
+int
+xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+		  struct xdr_array2_desc *desc)
+{
+	if (base >= buf->len)
+		return -EINVAL;
+
+	return xdr_xcode_array2(buf, base, desc, 0);
+}
+
+int
+xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+		  struct xdr_array2_desc *desc)
+{
+	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
+	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
+		return -EINVAL;
+
+	return xdr_xcode_array2(buf, base, desc, 1);
 }
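
xdr_encode_word() and xdr_decode_word() give symmetric access to one 32-bit XDR word at an arbitrary byte offset in an xdr_buf, handling head/pages/tail boundaries via the subsegment helpers above. A minimal round-trip sketch, assuming "buf" and "base" come from an already-constructed request:

	u32 val;
	int err;

	err = xdr_encode_word(buf, base, 42);		/* stores htonl(42) at base */
	if (err == 0)
		err = xdr_decode_word(buf, base, &val);	/* val is now 42 */
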