Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'nfs-for-4.3-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
"Highlights include:

Stable patches:
- Fix atomicity of pNFS commit list updates
- Fix NFSv4 handling of open(O_CREAT|O_EXCL|O_RDONLY)
- nfs_set_pgio_error sometimes misses errors
- Fix a thinko in xs_connect()
- Fix borkage in _same_data_server_addrs_locked()
- Fix a NULL pointer dereference of migration recovery ops for v4.2
client
- Don't let the ctime override attribute barriers.
- Revert "NFSv4: Remove incorrect check in can_open_delegated()"
- Ensure flexfiles pNFS driver updates the inode after write finishes
- flexfiles must not pollute the attribute cache with attributes from
the DS
- Fix a protocol error in layoutreturn
- Fix a protocol issue with NFSv4.1 CLOSE stateids

Bugfixes + cleanups:
- pNFS blocks bugfixes from Christoph
- Various cleanups from Anna
- More fixes for delegation corner cases
- Don't fsync twice for O_SYNC/IS_SYNC files
- Fix pNFS and flexfiles layoutstats bugs
- pnfs/flexfiles: avoid duplicate tracking of mirror data
- pnfs: Fix layoutget/layoutreturn/return-on-close serialisation
issues
- pnfs/flexfiles: error handling retries a layoutget before falling
back to the MDS

Features:
- Full support for the OPEN NFS4_CREATE_EXCLUSIVE4_1 mode from
Kinglong
- More RDMA client transport improvements from Chuck
- Removal of the deprecated ib_reg_phys_mr() and ib_rereg_phys_mr()
verbs from the SUNRPC, Lustre and core InfiniBand trees.
- Optimise away the close-to-open getattr if there is no cached data"

* tag 'nfs-for-4.3-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (108 commits)
NFSv4: Respect the server imposed limit on how many changes we may cache
NFSv4: Express delegation limit in units of pages
Revert "NFS: Make close(2) asynchronous when closing NFS O_DIRECT files"
NFS: Optimise away the close-to-open getattr if there is no cached data
NFSv4.1/flexfiles: Clean up ff_layout_write_done_cb/ff_layout_commit_done_cb
NFSv4.1/flexfiles: Mark the layout for return in ff_layout_io_track_ds_error()
nfs: Remove unneeded checking of the return value from scnprintf
nfs: Fix truncated client owner id without proto type
NFSv4.1/flexfiles: Mark layout for return if the mirrors are invalid
NFSv4.1/flexfiles: RW layouts are valid only if all mirrors are valid
NFSv4.1/flexfiles: Fix incorrect usage of pnfs_generic_mark_devid_invalid()
NFSv4.1/flexfiles: Fix freeing of mirrors
NFSv4.1/pNFS: Don't request a minimal read layout beyond the end of file
NFSv4.1/pnfs: Handle LAYOUTGET return values correctly
NFSv4.1/pnfs: Don't ask for a read layout for an empty file.
NFSv4.1: Fix a protocol issue with CLOSE stateids
NFSv4.1/flexfiles: Don't mark the entire deviceid as bad for file errors
SUNRPC: Prevent SYN+SYNACK+RST storms
SUNRPC: xs_reset_transport must mark the connection as disconnected
NFSv4.1/pnfs: Ensure layoutreturn reserves space for the opaque payload
...

+1320 -1088
+9
Documentation/kernel-parameters.txt
··· 2285 2285 The default parameter value of '0' causes the kernel 2286 2286 not to attempt recovery of lost locks. 2287 2287 2288 + nfs4.layoutstats_timer = 2289 + [NFSv4.2] Change the rate at which the kernel sends 2290 + layoutstats to the pNFS metadata server. 2291 + 2292 + Setting this value to 0 causes the kernel to use 2293 + whatever value is the default set by the layout 2294 + driver. A non-zero value sets the minimum interval 2295 + in seconds between layoutstats transmissions. 2296 + 2288 2297 nfsd.nfs4_disable_idmapping= 2289 2298 [NFSv4] When set to the default of '1', the NFSv4 2290 2299 server will return only numeric uids and gids to
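
The flexfiles hunk further down reads this module parameter when deciding whether a report is due (a non-zero layoutstats_timer overrides FF_LAYOUTSTATS_REPORT_INTERVAL). A minimal userspace sketch of that gating logic; the names and the 60-second driver default below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int layoutstats_timer;	/* 0 => use the driver default */

static bool should_send_layoutstats(time_t *last_report, time_t now,
				    unsigned int driver_default_secs)
{
	unsigned int interval = layoutstats_timer ?
				layoutstats_timer : driver_default_secs;

	if (now - *last_report < (time_t)interval)
		return false;
	*last_report = now;	/* report is due; remember when we sent it */
	return true;
}

int main(void)
{
	time_t last = 0;

	layoutstats_timer = 10;	/* as if booted with nfs4.layoutstats_timer=10 */
	for (time_t t = 5; t <= 30; t += 5)
		printf("t=%2ld send=%d\n", (long)t,
		       should_send_layoutstats(&last, t, 60));
	return 0;
}
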
-67
drivers/infiniband/core/verbs.c
··· 1144 1144 } 1145 1145 EXPORT_SYMBOL(ib_get_dma_mr); 1146 1146 1147 - struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, 1148 - struct ib_phys_buf *phys_buf_array, 1149 - int num_phys_buf, 1150 - int mr_access_flags, 1151 - u64 *iova_start) 1152 - { 1153 - struct ib_mr *mr; 1154 - int err; 1155 - 1156 - err = ib_check_mr_access(mr_access_flags); 1157 - if (err) 1158 - return ERR_PTR(err); 1159 - 1160 - if (!pd->device->reg_phys_mr) 1161 - return ERR_PTR(-ENOSYS); 1162 - 1163 - mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, 1164 - mr_access_flags, iova_start); 1165 - 1166 - if (!IS_ERR(mr)) { 1167 - mr->device = pd->device; 1168 - mr->pd = pd; 1169 - mr->uobject = NULL; 1170 - atomic_inc(&pd->usecnt); 1171 - atomic_set(&mr->usecnt, 0); 1172 - } 1173 - 1174 - return mr; 1175 - } 1176 - EXPORT_SYMBOL(ib_reg_phys_mr); 1177 - 1178 - int ib_rereg_phys_mr(struct ib_mr *mr, 1179 - int mr_rereg_mask, 1180 - struct ib_pd *pd, 1181 - struct ib_phys_buf *phys_buf_array, 1182 - int num_phys_buf, 1183 - int mr_access_flags, 1184 - u64 *iova_start) 1185 - { 1186 - struct ib_pd *old_pd; 1187 - int ret; 1188 - 1189 - ret = ib_check_mr_access(mr_access_flags); 1190 - if (ret) 1191 - return ret; 1192 - 1193 - if (!mr->device->rereg_phys_mr) 1194 - return -ENOSYS; 1195 - 1196 - if (atomic_read(&mr->usecnt)) 1197 - return -EBUSY; 1198 - 1199 - old_pd = mr->pd; 1200 - 1201 - ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, 1202 - phys_buf_array, num_phys_buf, 1203 - mr_access_flags, iova_start); 1204 - 1205 - if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { 1206 - atomic_dec(&old_pd->usecnt); 1207 - atomic_inc(&pd->usecnt); 1208 - } 1209 - 1210 - return ret; 1211 - } 1212 - EXPORT_SYMBOL(ib_rereg_phys_mr); 1213 - 1214 1147 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) 1215 1148 { 1216 1149 return mr->device->query_mr ?
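
The removed verbs used the kernel's ERR_PTR convention, visible above in ib_reg_phys_mr(): an errno value travels inside the returned pointer rather than through a separate status argument. A self-contained userspace re-implementation of the idiom, for illustration only (the kernel's real definitions live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* mirrors the kernel's limit in <linux/err.h> */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *get_mr(int fail)
{
	static int mr = 42;	/* stands in for a real memory region */

	if (fail)
		return ERR_PTR(-ENOSYS);	/* no driver support */
	return &mr;
}

int main(void)
{
	void *mr = get_mr(1);

	if (IS_ERR(mr))
		printf("error: %ld\n", PTR_ERR(mr));
	return 0;
}
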
+1 -18
fs/nfs/blocklayout/blocklayout.h
··· 46 46 47 47 struct pnfs_block_dev; 48 48 49 - enum pnfs_block_volume_type { 50 - PNFS_BLOCK_VOLUME_SIMPLE = 0, 51 - PNFS_BLOCK_VOLUME_SLICE = 1, 52 - PNFS_BLOCK_VOLUME_CONCAT = 2, 53 - PNFS_BLOCK_VOLUME_STRIPE = 3, 54 - }; 55 - 56 49 #define PNFS_BLOCK_MAX_UUIDS 4 57 50 #define PNFS_BLOCK_MAX_DEVICES 64 58 51 ··· 110 117 struct pnfs_block_dev_map *map); 111 118 }; 112 119 113 - enum exstate4 { 114 - PNFS_BLOCK_READWRITE_DATA = 0, 115 - PNFS_BLOCK_READ_DATA = 1, 116 - PNFS_BLOCK_INVALID_DATA = 2, /* mapped, but data is invalid */ 117 - PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */ 118 - }; 119 - 120 120 /* sector_t fields are all in 512-byte sectors */ 121 121 struct pnfs_block_extent { 122 122 union { ··· 120 134 sector_t be_f_offset; /* the starting offset in the file */ 121 135 sector_t be_length; /* the size of the extent */ 122 136 sector_t be_v_offset; /* the starting offset in the volume */ 123 - enum exstate4 be_state; /* the state of this extent */ 137 + enum pnfs_block_extent_state be_state; /* the state of this extent */ 124 138 #define EXTENT_WRITTEN 1 125 139 #define EXTENT_COMMITTING 2 126 140 unsigned int be_tag; 127 141 }; 128 - 129 - /* on the wire size of the extent */ 130 - #define BL_EXTENT_SIZE (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE) 131 142 132 143 struct pnfs_block_layout { 133 144 struct pnfs_layout_hdr bl_layout;
+7 -2
fs/nfs/blocklayout/dev.c
··· 22 22 kfree(dev->children); 23 23 } else { 24 24 if (dev->bdev) 25 - blkdev_put(dev->bdev, FMODE_READ); 25 + blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE); 26 26 } 27 27 } 28 28 ··· 65 65 return -EIO; 66 66 p = xdr_decode_hyper(p, &b->simple.sigs[i].offset); 67 67 b->simple.sigs[i].sig_len = be32_to_cpup(p++); 68 + if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) { 69 + pr_info("signature too long: %d\n", 70 + b->simple.sigs[i].sig_len); 71 + return -EIO; 72 + } 68 73 69 74 p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len); 70 75 if (!p) ··· 200 195 if (!dev) 201 196 return -EIO; 202 197 203 - d->bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); 198 + d->bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL); 204 199 if (IS_ERR(d->bdev)) { 205 200 printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n", 206 201 MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev));
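
The added check validates the server-supplied signature length before it is used to size the following xdr_inline_decode(), so a malformed device reply cannot oversize the copy. A hedged userspace sketch of the same validate-then-copy shape (the struct and buffer here are stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PNFS_BLOCK_UUID_LEN 128	/* stand-in bound, as in blocklayout.h */

struct sig {
	uint32_t len;
	unsigned char data[PNFS_BLOCK_UUID_LEN];
};

static int decode_sig(struct sig *s, const unsigned char *wire,
		      uint32_t wire_len)
{
	if (wire_len > PNFS_BLOCK_UUID_LEN) {
		fprintf(stderr, "signature too long: %u\n", (unsigned)wire_len);
		return -1;	/* the kernel returns -EIO here */
	}
	s->len = wire_len;
	memcpy(s->data, wire, wire_len);
	return 0;
}

int main(void)
{
	struct sig s;
	unsigned char wire[16] = "example";

	printf("ok=%d\n", decode_sig(&s, wire, sizeof(wire)));
	printf("ok=%d\n", decode_sig(&s, wire, 4096));	/* rejected */
	return 0;
}
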
+11 -8
fs/nfs/blocklayout/extent_tree.c
··· 462 462 return err; 463 463 } 464 464 465 + static size_t ext_tree_layoutupdate_size(size_t count) 466 + { 467 + return sizeof(__be32) /* number of entries */ + 468 + PNFS_BLOCK_EXTENT_SIZE * count; 469 + } 470 + 465 471 static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg, 466 472 size_t buffer_size) 467 473 { ··· 495 489 continue; 496 490 497 491 (*count)++; 498 - if (*count * BL_EXTENT_SIZE > buffer_size) { 492 + if (ext_tree_layoutupdate_size(*count) > buffer_size) { 499 493 /* keep counting.. */ 500 494 ret = -ENOSPC; 501 495 continue; ··· 536 530 if (unlikely(ret)) { 537 531 ext_tree_free_commitdata(arg, buffer_size); 538 532 539 - buffer_size = sizeof(__be32) + BL_EXTENT_SIZE * count; 533 + buffer_size = ext_tree_layoutupdate_size(count); 540 534 count = 0; 541 535 542 536 arg->layoutupdate_pages = ··· 555 549 } 556 550 557 551 *start_p = cpu_to_be32(count); 558 - arg->layoutupdate_len = sizeof(__be32) + BL_EXTENT_SIZE * count; 552 + arg->layoutupdate_len = ext_tree_layoutupdate_size(count); 559 553 560 554 if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) { 561 - __be32 *p = start_p; 555 + void *p = start_p, *end = p + arg->layoutupdate_len; 562 556 int i = 0; 563 557 564 - for (p = start_p; 565 - p < start_p + arg->layoutupdate_len; 566 - p += PAGE_SIZE) { 558 + for ( ; p < end; p += PAGE_SIZE) 567 559 arg->layoutupdate_pages[i++] = vmalloc_to_page(p); 568 - } 569 560 } 570 561 571 562 dprintk("%s found %zu ranges\n", __func__, count);
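
The new ext_tree_layoutupdate_size() makes the LAYOUTCOMMIT opaque sizing explicit: a 32-bit entry count followed by fixed-size extents. A runnable sketch of the arithmetic, using the on-wire extent size that the removed BL_EXTENT_SIZE macro spelled out (7 words plus a device ID):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NFS4_DEVICEID4_SIZE 16
#define PNFS_BLOCK_EXTENT_SIZE (7 * sizeof(uint32_t) + NFS4_DEVICEID4_SIZE)

static size_t ext_tree_layoutupdate_size(size_t count)
{
	return sizeof(uint32_t) /* number of entries */ +
	       PNFS_BLOCK_EXTENT_SIZE * count;
}

int main(void)
{
	for (size_t count = 0; count <= 3; count++)
		printf("%zu extents -> %zu bytes\n",
		       count, ext_tree_layoutupdate_size(count));
	return 0;
}
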
-4
fs/nfs/callback.c
··· 162 162 spin_lock_init(&serv->sv_cb_lock); 163 163 init_waitqueue_head(&serv->sv_cb_waitq); 164 164 rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); 165 - if (IS_ERR(rqstp)) { 166 - svc_xprt_put(serv->sv_bc_xprt); 167 - serv->sv_bc_xprt = NULL; 168 - } 169 165 dprintk("--> %s return %d\n", __func__, PTR_ERR_OR_ZERO(rqstp)); 170 166 return rqstp; 171 167 }
+7 -2
fs/nfs/callback_proc.c
··· 40 40 rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 41 41 42 42 inode = nfs_delegation_find_inode(cps->clp, &args->fh); 43 - if (inode == NULL) 43 + if (inode == NULL) { 44 + trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL, 45 + -ntohl(res->status)); 44 46 goto out; 47 + } 45 48 nfsi = NFS_I(inode); 46 49 rcu_read_lock(); 47 50 delegation = rcu_dereference(nfsi->delegation); ··· 63 60 res->status = 0; 64 61 out_iput: 65 62 rcu_read_unlock(); 63 + trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status)); 66 64 iput(inode); 67 65 out: 68 66 dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status)); ··· 198 194 spin_unlock(&ino->i_lock); 199 195 pnfs_free_lseg_list(&free_me_list); 200 196 pnfs_put_layout_hdr(lo); 197 + trace_nfs4_cb_layoutrecall_inode(clp, &args->cbl_fh, ino, -rv); 201 198 iput(ino); 202 199 out: 203 200 return rv; ··· 559 554 status = htonl(NFS4_OK); 560 555 561 556 nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid); 562 - nfs41_server_notify_target_slotid_update(cps->clp); 557 + nfs41_notify_server(cps->clp); 563 558 out: 564 559 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 565 560 return status;
+2 -111
fs/nfs/client.c
··· 20 20 #include <linux/stat.h> 21 21 #include <linux/errno.h> 22 22 #include <linux/unistd.h> 23 + #include <linux/sunrpc/addr.h> 23 24 #include <linux/sunrpc/clnt.h> 24 25 #include <linux/sunrpc/stats.h> 25 26 #include <linux/sunrpc/metrics.h> ··· 286 285 } 287 286 EXPORT_SYMBOL_GPL(nfs_put_client); 288 287 289 - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 290 - /* 291 - * Test if two ip6 socket addresses refer to the same socket by 292 - * comparing relevant fields. The padding bytes specifically, are not 293 - * compared. sin6_flowinfo is not compared because it only affects QoS 294 - * and sin6_scope_id is only compared if the address is "link local" 295 - * because "link local" addresses need only be unique to a specific 296 - * link. Conversely, ordinary unicast addresses might have different 297 - * sin6_scope_id. 298 - * 299 - * The caller should ensure both socket addresses are AF_INET6. 300 - */ 301 - static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, 302 - const struct sockaddr *sa2) 303 - { 304 - const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; 305 - const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; 306 - 307 - if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr)) 308 - return 0; 309 - else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL) 310 - return sin1->sin6_scope_id == sin2->sin6_scope_id; 311 - 312 - return 1; 313 - } 314 - #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ 315 - static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, 316 - const struct sockaddr *sa2) 317 - { 318 - return 0; 319 - } 320 - #endif 321 - 322 - /* 323 - * Test if two ip4 socket addresses refer to the same socket, by 324 - * comparing relevant fields. The padding bytes specifically, are 325 - * not compared. 326 - * 327 - * The caller should ensure both socket addresses are AF_INET. 328 - */ 329 - static int nfs_sockaddr_match_ipaddr4(const struct sockaddr *sa1, 330 - const struct sockaddr *sa2) 331 - { 332 - const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; 333 - const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; 334 - 335 - return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; 336 - } 337 - 338 - static int nfs_sockaddr_cmp_ip6(const struct sockaddr *sa1, 339 - const struct sockaddr *sa2) 340 - { 341 - const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; 342 - const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; 343 - 344 - return nfs_sockaddr_match_ipaddr6(sa1, sa2) && 345 - (sin1->sin6_port == sin2->sin6_port); 346 - } 347 - 348 - static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1, 349 - const struct sockaddr *sa2) 350 - { 351 - const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; 352 - const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; 353 - 354 - return nfs_sockaddr_match_ipaddr4(sa1, sa2) && 355 - (sin1->sin_port == sin2->sin_port); 356 - } 357 - 358 - #if defined(CONFIG_NFS_V4_1) 359 - /* 360 - * Test if two socket addresses represent the same actual socket, 361 - * by comparing (only) relevant fields, excluding the port number. 362 - */ 363 - int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, 364 - const struct sockaddr *sa2) 365 - { 366 - if (sa1->sa_family != sa2->sa_family) 367 - return 0; 368 - 369 - switch (sa1->sa_family) { 370 - case AF_INET: 371 - return nfs_sockaddr_match_ipaddr4(sa1, sa2); 372 - case AF_INET6: 373 - return nfs_sockaddr_match_ipaddr6(sa1, sa2); 374 - } 375 - return 0; 376 - } 377 - EXPORT_SYMBOL_GPL(nfs_sockaddr_match_ipaddr); 378 - #endif /* CONFIG_NFS_V4_1 */ 379 - 380 - /* 381 - * Test if two socket addresses represent the same actual socket, 382 - * by comparing (only) relevant fields, including the port number. 383 - */ 384 - static int nfs_sockaddr_cmp(const struct sockaddr *sa1, 385 - const struct sockaddr *sa2) 386 - { 387 - if (sa1->sa_family != sa2->sa_family) 388 - return 0; 389 - 390 - switch (sa1->sa_family) { 391 - case AF_INET: 392 - return nfs_sockaddr_cmp_ip4(sa1, sa2); 393 - case AF_INET6: 394 - return nfs_sockaddr_cmp_ip6(sa1, sa2); 395 - } 396 - return 0; 397 - } 398 - 399 288 /* 400 289 * Find an nfs_client on the list that matches the initialisation data 401 290 * that is supplied. ··· 312 421 if (clp->cl_minorversion != data->minorversion) 313 422 continue; 314 423 /* Match the full socket address */ 315 - if (!nfs_sockaddr_cmp(sap, clap)) 424 + if (!rpc_cmp_addr_port(sap, clap)) 316 425 continue; 317 426 atomic_inc(&clp->cl_count);
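
The removed private helpers are replaced by the generic rpc_cmp_addr_port() from <linux/sunrpc/addr.h>. A userspace sketch of the comparison it performs, mirroring the deleted code: IPv6 scope IDs are compared only for link-local addresses, and the port must match as well:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static bool ipv6_match(const struct sockaddr_in6 *a,
		       const struct sockaddr_in6 *b)
{
	if (memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)) != 0)
		return false;
	if (IN6_IS_ADDR_LINKLOCAL(&a->sin6_addr))
		return a->sin6_scope_id == b->sin6_scope_id;
	return true;
}

static bool cmp_addr_port(const struct sockaddr *sa1,
			  const struct sockaddr *sa2)
{
	if (sa1->sa_family != sa2->sa_family)
		return false;
	switch (sa1->sa_family) {
	case AF_INET: {
		const struct sockaddr_in *a = (const void *)sa1;
		const struct sockaddr_in *b = (const void *)sa2;

		return a->sin_addr.s_addr == b->sin_addr.s_addr &&
		       a->sin_port == b->sin_port;
	}
	case AF_INET6: {
		const struct sockaddr_in6 *a = (const void *)sa1;
		const struct sockaddr_in6 *b = (const void *)sa2;

		return ipv6_match(a, b) && a->sin6_port == b->sin6_port;
	}
	}
	return false;
}

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_port = htons(2049) };
	struct sockaddr_in b = a;

	printf("match=%d\n", cmp_addr_port((struct sockaddr *)&a,
					   (struct sockaddr *)&b));
	return 0;
}
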
+27 -2
fs/nfs/delegation.c
··· 175 175 if (delegation->inode != NULL) { 176 176 nfs4_stateid_copy(&delegation->stateid, &res->delegation); 177 177 delegation->type = res->delegation_type; 178 - delegation->maxsize = res->maxsize; 178 + delegation->pagemod_limit = res->pagemod_limit; 179 179 oldcred = delegation->cred; 180 180 delegation->cred = get_rpccred(cred); 181 181 clear_bit(NFS_DELEGATION_NEED_RECLAIM, ··· 337 337 return -ENOMEM; 338 338 nfs4_stateid_copy(&delegation->stateid, &res->delegation); 339 339 delegation->type = res->delegation_type; 340 - delegation->maxsize = res->maxsize; 340 + delegation->pagemod_limit = res->pagemod_limit; 341 341 delegation->change_attr = inode->i_version; 342 342 delegation->cred = get_rpccred(cred); 343 343 delegation->inode = inode; ··· 897 897 nfs4_stateid_copy(dst, &delegation->stateid); 898 898 nfs_mark_delegation_referenced(delegation); 899 899 } 900 + rcu_read_unlock(); 901 + return ret; 902 + } 903 + 904 + /** 905 + * nfs4_delegation_flush_on_close - Check if we must flush file on close 906 + * @inode: inode to check 907 + * 908 + * This function checks the number of outstanding writes to the file 909 + * against the delegation 'space_limit' field to see if 910 + * the spec requires us to flush the file on close. 911 + */ 912 + bool nfs4_delegation_flush_on_close(const struct inode *inode) 913 + { 914 + struct nfs_inode *nfsi = NFS_I(inode); 915 + struct nfs_delegation *delegation; 916 + bool ret = true; 917 + 918 + rcu_read_lock(); 919 + delegation = rcu_dereference(nfsi->delegation); 920 + if (delegation == NULL || !(delegation->type & FMODE_WRITE)) 921 + goto out; 922 + if (nfsi->nrequests < delegation->pagemod_limit) 923 + ret = false; 924 + out: 900 925 rcu_read_unlock(); 901 926 return ret; 902 927 }
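
nfs4_delegation_flush_on_close() lets a close skip the synchronous flush while the client holds a write delegation and the number of outstanding dirty-page requests stays under the server's pagemod_limit. A simplified, runnable model of that decision (the struct layout here is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures */
struct delegation {
	bool write;			/* delegation covers FMODE_WRITE */
	unsigned long pagemod_limit;	/* server-imposed cap */
};

struct inode {
	struct delegation *deleg;
	unsigned long nrequests;	/* outstanding dirty-page requests */
};

/* Mirrors nfs4_delegation_flush_on_close(): flush unless we hold a
 * write delegation and stay under the page-modification cap. */
static bool flush_on_close(const struct inode *inode)
{
	const struct delegation *d = inode->deleg;

	if (d && d->write && inode->nrequests < d->pagemod_limit)
		return false;
	return true;
}

int main(void)
{
	struct delegation d = { .write = true, .pagemod_limit = 100 };
	struct inode i = { .deleg = &d, .nrequests = 10 };

	printf("flush=%d\n", flush_on_close(&i));	/* 0: under the limit */
	i.nrequests = 1000;
	printf("flush=%d\n", flush_on_close(&i));	/* 1: must flush */
	return 0;
}
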
+2 -1
fs/nfs/delegation.h
··· 18 18 struct inode *inode; 19 19 nfs4_stateid stateid; 20 20 fmode_t type; 21 - loff_t maxsize; 21 + unsigned long pagemod_limit; 22 22 __u64 change_attr; 23 23 unsigned long flags; 24 24 spinlock_t lock; ··· 61 61 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); 62 62 int nfs4_have_delegation(struct inode *inode, fmode_t flags); 63 63 int nfs4_check_delegation(struct inode *inode, fmode_t flags); 64 + bool nfs4_delegation_flush_on_close(const struct inode *inode); 64 65 65 66 #endif 66 67
+6 -14
fs/nfs/dir.c
··· 583 583 } 584 584 585 585 static 586 - void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages) 586 + void nfs_readdir_free_pages(struct page **pages, unsigned int npages) 587 587 { 588 588 unsigned int i; 589 589 for (i = 0; i < npages; i++) 590 590 put_page(pages[i]); 591 591 } 592 592 593 - static 594 - void nfs_readdir_free_large_page(void *ptr, struct page **pages, 595 - unsigned int npages) 596 - { 597 - nfs_readdir_free_pagearray(pages, npages); 598 - } 599 - 600 593 /* 601 594 * nfs_readdir_large_page will allocate pages that must be freed with a call 602 - * to nfs_readdir_free_large_page 595 + * to nfs_readdir_free_pagearray 603 596 */ 604 597 static 605 - int nfs_readdir_large_page(struct page **pages, unsigned int npages) 598 + int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) 606 599 { 607 600 unsigned int i; 608 601 ··· 608 615 return 0; 609 616 610 617 out_freepages: 611 - nfs_readdir_free_pagearray(pages, i); 618 + nfs_readdir_free_pages(pages, i); 612 619 return -ENOMEM; 613 620 } 614 621 ··· 616 623 int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode) 617 624 { 618 625 struct page *pages[NFS_MAX_READDIR_PAGES]; 619 - void *pages_ptr = NULL; 620 626 struct nfs_entry entry; 621 627 struct file *file = desc->file; 622 628 struct nfs_cache_array *array; ··· 645 653 memset(array, 0, sizeof(struct nfs_cache_array)); 646 654 array->eof_index = -1; 647 655 648 - status = nfs_readdir_large_page(pages, array_size); 656 + status = nfs_readdir_alloc_pages(pages, array_size); 649 657 if (status < 0) 650 658 goto out_release_array; 651 659 do { ··· 663 671 } 664 672 } while (array->eof_index < 0); 665 673 666 - nfs_readdir_free_large_page(pages_ptr, pages, array_size); 674 + nfs_readdir_free_pages(pages, array_size); 667 675 out_release_array: 668 676 nfs_readdir_release_array(page); 669 677 out_label_free:
+6 -15
fs/nfs/file.c
··· 82 82 dprintk("NFS: release(%pD2)\n", filp); 83 83 84 84 nfs_inc_stats(inode, NFSIOS_VFSRELEASE); 85 - return nfs_release(inode, filp); 85 + nfs_file_clear_open_context(filp); 86 + return 0; 86 87 } 87 88 EXPORT_SYMBOL_GPL(nfs_file_release); 88 89 ··· 142 141 /* 143 142 * Flush all dirty pages, and check for write errors. 144 143 */ 145 - int 144 + static int 146 145 nfs_file_flush(struct file *file, fl_owner_t id) 147 146 { 148 147 struct inode *inode = file_inode(file); ··· 153 152 if ((file->f_mode & FMODE_WRITE) == 0) 154 153 return 0; 155 154 156 - /* 157 - * If we're holding a write delegation, then just start the i/o 158 - * but don't wait for completion (or send a commit). 159 - */ 160 - if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 161 - return filemap_fdatawrite(file->f_mapping); 162 - 163 155 /* Flush writes to the server and return any errors */ 164 156 return vfs_fsync(file, 0); 165 157 } 166 - EXPORT_SYMBOL_GPL(nfs_file_flush); 167 158 168 159 ssize_t 169 160 nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ··· 637 644 .page_mkwrite = nfs_vm_page_mkwrite, 638 645 }; 639 646 640 - static int nfs_need_sync_write(struct file *filp, struct inode *inode) 647 + static int nfs_need_check_write(struct file *filp, struct inode *inode) 641 648 { 642 649 struct nfs_open_context *ctx; 643 650 644 - if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC)) 645 - return 1; 646 651 ctx = nfs_file_open_context(filp); 647 652 if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) || 648 653 nfs_ctx_key_to_expire(ctx)) ··· 690 699 if (result > 0) 691 700 written = result; 692 701 693 - /* Return error values for O_DSYNC and IS_SYNC() */ 694 - if (result >= 0 && nfs_need_sync_write(file, inode)) { 702 + /* Return error values */ 703 + if (result >= 0 && nfs_need_check_write(file, inode)) { 695 704 int err = vfs_fsync(file, 0); 696 705 if (err < 0) 697 706 result = err;
+291 -133
fs/nfs/flexfilelayout/flexfilelayout.c
··· 34 34 ffl = kzalloc(sizeof(*ffl), gfp_flags); 35 35 if (ffl) { 36 36 INIT_LIST_HEAD(&ffl->error_list); 37 + INIT_LIST_HEAD(&ffl->mirrors); 37 38 return &ffl->generic_hdr; 38 39 } else 39 40 return NULL; ··· 136 135 return 0; 137 136 } 138 137 138 + static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1, 139 + const struct nfs4_ff_layout_mirror *m2) 140 + { 141 + int i, j; 142 + 143 + if (m1->fh_versions_cnt != m2->fh_versions_cnt) 144 + return false; 145 + for (i = 0; i < m1->fh_versions_cnt; i++) { 146 + bool found_fh = false; 147 + for (j = 0; j < m2->fh_versions_cnt; i++) { 148 + if (nfs_compare_fh(&m1->fh_versions[i], 149 + &m2->fh_versions[j]) == 0) { 150 + found_fh = true; 151 + break; 152 + } 153 + } 154 + if (!found_fh) 155 + return false; 156 + } 157 + return true; 158 + } 159 + 160 + static struct nfs4_ff_layout_mirror * 161 + ff_layout_add_mirror(struct pnfs_layout_hdr *lo, 162 + struct nfs4_ff_layout_mirror *mirror) 163 + { 164 + struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); 165 + struct nfs4_ff_layout_mirror *pos; 166 + struct inode *inode = lo->plh_inode; 167 + 168 + spin_lock(&inode->i_lock); 169 + list_for_each_entry(pos, &ff_layout->mirrors, mirrors) { 170 + if (mirror->mirror_ds != pos->mirror_ds) 171 + continue; 172 + if (!ff_mirror_match_fh(mirror, pos)) 173 + continue; 174 + if (atomic_inc_not_zero(&pos->ref)) { 175 + spin_unlock(&inode->i_lock); 176 + return pos; 177 + } 178 + } 179 + list_add(&mirror->mirrors, &ff_layout->mirrors); 180 + mirror->layout = lo; 181 + spin_unlock(&inode->i_lock); 182 + return mirror; 183 + } 184 + 185 + static void 186 + ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror) 187 + { 188 + struct inode *inode; 189 + if (mirror->layout == NULL) 190 + return; 191 + inode = mirror->layout->plh_inode; 192 + spin_lock(&inode->i_lock); 193 + list_del(&mirror->mirrors); 194 + spin_unlock(&inode->i_lock); 195 + mirror->layout = NULL; 196 + } 197 + 198 + static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags) 199 + { 200 + struct nfs4_ff_layout_mirror *mirror; 201 + 202 + mirror = kzalloc(sizeof(*mirror), gfp_flags); 203 + if (mirror != NULL) { 204 + spin_lock_init(&mirror->lock); 205 + atomic_set(&mirror->ref, 1); 206 + INIT_LIST_HEAD(&mirror->mirrors); 207 + } 208 + return mirror; 209 + } 210 + 211 + static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror) 212 + { 213 + ff_layout_remove_mirror(mirror); 214 + kfree(mirror->fh_versions); 215 + if (mirror->cred) 216 + put_rpccred(mirror->cred); 217 + nfs4_ff_layout_put_deviceid(mirror->mirror_ds); 218 + kfree(mirror); 219 + } 220 + 221 + static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror) 222 + { 223 + if (mirror != NULL && atomic_dec_and_test(&mirror->ref)) 224 + ff_layout_free_mirror(mirror); 225 + } 226 + 139 227 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls) 140 228 { 141 229 int i; ··· 234 144 /* normally mirror_ds is freed in 235 145 * .free_deviceid_node but we still do it here 236 146 * for .alloc_lseg error path */ 237 - if (fls->mirror_array[i]) { 238 - kfree(fls->mirror_array[i]->fh_versions); 239 - nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds); 240 - kfree(fls->mirror_array[i]); 241 - } 147 + ff_layout_put_mirror(fls->mirror_array[i]); 242 148 } 243 149 kfree(fls->mirror_array); 244 150 fls->mirror_array = NULL; ··· 265 179 ff_layout_free_mirror_array(fls); 266 180 kfree(fls); 267 181 } 182 + } 183 + 184 + static bool 185 + ff_lseg_range_is_after(const struct pnfs_layout_range *l1, 186 + const struct pnfs_layout_range *l2) 187 + { 188 + u64 end1, end2; 189 + 190 + if (l1->iomode != l2->iomode) 191 + return l1->iomode != IOMODE_READ; 192 + end1 = pnfs_calc_offset_end(l1->offset, l1->length); 193 + end2 = pnfs_calc_offset_end(l2->offset, l2->length); 194 + if (end1 < l2->offset) 195 + return false; 196 + if (end2 < l1->offset) 197 + return true; 198 + return l2->offset <= l1->offset; 199 + } 200 + 201 + static bool 202 + ff_lseg_merge(struct pnfs_layout_segment *new, 203 + struct pnfs_layout_segment *old) 204 + { 205 + u64 new_end, old_end; 206 + 207 + if (new->pls_range.iomode != old->pls_range.iomode) 208 + return false; 209 + old_end = pnfs_calc_offset_end(old->pls_range.offset, 210 + old->pls_range.length); 211 + if (old_end < new->pls_range.offset) 212 + return false; 213 + new_end = pnfs_calc_offset_end(new->pls_range.offset, 214 + new->pls_range.length); 215 + if (new_end < old->pls_range.offset) 216 + return false; 217 + 218 + /* Mergeable: copy info from 'old' to 'new' */ 219 + if (new_end < old_end) 220 + new_end = old_end; 221 + if (new->pls_range.offset < old->pls_range.offset) 222 + new->pls_range.offset = old->pls_range.offset; 223 + new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset, 224 + new_end); 225 + if (test_bit(NFS_LSEG_ROC, &old->pls_flags)) 226 + set_bit(NFS_LSEG_ROC, &new->pls_flags); 227 + if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags)) 228 + set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags); 229 + return true; 230 + } 231 + 232 + static void 233 + ff_layout_add_lseg(struct pnfs_layout_hdr *lo, 234 + struct pnfs_layout_segment *lseg, 235 + struct list_head *free_me) 236 + { 237 + pnfs_generic_layout_insert_lseg(lo, lseg, 238 + ff_lseg_range_is_after, 239 + ff_lseg_merge, 240 + free_me); 268 241 } 269 242 270 243 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls) ··· 391 246 goto out_err_free; 392 247 393 248 for (i = 0; i < fls->mirror_array_cnt; i++) { 249 + struct nfs4_ff_layout_mirror *mirror; 394 250 struct nfs4_deviceid devid; 395 251 struct nfs4_deviceid_node *idnode; 396 252 u32 ds_count; ··· 408 262 if (ds_count != 1) 409 263 goto out_err_free; 410 264 411 - fls->mirror_array[i] = 412 - kzalloc(sizeof(struct nfs4_ff_layout_mirror), 413 - gfp_flags); 265 + fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags); 414 266 if (fls->mirror_array[i] == NULL) { 415 267 rc = -ENOMEM; 416 268 goto out_err_free; 417 269 } 418 270 419 - spin_lock_init(&fls->mirror_array[i]->lock); 420 271 fls->mirror_array[i]->ds_count = ds_count; 421 - fls->mirror_array[i]->lseg = &fls->generic_hdr; 422 272 423 273 /* deviceid */ 424 274 rc = decode_deviceid(&stream, &devid); ··· 480 338 if (rc) 481 339 goto out_err_free; 482 340 341 + mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]); 342 + if (mirror != fls->mirror_array[i]) { 343 + ff_layout_free_mirror(fls->mirror_array[i]); 344 + fls->mirror_array[i] = mirror; 345 + } 346 + 483 347 dprintk("%s: uid %d gid %d\n", __func__, 484 348 fls->mirror_array[i]->uid, 485 349 fls->mirror_array[i]->gid); ··· 527 379 ff_layout_free_lseg(struct pnfs_layout_segment *lseg) 528 380 { 529 381 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 530 - int i; 531 382 532 383 dprintk("--> %s\n", __func__); 533 - 534 - for (i = 0; i < fls->mirror_array_cnt; i++) { 535 - if (fls->mirror_array[i]) { 536 - nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds); 537 - fls->mirror_array[i]->mirror_ds = NULL; 538 - if (fls->mirror_array[i]->cred) { 539 - put_rpccred(fls->mirror_array[i]->cred); 540 - fls->mirror_array[i]->cred = NULL; 541 - } 542 - } 543 - } 544 384 545 385 if (lseg->pls_range.iomode == IOMODE_RW) { 546 386 struct nfs4_flexfile_layout *ffl; ··· 555 419 } 556 420 557 421 static void 558 - nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer) 422 + nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) 559 423 { 560 424 /* first IO request? */ 561 425 if (atomic_inc_return(&timer->n_ops) == 1) { 562 - timer->start_time = ktime_get(); 426 + timer->start_time = now; 563 427 } 564 428 } 565 429 566 430 static ktime_t 567 - nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer) 431 + nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) 568 432 { 569 - ktime_t start, now; 433 + ktime_t start; 570 434 571 435 if (atomic_dec_return(&timer->n_ops) < 0) 572 436 WARN_ON_ONCE(1); 573 437 574 - now = ktime_get(); 575 438 start = timer->start_time; 576 439 timer->start_time = now; 577 440 return ktime_sub(now, start); 578 441 } 579 442 580 - static ktime_t 581 - nfs4_ff_layout_calc_completion_time(struct rpc_task *task) 582 - { 583 - return ktime_sub(ktime_get(), task->tk_start); 584 - } 585 - 586 443 static bool 587 444 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror, 588 - struct nfs4_ff_layoutstat *layoutstat) 445 + struct nfs4_ff_layoutstat *layoutstat, 446 + ktime_t now) 589 447 { 590 448 static const ktime_t notime = {0}; 591 - ktime_t now = ktime_get(); 449 + s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL; 592 450 593 - nfs4_ff_start_busy_timer(&layoutstat->busy_timer); 451 + nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now); 594 452 if (ktime_equal(mirror->start_time, notime)) 595 453 mirror->start_time = now; 596 454 if (ktime_equal(mirror->last_report_time, notime)) 597 455 mirror->last_report_time = now; 456 + if (layoutstats_timer != 0) 457 + report_interval = (s64)layoutstats_timer * 1000LL; 598 458 if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >= 599 - FF_LAYOUTSTATS_REPORT_INTERVAL) { 459 + report_interval) { 600 460 mirror->last_report_time = now; 601 461 return true; 602 462 } ··· 614 482 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat, 615 483 __u64 requested, 616 484 __u64 completed, 617 - ktime_t time_completed) 485 + ktime_t time_completed, 486 + ktime_t time_started) 618 487 { 619 488 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; 489 + ktime_t completion_time = ktime_sub(time_completed, time_started); 620 490 ktime_t timer; 621 491 622 492 iostat->ops_completed++; 623 493 iostat->bytes_completed += completed; 624 494 iostat->bytes_not_delivered += requested - completed; 625 495 626 - timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer); 496 + timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed); 627 497 iostat->total_busy_time = 628 498 ktime_add(iostat->total_busy_time, timer); 629 499 iostat->aggregate_completion_time = 630 - ktime_add(iostat->aggregate_completion_time, time_completed); 500 + ktime_add(iostat->aggregate_completion_time, 501 + completion_time); 631 502 } 632 503 633 504 static void 634 - nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror, 635 - __u64 requested) 505 + nfs4_ff_layout_stat_io_start_read(struct inode *inode, 506 + struct nfs4_ff_layout_mirror *mirror, 507 + __u64 requested, ktime_t now) 636 508 { 637 509 bool report; 638 510 639 511 spin_lock(&mirror->lock); 640 - report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat); 512 + report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now); 641 513 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested); 642 514 spin_unlock(&mirror->lock); 643 515 644 516 if (report) 645 - pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode); 517 + pnfs_report_layoutstat(inode, GFP_KERNEL); 646 518 } 647 519 648 520 static void ··· 658 522 spin_lock(&mirror->lock); 659 523 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat, 660 524 requested, completed, 661 - nfs4_ff_layout_calc_completion_time(task)); 525 + ktime_get(), task->tk_start); 662 526 spin_unlock(&mirror->lock); 663 527 } 664 528 665 529 static void 666 - nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror, 667 - __u64 requested) 530 + nfs4_ff_layout_stat_io_start_write(struct inode *inode, 531 + struct nfs4_ff_layout_mirror *mirror, 532 + __u64 requested, ktime_t now) 668 533 { 669 534 bool report; 670 535 671 536 spin_lock(&mirror->lock); 672 - report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat); 537 + report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now); 673 538 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested); 674 539 spin_unlock(&mirror->lock); 675 540 676 541 if (report) 677 - pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode); 542 + pnfs_report_layoutstat(inode, GFP_NOIO); 678 543 } 679 544 680 545 static void ··· 690 553 691 554 spin_lock(&mirror->lock); 692 555 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat, 693 - requested, completed, 694 - nfs4_ff_layout_calc_completion_time(task)); 556 + requested, completed, ktime_get(), task->tk_start); 695 557 spin_unlock(&mirror->lock); 696 558 } 697 559 ··· 864 728 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); 865 729 866 730 /* no lseg means that pnfs is not in use, so no mirroring here */ 867 - pnfs_put_lseg(pgio->pg_lseg); 868 - pgio->pg_lseg = NULL; 869 731 nfs_pageio_reset_write_mds(pgio); 870 732 return 1; 871 733 } ··· 1065 931 if (task->tk_status >= 0) 1066 932 return 0; 1067 933 1068 - if (task->tk_status != -EJUKEBOX) { 934 + switch (task->tk_status) { 935 + /* File access problems. Don't mark the device as unavailable */ 936 + case -EACCES: 937 + case -ESTALE: 938 + case -EISDIR: 939 + case -EBADHANDLE: 940 + case -ELOOP: 941 + case -ENOSPC: 942 + break; 943 + case -EJUKEBOX: 944 + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 945 + goto out_retry; 946 + default: 1069 947 dprintk("%s DS connection error %d\n", __func__, 1070 948 task->tk_status); 1071 949 nfs4_mark_deviceid_unavailable(devid); 1072 - if (ff_layout_has_available_ds(lseg)) 1073 - return -NFS4ERR_RESET_TO_PNFS; 1074 - else 1075 - return -NFS4ERR_RESET_TO_MDS; 1076 950 } 1077 - 1078 - if (task->tk_status == -EJUKEBOX) 1079 - nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 951 + /* FIXME: Need to prevent infinite looping here. */ 952 + return -NFS4ERR_RESET_TO_PNFS; 953 + out_retry: 1080 954 task->tk_status = 0; 1081 955 rpc_restart_call(task); 1082 956 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); ··· 1114 972 1115 973 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, 1116 974 int idx, u64 offset, u64 length, 1117 - u32 status, int opnum) 975 + u32 status, int opnum, int error) 1118 976 { 1119 977 struct nfs4_ff_layout_mirror *mirror; 1120 978 int err; 979 + 980 + if (status == 0) { 981 + switch (error) { 982 + case -ETIMEDOUT: 983 + case -EPFNOSUPPORT: 984 + case -EPROTONOSUPPORT: 985 + case -EOPNOTSUPP: 986 + case -ECONNREFUSED: 987 + case -ECONNRESET: 988 + case -EHOSTDOWN: 989 + case -EHOSTUNREACH: 990 + case -ENETUNREACH: 991 + case -EADDRINUSE: 992 + case -ENOBUFS: 993 + case -EPIPE: 994 + case -EPERM: 995 + status = NFS4ERR_NXIO; 996 + break; 997 + case -EACCES: 998 + status = NFS4ERR_ACCESS; 999 + break; 1000 + default: 1001 + return; 1002 + } 1003 + } 1121 1004 1122 1005 mirror = FF_LAYOUT_COMP(lseg, idx); 1123 1006 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), 1124 1007 mirror, offset, length, status, opnum, 1125 1008 GFP_NOIO); 1009 + pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); 1126 1010 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status); 1127 1011 } 1128 1012 ··· 1157 989 static int ff_layout_read_done_cb(struct rpc_task *task, 1158 990 struct nfs_pgio_header *hdr) 1159 991 { 1160 - struct inode *inode; 1161 992 int err; 1162 993 1163 994 trace_nfs4_pnfs_read(hdr, task->tk_status); 1164 - if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) 1165 - hdr->res.op_status = NFS4ERR_NXIO; 1166 - if (task->tk_status < 0 && hdr->res.op_status) 995 + if (task->tk_status < 0) 1167 996 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, 1168 997 hdr->args.offset, hdr->args.count, 1169 998 hdr->res.op_status, OP_READ, 999 + task->tk_status); 1170 1000 err = ff_layout_async_handle_error(task, hdr->args.context->state, 1171 1001 hdr->ds_clp, hdr->lseg, 1172 1002 hdr->pgio_mirror_idx); ··· 1176 1010 pnfs_read_resend_pnfs(hdr); 1177 1011 return task->tk_status; 1178 1012 case -NFS4ERR_RESET_TO_MDS: 1179 - inode = hdr->lseg->pls_layout->plh_inode; 1180 - pnfs_error_mark_layout_for_return(inode, hdr->lseg); 1181 1013 ff_layout_reset_read(hdr); 1182 1014 return task->tk_status; 1183 1015 case -EAGAIN: ··· 1225 1061 static int ff_layout_read_prepare_common(struct rpc_task *task, 1226 1062 struct nfs_pgio_header *hdr) 1227 1063 { 1228 - nfs4_ff_layout_stat_io_start_read( 1064 + nfs4_ff_layout_stat_io_start_read(hdr->inode, 1229 1065 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1230 - hdr->args.count); 1066 + hdr->args.count, 1067 + task->tk_start); 1231 1068 1232 1069 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { 1233 1070 rpc_exit(task, -EIO); ··· 1328 1163 static int ff_layout_write_done_cb(struct rpc_task *task, 1329 1164 struct nfs_pgio_header *hdr) 1330 1165 { 1331 - struct inode *inode; 1332 1166 int err; 1333 1167 1334 1168 trace_nfs4_pnfs_write(hdr, task->tk_status); 1335 - if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) 1336 - hdr->res.op_status = NFS4ERR_NXIO; 1337 - if (task->tk_status < 0 && hdr->res.op_status) 1169 + if (task->tk_status < 0) 1338 1170 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, 1339 1171 hdr->args.offset, hdr->args.count, 1340 1172 hdr->res.op_status, OP_WRITE, 1173 + task->tk_status); 1341 1174 err = ff_layout_async_handle_error(task, hdr->args.context->state, 1342 1175 hdr->ds_clp, hdr->lseg, 1343 1176 hdr->pgio_mirror_idx); 1344 1177 1345 1178 switch (err) { 1346 1179 case -NFS4ERR_RESET_TO_PNFS: 1180 + pnfs_set_retry_layoutget(hdr->lseg->pls_layout); 1181 + ff_layout_reset_write(hdr, true); 1182 + return task->tk_status; 1347 1183 case -NFS4ERR_RESET_TO_MDS: 1348 - inode = hdr->lseg->pls_layout->plh_inode; 1349 - pnfs_error_mark_layout_for_return(inode, hdr->lseg); 1350 - if (err == -NFS4ERR_RESET_TO_PNFS) { 1351 - pnfs_set_retry_layoutget(hdr->lseg->pls_layout); 1352 - ff_layout_reset_write(hdr, true); 1353 - } else { 1354 - pnfs_clear_retry_layoutget(hdr->lseg->pls_layout); 1355 - ff_layout_reset_write(hdr, false); 1356 - } 1184 + pnfs_clear_retry_layoutget(hdr->lseg->pls_layout); 1185 + ff_layout_reset_write(hdr, false); 1357 1186 return task->tk_status; 1358 1187 case -EAGAIN: 1359 1188 rpc_restart_call_prepare(task); ··· 1358 1199 hdr->res.verf->committed == NFS_DATA_SYNC) 1359 1200 ff_layout_set_layoutcommit(hdr); 1360 1201 1202 + /* zero out fattr since we don't care DS attr at all */ 1203 + hdr->fattr.valid = 0; 1204 + if (task->tk_status >= 0) 1205 + nfs_writeback_update_inode(hdr); 1206 + 1361 1207 return 0; 1362 1208 } 1363 1209 1364 1210 static int ff_layout_commit_done_cb(struct rpc_task *task, 1365 1211 struct nfs_commit_data *data) 1366 1212 { 1367 - struct inode *inode; 1368 1213 int err; 1369 1214 1370 1215 trace_nfs4_pnfs_commit_ds(data, task->tk_status); 1371 - if (task->tk_status == -ETIMEDOUT && !data->res.op_status) 1372 - data->res.op_status = NFS4ERR_NXIO; 1373 - if (task->tk_status < 0 && data->res.op_status) 1216 + if (task->tk_status < 0) 1374 1217 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index, 1375 1218 data->args.offset, data->args.count, 1376 - data->res.op_status, OP_COMMIT); 1219 + data->res.op_status, OP_COMMIT, 1220 + task->tk_status); 1377 1221 err = ff_layout_async_handle_error(task, NULL, data->ds_clp, 1378 1222 data->lseg, data->ds_commit_index); 1379 1223 1380 1224 switch (err) { 1381 1225 case -NFS4ERR_RESET_TO_PNFS: 1226 + pnfs_set_retry_layoutget(data->lseg->pls_layout); 1227 + pnfs_generic_prepare_to_resend_writes(data); 1228 + return -EAGAIN; 1382 1229 case -NFS4ERR_RESET_TO_MDS: 1383 - inode = data->lseg->pls_layout->plh_inode; 1384 - pnfs_error_mark_layout_for_return(inode, data->lseg); 1385 - if (err == -NFS4ERR_RESET_TO_PNFS) 1386 - pnfs_set_retry_layoutget(data->lseg->pls_layout); 1387 - else 1388 - pnfs_clear_retry_layoutget(data->lseg->pls_layout); 1230 + pnfs_clear_retry_layoutget(data->lseg->pls_layout); 1389 1231 pnfs_generic_prepare_to_resend_writes(data); 1390 1232 return -EAGAIN; 1391 1233 case -EAGAIN: ··· 1404 1244 static int ff_layout_write_prepare_common(struct rpc_task *task, 1405 1245 struct nfs_pgio_header *hdr) 1406 1246 { 1407 - nfs4_ff_layout_stat_io_start_write( 1247 + nfs4_ff_layout_stat_io_start_write(hdr->inode, 1408 1248 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1409 - hdr->args.count); 1249 + hdr->args.count, 1250 + task->tk_start); 1410 1251 1411 1252 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { 1412 1253 rpc_exit(task, -EIO); ··· 1486 1325 static void ff_layout_commit_prepare_common(struct rpc_task *task, 1487 1326 struct nfs_commit_data *cdata) 1488 1327 { 1489 - nfs4_ff_layout_stat_io_start_write(cdata->inode, 1490 1329 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1491 - 0); 1330 + 0, task->tk_start); 1492 1331 } 1493 1332 1494 1333 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) ··· 2003 1842 *start = cpu_to_be32((xdr->p - start - 1) * 4); 2004 1843 } 2005 1844 2006 - static bool 1845 + static int 2007 1846 ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args, 2008 - struct pnfs_layout_segment *pls, 2009 - int *dev_count, int dev_limit) 1847 + struct pnfs_layout_hdr *lo, 1848 + int dev_limit) 2010 1849 { 1850 + struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); 2011 1851 struct nfs4_ff_layout_mirror *mirror; 2012 1852 struct nfs4_deviceid_node *dev; 2013 1853 struct nfs42_layoutstat_devinfo *devinfo; 2014 - int i; 1854 + int i = 0; 2015 1855 2016 - for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) { 2017 - if (*dev_count >= dev_limit) 1856 + list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) { 1857 + if (i >= dev_limit) 2018 1858 break; 2019 - mirror = FF_LAYOUT_COMP(pls, i); 2020 - if (!mirror || !mirror->mirror_ds) 1859 + if (!mirror->mirror_ds) 2021 1860 continue; 2022 - dev = FF_LAYOUT_DEVID_NODE(pls, i); 2023 - devinfo = &args->devinfo[*dev_count]; 1861 + /* mirror refcount put in cleanup_layoutstats */ 1862 + if (!atomic_inc_not_zero(&mirror->ref)) 1863 + continue; 1864 + dev = &mirror->mirror_ds->id_node; 1865 + devinfo = &args->devinfo[i]; 2024 1866 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE); 2025 - devinfo->offset = pls->pls_range.offset; 2026 - devinfo->length = pls->pls_range.length; 2027 - /* well, we don't really know if IO is continuous or not! */ 2028 - devinfo->read_count = mirror->read_stat.io_stat.bytes_completed; 1867 + devinfo->offset = 0; 1868 + devinfo->length = NFS4_MAX_UINT64; 1869 + devinfo->read_count = mirror->read_stat.io_stat.ops_completed; 2029 1870 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed; 2030 - devinfo->write_count = mirror->write_stat.io_stat.bytes_completed; 1871 + devinfo->write_count = mirror->write_stat.io_stat.ops_completed; 2031 1872 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed; 2032 1873 devinfo->layout_type = LAYOUT_FLEX_FILES; 2033 1874 devinfo->layoutstats_encode = ff_layout_encode_layoutstats; 2034 1875 devinfo->layout_private = mirror; 2035 - /* lseg refcount put in cleanup_layoutstats */ 2036 - pnfs_get_lseg(pls); 2037 1876 2038 - ++(*dev_count); 1877 + i++; 2039 1878 } 2040 - 2041 - return *dev_count < dev_limit; 1879 + return i; 2042 1880 } 2043 1881 2044 1882 static int 2045 1883 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) 2046 1884 { 2047 - struct pnfs_layout_segment *pls; 1885 + struct nfs4_flexfile_layout *ff_layout; 1886 + struct nfs4_ff_layout_mirror *mirror; 2048 1887 int dev_count = 0; 2049 1888 2050 1889 spin_lock(&args->inode->i_lock); 2051 - list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) { 2052 - dev_count += FF_LAYOUT_MIRROR_COUNT(pls); 1890 + ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout); 1891 + list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) { 1892 + if (atomic_read(&mirror->ref) != 0) 1893 + dev_count ++; 2053 1894 } 2054 1895 spin_unlock(&args->inode->i_lock); 2055 1896 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ ··· 2060 1897 __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV); 2061 1898 dev_count = PNFS_LAYOUTSTATS_MAXDEV; 2062 1899 } 2063 - args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL); 1900 + args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO); 2064 1901 if (!args->devinfo) 2065 1902 return -ENOMEM; 2066 1903 2067 - dev_count = 0; 2068 1904 spin_lock(&args->inode->i_lock); 2069 - list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) { 2070 - if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count, 2071 - PNFS_LAYOUTSTATS_MAXDEV)) { 2072 - break; 2073 - } 2074 - } 1905 + args->num_dev = ff_layout_mirror_prepare_stats(args, 1906 + &ff_layout->generic_hdr, dev_count); 2075 1907 spin_unlock(&args->inode->i_lock); 2076 - args->num_dev = dev_count; 2077 1908 2078 1909 return 0; 2079 1910 } ··· 2081 1924 for (i = 0; i < data->args.num_dev; i++) { 2082 1925 mirror = data->args.devinfo[i].layout_private; 2083 1926 data->args.devinfo[i].layout_private = NULL; 2084 - pnfs_put_lseg(mirror->lseg); 1927 + ff_layout_put_mirror(mirror); 2085 1928 } 2086 1929 } ··· 2093 1936 .free_layout_hdr = ff_layout_free_layout_hdr, 2094 1937 .alloc_lseg = ff_layout_alloc_lseg, 2095 1938 .free_lseg = ff_layout_free_lseg, 1939 + .add_lseg = ff_layout_add_lseg, 2096 1940 .pg_read_ops = &ff_layout_pg_read_ops, 2097 1941 .pg_write_ops = &ff_layout_pg_write_ops, 2098 1942 .get_ds_info = ff_layout_get_ds_info,
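
One pattern worth noting in ff_layout_add_mirror() above: lookup-or-insert under the inode lock, where an existing mirror is reused only if atomic_inc_not_zero() can raise its refcount, guarding against a racing final put. A userspace sketch of the same shape (a pthread mutex stands in for i_lock; the plain counter is safe here only because it is always read and written under that lock):

#include <pthread.h>
#include <stdio.h>

struct mirror {
	int key;		/* stands in for the mirror_ds + fh match */
	int ref;
	struct mirror *next;
};

static struct mirror *mirrors;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Reuse an existing mirror with the same key only if its refcount is
 * still nonzero (the atomic_inc_not_zero() guard); otherwise link in
 * the caller's new entry, which arrives with ref == 1. */
static struct mirror *add_mirror(struct mirror *new)
{
	struct mirror *pos;

	pthread_mutex_lock(&lock);
	for (pos = mirrors; pos; pos = pos->next) {
		if (pos->key != new->key)
			continue;
		if (pos->ref > 0) {
			pos->ref++;
			pthread_mutex_unlock(&lock);
			return pos;
		}
	}
	new->next = mirrors;
	mirrors = new;
	pthread_mutex_unlock(&lock);
	return new;
}

int main(void)
{
	struct mirror a = { .key = 1, .ref = 1 };
	struct mirror b = { .key = 1, .ref = 1 };

	printf("dedup=%d\n", add_mirror(&a) == add_mirror(&b));	/* 1 */
	return 0;
}
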
+4 -1
fs/nfs/flexfilelayout/flexfilelayout.h
··· 67 67 }; 68 68 69 69 struct nfs4_ff_layout_mirror { 70 - struct pnfs_layout_segment *lseg; /* back pointer */ 70 + struct pnfs_layout_hdr *layout; 71 + struct list_head mirrors; 71 72 u32 ds_count; 72 73 u32 efficiency; 73 74 struct nfs4_ff_layout_ds *mirror_ds; ··· 78 77 u32 uid; 79 78 u32 gid; 80 79 struct rpc_cred *cred; 80 + atomic_t ref; 81 81 spinlock_t lock; 82 82 struct nfs4_ff_layoutstat read_stat; 83 83 struct nfs4_ff_layoutstat write_stat; ··· 97 95 struct nfs4_flexfile_layout { 98 96 struct pnfs_layout_hdr generic_hdr; 99 97 struct pnfs_ds_commit_info commit_info; 98 + struct list_head mirrors; 100 99 struct list_head error_list; /* nfs4_ff_layout_ds_err */ 101 100 }; 102 101
+63 -19
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 172 172 return NULL; 173 173 } 174 174 175 + static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg, 176 + struct nfs4_deviceid_node *devid) 177 + { 178 + nfs4_mark_deviceid_unavailable(devid); 179 + if (!ff_layout_has_available_ds(lseg)) 180 + pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, 181 + lseg); 182 + } 183 + 184 + static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg, 185 + struct nfs4_ff_layout_mirror *mirror) 186 + { 187 + if (mirror == NULL || mirror->mirror_ds == NULL) { 188 + pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, 189 + lseg); 190 + return false; 191 + } 192 + if (mirror->mirror_ds->ds == NULL) { 193 + struct nfs4_deviceid_node *devid; 194 + devid = &mirror->mirror_ds->id_node; 195 + ff_layout_mark_devid_invalid(lseg, devid); 196 + return false; 197 + } 198 + return true; 199 + } 200 + 175 201 static u64 176 202 end_offset(u64 start, u64 len) 177 203 { ··· 362 336 { 363 337 struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx); 364 338 struct nfs_fh *fh = NULL; 365 - struct nfs4_deviceid_node *devid; 366 339 367 - if (mirror == NULL || mirror->mirror_ds == NULL || 368 - mirror->mirror_ds->ds == NULL) { 369 - printk(KERN_ERR "NFS: %s: No data server for mirror offset index %d\n", 340 + if (!ff_layout_mirror_valid(lseg, mirror)) { 341 + pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n", 370 342 __func__, mirror_idx); 371 - if (mirror && mirror->mirror_ds) { 372 - devid = &mirror->mirror_ds->id_node; 373 - pnfs_generic_mark_devid_invalid(devid); 374 - } 375 343 goto out; 376 344 } 377 345 ··· 388 368 unsigned int max_payload; 389 369 rpc_authflavor_t flavor; 390 370 391 - if (mirror == NULL || mirror->mirror_ds == NULL || 392 - mirror->mirror_ds->ds == NULL) { 393 - printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 371 + if (!ff_layout_mirror_valid(lseg, mirror)) { 372 + pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", 394 373 __func__, ds_idx); 395 - if (mirror && mirror->mirror_ds) { 396 - devid = &mirror->mirror_ds->id_node; 397 - pnfs_generic_mark_devid_invalid(devid); 398 - } 399 374 goto out; 400 375 } 401 376 ··· 515 500 range->offset, range->length)) 516 501 continue; 517 502 /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) 518 - * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4) 503 + * + array length + deviceid(NFS4_DEVICEID4_SIZE) 504 + * + status(4) + opnum(4) 519 505 */ 520 506 p = xdr_reserve_space(xdr, 521 - 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); 507 + 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); 522 508 if (unlikely(!p)) 523 509 return -ENOBUFS; 524 510 p = xdr_encode_hyper(p, err->offset); 525 511 p = xdr_encode_hyper(p, err->length); 526 512 p = xdr_encode_opaque_fixed(p, &err->stateid, 527 513 NFS4_STATEID_SIZE); 514 + /* Encode 1 error */ 515 + *p++ = cpu_to_be32(1); 528 516 p = xdr_encode_opaque_fixed(p, &err->deviceid, 529 517 NFS4_DEVICEID4_SIZE); 530 518 *p++ = cpu_to_be32(err->status); ··· 543 525 return 0; 544 526 } 545 527 546 - bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg) 528 + static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg) 547 529 { 548 530 struct nfs4_ff_layout_mirror *mirror; 549 531 struct nfs4_deviceid_node *devid; 550 - int idx; 532 + u32 idx; 551 533 552 534 for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { 553 535 mirror = FF_LAYOUT_COMP(lseg, idx); ··· 559 541 } 560 542 561 543 return false; 544 + } 545 + 546 + static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg) 547 + { 548 + struct nfs4_ff_layout_mirror *mirror; 549 + struct nfs4_deviceid_node *devid; 550 + u32 idx; 551 + 552 + for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { 553 + mirror = FF_LAYOUT_COMP(lseg, idx); 554 + if (!mirror || !mirror->mirror_ds) 555 + return false; 556 + devid = &mirror->mirror_ds->id_node; 557 + if (ff_layout_test_devid_unavailable(devid)) 558 + return false; 559 + } 560 + 561 + return FF_LAYOUT_MIRROR_COUNT(lseg) != 0; 562 + } 563 + 564 + bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg) 565 + { 566 + if (lseg->pls_range.iomode == IOMODE_READ) 567 + return ff_read_layout_has_available_ds(lseg); 568 + /* Note: RW layout needs all mirrors available */ 569 + return ff_rw_layout_has_available_ds(lseg); 562 570 } 563 571 564 572 module_param(dataserver_retrans, uint, 0644);
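
The split into ff_read_layout_has_available_ds() and ff_rw_layout_has_available_ds() encodes different availability rules per iomode: a READ layout can proceed with any one reachable mirror, while an RW layout needs every mirror because writes go to all of them. A tiny runnable model:

#include <stdbool.h>
#include <stdio.h>

/* READ: any one reachable mirror will do */
static bool read_has_ds(const bool *avail, int n)
{
	for (int i = 0; i < n; i++)
		if (avail[i])
			return true;
	return false;
}

/* RW: every mirror must be reachable, and there must be at least one */
static bool rw_has_ds(const bool *avail, int n)
{
	for (int i = 0; i < n; i++)
		if (!avail[i])
			return false;
	return n != 0;
}

int main(void)
{
	bool avail[] = { true, false };

	printf("read=%d rw=%d\n",
	       read_has_ds(avail, 2), rw_has_ds(avail, 2));	/* read=1 rw=0 */
	return 0;
}
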
+34 -27
fs/nfs/inode.c
··· 504 504 { 505 505 struct inode *inode = d_inode(dentry); 506 506 struct nfs_fattr *fattr; 507 - int error = -ENOMEM; 507 + int error = 0; 508 508 509 509 nfs_inc_stats(inode, NFSIOS_VFSSETATTR); 510 510 ··· 513 513 attr->ia_valid &= ~ATTR_MODE; 514 514 515 515 if (attr->ia_valid & ATTR_SIZE) { 516 - loff_t i_size; 517 - 518 516 BUG_ON(!S_ISREG(inode->i_mode)); 519 517 520 - i_size = i_size_read(inode); 521 - if (attr->ia_size == i_size) 518 + error = inode_newsize_ok(inode, attr->ia_size); 519 + if (error) 520 + return error; 521 + 522 + if (attr->ia_size == i_size_read(inode)) 522 523 attr->ia_valid &= ~ATTR_SIZE; 523 - else if (attr->ia_size < i_size && IS_SWAPFILE(inode)) 524 - return -ETXTBSY; 525 524 } 526 525 527 526 /* Optimization: if the end result is no change, don't RPC */ ··· 535 536 nfs_sync_inode(inode); 536 537 537 538 fattr = nfs_alloc_fattr(); 538 - if (fattr == NULL) 539 + if (fattr == NULL) { 540 + error = -ENOMEM; 539 541 goto out; 542 + } 543 + 540 544 /* 541 545 * Return any delegations if we're going to change ACLs 542 546 */ ··· 761 759 * @ctx: pointer to context 762 760 * @is_sync: is this a synchronous close 763 761 * 764 - * always ensure that the attributes are up to date if we're mounted 765 - * with close-to-open semantics 762 + * Ensure that the attributes are up to date if we're mounted 763 + * with close-to-open semantics and we have cached data that will 764 + * need to be revalidated on open. 766 765 */ 767 766 void nfs_close_context(struct nfs_open_context *ctx, int is_sync) 768 767 { 768 + struct nfs_inode *nfsi; 769 769 struct inode *inode; 770 770 struct nfs_server *server; 771 771 ··· 776 772 if (!is_sync) 777 773 return; 778 774 inode = d_inode(ctx->dentry); 779 - if (!list_empty(&NFS_I(inode)->open_files)) 775 + nfsi = NFS_I(inode); 776 + if (inode->i_mapping->nrpages == 0) 777 + return; 778 + if (nfsi->cache_validity & NFS_INO_INVALID_DATA) 779 + return; 780 + if (!list_empty(&nfsi->open_files)) 780 781 return; 781 782 server = NFS_SERVER(inode); 782 783 if (server->flags & NFS_MOUNT_NOCTO) ··· 853 844 } 854 845 EXPORT_SYMBOL_GPL(put_nfs_open_context); 855 846 847 + static void put_nfs_open_context_sync(struct nfs_open_context *ctx) 848 + { 849 + __put_nfs_open_context(ctx, 1); 850 + } 851 + 856 852 /* 857 853 * Ensure that mmap has a recent RPC credential for use when writing out 858 854 * shared pages ··· 902 888 return ctx; 903 889 } 904 890 905 - static void nfs_file_clear_open_context(struct file *filp) 891 + void nfs_file_clear_open_context(struct file *filp) 906 892 { 907 893 struct nfs_open_context *ctx = nfs_file_open_context(filp); 908 894 ··· 913 899 spin_lock(&inode->i_lock); 914 900 list_move_tail(&ctx->list, &NFS_I(inode)->open_files); 915 901 spin_unlock(&inode->i_lock); 916 - __put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1); 902 + put_nfs_open_context_sync(ctx); 917 903 } 918 904 } ··· 930 916 nfs_file_set_open_context(filp, ctx); 931 917 put_nfs_open_context(ctx); 932 918 nfs_fscache_open_file(inode, filp); 933 - return 0; 934 - } 935 - 936 - int nfs_release(struct inode *inode, struct file *filp) 937 - { 938 - nfs_file_clear_open_context(filp); 939 919 return 0; 940 920 } ··· 1281 1273 return 0; 1282 1274 } 1283 1275 1284 - static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr) 1285 - { 1286 - if (!(fattr->valid & NFS_ATTR_FATTR_CTIME)) 1287 - return 0; 1288 - return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; 1289 - } 1290 - 1291 1276 static atomic_long_t nfs_attr_generation_counter; 1292 1277 1293 1278 static unsigned long nfs_read_attr_generation_counter(void) ··· 1429 1428 const struct nfs_inode *nfsi = NFS_I(inode); 1430 1429 1431 1430 return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || 1432 - nfs_ctime_need_update(inode, fattr) || 1433 1431 ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); 1434 1432 } ··· 1490 1490 static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr) 1491 1491 { 1492 1492 unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1493 + 1494 + /* 1495 + * Don't revalidate the pagecache if we hold a delegation, but do 1496 + * force an attribute update 1497 + */ 1498 + if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1499 + invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED; 1493 1500 1494 1501 if (S_ISDIR(inode->i_mode)) 1495 1502 invalid |= NFS_INO_INVALID_DATA;
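
With inode_newsize_ok() now doing the swapfile (-ETXTBSY) and size-limit checks, nfs_setattr() keeps only the no-op-truncate optimisation. A simplified model of the ordering, validate first and only then drop ATTR_SIZE when nothing would change (the names below are stand-ins, not kernel code):

#include <errno.h>
#include <stdio.h>

#define ATTR_SIZE 0x1	/* illustrative flag bit */

struct inode {
	long size;
	int is_swapfile;
};

/* Stand-in for inode_newsize_ok(): reject shrinking a swapfile */
static int newsize_ok(const struct inode *inode, long newsize)
{
	if (inode->is_swapfile && newsize < inode->size)
		return -ETXTBSY;
	return 0;
}

static int setattr_size(struct inode *inode, unsigned int *ia_valid,
			long newsize)
{
	int error = newsize_ok(inode, newsize);

	if (error)
		return error;
	if (newsize == inode->size)
		*ia_valid &= ~ATTR_SIZE;	/* no change: skip the RPC */
	return 0;
}

int main(void)
{
	struct inode i = { .size = 100, .is_swapfile = 0 };
	unsigned int valid = ATTR_SIZE;

	printf("err=%d valid=%u\n", setattr_size(&i, &valid, 100), valid);
	return 0;
}
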
+10 -10
fs/nfs/internal.h
··· 219 219 } 220 220 #endif 221 221 222 - #ifdef CONFIG_NFS_V4_1 223 - int nfs_sockaddr_match_ipaddr(const struct sockaddr *, const struct sockaddr *); 224 - #endif 225 - 226 222 /* callback_xdr.c */ 227 223 extern struct svc_version nfs4_callback_version1; 228 224 extern struct svc_version nfs4_callback_version4; ··· 360 364 /* file.c */ 361 365 int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int); 362 366 loff_t nfs_file_llseek(struct file *, loff_t, int); 363 - int nfs_file_flush(struct file *, fl_owner_t); 364 367 ssize_t nfs_file_read(struct kiocb *, struct iov_iter *); 365 368 ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, 366 369 size_t, unsigned int); ··· 485 490 void nfs_commitdata_release(struct nfs_commit_data *data); 486 491 void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, 487 492 struct nfs_commit_info *cinfo); 493 + void nfs_request_add_commit_list_locked(struct nfs_page *req, 494 + struct list_head *dst, 495 + struct nfs_commit_info *cinfo); 488 496 void nfs_request_remove_commit_list(struct nfs_page *req, 489 497 struct nfs_commit_info *cinfo); 490 498 void nfs_init_cinfo(struct nfs_commit_info *cinfo, ··· 621 623 * Record the page as unstable and mark its inode as dirty. 622 624 */ 623 625 static inline 624 - void nfs_mark_page_unstable(struct page *page) 626 + void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo) 625 627 { 626 - struct inode *inode = page_file_mapping(page)->host; 628 + if (!cinfo->dreq) { 629 + struct inode *inode = page_file_mapping(page)->host; 627 630 628 - inc_zone_page_state(page, NR_UNSTABLE_NFS); 629 - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); 630 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 631 + inc_zone_page_state(page, NR_UNSTABLE_NFS); 632 + inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); 633 + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 634 + } 631 635 } 632 636 633 637 /*
+1
fs/nfs/nfs3xdr.c
··· 1103 1103 { 1104 1104 encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen); 1105 1105 encode_symlinkdata3(xdr, args); 1106 + xdr->buf->flags |= XDRBUF_WRITE; 1106 1107 } 1107 1108 1108 1109 /*
-2
fs/nfs/nfs42.h
··· 17 17 loff_t nfs42_proc_llseek(struct file *, loff_t, int); 18 18 int nfs42_proc_layoutstats_generic(struct nfs_server *, 19 19 struct nfs42_layoutstat_data *); 20 - /* nfs4.2xdr.h */ 21 - extern struct rpc_procinfo nfs4_2_procedures[]; 22 20 23 21 #endif /* __LINUX_FS_NFS_NFS4_2_H */
+2 -3
fs/nfs/nfs42xdr.c
··· 238 238 return -EIO; 239 239 } 240 240 241 - static int decode_layoutstats(struct xdr_stream *xdr, 242 - struct nfs42_layoutstat_res *res) 241 + static int decode_layoutstats(struct xdr_stream *xdr) 243 242 { 244 243 return decode_op_hdr(xdr, OP_LAYOUTSTATS); 245 244 } ··· 342 343 goto out; 343 344 WARN_ON(res->num_dev > PNFS_LAYOUTSTATS_MAXDEV); 344 345 for (i = 0; i < res->num_dev; i++) { 345 - status = decode_layoutstats(xdr, res); 346 + status = decode_layoutstats(xdr); 346 347 if (status) 347 348 goto out; 348 349 }
+1 -3
fs/nfs/nfs4_fs.h
··· 405 405 int nfs41_discover_server_trunking(struct nfs_client *clp, 406 406 struct nfs_client **, struct rpc_cred *); 407 407 extern void nfs4_schedule_session_recovery(struct nfs4_session *, int); 408 - extern void nfs41_server_notify_target_slotid_update(struct nfs_client *clp); 409 - extern void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp); 410 - 408 + extern void nfs41_notify_server(struct nfs_client *); 411 409 #else 412 410 static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err) 413 411 {
+1 -4
fs/nfs/nfs4client.c
··· 729 729 return false; 730 730 731 731 /* Match only the IP address, not the port number */ 732 - if (!nfs_sockaddr_match_ipaddr(addr, clap)) 733 - return false; 734 - 735 - return true; 732 + return rpc_cmp_addr(addr, clap); 736 733 } 737 734 738 735 /*
+29 -3
fs/nfs/nfs4file.c
··· 6 6 #include <linux/fs.h> 7 7 #include <linux/falloc.h> 8 8 #include <linux/nfs_fs.h> 9 + #include "delegation.h" 9 10 #include "internal.h" 11 + #include "iostat.h" 10 12 #include "fscache.h" 11 13 #include "pnfs.h" 12 14 ··· 29 27 struct inode *dir; 30 28 unsigned openflags = filp->f_flags; 31 29 struct iattr attr; 32 - int opened = 0; 33 30 int err; 34 31 35 32 /* ··· 67 66 nfs_sync_inode(inode); 68 67 } 69 68 70 - inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened); 69 + inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL); 71 70 if (IS_ERR(inode)) { 72 71 err = PTR_ERR(inode); 73 72 switch (err) { ··· 99 98 d_drop(dentry); 100 99 err = -EOPENSTALE; 101 100 goto out_put_ctx; 101 + } 102 + 103 + /* 104 + * Flush all dirty pages, and check for write errors. 105 + */ 106 + static int 107 + nfs4_file_flush(struct file *file, fl_owner_t id) 108 + { 109 + struct inode *inode = file_inode(file); 110 + 111 + dprintk("NFS: flush(%pD2)\n", file); 112 + 113 + nfs_inc_stats(inode, NFSIOS_VFSFLUSH); 114 + if ((file->f_mode & FMODE_WRITE) == 0) 115 + return 0; 116 + 117 + /* 118 + * If we're holding a write delegation, then check if we're required 119 + * to flush the i/o on close. If not, then just start the i/o now. 120 + */ 121 + if (!nfs4_delegation_flush_on_close(inode)) 122 + return filemap_fdatawrite(file->f_mapping); 123 + 124 + /* Flush writes to the server and return any errors */ 125 + return vfs_fsync(file, 0); 102 126 } 103 127 104 128 static int ··· 204 178 .write_iter = nfs_file_write, 205 179 .mmap = nfs_file_mmap, 206 180 .open = nfs4_file_open, 207 - .flush = nfs_file_flush, 181 + .flush = nfs4_file_flush, 208 182 .release = nfs_file_release, 209 183 .fsync = nfs4_file_fsync, 210 184 .lock = nfs_lock,
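The new nfs4_file_flush() above encodes a small decision table. Here is an illustrative-only restatement in standalone C (the enum and helper names are invented for the sketch): with a write delegation that does not require flushing on close, it is enough to kick off asynchronous writeback, matching the filemap_fdatawrite() branch; otherwise writes must reach the server and any error must surface at close(2), matching the vfs_fsync() branch.

#include <assert.h>
#include <stdbool.h>

enum flush_action {
        FLUSH_NONE,             /* not open for write: nothing to do */
        FLUSH_START_ASYNC,      /* filemap_fdatawrite() */
        FLUSH_SYNC_AND_REPORT   /* vfs_fsync(), errors reported at close */
};

static enum flush_action close_flush_action(bool opened_for_write,
                                            bool have_write_delegation,
                                            bool delegation_flush_on_close)
{
        if (!opened_for_write)
                return FLUSH_NONE;
        if (have_write_delegation && !delegation_flush_on_close)
                return FLUSH_START_ASYNC;
        return FLUSH_SYNC_AND_REPORT;
}

int main(void)
{
        assert(close_flush_action(false, false, false) == FLUSH_NONE);
        assert(close_flush_action(true, true, false) == FLUSH_START_ASYNC);
        assert(close_flush_action(true, false, false) == FLUSH_SYNC_AND_REPORT);
        return 0;
}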
+2 -12
fs/nfs/nfs4idmap.c
··· 184 184 .read = user_read, 185 185 }; 186 186 187 - static int nfs_idmap_init_keyring(void) 187 + int nfs_idmap_init(void) 188 188 { 189 189 struct cred *cred; 190 190 struct key *keyring; ··· 230 230 return ret; 231 231 } 232 232 233 - static void nfs_idmap_quit_keyring(void) 233 + void nfs_idmap_quit(void) 234 234 { 235 235 key_revoke(id_resolver_cache->thread_keyring); 236 236 unregister_key_type(&key_type_id_resolver); ··· 490 490 &idmap->idmap_pdo); 491 491 rpc_destroy_pipe_data(idmap->idmap_pipe); 492 492 kfree(idmap); 493 - } 494 - 495 - int nfs_idmap_init(void) 496 - { 497 - return nfs_idmap_init_keyring(); 498 - } 499 - 500 - void nfs_idmap_quit(void) 501 - { 502 - nfs_idmap_quit_keyring(); 503 493 } 504 494 505 495 static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
+84 -52
fs/nfs/nfs4proc.c
··· 586 586 spin_unlock(&tbl->slot_tbl_lock); 587 587 res->sr_slot = NULL; 588 588 if (send_new_highest_used_slotid) 589 - nfs41_server_notify_highest_slotid_update(session->clp); 589 + nfs41_notify_server(session->clp); 590 590 } 591 591 592 592 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) ··· 1150 1150 return ret; 1151 1151 } 1152 1152 1153 - static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) 1153 + static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1154 + enum open_claim_type4 claim) 1154 1155 { 1155 1156 if (delegation == NULL) 1156 1157 return 0; ··· 1159 1158 return 0; 1160 1159 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1161 1160 return 0; 1161 + switch (claim) { 1162 + case NFS4_OPEN_CLAIM_NULL: 1163 + case NFS4_OPEN_CLAIM_FH: 1164 + break; 1165 + case NFS4_OPEN_CLAIM_PREVIOUS: 1166 + if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1167 + break; 1168 + default: 1169 + return 0; 1170 + } 1162 1171 nfs_mark_delegation_referenced(delegation); 1163 1172 return 1; 1164 1173 } ··· 1231 1220 } 1232 1221 1233 1222 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1223 + nfs4_stateid *arg_stateid, 1234 1224 nfs4_stateid *stateid, fmode_t fmode) 1235 1225 { 1236 1226 clear_bit(NFS_O_RDWR_STATE, &state->flags); ··· 1250 1238 if (stateid == NULL) 1251 1239 return; 1252 1240 /* Handle races with OPEN */ 1253 - if (!nfs4_stateid_match_other(stateid, &state->open_stateid) || 1254 - !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { 1241 + if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1242 + (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1243 + !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1255 1244 nfs_resync_open_stateid_locked(state); 1256 1245 return; 1257 1246 } ··· 1261 1248 nfs4_stateid_copy(&state->open_stateid, stateid); 1262 1249 } 1263 1250 1264 - static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1251 + static void nfs_clear_open_stateid(struct nfs4_state *state, 1252 + nfs4_stateid *arg_stateid, 1253 + nfs4_stateid *stateid, fmode_t fmode) 1265 1254 { 1266 1255 write_seqlock(&state->seqlock); 1267 - nfs_clear_open_stateid_locked(state, stateid, fmode); 1256 + nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1268 1257 write_sequnlock(&state->seqlock); 1269 1258 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1270 1259 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); ··· 1391 1376 struct nfs_delegation *delegation; 1392 1377 int open_mode = opendata->o_arg.open_flags; 1393 1378 fmode_t fmode = opendata->o_arg.fmode; 1379 + enum open_claim_type4 claim = opendata->o_arg.claim; 1394 1380 nfs4_stateid stateid; 1395 1381 int ret = -EAGAIN; 1396 1382 ··· 1405 1389 spin_unlock(&state->owner->so_lock); 1406 1390 rcu_read_lock(); 1407 1391 delegation = rcu_dereference(nfsi->delegation); 1408 - if (!can_open_delegated(delegation, fmode)) { 1392 + if (!can_open_delegated(delegation, fmode, claim)) { 1409 1393 rcu_read_unlock(); 1410 1394 break; 1411 1395 } ··· 1868 1852 struct nfs4_opendata *data = calldata; 1869 1853 struct nfs4_state_owner *sp = data->owner; 1870 1854 struct nfs_client *clp = sp->so_server->nfs_client; 1855 + enum open_claim_type4 claim = data->o_arg.claim; 1871 1856 1872 1857 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1873 1858 goto out_wait; ··· 1883 1866 goto out_no_action; 1884 
1867 rcu_read_lock(); 1885 1868 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1886 - if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && 1887 - data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH && 1888 - can_open_delegated(delegation, data->o_arg.fmode)) 1869 + if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 1889 1870 goto unlock_no_action; 1890 1871 rcu_read_unlock(); 1891 1872 } 1892 1873 /* Update client id. */ 1893 1874 data->o_arg.clientid = clp->cl_clientid; 1894 - switch (data->o_arg.claim) { 1875 + switch (claim) { 1876 + default: 1877 + break; 1895 1878 case NFS4_OPEN_CLAIM_PREVIOUS: 1896 1879 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1897 1880 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: ··· 2311 2294 * fields corresponding to attributes that were used to store the verifier. 2312 2295 * Make sure we clobber those fields in the later setattr call 2313 2296 */ 2314 - static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 2297 + static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2298 + struct iattr *sattr, struct nfs4_label **label) 2315 2299 { 2316 - if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2300 + const u32 *attrset = opendata->o_res.attrset; 2301 + 2302 + if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2317 2303 !(sattr->ia_valid & ATTR_ATIME_SET)) 2318 2304 sattr->ia_valid |= ATTR_ATIME; 2319 2305 2320 - if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2306 + if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2321 2307 !(sattr->ia_valid & ATTR_MTIME_SET)) 2322 2308 sattr->ia_valid |= ATTR_MTIME; 2309 + 2310 + /* Except MODE, it seems harmless of setting twice. */ 2311 + if ((attrset[1] & FATTR4_WORD1_MODE)) 2312 + sattr->ia_valid &= ~ATTR_MODE; 2313 + 2314 + if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2315 + *label = NULL; 2323 2316 } 2324 2317 2325 2318 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, ··· 2452 2425 goto err_free_label; 2453 2426 state = ctx->state; 2454 2427 2455 - if ((opendata->o_arg.open_flags & O_EXCL) && 2428 + if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2456 2429 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2457 - nfs4_exclusive_attrset(opendata, sattr); 2430 + nfs4_exclusive_attrset(opendata, sattr, &label); 2458 2431 2459 2432 nfs_fattr_init(opendata->o_res.f_attr); 2460 2433 status = nfs4_do_setattr(state->inode, cred, ··· 2466 2439 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2467 2440 } 2468 2441 } 2469 - if (opendata->file_created) 2442 + if (opened && opendata->file_created) 2470 2443 *opened |= FILE_CREATED; 2471 2444 2472 2445 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { ··· 2688 2661 switch (task->tk_status) { 2689 2662 case 0: 2690 2663 res_stateid = &calldata->res.stateid; 2691 - if (calldata->arg.fmode == 0 && calldata->roc) 2664 + if (calldata->roc) 2692 2665 pnfs_roc_set_barrier(state->inode, 2693 2666 calldata->roc_barrier); 2694 2667 renew_lease(server, calldata->timestamp); ··· 2711 2684 goto out_release; 2712 2685 } 2713 2686 } 2714 - nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); 2687 + nfs_clear_open_stateid(state, &calldata->arg.stateid, 2688 + res_stateid, calldata->arg.fmode); 2715 2689 out_release: 2716 2690 nfs_release_seqid(calldata->arg.seqid); 2717 2691 nfs_refresh_inode(calldata->inode, calldata->res.fattr); ··· 2763 2735 goto out_no_action; 2764 2736 } 2765 2737 2766 - if 
(calldata->arg.fmode == 0) { 2738 + if (calldata->arg.fmode == 0) 2767 2739 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2768 - if (calldata->roc && 2769 - pnfs_roc_drain(inode, &calldata->roc_barrier, task)) { 2770 - nfs_release_seqid(calldata->arg.seqid); 2771 - goto out_wait; 2772 - } 2773 - } 2740 + if (calldata->roc) 2741 + pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2742 + 2774 2743 calldata->arg.share_access = 2775 2744 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2776 2745 calldata->arg.fmode, 0); ··· 2908 2883 2909 2884 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2910 2885 { 2886 + u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 2911 2887 struct nfs4_server_caps_arg args = { 2912 2888 .fhandle = fhandle, 2889 + .bitmask = bitmask, 2913 2890 }; 2914 2891 struct nfs4_server_caps_res res = {}; 2915 2892 struct rpc_message msg = { ··· 2921 2894 }; 2922 2895 int status; 2923 2896 2897 + bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 2898 + FATTR4_WORD0_FH_EXPIRE_TYPE | 2899 + FATTR4_WORD0_LINK_SUPPORT | 2900 + FATTR4_WORD0_SYMLINK_SUPPORT | 2901 + FATTR4_WORD0_ACLSUPPORT; 2902 + if (minorversion) 2903 + bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 2904 + 2924 2905 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2925 2906 if (status == 0) { 2926 2907 /* Sanity check the server answers */ 2927 - switch (server->nfs_client->cl_minorversion) { 2908 + switch (minorversion) { 2928 2909 case 0: 2929 2910 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 2930 2911 res.attr_bitmask[2] = 0; ··· 2985 2950 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2986 2951 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2987 2952 server->cache_consistency_bitmask[2] = 0; 2953 + memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 2954 + sizeof(server->exclcreat_bitmask)); 2988 2955 server->acl_bitmask = res.acl_bitmask; 2989 2956 server->fh_expire_type = res.fh_expire_type; 2990 2957 } ··· 3589 3552 struct nfs4_label l, *ilabel = NULL; 3590 3553 struct nfs_open_context *ctx; 3591 3554 struct nfs4_state *state; 3592 - int opened = 0; 3593 3555 int status = 0; 3594 3556 3595 3557 ctx = alloc_nfs_open_context(dentry, FMODE_READ); ··· 3598 3562 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3599 3563 3600 3564 sattr->ia_mode &= ~current_umask(); 3601 - state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened); 3565 + state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3602 3566 if (IS_ERR(state)) { 3603 3567 status = PTR_ERR(state); 3604 3568 goto out; ··· 5014 4978 int result; 5015 4979 size_t len; 5016 4980 char *str; 5017 - bool retried = false; 5018 4981 5019 4982 if (clp->cl_owner_id != NULL) 5020 4983 return 0; 5021 - retry: 4984 + 5022 4985 rcu_read_lock(); 5023 - len = 10 + strlen(clp->cl_ipaddr) + 1 + 4986 + len = 14 + strlen(clp->cl_ipaddr) + 1 + 5024 4987 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5025 4988 1 + 5026 4989 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + ··· 5045 5010 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5046 5011 rcu_read_unlock(); 5047 5012 5048 - /* Did something change? 
*/ 5049 - if (result >= len) { 5050 - kfree(str); 5051 - if (retried) 5052 - return -EINVAL; 5053 - retried = true; 5054 - goto retry; 5055 - } 5056 5013 clp->cl_owner_id = str; 5057 5014 return 0; 5058 5015 } ··· 5076 5049 clp->rpc_ops->version, clp->cl_minorversion, 5077 5050 nfs4_client_id_uniquifier, 5078 5051 clp->cl_rpcclient->cl_nodename); 5079 - if (result >= len) { 5080 - kfree(str); 5081 - return -EINVAL; 5082 - } 5083 5052 clp->cl_owner_id = str; 5084 5053 return 0; 5085 5054 } ··· 5111 5088 result = scnprintf(str, len, "Linux NFSv%u.%u %s", 5112 5089 clp->rpc_ops->version, clp->cl_minorversion, 5113 5090 clp->cl_rpcclient->cl_nodename); 5114 - if (result >= len) { 5115 - kfree(str); 5116 - return -EINVAL; 5117 - } 5118 5091 clp->cl_owner_id = str; 5119 5092 return 0; 5120 5093 } ··· 5308 5289 5309 5290 d_data = (struct nfs4_delegreturndata *)data; 5310 5291 5311 - if (d_data->roc && 5312 - pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task)) 5313 - return; 5292 + if (d_data->roc) 5293 + pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5314 5294 5315 5295 nfs4_setup_sequence(d_data->res.server, 5316 5296 &d_data->args.seq_args, ··· 7764 7746 case 0: 7765 7747 goto out; 7766 7748 /* 7749 + * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 7750 + * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 7751 + */ 7752 + case -NFS4ERR_BADLAYOUT: 7753 + goto out_overflow; 7754 + /* 7767 7755 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7768 - * (or clients) writing to the same RAID stripe 7756 + * (or clients) writing to the same RAID stripe except when 7757 + * the minlength argument is 0 (see RFC5661 section 18.43.3). 7769 7758 */ 7770 7759 case -NFS4ERR_LAYOUTTRYLATER: 7760 + if (lgp->args.minlength == 0) 7761 + goto out_overflow; 7771 7762 /* 7772 7763 * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall 7773 7764 * existing layout before getting a new one). ··· 7832 7805 rpc_restart_call_prepare(task); 7833 7806 out: 7834 7807 dprintk("<-- %s\n", __func__); 7808 + return; 7809 + out_overflow: 7810 + task->tk_status = -EOVERFLOW; 7811 + goto out; 7835 7812 } 7836 7813 7837 7814 static size_t max_response_pages(struct nfs_server *server) ··· 8692 8661 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8693 8662 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8694 8663 .state_renewal_ops = &nfs41_state_renewal_ops, 8664 + .mig_recovery_ops = &nfs41_mig_recovery_ops, 8695 8665 }; 8696 8666 #endif 8697 8667
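Several hunks above delete the `result >= len` retry/bail-out around scnprintf() when building the client owner id. That check could never fire: unlike snprintf(), the kernel's scnprintf() returns the number of bytes actually stored in the buffer, which is always strictly less than the buffer size. A userspace re-implementation (assuming the semantics of lib/vsprintf.c) makes the invariant testable:

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the kernel scnprintf() return convention. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i < 0 || size == 0)
                return 0;
        if ((size_t)i >= size)
                return (int)(size - 1); /* truncated: report stored length */
        return i;
}

int main(void)
{
        char buf[8];
        int n = scnprintf(buf, sizeof(buf), "Linux NFSv%u.%u", 4U, 1U);

        assert(n < (int)sizeof(buf));       /* "result >= len" is impossible */
        assert(strlen(buf) == (size_t)n);   /* n counts stored bytes */
        return 0;
}

This is also why the companion fix above simply grows the fixed part of the length estimate from 10 to 14 so the buffer is large enough in the first place, rather than relying on a truncation retry that never triggered.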
+1 -11
fs/nfs/nfs4state.c
··· 2152 2152 } 2153 2153 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); 2154 2154 2155 - static void nfs41_ping_server(struct nfs_client *clp) 2155 + void nfs41_notify_server(struct nfs_client *clp) 2156 2156 { 2157 2157 /* Use CHECK_LEASE to ping the server with a SEQUENCE */ 2158 2158 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 2159 2159 nfs4_schedule_state_manager(clp); 2160 - } 2161 - 2162 - void nfs41_server_notify_target_slotid_update(struct nfs_client *clp) 2163 - { 2164 - nfs41_ping_server(clp); 2165 - } 2166 - 2167 - void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp) 2168 - { 2169 - nfs41_ping_server(clp); 2170 2160 } 2171 2161 2172 2162 static void nfs4_reset_all_state(struct nfs_client *clp)
+61
fs/nfs/nfs4trace.h
··· 884 884 DEFINE_NFS4_GETATTR_EVENT(nfs4_lookup_root); 885 885 DEFINE_NFS4_GETATTR_EVENT(nfs4_fsinfo); 886 886 887 + DECLARE_EVENT_CLASS(nfs4_inode_callback_event, 888 + TP_PROTO( 889 + const struct nfs_client *clp, 890 + const struct nfs_fh *fhandle, 891 + const struct inode *inode, 892 + int error 893 + ), 894 + 895 + TP_ARGS(clp, fhandle, inode, error), 896 + 897 + TP_STRUCT__entry( 898 + __field(int, error) 899 + __field(dev_t, dev) 900 + __field(u32, fhandle) 901 + __field(u64, fileid) 902 + __string(dstaddr, clp ? 903 + rpc_peeraddr2str(clp->cl_rpcclient, 904 + RPC_DISPLAY_ADDR) : "unknown") 905 + ), 906 + 907 + TP_fast_assign( 908 + __entry->error = error; 909 + __entry->fhandle = nfs_fhandle_hash(fhandle); 910 + if (inode != NULL) { 911 + __entry->fileid = NFS_FILEID(inode); 912 + __entry->dev = inode->i_sb->s_dev; 913 + } else { 914 + __entry->fileid = 0; 915 + __entry->dev = 0; 916 + } 917 + __assign_str(dstaddr, clp ? 918 + rpc_peeraddr2str(clp->cl_rpcclient, 919 + RPC_DISPLAY_ADDR) : "unknown") 920 + ), 921 + 922 + TP_printk( 923 + "error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " 924 + "dstaddr=%s", 925 + __entry->error, 926 + show_nfsv4_errors(__entry->error), 927 + MAJOR(__entry->dev), MINOR(__entry->dev), 928 + (unsigned long long)__entry->fileid, 929 + __entry->fhandle, 930 + __get_str(dstaddr) 931 + ) 932 + ); 933 + 934 + #define DEFINE_NFS4_INODE_CALLBACK_EVENT(name) \ 935 + DEFINE_EVENT(nfs4_inode_callback_event, name, \ 936 + TP_PROTO( \ 937 + const struct nfs_client *clp, \ 938 + const struct nfs_fh *fhandle, \ 939 + const struct inode *inode, \ 940 + int error \ 941 + ), \ 942 + TP_ARGS(clp, fhandle, inode, error)) 943 + DEFINE_NFS4_INODE_CALLBACK_EVENT(nfs4_cb_getattr); 944 + DEFINE_NFS4_INODE_CALLBACK_EVENT(nfs4_cb_layoutrecall_inode); 945 + 946 + 887 947 DECLARE_EVENT_CLASS(nfs4_idmap_event, 888 948 TP_PROTO( 889 949 const char *name, ··· 1196 1136 1197 1137 DEFINE_NFS4_INODE_EVENT(nfs4_layoutcommit); 1198 1138 DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn); 1139 + DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close); 1199 1140 1200 1141 #endif /* CONFIG_NFS_V4_1 */ 1201 1142
+54 -21
fs/nfs/nfs4xdr.c
··· 400 400 #define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3) 401 401 #define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \ 402 402 encode_stateid_maxsz + \ 403 - 1 /* FIXME: opaque lrf_body always empty at the moment */) 403 + 1 + \ 404 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT)) 404 405 #define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \ 405 406 1 + decode_stateid_maxsz) 406 407 #define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1) ··· 1002 1001 1003 1002 static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, 1004 1003 const struct nfs4_label *label, 1005 - const struct nfs_server *server) 1004 + const struct nfs_server *server, 1005 + bool excl_check) 1006 1006 { 1007 1007 char owner_name[IDMAP_NAMESZ]; 1008 1008 char owner_group[IDMAP_NAMESZ]; ··· 1069 1067 bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET; 1070 1068 len += 4; 1071 1069 } 1070 + 1071 + if (excl_check) { 1072 + const u32 *excl_bmval = server->exclcreat_bitmask; 1073 + bmval[0] &= excl_bmval[0]; 1074 + bmval[1] &= excl_bmval[1]; 1075 + bmval[2] &= excl_bmval[2]; 1076 + 1077 + if (!(excl_bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) 1078 + label = NULL; 1079 + } 1080 + 1072 1081 if (label) { 1073 1082 len += 4 + 4 + 4 + (XDR_QUADLEN(label->len) << 2); 1074 1083 bmval[2] |= FATTR4_WORD2_SECURITY_LABEL; ··· 1167 1154 case NF4LNK: 1168 1155 p = reserve_space(xdr, 4); 1169 1156 *p = cpu_to_be32(create->u.symlink.len); 1170 - xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len); 1157 + xdr_write_pages(xdr, create->u.symlink.pages, 0, 1158 + create->u.symlink.len); 1159 + xdr->buf->flags |= XDRBUF_WRITE; 1171 1160 break; 1172 1161 1173 1162 case NF4BLK: case NF4CHR: ··· 1183 1168 } 1184 1169 1185 1170 encode_string(xdr, create->name->len, create->name->name); 1186 - encode_attrs(xdr, create->attrs, create->label, create->server); 1171 + encode_attrs(xdr, create->attrs, create->label, create->server, false); 1187 1172 } 1188 1173 1189 1174 static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) ··· 1397 1382 1398 1383 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) 1399 1384 { 1400 - struct iattr dummy; 1401 1385 __be32 *p; 1402 1386 1403 1387 p = reserve_space(xdr, 4); 1404 1388 switch(arg->createmode) { 1405 1389 case NFS4_CREATE_UNCHECKED: 1406 1390 *p = cpu_to_be32(NFS4_CREATE_UNCHECKED); 1407 - encode_attrs(xdr, arg->u.attrs, arg->label, arg->server); 1391 + encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false); 1408 1392 break; 1409 1393 case NFS4_CREATE_GUARDED: 1410 1394 *p = cpu_to_be32(NFS4_CREATE_GUARDED); 1411 - encode_attrs(xdr, arg->u.attrs, arg->label, arg->server); 1395 + encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false); 1412 1396 break; 1413 1397 case NFS4_CREATE_EXCLUSIVE: 1414 1398 *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE); ··· 1416 1402 case NFS4_CREATE_EXCLUSIVE4_1: 1417 1403 *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1); 1418 1404 encode_nfs4_verifier(xdr, &arg->u.verifier); 1419 - dummy.ia_valid = 0; 1420 - encode_attrs(xdr, &dummy, arg->label, arg->server); 1405 + encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true); 1421 1406 } 1422 1407 } 1423 1408 ··· 1672 1659 { 1673 1660 encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr); 1674 1661 encode_nfs4_stateid(xdr, &arg->stateid); 1675 - encode_attrs(xdr, arg->iap, arg->label, server); 1662 + encode_attrs(xdr, arg->iap, arg->label, server, false); 1676 1663 } 1677 1664 1678 1665 static 
void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr) ··· 2593 2580 struct xdr_stream *xdr, 2594 2581 struct nfs4_server_caps_arg *args) 2595 2582 { 2583 + const u32 *bitmask = args->bitmask; 2596 2584 struct compound_hdr hdr = { 2597 2585 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2598 2586 }; ··· 2601 2587 encode_compound_hdr(xdr, req, &hdr); 2602 2588 encode_sequence(xdr, &args->seq_args, &hdr); 2603 2589 encode_putfh(xdr, args->fhandle, &hdr); 2604 - encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS| 2605 - FATTR4_WORD0_FH_EXPIRE_TYPE| 2606 - FATTR4_WORD0_LINK_SUPPORT| 2607 - FATTR4_WORD0_SYMLINK_SUPPORT| 2608 - FATTR4_WORD0_ACLSUPPORT, &hdr); 2590 + encode_getattr_three(xdr, bitmask[0], bitmask[1], bitmask[2], &hdr); 2609 2591 encode_nops(&hdr); 2610 2592 } 2611 2593 ··· 3376 3366 out_overflow: 3377 3367 print_overflow_msg(__func__, xdr); 3378 3368 return -EIO; 3369 + } 3370 + 3371 + static int decode_attr_exclcreat_supported(struct xdr_stream *xdr, 3372 + uint32_t *bitmap, uint32_t *bitmask) 3373 + { 3374 + if (likely(bitmap[2] & FATTR4_WORD2_SUPPATTR_EXCLCREAT)) { 3375 + int ret; 3376 + ret = decode_attr_bitmap(xdr, bitmask); 3377 + if (unlikely(ret < 0)) 3378 + return ret; 3379 + bitmap[2] &= ~FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3380 + } else 3381 + bitmask[0] = bitmask[1] = bitmask[2] = 0; 3382 + dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__, 3383 + bitmask[0], bitmask[1], bitmask[2]); 3384 + return 0; 3379 3385 } 3380 3386 3381 3387 static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh) ··· 4347 4321 goto xdr_error; 4348 4322 if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0) 4349 4323 goto xdr_error; 4324 + if ((status = decode_attr_exclcreat_supported(xdr, bitmap, 4325 + res->exclcreat_bitmask)) != 0) 4326 + goto xdr_error; 4350 4327 status = verify_attr_len(xdr, savep, attrlen); 4351 4328 xdr_error: 4352 4329 dprintk("%s: xdr returned %d!\n", __func__, -status); ··· 4932 4903 } 4933 4904 4934 4905 /* This is too sick! */ 4935 - static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize) 4906 + static int decode_space_limit(struct xdr_stream *xdr, 4907 + unsigned long *pagemod_limit) 4936 4908 { 4937 4909 __be32 *p; 4938 4910 uint32_t limit_type, nblocks, blocksize; 4911 + u64 maxsize = 0; 4939 4912 4940 4913 p = xdr_inline_decode(xdr, 12); 4941 4914 if (unlikely(!p)) 4942 4915 goto out_overflow; 4943 4916 limit_type = be32_to_cpup(p++); 4944 4917 switch (limit_type) { 4945 - case 1: 4946 - xdr_decode_hyper(p, maxsize); 4918 + case NFS4_LIMIT_SIZE: 4919 + xdr_decode_hyper(p, &maxsize); 4947 4920 break; 4948 - case 2: 4921 + case NFS4_LIMIT_BLOCKS: 4949 4922 nblocks = be32_to_cpup(p++); 4950 4923 blocksize = be32_to_cpup(p); 4951 - *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; 4924 + maxsize = (uint64_t)nblocks * (uint64_t)blocksize; 4952 4925 } 4926 + maxsize >>= PAGE_CACHE_SHIFT; 4927 + *pagemod_limit = min_t(u64, maxsize, ULONG_MAX); 4953 4928 return 0; 4954 4929 out_overflow: 4955 4930 print_overflow_msg(__func__, xdr); ··· 4981 4948 break; 4982 4949 case NFS4_OPEN_DELEGATE_WRITE: 4983 4950 res->delegation_type = FMODE_WRITE|FMODE_READ; 4984 - if (decode_space_limit(xdr, &res->maxsize) < 0) 4951 + if (decode_space_limit(xdr, &res->pagemod_limit) < 0) 4985 4952 return -EIO; 4986 4953 } 4987 4954 return decode_ace(xdr, NULL, res->server->nfs_client);
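decode_space_limit() above now folds both NFS4_LIMIT_SIZE and NFS4_LIMIT_BLOCKS into a single page count, clamped so it still fits the unsigned long pagemod_limit field on 32-bit kernels. A standalone sketch of that arithmetic (assuming 4 KiB pages; limit_type values 1 and 2 are the constants named in the hunk):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

static unsigned long space_limit_to_pages(uint32_t limit_type, uint64_t maxsize,
                                          uint32_t nblocks, uint32_t blocksize)
{
        if (limit_type == 2)    /* NFS4_LIMIT_BLOCKS */
                maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
        maxsize >>= PAGE_SHIFT; /* bytes -> whole pages */
        return maxsize > (uint64_t)ULONG_MAX ?
                ULONG_MAX : (unsigned long)maxsize;
}

int main(void)
{
        /* 1 GiB expressed as 2048 blocks of 512 KiB -> 262144 pages */
        assert(space_limit_to_pages(2, 0, 2048, 512 * 1024) == 262144UL);
        /* NFS4_LIMIT_SIZE: a 1 MiB byte budget -> 256 pages */
        assert(space_limit_to_pages(1, 1 << 20, 0, 0) == 256UL);
        return 0;
}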
+2 -2
fs/nfs/pagelist.c
··· 77 77 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) 78 78 { 79 79 spin_lock(&hdr->lock); 80 - if (pos < hdr->io_start + hdr->good_bytes) { 81 - set_bit(NFS_IOHDR_ERROR, &hdr->flags); 80 + if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags) 81 + || pos < hdr->io_start + hdr->good_bytes) { 82 82 clear_bit(NFS_IOHDR_EOF, &hdr->flags); 83 83 hdr->good_bytes = pos - hdr->io_start; 84 84 hdr->error = error;
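The two-line change above fixes a case where nfs_set_pgio_error() could miss an error entirely: previously, an error whose offset sat at or beyond the current good-byte watermark was dropped without ever setting NFS_IOHDR_ERROR. With test_and_set_bit() the first error is always latched, and later errors only matter if they pull the watermark backwards. A userspace model of the new logic (the struct is hypothetical; the bit flag becomes a plain bool):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct pgio_hdr {
        bool     error_set;     /* stands in for NFS_IOHDR_ERROR */
        int      error;
        uint64_t io_start;
        uint64_t good_bytes;
};

static void set_pgio_error(struct pgio_hdr *hdr, int error, uint64_t pos)
{
        bool first = !hdr->error_set;

        hdr->error_set = true;  /* test_and_set_bit() */
        if (first || pos < hdr->io_start + hdr->good_bytes) {
                hdr->good_bytes = pos - hdr->io_start;
                hdr->error = error;
        }
}

int main(void)
{
        struct pgio_hdr hdr = { .io_start = 0, .good_bytes = 4096 };

        /* an error at the watermark used to be ignored; now it sticks */
        set_pgio_error(&hdr, -5 /* -EIO */, 4096);
        assert(hdr.error == -5 && hdr.good_bytes == 4096);

        /* an earlier error pulls the watermark back */
        set_pgio_error(&hdr, -70 /* -ECOMM */, 1024);
        assert(hdr.error == -70 && hdr.good_bytes == 1024);
        return 0;
}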
+135 -92
fs/nfs/pnfs.c
··· 368 368 if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) 369 369 return false; 370 370 lo->plh_return_iomode = 0; 371 - lo->plh_block_lgets++; 372 371 pnfs_get_layout_hdr(lo); 373 372 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); 374 373 return true; ··· 816 817 return !pnfs_seqid_is_newer(seqid, lo->plh_barrier); 817 818 } 818 819 819 - static bool 820 - pnfs_layout_returning(const struct pnfs_layout_hdr *lo, 821 - struct pnfs_layout_range *range) 822 - { 823 - return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) && 824 - (lo->plh_return_iomode == IOMODE_ANY || 825 - lo->plh_return_iomode == range->iomode); 826 - } 827 - 828 820 /* lget is set to 1 if called from inside send_layoutget call chain */ 829 821 static bool 830 - pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, 831 - struct pnfs_layout_range *range, int lget) 822 + pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo) 832 823 { 833 824 return lo->plh_block_lgets || 834 - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || 835 - (list_empty(&lo->plh_segs) && 836 - (atomic_read(&lo->plh_outstanding) > lget)) || 837 - pnfs_layout_returning(lo, range); 825 + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); 838 826 } 839 827 840 828 int ··· 833 847 834 848 dprintk("--> %s\n", __func__); 835 849 spin_lock(&lo->plh_inode->i_lock); 836 - if (pnfs_layoutgets_blocked(lo, range, 1)) { 850 + if (pnfs_layoutgets_blocked(lo)) { 837 851 status = -EAGAIN; 838 852 } else if (!nfs4_valid_open_stateid(open_state)) { 839 853 status = -EBADF; ··· 868 882 struct nfs_server *server = NFS_SERVER(ino); 869 883 struct nfs4_layoutget *lgp; 870 884 struct pnfs_layout_segment *lseg; 885 + loff_t i_size; 871 886 872 887 dprintk("--> %s\n", __func__); 873 888 ··· 876 889 if (lgp == NULL) 877 890 return NULL; 878 891 892 + i_size = i_size_read(ino); 893 + 879 894 lgp->args.minlength = PAGE_CACHE_SIZE; 880 895 if (lgp->args.minlength > range->length) 881 896 lgp->args.minlength = range->length; 897 + if (range->iomode == IOMODE_READ) { 898 + if (range->offset >= i_size) 899 + lgp->args.minlength = 0; 900 + else if (i_size - range->offset < lgp->args.minlength) 901 + lgp->args.minlength = i_size - range->offset; 902 + } 882 903 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; 883 904 lgp->args.range = *range; 884 905 lgp->args.type = server->pnfs_curr_ld->id; ··· 951 956 if (unlikely(lrp == NULL)) { 952 957 status = -ENOMEM; 953 958 spin_lock(&ino->i_lock); 954 - lo->plh_block_lgets--; 955 959 pnfs_clear_layoutreturn_waitbit(lo); 956 - rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq); 957 960 spin_unlock(&ino->i_lock); 958 961 pnfs_put_layout_hdr(lo); 959 962 goto out; ··· 1073 1080 struct pnfs_layout_segment *lseg, *tmp; 1074 1081 nfs4_stateid stateid; 1075 1082 LIST_HEAD(tmp_list); 1076 - bool found = false, layoutreturn = false; 1083 + bool found = false, layoutreturn = false, roc = false; 1077 1084 1078 1085 spin_lock(&ino->i_lock); 1079 1086 lo = nfsi->layout; 1080 - if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) || 1081 - test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) 1087 + if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) 1082 1088 goto out_noroc; 1083 1089 1084 - /* Don't return layout if we hold a delegation */ 1090 + /* no roc if we hold a delegation */ 1085 1091 if (nfs4_check_delegation(ino, FMODE_READ)) 1086 1092 goto out_noroc; 1087 1093 ··· 1091 1099 goto out_noroc; 1092 1100 } 1093 1101 1102 + stateid = lo->plh_stateid; 1103 + /* always send layoutreturn if being marked so */ 1104 + if 
(test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1105 + &lo->plh_flags)) 1106 + layoutreturn = pnfs_prepare_layoutreturn(lo); 1107 + 1094 1108 pnfs_clear_retry_layoutget(lo); 1095 1109 list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) 1096 - if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1110 + /* If we are sending layoutreturn, invalidate all valid lsegs */ 1111 + if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1097 1112 mark_lseg_invalid(lseg, &tmp_list); 1098 1113 found = true; 1099 1114 } 1100 - if (!found) 1101 - goto out_noroc; 1102 - lo->plh_block_lgets++; 1103 - pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */ 1115 + /* pnfs_prepare_layoutreturn() grabs lo ref and it will be put 1116 + * in pnfs_roc_release(). We don't really send a layoutreturn but 1117 + * still want others to view us like we are sending one! 1118 + * 1119 + * If pnfs_prepare_layoutreturn() fails, it means someone else is doing 1120 + * LAYOUTRETURN, so we proceed like there are no layouts to return. 1121 + * 1122 + * ROC in three conditions: 1123 + * 1. there are ROC lsegs 1124 + * 2. we don't send layoutreturn 1125 + * 3. no others are sending layoutreturn 1126 + */ 1127 + if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo)) 1128 + roc = true; 1129 + 1130 + out_noroc: 1104 1131 spin_unlock(&ino->i_lock); 1105 1132 pnfs_free_lseg_list(&tmp_list); 1106 1133 pnfs_layoutcommit_inode(ino, true); 1107 - return true; 1108 - 1109 - out_noroc: 1110 - if (lo) { 1111 - stateid = lo->plh_stateid; 1112 - if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1113 - &lo->plh_flags)) 1114 - layoutreturn = pnfs_prepare_layoutreturn(lo); 1115 - } 1116 - spin_unlock(&ino->i_lock); 1117 - if (layoutreturn) { 1118 - pnfs_layoutcommit_inode(ino, true); 1134 + if (layoutreturn) 1119 1135 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 1120 - } 1121 - return false; 1136 + return roc; 1122 1137 } 1123 1138 1124 1139 void pnfs_roc_release(struct inode *ino) ··· 1134 1135 1135 1136 spin_lock(&ino->i_lock); 1136 1137 lo = NFS_I(ino)->layout; 1137 - lo->plh_block_lgets--; 1138 + pnfs_clear_layoutreturn_waitbit(lo); 1138 1139 if (atomic_dec_and_test(&lo->plh_refcount)) { 1139 1140 pnfs_detach_layout_hdr(lo); 1140 1141 spin_unlock(&ino->i_lock); ··· 1152 1153 if (pnfs_seqid_is_newer(barrier, lo->plh_barrier)) 1153 1154 lo->plh_barrier = barrier; 1154 1155 spin_unlock(&ino->i_lock); 1156 + trace_nfs4_layoutreturn_on_close(ino, 0); 1155 1157 } 1156 1158 1157 - bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) 1159 + void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) 1158 1160 { 1159 1161 struct nfs_inode *nfsi = NFS_I(ino); 1160 1162 struct pnfs_layout_hdr *lo; 1161 - struct pnfs_layout_segment *lseg; 1162 - nfs4_stateid stateid; 1163 1163 u32 current_seqid; 1164 - bool layoutreturn = false; 1165 1164 1166 1165 spin_lock(&ino->i_lock); 1167 - list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) { 1168 - if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) 1169 - continue; 1170 - if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) 1171 - continue; 1172 - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1173 - spin_unlock(&ino->i_lock); 1174 - return true; 1175 - } 1176 1166 lo = nfsi->layout; 1177 1167 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); 1178 1168 ··· 1169 1181 * a barrier, we choose the worst-case barrier. 
1170 1182 */ 1171 1183 *barrier = current_seqid + atomic_read(&lo->plh_outstanding); 1172 - stateid = lo->plh_stateid; 1173 - if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1174 - &lo->plh_flags)) 1175 - layoutreturn = pnfs_prepare_layoutreturn(lo); 1176 - if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) 1177 - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1178 - 1179 1184 spin_unlock(&ino->i_lock); 1180 - if (layoutreturn) { 1181 - pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); 1182 - return true; 1183 - } 1184 - return false; 1185 1185 } 1186 1186 1187 1187 /* ··· 1197 1221 return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ); 1198 1222 } 1199 1223 1200 - static void 1201 - pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo, 1202 - struct pnfs_layout_segment *lseg) 1224 + static bool 1225 + pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1, 1226 + const struct pnfs_layout_range *l2) 1203 1227 { 1204 - struct pnfs_layout_segment *lp; 1228 + return pnfs_lseg_range_cmp(l1, l2) > 0; 1229 + } 1230 + 1231 + static bool 1232 + pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg, 1233 + struct pnfs_layout_segment *old) 1234 + { 1235 + return false; 1236 + } 1237 + 1238 + void 1239 + pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo, 1240 + struct pnfs_layout_segment *lseg, 1241 + bool (*is_after)(const struct pnfs_layout_range *, 1242 + const struct pnfs_layout_range *), 1243 + bool (*do_merge)(struct pnfs_layout_segment *, 1244 + struct pnfs_layout_segment *), 1245 + struct list_head *free_me) 1246 + { 1247 + struct pnfs_layout_segment *lp, *tmp; 1205 1248 1206 1249 dprintk("%s:Begin\n", __func__); 1207 1250 1208 - list_for_each_entry(lp, &lo->plh_segs, pls_list) { 1209 - if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0) 1251 + list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) { 1252 + if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0) 1253 + continue; 1254 + if (do_merge(lseg, lp)) { 1255 + mark_lseg_invalid(lp, free_me); 1256 + continue; 1257 + } 1258 + if (is_after(&lseg->pls_range, &lp->pls_range)) 1210 1259 continue; 1211 1260 list_add_tail(&lseg->pls_list, &lp->pls_list); 1212 1261 dprintk("%s: inserted lseg %p " ··· 1252 1251 pnfs_get_layout_hdr(lo); 1253 1252 1254 1253 dprintk("%s:Return\n", __func__); 1254 + } 1255 + EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg); 1256 + 1257 + static void 1258 + pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo, 1259 + struct pnfs_layout_segment *lseg, 1260 + struct list_head *free_me) 1261 + { 1262 + struct inode *inode = lo->plh_inode; 1263 + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 1264 + 1265 + if (ld->add_lseg != NULL) 1266 + ld->add_lseg(lo, lseg, free_me); 1267 + else 1268 + pnfs_generic_layout_insert_lseg(lo, lseg, 1269 + pnfs_lseg_range_is_after, 1270 + pnfs_lseg_no_merge, 1271 + free_me); 1255 1272 } 1256 1273 1257 1274 static struct pnfs_layout_hdr * ··· 1363 1344 ret = pnfs_get_lseg(lseg); 1364 1345 break; 1365 1346 } 1366 - if (lseg->pls_range.offset > range->offset) 1367 - break; 1368 1347 } 1369 1348 1370 1349 dprintk("%s:Return lseg %p ref %d\n", ··· 1455 1438 1456 1439 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) 1457 1440 { 1441 + if (!pnfs_should_retry_layoutget(lo)) 1442 + return false; 1458 1443 /* 1459 1444 * send layoutcommit as it can hold up layoutreturn due to lseg 1460 1445 * reference ··· 1501 1482 bool first; 1502 1483 1503 1484 if (!pnfs_enabled_sb(NFS_SERVER(ino))) 1485 + 
goto out; 1486 + 1487 + if (iomode == IOMODE_READ && i_size_read(ino) == 0) 1504 1488 goto out; 1505 1489 1506 1490 if (pnfs_within_mdsthreshold(ctx, ino, iomode)) ··· 1555 1533 * Because we free lsegs before sending LAYOUTRETURN, we need to wait 1556 1534 * for LAYOUTRETURN even if first is true. 1557 1535 */ 1558 - if (!lseg && pnfs_should_retry_layoutget(lo) && 1559 - test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { 1536 + if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { 1560 1537 spin_unlock(&ino->i_lock); 1561 1538 dprintk("%s wait for layoutreturn\n", __func__); 1562 1539 if (pnfs_prepare_to_retry_layoutget(lo)) { ··· 1568 1547 goto out_put_layout_hdr; 1569 1548 } 1570 1549 1571 - if (pnfs_layoutgets_blocked(lo, &arg, 0)) 1550 + if (pnfs_layoutgets_blocked(lo)) 1572 1551 goto out_unlock; 1573 1552 atomic_inc(&lo->plh_outstanding); 1574 1553 spin_unlock(&ino->i_lock); ··· 1614 1593 } 1615 1594 EXPORT_SYMBOL_GPL(pnfs_update_layout); 1616 1595 1596 + static bool 1597 + pnfs_sanity_check_layout_range(struct pnfs_layout_range *range) 1598 + { 1599 + switch (range->iomode) { 1600 + case IOMODE_READ: 1601 + case IOMODE_RW: 1602 + break; 1603 + default: 1604 + return false; 1605 + } 1606 + if (range->offset == NFS4_MAX_UINT64) 1607 + return false; 1608 + if (range->length == 0) 1609 + return false; 1610 + if (range->length != NFS4_MAX_UINT64 && 1611 + range->length > NFS4_MAX_UINT64 - range->offset) 1612 + return false; 1613 + return true; 1614 + } 1615 + 1617 1616 struct pnfs_layout_segment * 1618 1617 pnfs_layout_process(struct nfs4_layoutget *lgp) 1619 1618 { ··· 1642 1601 struct pnfs_layout_segment *lseg; 1643 1602 struct inode *ino = lo->plh_inode; 1644 1603 LIST_HEAD(free_me); 1645 - int status = 0; 1604 + int status = -EINVAL; 1605 + 1606 + if (!pnfs_sanity_check_layout_range(&res->range)) 1607 + goto out; 1646 1608 1647 1609 /* Inject layout blob into I/O device driver */ 1648 1610 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); ··· 1663 1619 lseg->pls_range = res->range; 1664 1620 1665 1621 spin_lock(&ino->i_lock); 1666 - if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { 1667 - dprintk("%s forget reply due to recall\n", __func__); 1668 - goto out_forget_reply; 1669 - } 1670 - 1671 - if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) { 1622 + if (pnfs_layoutgets_blocked(lo)) { 1672 1623 dprintk("%s forget reply due to state\n", __func__); 1673 1624 goto out_forget_reply; 1674 1625 } ··· 1690 1651 clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1691 1652 1692 1653 pnfs_get_lseg(lseg); 1693 - pnfs_layout_insert_lseg(lo, lseg); 1654 + pnfs_layout_insert_lseg(lo, lseg, &free_me); 1694 1655 1695 - if (res->return_on_close) { 1656 + if (res->return_on_close) 1696 1657 set_bit(NFS_LSEG_ROC, &lseg->pls_flags); 1697 - set_bit(NFS_LAYOUT_ROC, &lo->plh_flags); 1698 - } 1699 1658 1700 1659 spin_unlock(&ino->i_lock); 1701 1660 pnfs_free_lseg_list(&free_me); ··· 1729 1692 lseg->pls_range.length); 1730 1693 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); 1731 1694 mark_lseg_invalid(lseg, tmp_list); 1695 + set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1696 + &lo->plh_flags); 1732 1697 } 1733 1698 } 1734 1699 ··· 2306 2267 2307 2268 #if IS_ENABLED(CONFIG_NFS_V4_2) 2308 2269 int 2309 - pnfs_report_layoutstat(struct inode *inode) 2270 + pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags) 2310 2271 { 2311 2272 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 2312 2273 struct nfs_server *server = NFS_SERVER(inode); ··· 2333 2294 
pnfs_get_layout_hdr(hdr); 2334 2295 spin_unlock(&inode->i_lock); 2335 2296 2336 - data = kzalloc(sizeof(*data), GFP_KERNEL); 2297 + data = kzalloc(sizeof(*data), gfp_flags); 2337 2298 if (!data) { 2338 2299 status = -ENOMEM; 2339 2300 goto out_put; ··· 2363 2324 } 2364 2325 EXPORT_SYMBOL_GPL(pnfs_report_layoutstat); 2365 2326 #endif 2327 + 2328 + unsigned int layoutstats_timer; 2329 + module_param(layoutstats_timer, uint, 0644); 2330 + EXPORT_SYMBOL_GPL(layoutstats_timer);
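Among the pnfs.c changes above, send_layoutget() now clamps args.minlength for read layouts so the client never demands that the MDS guarantee bytes past end-of-file, and pnfs_update_layout() bails out early for an empty file. A standalone sketch of the clamping (assuming 4 KiB pages; all names are local to the sketch):

#include <assert.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096ULL /* assumption: 4 KiB pages */

static uint64_t read_layout_minlength(uint64_t offset, uint64_t length,
                                      uint64_t i_size)
{
        uint64_t minlength = PAGE_CACHE_SIZE;

        if (minlength > length)
                minlength = length;
        if (offset >= i_size)
                minlength = 0;                  /* range starts at/after EOF */
        else if (i_size - offset < minlength)
                minlength = i_size - offset;    /* clamp to EOF */
        return minlength;
}

int main(void)
{
        assert(read_layout_minlength(0, 1 << 20, 100) == 100);
        assert(read_layout_minlength(8192, 4096, 8192) == 0);
        assert(read_layout_minlength(0, 4096, 1 << 20) == 4096);
        return 0;
}

A minlength of 0 is what allows the new error handling above to turn NFS4ERR_LAYOUTTRYLATER into -EOVERFLOW instead of retrying forever, per the RFC 5661 section 18.43.3 comments in the nfs4proc.c hunk.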
+38 -10
fs/nfs/pnfs.h
··· 94 94 NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ 95 95 NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ 96 96 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ 97 - NFS_LAYOUT_ROC, /* some lseg had roc bit set */ 98 97 NFS_LAYOUT_RETURN, /* Return this layout ASAP */ 99 98 NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */ 100 99 NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ ··· 128 129 129 130 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); 130 131 void (*free_lseg) (struct pnfs_layout_segment *lseg); 132 + void (*add_lseg) (struct pnfs_layout_hdr *layoutid, 133 + struct pnfs_layout_segment *lseg, 134 + struct list_head *free_me); 131 135 132 136 void (*return_range) (struct pnfs_layout_hdr *lo, 133 137 struct pnfs_layout_range *range); ··· 186 184 187 185 struct pnfs_layout_hdr { 188 186 atomic_t plh_refcount; 187 + atomic_t plh_outstanding; /* number of RPCs out */ 189 188 struct list_head plh_layouts; /* other client layouts */ 190 189 struct list_head plh_bulk_destroy; 191 190 struct list_head plh_segs; /* layout segments list */ 192 - nfs4_stateid plh_stateid; 193 - atomic_t plh_outstanding; /* number of RPCs out */ 194 191 unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */ 195 - u32 plh_barrier; /* ignore lower seqids */ 196 192 unsigned long plh_retry_timestamp; 197 193 unsigned long plh_flags; 194 + nfs4_stateid plh_stateid; 195 + u32 plh_barrier; /* ignore lower seqids */ 198 196 enum pnfs_iomode plh_return_iomode; 199 197 loff_t plh_lwb; /* last write byte for layoutcommit */ 200 198 struct rpc_cred *plh_lc_cred; /* layoutcommit cred */ ··· 269 267 bool pnfs_roc(struct inode *ino); 270 268 void pnfs_roc_release(struct inode *ino); 271 269 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); 272 - bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task); 270 + void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier); 273 271 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t); 274 272 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); 275 273 int pnfs_layoutcommit_inode(struct inode *inode, bool sync); ··· 287 285 enum pnfs_iomode iomode, 288 286 gfp_t gfp_flags); 289 287 void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo); 288 + 289 + void pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo, 290 + struct pnfs_layout_segment *lseg, 291 + bool (*is_after)(const struct pnfs_layout_range *lseg_range, 292 + const struct pnfs_layout_range *old), 293 + bool (*do_merge)(struct pnfs_layout_segment *lseg, 294 + struct pnfs_layout_segment *old), 295 + struct list_head *free_me); 290 296 291 297 void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); 292 298 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *); ··· 539 529 nfss->pnfs_curr_ld->id == src->l_type); 540 530 } 541 531 532 + static inline u64 533 + pnfs_calc_offset_end(u64 offset, u64 len) 534 + { 535 + if (len == NFS4_MAX_UINT64 || len >= NFS4_MAX_UINT64 - offset) 536 + return NFS4_MAX_UINT64; 537 + return offset + len - 1; 538 + } 539 + 540 + static inline u64 541 + pnfs_calc_offset_length(u64 offset, u64 end) 542 + { 543 + if (end == NFS4_MAX_UINT64 || end <= offset) 544 + return NFS4_MAX_UINT64; 545 + return 1 + end - offset; 546 + } 547 + 548 + extern unsigned int layoutstats_timer; 549 + 542 550 #ifdef NFS_DEBUG 543 551 void nfs4_print_deviceid(const 
struct nfs4_deviceid *dev_id); 544 552 #else 545 553 static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id) 546 554 { 547 555 } 556 + 548 557 #endif /* NFS_DEBUG */ 549 558 #else /* CONFIG_NFS_V4_1 */ 550 559 ··· 634 605 { 635 606 } 636 607 637 - static inline bool 638 - pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) 608 + static inline void 609 + pnfs_roc_get_barrier(struct inode *ino, u32 *barrier) 639 610 { 640 - return false; 641 611 } 642 612 643 613 static inline void set_pnfs_layoutdriver(struct nfs_server *s, ··· 719 691 #endif /* CONFIG_NFS_V4_1 */ 720 692 721 693 #if IS_ENABLED(CONFIG_NFS_V4_2) 722 - int pnfs_report_layoutstat(struct inode *inode); 694 + int pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags); 723 695 #else 724 696 static inline int 725 - pnfs_report_layoutstat(struct inode *inode) 697 + pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags) 726 698 { 727 699 return 0; 728 700 }
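The new pnfs_calc_offset_end()/pnfs_calc_offset_length() inlines above are careful about the NFS4_MAX_UINT64 "to EOF" sentinel and about u64 overflow. Reproduced verbatim in userspace to show the round trip:

#include <assert.h>
#include <stdint.h>

#define NFS4_MAX_UINT64 (~(uint64_t)0)

static uint64_t calc_offset_end(uint64_t offset, uint64_t len)
{
        if (len == NFS4_MAX_UINT64 || len >= NFS4_MAX_UINT64 - offset)
                return NFS4_MAX_UINT64;         /* open-ended range */
        return offset + len - 1;                /* inclusive end offset */
}

static uint64_t calc_offset_length(uint64_t offset, uint64_t end)
{
        if (end == NFS4_MAX_UINT64 || end <= offset)
                return NFS4_MAX_UINT64;
        return 1 + end - offset;
}

int main(void)
{
        /* a bounded range survives the round trip */
        assert(calc_offset_end(4096, 8192) == 12287);
        assert(calc_offset_length(4096, 12287) == 8192);
        /* anything that would overflow collapses to "whole file" */
        assert(calc_offset_end(NFS4_MAX_UINT64 - 1, 2) == NFS4_MAX_UINT64);
        return 0;
}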
+53 -33
fs/nfs/pnfs_nfs.c
··· 124 124 if (ret) { 125 125 cinfo->ds->nwritten -= ret; 126 126 cinfo->ds->ncommitting += ret; 127 - bucket->clseg = bucket->wlseg; 128 - if (list_empty(src)) 127 + if (bucket->clseg == NULL) 128 + bucket->clseg = pnfs_get_lseg(bucket->wlseg); 129 + if (list_empty(src)) { 130 + pnfs_put_lseg_locked(bucket->wlseg); 129 131 bucket->wlseg = NULL; 130 - else 131 - pnfs_get_lseg(bucket->clseg); 132 + } 132 133 } 133 134 return ret; 134 135 } ··· 183 182 struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; 184 183 struct pnfs_commit_bucket *bucket; 185 184 struct pnfs_layout_segment *freeme; 185 + LIST_HEAD(pages); 186 186 int i; 187 187 188 + spin_lock(cinfo->lock); 188 189 for (i = idx; i < fl_cinfo->nbuckets; i++) { 189 190 bucket = &fl_cinfo->buckets[i]; 190 191 if (list_empty(&bucket->committing)) 191 192 continue; 192 - nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i); 193 - spin_lock(cinfo->lock); 194 193 freeme = bucket->clseg; 195 194 bucket->clseg = NULL; 195 + list_splice_init(&bucket->committing, &pages); 196 196 spin_unlock(cinfo->lock); 197 + nfs_retry_commit(&pages, freeme, cinfo, i); 197 198 pnfs_put_lseg(freeme); 199 + spin_lock(cinfo->lock); 198 200 } 201 + spin_unlock(cinfo->lock); 199 202 } 200 203 201 204 static unsigned int ··· 221 216 if (!data) 222 217 break; 223 218 data->ds_commit_index = i; 224 - spin_lock(cinfo->lock); 225 - data->lseg = bucket->clseg; 226 - bucket->clseg = NULL; 227 - spin_unlock(cinfo->lock); 228 219 list_add(&data->pages, list); 229 220 nreq++; 230 221 } ··· 228 227 /* Clean up on error */ 229 228 pnfs_generic_retry_commit(cinfo, i); 230 229 return nreq; 230 + } 231 + 232 + static inline 233 + void pnfs_fetch_commit_bucket_list(struct list_head *pages, 234 + struct nfs_commit_data *data, 235 + struct nfs_commit_info *cinfo) 236 + { 237 + struct pnfs_commit_bucket *bucket; 238 + 239 + bucket = &cinfo->ds->buckets[data->ds_commit_index]; 240 + spin_lock(cinfo->lock); 241 + list_splice_init(&bucket->committing, pages); 242 + data->lseg = bucket->clseg; 243 + bucket->clseg = NULL; 244 + spin_unlock(cinfo->lock); 245 + 231 246 } 232 247 233 248 /* This follows nfs_commit_list pretty closely */ ··· 260 243 if (!list_empty(mds_pages)) { 261 244 data = nfs_commitdata_alloc(); 262 245 if (data != NULL) { 263 - data->lseg = NULL; 246 + data->ds_commit_index = -1; 264 247 list_add(&data->pages, &list); 265 248 nreq++; 266 249 } else { ··· 282 265 283 266 list_for_each_entry_safe(data, tmp, &list, pages) { 284 267 list_del_init(&data->pages); 285 - if (!data->lseg) { 268 + if (data->ds_commit_index < 0) { 286 269 nfs_init_commit(data, mds_pages, NULL, cinfo); 287 270 nfs_initiate_commit(NFS_CLIENT(inode), data, 288 271 NFS_PROTO(data->inode), 289 272 data->mds_ops, how, 0); 290 273 } else { 291 - struct pnfs_commit_bucket *buckets; 274 + LIST_HEAD(pages); 292 275 293 - buckets = cinfo->ds->buckets; 294 - nfs_init_commit(data, 295 - &buckets[data->ds_commit_index].committing, 296 - data->lseg, 297 - cinfo); 276 + pnfs_fetch_commit_bucket_list(&pages, data, cinfo); 277 + nfs_init_commit(data, &pages, data->lseg, cinfo); 298 278 initiate_commit(data, how); 299 279 } 300 280 } ··· 373 359 return false; 374 360 } 375 361 362 + /* 363 + * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does, 364 + * declare a match. 
365 + */ 376 366 static bool 377 367 _same_data_server_addrs_locked(const struct list_head *dsaddrs1, 378 368 const struct list_head *dsaddrs2) 379 369 { 380 370 struct nfs4_pnfs_ds_addr *da1, *da2; 371 + struct sockaddr *sa1, *sa2; 372 + bool match = false; 381 373 382 - /* step through both lists, comparing as we go */ 383 - for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), 384 - da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); 385 - da1 != NULL && da2 != NULL; 386 - da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), 387 - da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { 388 - if (!same_sockaddr((struct sockaddr *)&da1->da_addr, 389 - (struct sockaddr *)&da2->da_addr)) 390 - return false; 374 + list_for_each_entry(da1, dsaddrs1, da_node) { 375 + sa1 = (struct sockaddr *)&da1->da_addr; 376 + match = false; 377 + list_for_each_entry(da2, dsaddrs2, da_node) { 378 + sa2 = (struct sockaddr *)&da2->da_addr; 379 + match = same_sockaddr(sa1, sa2); 380 + if (match) 381 + break; 382 + } 383 + if (!match) 384 + break; 391 385 } 392 - if (da1 == NULL && da2 == NULL) 393 - return true; 394 - 395 - return false; 386 + return match; 396 387 } 397 388 398 389 /* ··· 882 863 } 883 864 set_bit(PG_COMMIT_TO_DS, &req->wb_flags); 884 865 cinfo->ds->nwritten++; 885 - spin_unlock(cinfo->lock); 886 866 887 - nfs_request_add_commit_list(req, list, cinfo); 867 + nfs_request_add_commit_list_locked(req, list, cinfo); 868 + spin_unlock(cinfo->lock); 869 + nfs_mark_page_unstable(req->wb_page, cinfo); 888 870 } 889 871 EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); 890 872
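The rewritten _same_data_server_addrs_locked() above replaces a positional element-by-element walk, which silently required both multipath lists to have the same order and length, with a genuine subset test: every address in dsaddrs1 must appear somewhere in dsaddrs2. The same algorithm over plain arrays (ints stand in for the sockaddr comparison; list handling is simplified):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool addrs_subset(const int *list1, size_t n1,
                         const int *list2, size_t n2)
{
        bool match = false;

        for (size_t i = 0; i < n1; i++) {
                match = false;
                for (size_t j = 0; j < n2; j++) {
                        if (list1[i] == list2[j]) {
                                match = true;
                                break;
                        }
                }
                if (!match)
                        break;  /* list1[i] has no counterpart in list2 */
        }
        return match;
}

int main(void)
{
        int a[] = { 1, 2 }, b[] = { 2, 3, 1 };

        assert(addrs_subset(a, 2, b, 3));       /* order no longer matters */
        assert(!addrs_subset(b, 3, a, 2));      /* 3 is missing from a */
        return 0;
}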
+5 -2
fs/nfs/super.c
··· 381 381 ret = nfs_register_sysctl(); 382 382 if (ret < 0) 383 383 goto error_2; 384 - register_shrinker(&acl_shrinker); 384 + ret = register_shrinker(&acl_shrinker); 385 + if (ret < 0) 386 + goto error_3; 385 387 return 0; 386 - 388 + error_3: 389 + nfs_unregister_sysctl(); 387 390 error_2: 388 391 unregister_nfs4_fs(); 389 392 error_1:
+25 -11
fs/nfs/write.c
··· 768 768 } 769 769 770 770 /** 771 + * nfs_request_add_commit_list_locked - add request to a commit list 772 + * @req: pointer to a struct nfs_page 773 + * @dst: commit list head 774 + * @cinfo: holds list lock and accounting info 775 + * 776 + * This sets the PG_CLEAN bit, updates the cinfo count of 777 + * number of outstanding requests requiring a commit as well as 778 + * the MM page stats. 779 + * 780 + * The caller must hold the cinfo->lock, and the nfs_page lock. 781 + */ 782 + void 783 + nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, 784 + struct nfs_commit_info *cinfo) 785 + { 786 + set_bit(PG_CLEAN, &req->wb_flags); 787 + nfs_list_add_request(req, dst); 788 + cinfo->mds->ncommit++; 789 + } 790 + EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); 791 + 792 + /** 771 793 * nfs_request_add_commit_list - add request to a commit list 772 794 * @req: pointer to a struct nfs_page 773 795 * @dst: commit list head ··· 806 784 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, 807 785 struct nfs_commit_info *cinfo) 808 786 { 809 - set_bit(PG_CLEAN, &(req)->wb_flags); 810 787 spin_lock(cinfo->lock); 811 - nfs_list_add_request(req, dst); 812 - cinfo->mds->ncommit++; 788 + nfs_request_add_commit_list_locked(req, dst, cinfo); 813 789 spin_unlock(cinfo->lock); 814 - if (!cinfo->dreq) 815 - nfs_mark_page_unstable(req->wb_page); 790 + nfs_mark_page_unstable(req->wb_page, cinfo); 816 791 } 817 792 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); 818 793 ··· 1812 1793 return res; 1813 1794 } 1814 1795 1815 - static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1796 + int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1816 1797 { 1817 1798 struct nfs_inode *nfsi = NFS_I(inode); 1818 1799 int flags = FLUSH_SYNC; ··· 1846 1827 out_mark_dirty: 1847 1828 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1848 1829 return ret; 1849 - } 1850 - 1851 - int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1852 - { 1853 - return nfs_commit_unstable_pages(inode, wbc); 1854 1830 } 1855 1831 EXPORT_SYMBOL_GPL(nfs_write_inode); 1856 1832
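The split above follows the usual _locked-suffix convention: nfs_request_add_commit_list_locked() does only the work that needs cinfo->lock, so pNFS callers such as pnfs_layout_mark_request_commit() that already hold the lock can call it directly, while the page-accounting side effect (nfs_mark_page_unstable()) runs after the lock is dropped. A generic sketch of the pattern with a hypothetical list type:

#include <assert.h>
#include <pthread.h>

struct commit_list {
        pthread_mutex_t lock;
        int             ncommit;
};

/* Caller must hold list->lock (cf. the kernel-doc comment above). */
static void commit_add_locked(struct commit_list *list)
{
        list->ncommit++;
}

/* Convenience wrapper for callers that do not hold the lock. */
static void commit_add(struct commit_list *list)
{
        pthread_mutex_lock(&list->lock);
        commit_add_locked(list);
        pthread_mutex_unlock(&list->lock);
        /* heavier accounting (cf. nfs_mark_page_unstable) goes here,
         * deliberately outside the lock */
}

int main(void)
{
        struct commit_list cl = { PTHREAD_MUTEX_INITIALIZER, 0 };

        commit_add(&cl);
        assert(cl.ncommit == 1);
        return 0;
}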
+1 -1
fs/nfsd/blocklayoutxdr.c
··· 101 101 } 102 102 103 103 nr_iomaps = be32_to_cpup(p++); 104 - expected = sizeof(__be32) + nr_iomaps * NFS4_BLOCK_EXTENT_SIZE; 104 + expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE; 105 105 if (len != expected) { 106 106 dprintk("%s: extent array size mismatch: %u/%u\n", 107 107 __func__, len, expected);
-15
fs/nfsd/blocklayoutxdr.h
··· 7 7 struct iomap; 8 8 struct xdr_stream; 9 9 10 - enum pnfs_block_extent_state { 11 - PNFS_BLOCK_READWRITE_DATA = 0, 12 - PNFS_BLOCK_READ_DATA = 1, 13 - PNFS_BLOCK_INVALID_DATA = 2, 14 - PNFS_BLOCK_NONE_DATA = 3, 15 - }; 16 - 17 10 struct pnfs_block_extent { 18 11 struct nfsd4_deviceid vol_id; 19 12 u64 foff; 20 13 u64 len; 21 14 u64 soff; 22 15 enum pnfs_block_extent_state es; 23 - }; 24 - #define NFS4_BLOCK_EXTENT_SIZE 44 25 - 26 - enum pnfs_block_volume_type { 27 - PNFS_BLOCK_VOLUME_SIMPLE = 0, 28 - PNFS_BLOCK_VOLUME_SLICE = 1, 29 - PNFS_BLOCK_VOLUME_CONCAT = 2, 30 - PNFS_BLOCK_VOLUME_STRIPE = 3, 31 16 }; 32 17 33 18 /*
+18
include/linux/nfs4.h
··· 547 547 NOTIFY_DEVICEID4_DELETE = 1 << 2, 548 548 }; 549 549 550 + enum pnfs_block_volume_type { 551 + PNFS_BLOCK_VOLUME_SIMPLE = 0, 552 + PNFS_BLOCK_VOLUME_SLICE = 1, 553 + PNFS_BLOCK_VOLUME_CONCAT = 2, 554 + PNFS_BLOCK_VOLUME_STRIPE = 3, 555 + }; 556 + 557 + enum pnfs_block_extent_state { 558 + PNFS_BLOCK_READWRITE_DATA = 0, 559 + PNFS_BLOCK_READ_DATA = 1, 560 + PNFS_BLOCK_INVALID_DATA = 2, 561 + PNFS_BLOCK_NONE_DATA = 3, 562 + }; 563 + 564 + /* on the wire size of a block layout extent */ 565 + #define PNFS_BLOCK_EXTENT_SIZE \ 566 + (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE) 567 + 550 568 #define NFL4_UFLG_MASK 0x0000003F 551 569 #define NFL4_UFLG_DENSE 0x00000001 552 570 #define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
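The new PNFS_BLOCK_EXTENT_SIZE spells out the same on-the-wire size that the removed NFS4_BLOCK_EXTENT_SIZE hard-coded as 44: a deviceid4 (NFS4_DEVICEID4_SIZE is 16 bytes) plus three 64-bit fields (foff, len, soff: six XDR words) plus the extent-state enum (one XDR word). A quick userspace check of that arithmetic (constants copied by hand, so treat it as a sketch):

    #include <assert.h>
    #include <stdint.h>

    #define NFS4_DEVICEID4_SIZE 16
    #define PNFS_BLOCK_EXTENT_SIZE \
            (7 * sizeof(uint32_t) + NFS4_DEVICEID4_SIZE)

    int main(void)
    {
        /* 7 * 4 + 16 == 44, the value the old constant hard-coded */
        assert(PNFS_BLOCK_EXTENT_SIZE == 44);
        return 0;
    }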
+1 -1
include/linux/nfs_fs.h
··· 353 353 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
354 354 extern int nfs_permission(struct inode *, int);
355 355 extern int nfs_open(struct inode *, struct file *);
356 - extern int nfs_release(struct inode *, struct file *);
357 356 extern int nfs_attribute_timeout(struct inode *inode);
358 357 extern int nfs_attribute_cache_expired(struct inode *inode);
359 358 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
··· 370 371 extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
371 372 extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
372 373 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
374 + extern void nfs_file_clear_open_context(struct file *filp);
373 375 extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
374 376 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
375 377 extern u64 nfs_compat_user_ino64(u64 fileid);
+5
include/linux/nfs_fs_sb.h
··· 173 173 set of attributes supported 174 174 on this filesystem excluding 175 175 the label support bit. */ 176 + u32 exclcreat_bitmask[3]; 177 + /* V4 bitmask representing the 178 + set of attributes supported 179 + on this filesystem for the 180 + exclusive create. */ 176 181 u32 cache_consistency_bitmask[3]; 177 182 /* V4 bitmask representing the subset 178 183 of change attribute, size, ctime
+5 -3
include/linux/nfs_xdr.h
··· 379 379 struct stateowner_id id; 380 380 union { 381 381 struct { 382 - struct iattr * attrs; /* UNCHECKED, GUARDED */ 382 + struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */ 383 383 nfs4_verifier verifier; /* EXCLUSIVE */ 384 384 }; 385 385 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ ··· 389 389 const struct nfs_server *server; /* Needed for ID mapping */ 390 390 const u32 * bitmask; 391 391 const u32 * open_bitmap; 392 - __u32 claim; 392 + enum open_claim_type4 claim; 393 393 enum createmode4 createmode; 394 394 const struct nfs4_label *label; 395 395 }; ··· 406 406 const struct nfs_server *server; 407 407 fmode_t delegation_type; 408 408 nfs4_stateid delegation; 409 + unsigned long pagemod_limit; 409 410 __u32 do_recall; 410 - __u64 maxsize; 411 411 __u32 attrset[NFS4_BITMAP_SIZE]; 412 412 struct nfs4_string *owner; 413 413 struct nfs4_string *group_owner; ··· 1057 1057 struct nfs4_server_caps_arg { 1058 1058 struct nfs4_sequence_args seq_args; 1059 1059 struct nfs_fh *fhandle; 1060 + const u32 * bitmask; 1060 1061 }; 1061 1062 1062 1063 struct nfs4_server_caps_res { 1063 1064 struct nfs4_sequence_res seq_res; 1064 1065 u32 attr_bitmask[3]; 1066 + u32 exclcreat_bitmask[3]; 1065 1067 u32 acl_bitmask; 1066 1068 u32 has_links; 1067 1069 u32 has_symlinks;
+20 -7
include/linux/sunrpc/addr.h
··· 46 46 #define IPV6_SCOPE_DELIMITER '%' 47 47 #define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") 48 48 49 - static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, 50 - const struct sockaddr *sap2) 49 + static inline bool rpc_cmp_addr4(const struct sockaddr *sap1, 50 + const struct sockaddr *sap2) 51 51 { 52 52 const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; 53 53 const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; ··· 67 67 } 68 68 69 69 #if IS_ENABLED(CONFIG_IPV6) 70 - static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 71 - const struct sockaddr *sap2) 70 + static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, 71 + const struct sockaddr *sap2) 72 72 { 73 73 const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; 74 74 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; ··· 93 93 return true; 94 94 } 95 95 #else /* !(IS_ENABLED(CONFIG_IPV6) */ 96 - static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 96 + static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, 97 97 const struct sockaddr *sap2) 98 98 { 99 99 return false; ··· 122 122 if (sap1->sa_family == sap2->sa_family) { 123 123 switch (sap1->sa_family) { 124 124 case AF_INET: 125 - return __rpc_cmp_addr4(sap1, sap2); 125 + return rpc_cmp_addr4(sap1, sap2); 126 126 case AF_INET6: 127 - return __rpc_cmp_addr6(sap1, sap2); 127 + return rpc_cmp_addr6(sap1, sap2); 128 128 } 129 129 } 130 130 return false; 131 + } 132 + 133 + /** 134 + * rpc_cmp_addr_port - compare the address and port number of two sockaddrs. 135 + * @sap1: first sockaddr 136 + * @sap2: second sockaddr 137 + */ 138 + static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1, 139 + const struct sockaddr *sap2) 140 + { 141 + if (!rpc_cmp_addr(sap1, sap2)) 142 + return false; 143 + return rpc_get_port(sap1) == rpc_get_port(sap2); 131 144 } 132 145 133 146 /**
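rpc_cmp_addr_port() strengthens rpc_cmp_addr() by also requiring the port numbers to agree, which matters when one server exposes several endpoints on the same address. A userspace sketch of the distinction using plain sockaddr_in (not the sunrpc helpers):

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_port = htons(2049) };
        struct sockaddr_in b;

        inet_pton(AF_INET, "192.0.2.1", &a.sin_addr);
        b = a;
        b.sin_port = htons(20049);  /* same address, different port */

        /* rpc_cmp_addr() would call these equal;
         * rpc_cmp_addr_port() would not. */
        printf("addr match: %d, port match: %d\n",
               a.sin_addr.s_addr == b.sin_addr.s_addr,
               a.sin_port == b.sin_port);
        return 0;
    }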
+6 -2
include/linux/sunrpc/auth.h
··· 18 18 #include <linux/atomic.h> 19 19 #include <linux/rcupdate.h> 20 20 #include <linux/uidgid.h> 21 + #include <linux/utsname.h> 21 22 22 - /* size of the nodename buffer */ 23 - #define UNX_MAXNODENAME 32 23 + /* 24 + * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes, 25 + * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes. 26 + */ 27 + #define UNX_MAXNODENAME __NEW_UTS_LEN 24 28 25 29 struct rpcsec_gss_info; 26 30
+1 -1
include/linux/sunrpc/xprtrdma.h
··· 49 49 * a single chunk type per message is supported currently. 50 50 */ 51 51 #define RPCRDMA_MIN_SLOT_TABLE (2U) 52 - #define RPCRDMA_DEF_SLOT_TABLE (32U) 52 + #define RPCRDMA_DEF_SLOT_TABLE (128U) 53 53 #define RPCRDMA_MAX_SLOT_TABLE (256U) 54 54 55 55 #define RPCRDMA_DEF_INLINE (1024) /* default inline max */
-46
include/rdma/ib_verbs.h
··· 2760 2760 } 2761 2761 2762 2762 /** 2763 - * ib_reg_phys_mr - Prepares a virtually addressed memory region for use 2764 - * by an HCA. 2765 - * @pd: The protection domain associated assigned to the registered region. 2766 - * @phys_buf_array: Specifies a list of physical buffers to use in the 2767 - * memory region. 2768 - * @num_phys_buf: Specifies the size of the phys_buf_array. 2769 - * @mr_access_flags: Specifies the memory access rights. 2770 - * @iova_start: The offset of the region's starting I/O virtual address. 2771 - */ 2772 - struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, 2773 - struct ib_phys_buf *phys_buf_array, 2774 - int num_phys_buf, 2775 - int mr_access_flags, 2776 - u64 *iova_start); 2777 - 2778 - /** 2779 - * ib_rereg_phys_mr - Modifies the attributes of an existing memory region. 2780 - * Conceptually, this call performs the functions deregister memory region 2781 - * followed by register physical memory region. Where possible, 2782 - * resources are reused instead of deallocated and reallocated. 2783 - * @mr: The memory region to modify. 2784 - * @mr_rereg_mask: A bit-mask used to indicate which of the following 2785 - * properties of the memory region are being modified. 2786 - * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies 2787 - * the new protection domain to associated with the memory region, 2788 - * otherwise, this parameter is ignored. 2789 - * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this 2790 - * field specifies a list of physical buffers to use in the new 2791 - * translation, otherwise, this parameter is ignored. 2792 - * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this 2793 - * field specifies the size of the phys_buf_array, otherwise, this 2794 - * parameter is ignored. 2795 - * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this 2796 - * field specifies the new memory access rights, otherwise, this 2797 - * parameter is ignored. 2798 - * @iova_start: The offset of the region's starting I/O virtual address. 2799 - */ 2800 - int ib_rereg_phys_mr(struct ib_mr *mr, 2801 - int mr_rereg_mask, 2802 - struct ib_pd *pd, 2803 - struct ib_phys_buf *phys_buf_array, 2804 - int num_phys_buf, 2805 - int mr_access_flags, 2806 - u64 *iova_start); 2807 - 2808 - /** 2809 2763 * ib_query_mr - Retrieves information about a specific memory region. 2810 2764 * @mr: The memory region to retrieve information about. 2811 2765 * @mr_attr: The attributes of the specified memory region.
+1 -1
include/uapi/linux/nfs4.h
··· 15 15 16 16 #include <linux/types.h> 17 17 18 - #define NFS4_BITMAP_SIZE 2 18 + #define NFS4_BITMAP_SIZE 3 19 19 #define NFS4_VERIFIER_SIZE 8 20 20 #define NFS4_STATEID_SEQID_SIZE 4 21 21 #define NFS4_STATEID_OTHER_SIZE 12
+1 -1
net/sunrpc/auth_unix.c
··· 23 23 }; 24 24 #define uc_uid uc_base.cr_uid 25 25 26 - #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) 26 + #define UNX_WRITESLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) 27 27 28 28 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 29 29 # define RPCDBG_FACILITY RPCDBG_AUTH
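UNX_WRITESLACK is counted in 32-bit XDR words, and XDR rounds opaque data up to a 4-byte boundary. XDR_QUADLEN() (defined in linux/sunrpc/xdr.h as (((l) + 3) >> 2)) rounds up where the old open-coded shift truncated; the two only coincide when UNX_MAXNODENAME is a multiple of four, which __NEW_UTS_LEN (64) happens to be. A small standalone check:

    #include <assert.h>

    #define XDR_QUADLEN(l) (((l) + 3) >> 2)  /* as in linux/sunrpc/xdr.h */

    int main(void)
    {
        assert(XDR_QUADLEN(64) == 16);  /* the __NEW_UTS_LEN case */
        assert(XDR_QUADLEN(65) == 17);  /* rounds up ... */
        assert((65 >> 2) == 16);        /* ... where >> 2 truncated */
        return 0;
    }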
+19
net/sunrpc/xprtrdma/fmr_ops.c
··· 39 39 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
40 40 struct rpcrdma_create_data_internal *cdata)
41 41 {
42 + struct ib_device_attr *devattr = &ia->ri_devattr;
43 + struct ib_mr *mr;
44 +
45 + /* Obtain an lkey to use for the regbufs, which are
46 + * protected from remote access.
47 + */
48 + if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
49 + ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
50 + } else {
51 + mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
52 + if (IS_ERR(mr)) {
53 + pr_err("%s: ib_get_dma_mr failed with %lX\n",
54 + __func__, PTR_ERR(mr));
55 + return -ENOMEM;
56 + }
57 + ia->ri_dma_mr = mr;
58 + ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
59 + }
60 +
42 61 return 0;
43 62 }
44 63
+5
net/sunrpc/xprtrdma/frwr_ops.c
··· 189 189 struct ib_device_attr *devattr = &ia->ri_devattr; 190 190 int depth, delta; 191 191 192 + /* Obtain an lkey to use for the regbufs, which are 193 + * protected from remote access. 194 + */ 195 + ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; 196 + 192 197 ia->ri_max_frmr_depth = 193 198 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, 194 199 devattr->max_fast_reg_page_list_len);
+24 -1
net/sunrpc/xprtrdma/physical_ops.c
··· 23 23 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24 24 struct rpcrdma_create_data_internal *cdata)
25 25 {
26 + struct ib_device_attr *devattr = &ia->ri_devattr;
27 + struct ib_mr *mr;
28 +
29 + /* Obtain an rkey to use for RPC data payloads.
30 + */
31 + mr = ib_get_dma_mr(ia->ri_pd,
32 + IB_ACCESS_LOCAL_WRITE |
33 + IB_ACCESS_REMOTE_WRITE |
34 + IB_ACCESS_REMOTE_READ);
35 + if (IS_ERR(mr)) {
36 + pr_err("%s: ib_get_dma_mr failed with %lX\n",
37 + __func__, PTR_ERR(mr));
38 + return -ENOMEM;
39 + }
40 + ia->ri_dma_mr = mr;
41 +
42 + /* Obtain an lkey to use for regbufs.
43 + */
44 + if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
45 + ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
46 + else
47 + ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
48 +
26 49 return 0;
27 50 }
28 51
··· 74 51 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
75 52
76 53 rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
77 - seg->mr_rkey = ia->ri_bind_mem->rkey;
54 + seg->mr_rkey = ia->ri_dma_mr->rkey;
78 55 seg->mr_base = seg->mr_dma;
79 56 seg->mr_nsegs = 1;
80 57 return 1;
+101 -96
net/sunrpc/xprtrdma/rpc_rdma.c
··· 71 71 };
72 72 #endif
73 73
74 + /* The client can send a request inline as long as the RPCRDMA header
75 + * plus the RPC call fit under the transport's inline limit. If the
76 + * combined call message size exceeds that limit, the client must use
77 + * the read chunk list for this operation.
78 + */
79 + static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
80 + {
81 + unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
82 +
83 + return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
84 + }
85 +
86 + /* The client can't know how large the actual reply will be. Thus it
87 + * plans for the largest possible reply for that particular ULP
88 + * operation. If the maximum combined reply message size exceeds that
89 + * limit, the client must provide a write list or a reply chunk for
90 + * this request.
91 + */
92 + static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
93 + {
94 + unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
95 +
96 + return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
97 + }
98 +
99 + static int
100 + rpcrdma_tail_pullup(struct xdr_buf *buf)
101 + {
102 + size_t tlen = buf->tail[0].iov_len;
103 + size_t skip = tlen & 3;
104 +
105 + /* Do not include the tail if it is only an XDR pad */
106 + if (tlen < 4)
107 + return 0;
108 +
109 + /* xdr_write_pages() adds a pad at the beginning of the tail
110 + * if the content in "buf->pages" is unaligned. Force the
111 + * tail's actual content to land at the next XDR position
112 + * after the head instead.
113 + */
114 + if (skip) {
115 + unsigned char *src, *dst;
116 + unsigned int count;
117 +
118 + src = buf->tail[0].iov_base;
119 + dst = buf->head[0].iov_base;
120 + dst += buf->head[0].iov_len;
121 +
122 + src += skip;
123 + tlen -= skip;
124 +
125 + dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
126 + __func__, skip, dst, src, tlen);
127 +
128 + for (count = tlen; count; count--)
129 + *dst++ = *src++;
130 + }
131 +
132 + return tlen;
133 + }
134 +
74 135 /*
75 136 * Chunk assembly from upper layer xdr_buf.
76 137 *
··· 182 121 /* Message overflows the seg array */
183 122 if (len && n == nsegs)
184 123 return -EIO;
124 +
125 + /* When encoding the read list, the tail is always sent inline */
126 + if (type == rpcrdma_readch)
127 + return n;
185 128
186 129 if (xdrbuf->tail[0].iov_len) {
187 130 /* the rpcrdma protocol allows us to omit any trailing
··· 362 297 * pre-registered memory buffer for this request. For small amounts
363 298 * of data, this is efficient. The cutoff value is tunable.
364 299 */
365 - static int
366 - rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
300 + static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
367 301 {
368 302 int i, npages, curlen;
369 303 int copy_len;
··· 374 310 destp = rqst->rq_svec[0].iov_base;
375 311 curlen = rqst->rq_svec[0].iov_len;
376 312 destp += curlen;
377 - /*
378 - * Do optional padding where it makes sense. Alignment of write
379 - * payload can help the server, if our setting is accurate.
380 - */
381 - pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
382 - if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
383 - pad = 0; /* don't pad this request */
384 313
385 - dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
386 - __func__, pad, destp, rqst->rq_slen, curlen);
314 + dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
315 + __func__, destp, rqst->rq_slen, curlen);
387 316
388 317 copy_len = rqst->rq_snd_buf.page_len;
··· 412 355 page_base = 0;
413 356 }
414 357 /* header now contains entire send message */
415 - return pad;
416 358 }
417 359
418 360 /*
··· 436 380 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
437 381 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
438 382 char *base;
439 - size_t rpclen, padlen;
383 + size_t rpclen;
440 384 ssize_t hdrlen;
441 385 enum rpcrdma_chunktype rtype, wtype;
442 386 struct rpcrdma_msg *headerp;
··· 458 402 /*
459 403 * Chunks needed for results?
460 404 *
405 + * o Read ops return data as write chunk(s), header as inline.
461 406 * o If the expected result is under the inline threshold, all ops
462 - * return as inline (but see later).
407 + * return as inline.
463 408 * o Large non-read ops return as a single reply chunk.
464 - * o Large read ops return data as write chunk(s), header as inline.
465 - *
466 - * Note: the NFS code sending down multiple result segments implies
467 - * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
468 409 */
469 -
470 - /*
471 - * This code can handle read chunks, write chunks OR reply
472 - * chunks -- only one type. If the request is too big to fit
473 - * inline, then we will choose read chunks. If the request is
474 - * a READ, then use write chunks to separate the file data
475 - * into pages; otherwise use reply chunks.
476 - */
477 - if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
478 - wtype = rpcrdma_noch;
479 - else if (rqst->rq_rcv_buf.page_len == 0)
480 - wtype = rpcrdma_replych;
481 - else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
410 + if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
482 411 wtype = rpcrdma_writech;
412 + else if (rpcrdma_results_inline(rqst))
413 + wtype = rpcrdma_noch;
483 414 else
484 415 wtype = rpcrdma_replych;
485 416
··· 475 432 *
476 433 * o If the total request is under the inline threshold, all ops
477 434 * are sent as inline.
478 - * o Large non-write ops are sent with the entire message as a
479 - * single read chunk (protocol 0-position special case).
480 435 * o Large write ops transmit data as read chunk(s), header as
481 436 * inline.
437 + * o Large non-write ops are sent with the entire message as a
438 + * single read chunk (protocol 0-position special case).
482 439 *
483 - * Note: the NFS code sending down multiple argument segments
484 - * implies the op is a write.
485 - * TBD check NFSv4 setacl
440 + * This assumes that the upper layer does not present a request
441 + * that both has a data payload, and whose non-data arguments
442 + * by themselves are larger than the inline threshold.
486 443 */
487 - if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
444 + if (rpcrdma_args_inline(rqst)) {
488 445 rtype = rpcrdma_noch;
489 - else if (rqst->rq_snd_buf.page_len == 0)
490 - rtype = rpcrdma_areadch;
491 - else
446 + } else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
492 447 rtype = rpcrdma_readch;
448 + } else {
449 + r_xprt->rx_stats.nomsg_call_count++;
450 + headerp->rm_type = htonl(RDMA_NOMSG);
451 + rtype = rpcrdma_areadch;
452 + rpclen = 0;
453 + }
493 454
494 455 /* The following simplification is not true forever */
495 456 if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
··· 505 458 }
506 459
507 460 hdrlen = RPCRDMA_HDRLEN_MIN;
508 - padlen = 0;
509 461
510 462 /*
511 463 * Pull up any extra send data into the preregistered buffer.
··· 513 467 */
514 468 if (rtype == rpcrdma_noch) {
515 469
516 - padlen = rpcrdma_inline_pullup(rqst,
517 - RPCRDMA_INLINE_PAD_VALUE(rqst));
470 + rpcrdma_inline_pullup(rqst);
518 471
519 - if (padlen) {
520 - headerp->rm_type = rdma_msgp;
521 - headerp->rm_body.rm_padded.rm_align =
522 - cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
523 - headerp->rm_body.rm_padded.rm_thresh =
524 - cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
525 - headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
526 - headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
527 - headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
528 - hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
529 - if (wtype != rpcrdma_noch) {
530 - dprintk("RPC: %s: invalid chunk list\n",
531 - __func__);
532 - return -EIO;
533 - }
534 - } else {
535 - headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
536 - headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
537 - headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
538 - /* new length after pullup */
539 - rpclen = rqst->rq_svec[0].iov_len;
540 - /*
541 - * Currently we try to not actually use read inline.
542 - * Reply chunks have the desirable property that
543 - * they land, packed, directly in the target buffers
544 - * without headers, so they require no fixup. The
545 - * additional RDMA Write op sends the same amount
546 - * of data, streams on-the-wire and adds no overhead
547 - * on receive. Therefore, we request a reply chunk
548 - * for non-writes wherever feasible and efficient.
549 - */ 550 - if (wtype == rpcrdma_noch) 551 - wtype = rpcrdma_replych; 552 - } 553 - } 554 - 472 + headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero; 473 + headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero; 474 + headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero; 475 + /* new length after pullup */ 476 + rpclen = rqst->rq_svec[0].iov_len; 477 + } else if (rtype == rpcrdma_readch) 478 + rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf); 555 479 if (rtype != rpcrdma_noch) { 556 480 hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, 557 481 headerp, rtype); ··· 534 518 if (hdrlen < 0) 535 519 return hdrlen; 536 520 537 - dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" 521 + dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd" 538 522 " headerp 0x%p base 0x%p lkey 0x%x\n", 539 - __func__, transfertypes[wtype], hdrlen, rpclen, padlen, 523 + __func__, transfertypes[wtype], hdrlen, rpclen, 540 524 headerp, base, rdmab_lkey(req->rl_rdmabuf)); 541 525 542 526 /* ··· 550 534 req->rl_send_iov[0].length = hdrlen; 551 535 req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf); 552 536 537 + req->rl_niovs = 1; 538 + if (rtype == rpcrdma_areadch) 539 + return 0; 540 + 553 541 req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf); 554 542 req->rl_send_iov[1].length = rpclen; 555 543 req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf); 556 544 557 545 req->rl_niovs = 2; 558 - 559 - if (padlen) { 560 - struct rpcrdma_ep *ep = &r_xprt->rx_ep; 561 - 562 - req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf); 563 - req->rl_send_iov[2].length = padlen; 564 - req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf); 565 - 566 - req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen; 567 - req->rl_send_iov[3].length = rqst->rq_slen - rpclen; 568 - req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf); 569 - 570 - req->rl_niovs = 4; 571 - } 572 - 573 546 return 0; 574 547 } 575 548
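After this rewrite the send-side marshaling reduces to three cases: a call that fits under the inline threshold is sent as-is; a call with a data payload (XDRBUF_WRITE) moves that payload as read chunks with the header inline; anything else becomes RDMA_NOMSG, where the entire call travels as a position-zero read chunk and no inline RPC bytes are sent (rpclen = 0, a single header SGE). A condensed userspace sketch of the selection (names and the threshold parameter are stand-ins for the per-connection values):

    enum chunktype { NOCH, READCH, AREADCH };

    static enum chunktype
    select_rtype(unsigned int callsize, unsigned int inline_limit,
                 int has_write_payload)
    {
        if (callsize <= inline_limit)
            return NOCH;     /* whole call fits inline */
        if (has_write_payload)
            return READCH;   /* payload as read chunks, header inline */
        return AREADCH;      /* RDMA_NOMSG: position-zero read chunk */
    }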
+35 -42
net/sunrpc/xprtrdma/transport.c
··· 175 175 }
176 176
177 177 static void
178 - xprt_rdma_format_addresses(struct rpc_xprt *xprt)
178 + xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
179 179 {
180 - struct sockaddr *sap = (struct sockaddr *)
181 - &rpcx_to_rdmad(xprt).addr;
182 180 char buf[128];
183 181
184 182 switch (sap->sa_family) {
··· 300 302 struct rpc_xprt *xprt;
301 303 struct rpcrdma_xprt *new_xprt;
302 304 struct rpcrdma_ep *new_ep;
303 - struct sockaddr_in *sin;
305 + struct sockaddr *sap;
304 306 int rc;
305 307
306 308 if (args->addrlen > sizeof(xprt->addr)) {
··· 331 333 * Set up RDMA-specific connect data.
332 334 */
333 335
334 - /* Put server RDMA address in local cdata */
335 - memcpy(&cdata.addr, args->dstaddr, args->addrlen);
336 + sap = (struct sockaddr *)&cdata.addr;
337 + memcpy(sap, args->dstaddr, args->addrlen);
336 338
337 339 /* Ensure xprt->addr holds valid server TCP (not RDMA)
338 340 * address, for any side protocols which peek at it */
339 341 xprt->prot = IPPROTO_TCP;
340 342 xprt->addrlen = args->addrlen;
341 - memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);
343 + memcpy(&xprt->addr, sap, xprt->addrlen);
342 344
343 - sin = (struct sockaddr_in *)&cdata.addr;
344 - if (ntohs(sin->sin_port) != 0)
345 + if (rpc_get_port(sap))
345 346 xprt_set_bound(xprt);
346 347
347 - dprintk("RPC: %s: %pI4:%u\n",
348 - __func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));
349 -
350 - /* Set max requests */
351 348 cdata.max_requests = xprt->max_reqs;
352 349
353 - /* Set some length limits */
354 350 cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
355 351 cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
356 352
··· 367 375
368 376 new_xprt = rpcx_to_rdmax(xprt);
369 377
370 - rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
371 - xprt_rdma_memreg_strategy);
378 + rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
372 379 if (rc)
373 380 goto out1;
374 381
··· 400 409 INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
401 410 xprt_rdma_connect_worker);
402 411
403 - xprt_rdma_format_addresses(xprt);
412 + xprt_rdma_format_addresses(xprt, sap);
404 413 xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
405 414 if (xprt->max_payload == 0)
406 415 goto out4;
··· 411 420 if (!try_module_get(THIS_MODULE))
412 421 goto out4;
413 422
423 + dprintk("RPC: %s: %s:%s\n", __func__,
424 + xprt->address_strings[RPC_DISPLAY_ADDR],
425 + xprt->address_strings[RPC_DISPLAY_PORT]);
414 426 return xprt;
415 427
416 428 out4:
··· 647 653 if (xprt_connected(xprt))
648 654 idle_time = (long)(jiffies - xprt->last_used) / HZ;
649 655
650 - seq_printf(seq,
651 - "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
652 - "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",
653 -
654 - 0, /* need a local port? */
655 - xprt->stat.bind_count,
656 - xprt->stat.connect_count,
657 - xprt->stat.connect_time,
658 - idle_time,
659 - xprt->stat.sends,
660 - xprt->stat.recvs,
661 - xprt->stat.bad_xids,
662 - xprt->stat.req_u,
663 - xprt->stat.bklog_u,
664 -
665 - r_xprt->rx_stats.read_chunk_count,
666 - r_xprt->rx_stats.write_chunk_count,
667 - r_xprt->rx_stats.reply_chunk_count,
668 - r_xprt->rx_stats.total_rdma_request,
669 - r_xprt->rx_stats.total_rdma_reply,
670 - r_xprt->rx_stats.pullup_copy_count,
671 - r_xprt->rx_stats.fixup_copy_count,
672 - r_xprt->rx_stats.hardway_register_count,
673 - r_xprt->rx_stats.failed_marshal_count,
674 - r_xprt->rx_stats.bad_reply_count);
656 + seq_puts(seq, "\txprt:\trdma ");
657 + seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
658 + 0, /* need a local port? */
659 + xprt->stat.bind_count,
660 + xprt->stat.connect_count,
661 + xprt->stat.connect_time,
662 + idle_time,
663 + xprt->stat.sends,
664 + xprt->stat.recvs,
665 + xprt->stat.bad_xids,
666 + xprt->stat.req_u,
667 + xprt->stat.bklog_u);
668 + seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
669 + r_xprt->rx_stats.read_chunk_count,
670 + r_xprt->rx_stats.write_chunk_count,
671 + r_xprt->rx_stats.reply_chunk_count,
672 + r_xprt->rx_stats.total_rdma_request,
673 + r_xprt->rx_stats.total_rdma_reply,
674 + r_xprt->rx_stats.pullup_copy_count,
675 + r_xprt->rx_stats.fixup_copy_count,
676 + r_xprt->rx_stats.hardway_register_count,
677 + r_xprt->rx_stats.failed_marshal_count,
678 + r_xprt->rx_stats.bad_reply_count,
679 + r_xprt->rx_stats.nomsg_call_count);
675 680 }
676 681
677 682 static int
+79 -155
net/sunrpc/xprtrdma/verbs.c
··· 52 52 #include <linux/prefetch.h>
53 53 #include <linux/sunrpc/addr.h>
54 54 #include <asm/bitops.h>
55 + #include <linux/module.h> /* try_module_get()/module_put() */
55 56
56 57 #include "xprt_rdma.h"
57 58
··· 415 414 return 0;
416 415 }
417 416
417 + static void rpcrdma_destroy_id(struct rdma_cm_id *id)
418 + {
419 + if (id) {
420 + module_put(id->device->owner);
421 + rdma_destroy_id(id);
422 + }
423 + }
424 +
418 425 static struct rdma_cm_id *
419 426 rpcrdma_create_id(struct rpcrdma_xprt *xprt,
420 427 struct rpcrdma_ia *ia, struct sockaddr *addr)
··· 449 440 }
450 441 wait_for_completion_interruptible_timeout(&ia->ri_done,
451 442 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
443 +
444 + /* FIXME:
445 + * Until xprtrdma supports DEVICE_REMOVAL, the provider must
446 + * be pinned while there are active NFS/RDMA mounts to prevent
447 + * hangs and crashes at umount time.
448 + */
449 + if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
450 + dprintk("RPC: %s: Failed to get device module\n",
451 + __func__);
452 + ia->ri_async_rc = -ENODEV;
453 + }
452 454 rc = ia->ri_async_rc;
453 455 if (rc)
454 456 goto out;
··· 469 449 if (rc) {
470 450 dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
471 451 __func__, rc);
472 - goto out;
452 + goto put;
473 453 }
474 454 wait_for_completion_interruptible_timeout(&ia->ri_done,
475 455 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
476 456 rc = ia->ri_async_rc;
477 457 if (rc)
478 - goto out;
458 + goto put;
479 459
480 460 return id;
481 -
461 + put:
462 + module_put(id->device->owner);
482 463 out:
483 464 rdma_destroy_id(id);
484 465 return ERR_PTR(rc);
··· 514 493 int
515 494 rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
516 495 {
517 - int rc, mem_priv;
518 496 struct rpcrdma_ia *ia = &xprt->rx_ia;
519 497 struct ib_device_attr *devattr = &ia->ri_devattr;
498 + int rc;
499 +
500 + ia->ri_dma_mr = NULL;
520 501
521 502 ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
522 503 if (IS_ERR(ia->ri_id)) {
··· 542 519 goto out3;
543 520 }
544 521
545 - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
546 - ia->ri_have_dma_lkey = 1;
547 - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
548 - }
549 -
550 522 if (memreg == RPCRDMA_FRMR) {
551 523 /* Requires both frmr reg and local dma lkey */
552 524 if (((devattr->device_cap_flags &
··· 557 539 if (!ia->ri_device->alloc_fmr) {
558 540 dprintk("RPC: %s: MTHCAFMR registration "
559 541 "not supported by HCA\n", __func__);
560 - memreg = RPCRDMA_ALLPHYSICAL;
542 + goto out3;
561 543 }
562 544 }
563 545
564 - /*
565 - * Optionally obtain an underlying physical identity mapping in
566 - * order to do a memory window-based bind. This base registration
567 - * is protected from remote access - that is enabled only by binding
568 - * for the specific bytes targeted during each RPC operation, and
569 - * revoked after the corresponding completion similar to a storage
570 - * adapter.
571 - */
572 546 switch (memreg) {
573 547 case RPCRDMA_FRMR:
574 548 ia->ri_ops = &rpcrdma_frwr_memreg_ops;
575 549 break;
576 550 case RPCRDMA_ALLPHYSICAL:
577 551 ia->ri_ops = &rpcrdma_physical_memreg_ops;
578 - mem_priv = IB_ACCESS_LOCAL_WRITE |
579 - IB_ACCESS_REMOTE_WRITE |
580 - IB_ACCESS_REMOTE_READ;
581 - goto register_setup;
552 + break;
582 553 case RPCRDMA_MTHCAFMR:
583 554 ia->ri_ops = &rpcrdma_fmr_memreg_ops;
584 - if (ia->ri_have_dma_lkey)
585 - break;
586 - mem_priv = IB_ACCESS_LOCAL_WRITE;
587 - register_setup:
588 - ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
589 - if (IS_ERR(ia->ri_bind_mem)) {
590 - printk(KERN_ALERT "%s: ib_get_dma_mr for "
591 - "phys register failed with %lX\n",
592 - __func__, PTR_ERR(ia->ri_bind_mem));
593 - rc = -ENOMEM;
594 - goto out3;
595 - }
596 555 break;
597 556 default:
598 557 printk(KERN_ERR "RPC: Unsupported memory "
··· 587 592 ib_dealloc_pd(ia->ri_pd);
588 593 ia->ri_pd = NULL;
589 594 out2:
590 - rdma_destroy_id(ia->ri_id);
595 + rpcrdma_destroy_id(ia->ri_id);
591 596 ia->ri_id = NULL;
592 597 out1:
593 598 return rc;
··· 601 606 void
602 607 rpcrdma_ia_close(struct rpcrdma_ia *ia)
603 608 {
604 - int rc;
605 -
606 609 dprintk("RPC: %s: entering\n", __func__);
607 - if (ia->ri_bind_mem != NULL) {
608 - rc = ib_dereg_mr(ia->ri_bind_mem);
609 - dprintk("RPC: %s: ib_dereg_mr returned %i\n",
610 - __func__, rc);
611 - }
612 -
613 610 if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
614 611 if (ia->ri_id->qp)
615 612 rdma_destroy_qp(ia->ri_id);
616 - rdma_destroy_id(ia->ri_id);
613 + rpcrdma_destroy_id(ia->ri_id);
617 614 ia->ri_id = NULL;
618 615 }
619 616
··· 626 639 struct ib_cq_init_attr cq_attr = {};
627 640 int rc, err;
628 641
642 + if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
643 + dprintk("RPC: %s: insufficient sge's available\n",
644 + __func__);
645 + return -ENOMEM;
646 + }
647 +
629 648 /* check provider's send/recv wr limits */
630 649 if (cdata->max_requests > devattr->max_qp_wr)
631 650 cdata->max_requests = devattr->max_qp_wr;
··· 644 651 if (rc)
645 652 return rc;
646 653 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
647 - ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
654 + ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
648 655 ep->rep_attr.cap.max_recv_sge = 1;
649 656 ep->rep_attr.cap.max_inline_data = 0;
650 657 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
651 658 ep->rep_attr.qp_type = IB_QPT_RC;
652 659 ep->rep_attr.port_num = ~0;
653 -
654 - if (cdata->padding) {
655 - ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
656 - GFP_KERNEL);
657 - if (IS_ERR(ep->rep_padbuf))
658 - return PTR_ERR(ep->rep_padbuf);
659 - } else
660 - ep->rep_padbuf = NULL;
661 660
662 661 dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
663 662 "iovs: send %d recv %d\n",
··· 733 748 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
734 749 __func__, err);
735 750 out1:
736 - rpcrdma_free_regbuf(ia, ep->rep_padbuf);
751 + if (ia->ri_dma_mr)
752 + ib_dereg_mr(ia->ri_dma_mr);
737 753 return rc;
738 754 }
739 755
··· 761 775 ia->ri_id->qp = NULL;
762 776 }
763 777
764 - rpcrdma_free_regbuf(ia, ep->rep_padbuf);
765 -
766 778 rpcrdma_clean_cq(ep->rep_attr.recv_cq);
767 779 rc = ib_destroy_cq(ep->rep_attr.recv_cq);
768 780 if (rc)
··· 772 788 if (rc)
773 789 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
774 790 __func__, rc);
791 +
792 + if (ia->ri_dma_mr) {
793 + rc = ib_dereg_mr(ia->ri_dma_mr);
794 + dprintk("RPC: %s: ib_dereg_mr returned %i\n",
795 + __func__, rc);
796 + }
775 797 }
776 798
777 799 /*
··· 815 825 if (ia->ri_device != id->device) {
816 826 printk("RPC: %s: can't reconnect on "
817 827 "different device!\n", __func__);
818 - rdma_destroy_id(id);
828 + rpcrdma_destroy_id(id);
819 829 rc = -ENETUNREACH;
820 830 goto out;
821 831 }
··· 824 834 if (rc) {
825 835 dprintk("RPC: %s: rdma_create_qp failed %i\n",
826 836 __func__, rc);
827 - rdma_destroy_id(id);
837 + rpcrdma_destroy_id(id);
828 838 rc = -ENETUNREACH;
829 839 goto out;
830 840 }
··· 835 845 write_unlock(&ia->ri_qplock);
836 846
837 847 rdma_destroy_qp(old);
838 - rdma_destroy_id(old);
848 + rpcrdma_destroy_id(old);
839 849 } else {
840 850 dprintk("RPC: %s: connecting...\n", __func__);
841 851 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
··· 1219 1229 (unsigned long long)seg->mr_dma, seg->mr_dmalen);
1220 1230 }
1221 1231
1222 - static int
1223 - rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
1224 - struct ib_mr **mrp, struct ib_sge *iov)
1225 - {
1226 - struct ib_phys_buf ipb;
1227 - struct ib_mr *mr;
1228 - int rc;
1229 -
1230 - /*
1231 - * All memory passed here was kmalloc'ed, therefore phys-contiguous.
1232 - */
1233 - iov->addr = ib_dma_map_single(ia->ri_device,
1234 - va, len, DMA_BIDIRECTIONAL);
1235 - if (ib_dma_mapping_error(ia->ri_device, iov->addr))
1236 - return -ENOMEM;
1237 -
1238 - iov->length = len;
1239 -
1240 - if (ia->ri_have_dma_lkey) {
1241 - *mrp = NULL;
1242 - iov->lkey = ia->ri_dma_lkey;
1243 - return 0;
1244 - } else if (ia->ri_bind_mem != NULL) {
1245 - *mrp = NULL;
1246 - iov->lkey = ia->ri_bind_mem->lkey;
1247 - return 0;
1248 - }
1249 -
1250 - ipb.addr = iov->addr;
1251 - ipb.size = iov->length;
1252 - mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
1253 - IB_ACCESS_LOCAL_WRITE, &iov->addr);
1254 -
1255 - dprintk("RPC: %s: phys convert: 0x%llx "
1256 - "registered 0x%llx length %d\n",
1257 - __func__, (unsigned long long)ipb.addr,
1258 - (unsigned long long)iov->addr, len);
1259 -
1260 - if (IS_ERR(mr)) {
1261 - *mrp = NULL;
1262 - rc = PTR_ERR(mr);
1263 - dprintk("RPC: %s: failed with %i\n", __func__, rc);
1264 - } else {
1265 - *mrp = mr;
1266 - iov->lkey = mr->lkey;
1267 - rc = 0;
1268 - }
1269 -
1270 - return rc;
1271 - }
1272 -
1273 - static int
1274 - rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
1275 - struct ib_mr *mr, struct ib_sge *iov)
1276 - {
1277 - int rc;
1278 -
1279 - ib_dma_unmap_single(ia->ri_device,
1280 - iov->addr, iov->length, DMA_BIDIRECTIONAL);
1281 -
1282 - if (NULL == mr)
1283 - return 0;
1284 -
1285 - rc = ib_dereg_mr(mr);
1286 - if (rc)
1287 - dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
1288 - return rc;
1289 - }
1290 -
1291 1232 /**
1292 1233 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
1293 1234 * @ia: controlling rpcrdma_ia
··· 1238 1317 rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
1239 1318 {
1240 1319 struct rpcrdma_regbuf *rb;
1241 - int rc;
1320 + struct ib_sge *iov;
1242 1321
1243 - rc = -ENOMEM;
1244 1322 rb = kmalloc(sizeof(*rb) + size, flags);
1245 1323 if (rb == NULL)
1246 1324 goto out;
1247 1325
1248 - rb->rg_size = size;
1249 - rb->rg_owner = NULL;
1250 - rc = rpcrdma_register_internal(ia, rb->rg_base, size,
1251 - &rb->rg_mr, &rb->rg_iov);
1252 - if (rc)
1326 + iov = &rb->rg_iov;
1327 + iov->addr = ib_dma_map_single(ia->ri_device,
1328 + (void *)rb->rg_base, size,
1329 + DMA_BIDIRECTIONAL);
1330 + if (ib_dma_mapping_error(ia->ri_device, iov->addr))
1253 1331 goto out_free;
1254 1332
1333 + iov->length = size;
1334 + iov->lkey = ia->ri_dma_lkey;
1335 + rb->rg_size = size;
1336 + rb->rg_owner = NULL;
1255 1337 return rb;
1256 1338
1257 1339 out_free:
1258 1340 kfree(rb);
1259 1341 out:
1260 - return ERR_PTR(rc);
1342 + return ERR_PTR(-ENOMEM);
1261 1343 }
1262 1344
1263 1345 /**
··· 1271 1347 void
1272 1348 rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
1273 1349 {
1274 - if (rb) {
1275 - rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
1276 - kfree(rb);
1277 - }
1350 + struct ib_sge *iov;
1351 +
1352 + if (!rb)
1353 + return;
1354 +
1355 + iov = &rb->rg_iov;
1356 + ib_dma_unmap_single(ia->ri_device,
1357 + iov->addr, iov->length, DMA_BIDIRECTIONAL);
1358 + kfree(rb);
1278 1359 }
1279 1360
1280 1361 /*
··· 1292 1363 struct rpcrdma_ep *ep,
1293 1364 struct rpcrdma_req *req)
1294 1365 {
1366 + struct ib_device *device = ia->ri_device;
1295 1367 struct ib_send_wr send_wr, *send_wr_fail;
1296 1368 struct rpcrdma_rep *rep = req->rl_reply;
1369 + struct ib_sge *iov = req->rl_send_iov;
1297 - int rc;
1370 + int i, rc;
1298 1371
1299 1372 if (rep) {
1300 1373 rc = rpcrdma_ep_post_recv(ia, ep, rep);
··· 1307 1376
1308 1377 send_wr.next = NULL;
1309 1378 send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
1310 - send_wr.sg_list = req->rl_send_iov;
1379 + send_wr.sg_list = iov;
1311 1380 send_wr.num_sge = req->rl_niovs;
1312 1381 send_wr.opcode = IB_WR_SEND;
1313 - if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
1314 - ib_dma_sync_single_for_device(ia->ri_device,
1315 - req->rl_send_iov[3].addr,
1316 - req->rl_send_iov[3].length,
1317 - DMA_TO_DEVICE);
1318 - ib_dma_sync_single_for_device(ia->ri_device,
1319 - req->rl_send_iov[1].addr,
1320 - req->rl_send_iov[1].length,
1321 - DMA_TO_DEVICE);
1322 - ib_dma_sync_single_for_device(ia->ri_device,
1323 - req->rl_send_iov[0].addr,
1324 - req->rl_send_iov[0].length,
1325 - DMA_TO_DEVICE);
1382 +
1383 + for (i = 0; i < send_wr.num_sge; i++)
1384 + ib_dma_sync_single_for_device(device, iov[i].addr,
1385 + iov[i].length, DMA_TO_DEVICE);
1386 + dprintk("RPC: %s: posting %d s/g entries\n",
1387 + __func__, send_wr.num_sge);
1326 1388
1327 1389 if (DECR_CQCOUNT(ep) > 0)
1328 1390 send_wr.send_flags = 0;
+13 -14
net/sunrpc/xprtrdma/xprt_rdma.h
··· 64 64 struct ib_device *ri_device; 65 65 struct rdma_cm_id *ri_id; 66 66 struct ib_pd *ri_pd; 67 - struct ib_mr *ri_bind_mem; 67 + struct ib_mr *ri_dma_mr; 68 68 u32 ri_dma_lkey; 69 - int ri_have_dma_lkey; 70 69 struct completion ri_done; 71 70 int ri_async_rc; 72 71 unsigned int ri_max_frmr_depth; ··· 87 88 int rep_connected; 88 89 struct ib_qp_init_attr rep_attr; 89 90 wait_queue_head_t rep_connect_wait; 90 - struct rpcrdma_regbuf *rep_padbuf; 91 91 struct rdma_conn_param rep_remote_cma; 92 92 struct sockaddr_storage rep_remote_addr; 93 93 struct delayed_work rep_connect_worker; ··· 116 118 struct rpcrdma_regbuf { 117 119 size_t rg_size; 118 120 struct rpcrdma_req *rg_owner; 119 - struct ib_mr *rg_mr; 120 121 struct ib_sge rg_iov; 121 122 __be32 rg_base[0] __attribute__ ((aligned(256))); 122 123 }; ··· 161 164 * struct rpcrdma_buffer. N is the max number of outstanding requests. 162 165 */ 163 166 164 - /* temporary static scatter/gather max */ 165 - #define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */ 167 + #define RPCRDMA_MAX_DATA_SEGS ((1 * 1024 * 1024) / PAGE_SIZE) 166 168 #define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */ 167 169 168 170 struct rpcrdma_buffer; ··· 253 257 char *mr_offset; /* kva if no page, else offset */ 254 258 }; 255 259 260 + #define RPCRDMA_MAX_IOVS (2) 261 + 256 262 struct rpcrdma_req { 257 - unsigned int rl_niovs; /* 0, 2 or 4 */ 258 - unsigned int rl_nchunks; /* non-zero if chunks */ 259 - unsigned int rl_connect_cookie; /* retry detection */ 260 - struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ 263 + unsigned int rl_niovs; 264 + unsigned int rl_nchunks; 265 + unsigned int rl_connect_cookie; 266 + struct rpcrdma_buffer *rl_buffer; 261 267 struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ 262 - struct ib_sge rl_send_iov[4]; /* for active requests */ 263 - struct rpcrdma_regbuf *rl_rdmabuf; 264 - struct rpcrdma_regbuf *rl_sendbuf; 265 - struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; 268 + struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS]; 269 + struct rpcrdma_regbuf *rl_rdmabuf; 270 + struct rpcrdma_regbuf *rl_sendbuf; 271 + struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; 266 272 }; 267 273 268 274 static inline struct rpcrdma_req * ··· 339 341 unsigned long hardway_register_count; 340 342 unsigned long failed_marshal_count; 341 343 unsigned long bad_reply_count; 344 + unsigned long nomsg_call_count; 342 345 }; 343 346 344 347 /*
+11 -7
net/sunrpc/xprtsock.c
··· 822 822 if (atomic_read(&transport->xprt.swapper)) 823 823 sk_clear_memalloc(sk); 824 824 825 + kernel_sock_shutdown(sock, SHUT_RDWR); 826 + 825 827 write_lock_bh(&sk->sk_callback_lock); 826 828 transport->inet = NULL; 827 829 transport->sock = NULL; ··· 831 829 sk->sk_user_data = NULL; 832 830 833 831 xs_restore_old_callbacks(transport, sk); 832 + xprt_clear_connected(xprt); 834 833 write_unlock_bh(&sk->sk_callback_lock); 835 834 xs_sock_reset_connection_flags(xprt); 836 835 ··· 1869 1866 sk->sk_data_ready = xs_local_data_ready; 1870 1867 sk->sk_write_space = xs_udp_write_space; 1871 1868 sk->sk_error_report = xs_error_report; 1872 - sk->sk_allocation = GFP_ATOMIC; 1869 + sk->sk_allocation = GFP_NOIO; 1873 1870 1874 1871 xprt_clear_connected(xprt); 1875 1872 ··· 2054 2051 sk->sk_user_data = xprt; 2055 2052 sk->sk_data_ready = xs_udp_data_ready; 2056 2053 sk->sk_write_space = xs_udp_write_space; 2057 - sk->sk_allocation = GFP_ATOMIC; 2054 + sk->sk_allocation = GFP_NOIO; 2058 2055 2059 2056 xprt_set_connected(xprt); 2060 2057 ··· 2156 2153 sk->sk_state_change = xs_tcp_state_change; 2157 2154 sk->sk_write_space = xs_tcp_write_space; 2158 2155 sk->sk_error_report = xs_error_report; 2159 - sk->sk_allocation = GFP_ATOMIC; 2156 + sk->sk_allocation = GFP_NOIO; 2160 2157 2161 2158 /* socket options */ 2162 2159 sock_reset_flag(sk, SOCK_LINGER); ··· 2282 2279 2283 2280 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2284 2281 2285 - /* Start by resetting any existing state */ 2286 - xs_reset_transport(transport); 2287 - 2288 - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { 2282 + if (transport->sock != NULL) { 2289 2283 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2290 2284 "seconds\n", 2291 2285 xprt, xprt->reestablish_timeout / HZ); 2286 + 2287 + /* Start by resetting any existing state */ 2288 + xs_reset_transport(transport); 2289 + 2292 2290 queue_delayed_work(rpciod_workqueue, 2293 2291 &transport->connect_worker, 2294 2292 xprt->reestablish_timeout);