Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfsd-5.6' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
"Highlights:

- Server-to-server copy code from Olga.

To use it, client and both servers must have support, the target
server must be able to access the source server over NFSv4.2, and
the target server must have the inter_copy_offload_enable module
parameter set.

- Improvements and bugfixes for the new filehandle cache, especially
in the container case, from Trond.

- Also from Trond, better reporting of write errors.

- Y2038 work from Arnd"

* tag 'nfsd-5.6' of git://linux-nfs.org/~bfields/linux: (55 commits)
sunrpc: expiry_time should be seconds not timeval
nfsd: make nfsd_filecache_wq variable static
nfsd4: fix double free in nfsd4_do_async_copy()
nfsd: convert file cache to use over/underflow safe refcount
nfsd: Define the file access mode enum for tracing
nfsd: Fix a perf warning
nfsd: Ensure sampling of the write verifier is atomic with the write
nfsd: Ensure sampling of the commit verifier is atomic with the commit
sunrpc: clean up cache entry add/remove from hashtable
sunrpc: Fix potential leaks in sunrpc_cache_unhash()
nfsd: Ensure exclusion between CLONE and WRITE errors
nfsd: Pass the nfsd_file as arguments to nfsd4_clone_file_range()
nfsd: Update the boot verifier on stable writes too.
nfsd: Fix stable writes
nfsd: Allow nfsd_vfs_write() to take the nfsd_file as an argument
nfsd: Fix a soft lockup race in nfsd_file_mark_find_or_create()
nfsd: Reduce the number of calls to nfsd_file_gc()
nfsd: Schedule the laundrette regularly irrespective of file errors
nfsd: Remove unused constant NFSD_FILE_LRU_RESCAN
nfsd: Containerise filecache laundrette
...

+1323 -328
+10
fs/nfsd/Kconfig
··· 134 134 135 135 If unsure, say N. 136 136 137 + config NFSD_V4_2_INTER_SSC 138 + bool "NFSv4.2 inter server to server COPY" 139 + depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 140 + help 141 + This option enables support for NFSv4.2 inter server to 142 + server copy where the destination server calls the NFSv4.2 143 + client to read the data to copy from the source server. 144 + 145 + If unsure, say N. 146 + 137 147 config NFSD_V4_SECURITY_LABEL 138 148 bool "Provide Security Label support for NFSv4 server" 139 149 depends on NFSD_V4 && SECURITY
+237 -76
fs/nfsd/filecache.c
··· 27 27 #define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS) 28 28 #define NFSD_LAUNDRETTE_DELAY (2 * HZ) 29 29 30 - #define NFSD_FILE_LRU_RESCAN (0) 31 30 #define NFSD_FILE_SHUTDOWN (1) 32 31 #define NFSD_FILE_LRU_THRESHOLD (4096UL) 33 32 #define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2) ··· 43 44 44 45 static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits); 45 46 47 + struct nfsd_fcache_disposal { 48 + struct list_head list; 49 + struct work_struct work; 50 + struct net *net; 51 + spinlock_t lock; 52 + struct list_head freeme; 53 + struct rcu_head rcu; 54 + }; 55 + 56 + static struct workqueue_struct *nfsd_filecache_wq __read_mostly; 57 + 46 58 static struct kmem_cache *nfsd_file_slab; 47 59 static struct kmem_cache *nfsd_file_mark_slab; 48 60 static struct nfsd_fcache_bucket *nfsd_file_hashtbl; ··· 62 52 static struct fsnotify_group *nfsd_file_fsnotify_group; 63 53 static atomic_long_t nfsd_filecache_count; 64 54 static struct delayed_work nfsd_filecache_laundrette; 55 + static DEFINE_SPINLOCK(laundrette_lock); 56 + static LIST_HEAD(laundrettes); 65 57 66 - enum nfsd_file_laundrette_ctl { 67 - NFSD_FILE_LAUNDRETTE_NOFLUSH = 0, 68 - NFSD_FILE_LAUNDRETTE_MAY_FLUSH 69 - }; 58 + static void nfsd_file_gc(void); 70 59 71 60 static void 72 - nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl) 61 + nfsd_file_schedule_laundrette(void) 73 62 { 74 63 long count = atomic_long_read(&nfsd_filecache_count); 75 64 76 65 if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags)) 77 66 return; 78 67 79 - /* Be more aggressive about scanning if over the threshold */ 80 - if (count > NFSD_FILE_LRU_THRESHOLD) 81 - mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0); 82 - else 83 - schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY); 84 - 85 - if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH) 86 - return; 87 - 88 - /* ...and don't delay flushing if we're out of control */ 89 - if (count >= NFSD_FILE_LRU_LIMIT) 90 - 
flush_delayed_work(&nfsd_filecache_laundrette); 68 + queue_delayed_work(system_wq, &nfsd_filecache_laundrette, 69 + NFSD_LAUNDRETTE_DELAY); 91 70 } 92 71 93 72 static void ··· 100 101 static struct nfsd_file_mark * 101 102 nfsd_file_mark_get(struct nfsd_file_mark *nfm) 102 103 { 103 - if (!atomic_inc_not_zero(&nfm->nfm_ref)) 104 + if (!refcount_inc_not_zero(&nfm->nfm_ref)) 104 105 return NULL; 105 106 return nfm; 106 107 } ··· 108 109 static void 109 110 nfsd_file_mark_put(struct nfsd_file_mark *nfm) 110 111 { 111 - if (atomic_dec_and_test(&nfm->nfm_ref)) { 112 - 112 + if (refcount_dec_and_test(&nfm->nfm_ref)) { 113 113 fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group); 114 114 fsnotify_put_mark(&nfm->nfm_mark); 115 115 } ··· 131 133 struct nfsd_file_mark, 132 134 nfm_mark)); 133 135 mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex); 134 - fsnotify_put_mark(mark); 135 - if (likely(nfm)) 136 + if (nfm) { 137 + fsnotify_put_mark(mark); 136 138 break; 139 + } 140 + /* Avoid soft lockup race with nfsd_file_mark_put() */ 141 + fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group); 142 + fsnotify_put_mark(mark); 137 143 } else 138 144 mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex); 139 145 ··· 147 145 return NULL; 148 146 fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group); 149 147 new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF; 150 - atomic_set(&new->nfm_ref, 1); 148 + refcount_set(&new->nfm_ref, 1); 151 149 152 150 err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0); 153 151 ··· 185 183 nf->nf_flags = 0; 186 184 nf->nf_inode = inode; 187 185 nf->nf_hashval = hashval; 188 - atomic_set(&nf->nf_ref, 1); 186 + refcount_set(&nf->nf_ref, 1); 189 187 nf->nf_may = may & NFSD_FILE_MAY_MASK; 190 188 if (may & NFSD_MAY_NOT_BREAK_LEASE) { 191 189 if (may & NFSD_MAY_WRITE) ··· 194 192 __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags); 195 193 } 196 194 nf->nf_mark = NULL; 195 + init_rwsem(&nf->nf_rwsem); 197 196 trace_nfsd_file_alloc(nf); 198 197 } 199 
198 return nf; ··· 241 238 return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)); 242 239 } 243 240 244 - static bool 245 - nfsd_file_in_use(struct nfsd_file *nf) 246 - { 247 - return nfsd_file_check_writeback(nf) || 248 - nfsd_file_check_write_error(nf); 249 - } 250 - 251 241 static void 252 242 nfsd_file_do_unhash(struct nfsd_file *nf) 253 243 { ··· 252 256 nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id)); 253 257 --nfsd_file_hashtbl[nf->nf_hashval].nfb_count; 254 258 hlist_del_rcu(&nf->nf_node); 255 - if (!list_empty(&nf->nf_lru)) 256 - list_lru_del(&nfsd_file_lru, &nf->nf_lru); 257 259 atomic_long_dec(&nfsd_filecache_count); 258 260 } 259 261 ··· 260 266 { 261 267 if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) { 262 268 nfsd_file_do_unhash(nf); 269 + if (!list_empty(&nf->nf_lru)) 270 + list_lru_del(&nfsd_file_lru, &nf->nf_lru); 263 271 return true; 264 272 } 265 273 return false; ··· 279 283 if (!nfsd_file_unhash(nf)) 280 284 return false; 281 285 /* keep final reference for nfsd_file_lru_dispose */ 282 - if (atomic_add_unless(&nf->nf_ref, -1, 1)) 286 + if (refcount_dec_not_one(&nf->nf_ref)) 283 287 return true; 284 288 285 289 list_add(&nf->nf_lru, dispose); 286 290 return true; 287 291 } 288 292 289 - static int 293 + static void 290 294 nfsd_file_put_noref(struct nfsd_file *nf) 291 295 { 292 - int count; 293 296 trace_nfsd_file_put(nf); 294 297 295 - count = atomic_dec_return(&nf->nf_ref); 296 - if (!count) { 298 + if (refcount_dec_and_test(&nf->nf_ref)) { 297 299 WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags)); 298 300 nfsd_file_free(nf); 299 301 } 300 - return count; 301 302 } 302 303 303 304 void 304 305 nfsd_file_put(struct nfsd_file *nf) 305 306 { 306 - bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0; 307 - bool unused = !nfsd_file_in_use(nf); 307 + bool is_hashed; 308 308 309 309 set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags); 310 - if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused) 311 - 
nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH); 310 + if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) { 311 + nfsd_file_put_noref(nf); 312 + return; 313 + } 314 + 315 + filemap_flush(nf->nf_file->f_mapping); 316 + is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0; 317 + nfsd_file_put_noref(nf); 318 + if (is_hashed) 319 + nfsd_file_schedule_laundrette(); 320 + if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT) 321 + nfsd_file_gc(); 312 322 } 313 323 314 324 struct nfsd_file * 315 325 nfsd_file_get(struct nfsd_file *nf) 316 326 { 317 - if (likely(atomic_inc_not_zero(&nf->nf_ref))) 327 + if (likely(refcount_inc_not_zero(&nf->nf_ref))) 318 328 return nf; 319 329 return NULL; 320 330 } ··· 346 344 while(!list_empty(dispose)) { 347 345 nf = list_first_entry(dispose, struct nfsd_file, nf_lru); 348 346 list_del(&nf->nf_lru); 349 - if (!atomic_dec_and_test(&nf->nf_ref)) 347 + if (!refcount_dec_and_test(&nf->nf_ref)) 350 348 continue; 351 349 if (nfsd_file_free(nf)) 352 350 flush = true; 353 351 } 354 352 if (flush) 355 353 flush_delayed_fput(); 354 + } 355 + 356 + static void 357 + nfsd_file_list_remove_disposal(struct list_head *dst, 358 + struct nfsd_fcache_disposal *l) 359 + { 360 + spin_lock(&l->lock); 361 + list_splice_init(&l->freeme, dst); 362 + spin_unlock(&l->lock); 363 + } 364 + 365 + static void 366 + nfsd_file_list_add_disposal(struct list_head *files, struct net *net) 367 + { 368 + struct nfsd_fcache_disposal *l; 369 + 370 + rcu_read_lock(); 371 + list_for_each_entry_rcu(l, &laundrettes, list) { 372 + if (l->net == net) { 373 + spin_lock(&l->lock); 374 + list_splice_tail_init(files, &l->freeme); 375 + spin_unlock(&l->lock); 376 + queue_work(nfsd_filecache_wq, &l->work); 377 + break; 378 + } 379 + } 380 + rcu_read_unlock(); 381 + } 382 + 383 + static void 384 + nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src, 385 + struct net *net) 386 + { 387 + struct nfsd_file *nf, *tmp; 388 + 389 + 
list_for_each_entry_safe(nf, tmp, src, nf_lru) { 390 + if (nf->nf_net == net) 391 + list_move_tail(&nf->nf_lru, dst); 392 + } 393 + } 394 + 395 + static void 396 + nfsd_file_dispose_list_delayed(struct list_head *dispose) 397 + { 398 + LIST_HEAD(list); 399 + struct nfsd_file *nf; 400 + 401 + while(!list_empty(dispose)) { 402 + nf = list_first_entry(dispose, struct nfsd_file, nf_lru); 403 + nfsd_file_list_add_pernet(&list, dispose, nf->nf_net); 404 + nfsd_file_list_add_disposal(&list, nf->nf_net); 405 + } 356 406 } 357 407 358 408 /* ··· 429 375 * counter. Here we check the counter and then test and clear the flag. 430 376 * That order is deliberate to ensure that we can do this locklessly. 431 377 */ 432 - if (atomic_read(&nf->nf_ref) > 1) 378 + if (refcount_read(&nf->nf_ref) > 1) 433 379 goto out_skip; 434 380 435 381 /* ··· 440 386 goto out_skip; 441 387 442 388 if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) 443 - goto out_rescan; 389 + goto out_skip; 444 390 445 391 if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) 446 392 goto out_skip; 447 393 448 394 list_lru_isolate_move(lru, &nf->nf_lru, head); 449 395 return LRU_REMOVED; 450 - out_rescan: 451 - set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags); 452 396 out_skip: 453 397 return LRU_SKIP; 454 398 } 455 399 456 - static void 457 - nfsd_file_lru_dispose(struct list_head *head) 400 + static unsigned long 401 + nfsd_file_lru_walk_list(struct shrink_control *sc) 458 402 { 459 - while(!list_empty(head)) { 460 - struct nfsd_file *nf = list_first_entry(head, 461 - struct nfsd_file, nf_lru); 462 - list_del_init(&nf->nf_lru); 403 + LIST_HEAD(head); 404 + struct nfsd_file *nf; 405 + unsigned long ret; 406 + 407 + if (sc) 408 + ret = list_lru_shrink_walk(&nfsd_file_lru, sc, 409 + nfsd_file_lru_cb, &head); 410 + else 411 + ret = list_lru_walk(&nfsd_file_lru, 412 + nfsd_file_lru_cb, 413 + &head, LONG_MAX); 414 + list_for_each_entry(nf, &head, nf_lru) { 463 415 
spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock); 464 416 nfsd_file_do_unhash(nf); 465 417 spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock); 466 - nfsd_file_put_noref(nf); 467 418 } 419 + nfsd_file_dispose_list_delayed(&head); 420 + return ret; 421 + } 422 + 423 + static void 424 + nfsd_file_gc(void) 425 + { 426 + nfsd_file_lru_walk_list(NULL); 427 + } 428 + 429 + static void 430 + nfsd_file_gc_worker(struct work_struct *work) 431 + { 432 + nfsd_file_gc(); 433 + nfsd_file_schedule_laundrette(); 468 434 } 469 435 470 436 static unsigned long ··· 496 422 static unsigned long 497 423 nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc) 498 424 { 499 - LIST_HEAD(head); 500 - unsigned long ret; 501 - 502 - ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head); 503 - nfsd_file_lru_dispose(&head); 504 - return ret; 425 + return nfsd_file_lru_walk_list(sc); 505 426 } 506 427 507 428 static struct shrinker nfsd_file_shrinker = { ··· 558 489 559 490 __nfsd_file_close_inode(inode, hashval, &dispose); 560 491 trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose)); 561 - nfsd_file_dispose_list(&dispose); 492 + nfsd_file_dispose_list_delayed(&dispose); 562 493 } 563 494 564 495 /** ··· 574 505 nfsd_file_delayed_close(struct work_struct *work) 575 506 { 576 507 LIST_HEAD(head); 508 + struct nfsd_fcache_disposal *l = container_of(work, 509 + struct nfsd_fcache_disposal, work); 577 510 578 - list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX); 579 - 580 - if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags)) 581 - nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH); 582 - 583 - if (!list_empty(&head)) { 584 - nfsd_file_lru_dispose(&head); 585 - flush_delayed_fput(); 586 - } 511 + nfsd_file_list_remove_disposal(&head, l); 512 + nfsd_file_dispose_list(&head); 587 513 } 588 514 589 515 static int ··· 639 575 if (nfsd_file_hashtbl) 640 576 return 0; 641 577 578 + nfsd_filecache_wq = 
alloc_workqueue("nfsd_filecache", 0, 0); 579 + if (!nfsd_filecache_wq) 580 + goto out; 581 + 642 582 nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE, 643 583 sizeof(*nfsd_file_hashtbl), GFP_KERNEL); 644 584 if (!nfsd_file_hashtbl) { ··· 696 628 spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock); 697 629 } 698 630 699 - INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close); 631 + INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker); 700 632 out: 701 633 return ret; 702 634 out_notifier: ··· 712 644 nfsd_file_mark_slab = NULL; 713 645 kfree(nfsd_file_hashtbl); 714 646 nfsd_file_hashtbl = NULL; 647 + destroy_workqueue(nfsd_filecache_wq); 648 + nfsd_filecache_wq = NULL; 715 649 goto out; 716 650 } 717 651 ··· 752 682 } 753 683 } 754 684 685 + static struct nfsd_fcache_disposal * 686 + nfsd_alloc_fcache_disposal(struct net *net) 687 + { 688 + struct nfsd_fcache_disposal *l; 689 + 690 + l = kmalloc(sizeof(*l), GFP_KERNEL); 691 + if (!l) 692 + return NULL; 693 + INIT_WORK(&l->work, nfsd_file_delayed_close); 694 + l->net = net; 695 + spin_lock_init(&l->lock); 696 + INIT_LIST_HEAD(&l->freeme); 697 + return l; 698 + } 699 + 700 + static void 701 + nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l) 702 + { 703 + rcu_assign_pointer(l->net, NULL); 704 + cancel_work_sync(&l->work); 705 + nfsd_file_dispose_list(&l->freeme); 706 + kfree_rcu(l, rcu); 707 + } 708 + 709 + static void 710 + nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l) 711 + { 712 + spin_lock(&laundrette_lock); 713 + list_add_tail_rcu(&l->list, &laundrettes); 714 + spin_unlock(&laundrette_lock); 715 + } 716 + 717 + static void 718 + nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l) 719 + { 720 + spin_lock(&laundrette_lock); 721 + list_del_rcu(&l->list); 722 + spin_unlock(&laundrette_lock); 723 + } 724 + 725 + static int 726 + nfsd_alloc_fcache_disposal_net(struct net *net) 727 + { 728 + struct nfsd_fcache_disposal *l; 729 + 730 + l = nfsd_alloc_fcache_disposal(net); 
731 + if (!l) 732 + return -ENOMEM; 733 + nfsd_add_fcache_disposal(l); 734 + return 0; 735 + } 736 + 737 + static void 738 + nfsd_free_fcache_disposal_net(struct net *net) 739 + { 740 + struct nfsd_fcache_disposal *l; 741 + 742 + rcu_read_lock(); 743 + list_for_each_entry_rcu(l, &laundrettes, list) { 744 + if (l->net != net) 745 + continue; 746 + nfsd_del_fcache_disposal(l); 747 + rcu_read_unlock(); 748 + nfsd_free_fcache_disposal(l); 749 + return; 750 + } 751 + rcu_read_unlock(); 752 + } 753 + 754 + int 755 + nfsd_file_cache_start_net(struct net *net) 756 + { 757 + return nfsd_alloc_fcache_disposal_net(net); 758 + } 759 + 760 + void 761 + nfsd_file_cache_shutdown_net(struct net *net) 762 + { 763 + nfsd_file_cache_purge(net); 764 + nfsd_free_fcache_disposal_net(net); 765 + } 766 + 755 767 void 756 768 nfsd_file_cache_shutdown(void) 757 769 { ··· 858 706 nfsd_file_mark_slab = NULL; 859 707 kfree(nfsd_file_hashtbl); 860 708 nfsd_file_hashtbl = NULL; 709 + destroy_workqueue(nfsd_filecache_wq); 710 + nfsd_filecache_wq = NULL; 861 711 } 862 712 863 713 static bool ··· 943 789 struct nfsd_file *nf, *new; 944 790 struct inode *inode; 945 791 unsigned int hashval; 792 + bool retry = true; 946 793 947 794 /* FIXME: skip this if fh_dentry is already set? */ 948 795 status = fh_verify(rqstp, fhp, S_IFREG, ··· 979 824 980 825 /* Did construction of this file fail? 
*/ 981 826 if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) { 827 + if (!retry) { 828 + status = nfserr_jukebox; 829 + goto out; 830 + } 831 + retry = false; 982 832 nfsd_file_put_noref(nf); 983 833 goto retry; 984 834 } ··· 1018 858 open_file: 1019 859 nf = new; 1020 860 /* Take reference for the hashtable */ 1021 - atomic_inc(&nf->nf_ref); 861 + refcount_inc(&nf->nf_ref); 1022 862 __set_bit(NFSD_FILE_HASHED, &nf->nf_flags); 1023 863 __set_bit(NFSD_FILE_PENDING, &nf->nf_flags); 1024 864 list_lru_add(&nfsd_file_lru, &nf->nf_lru); ··· 1027 867 nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount, 1028 868 nfsd_file_hashtbl[hashval].nfb_count); 1029 869 spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock); 1030 - atomic_long_inc(&nfsd_filecache_count); 870 + if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD) 871 + nfsd_file_gc(); 1031 872 1032 873 nf->nf_mark = nfsd_file_mark_find_or_create(nf); 1033 874 if (nf->nf_mark)
+5 -2
fs/nfsd/filecache.h
··· 19 19 */ 20 20 struct nfsd_file_mark { 21 21 struct fsnotify_mark nfm_mark; 22 - atomic_t nfm_ref; 22 + refcount_t nfm_ref; 23 23 }; 24 24 25 25 /* ··· 43 43 unsigned long nf_flags; 44 44 struct inode *nf_inode; 45 45 unsigned int nf_hashval; 46 - atomic_t nf_ref; 46 + refcount_t nf_ref; 47 47 unsigned char nf_may; 48 48 struct nfsd_file_mark *nf_mark; 49 + struct rw_semaphore nf_rwsem; 49 50 }; 50 51 51 52 int nfsd_file_cache_init(void); 52 53 void nfsd_file_cache_purge(struct net *); 53 54 void nfsd_file_cache_shutdown(void); 55 + int nfsd_file_cache_start_net(struct net *net); 56 + void nfsd_file_cache_shutdown_net(struct net *net); 54 57 void nfsd_file_put(struct nfsd_file *nf); 55 58 struct nfsd_file *nfsd_file_get(struct nfsd_file *nf); 56 59 void nfsd_file_close_inode_sync(struct inode *inode);
+3 -3
fs/nfsd/netns.h
··· 40 40 41 41 struct lock_manager nfsd4_manager; 42 42 bool grace_ended; 43 - time_t boot_time; 43 + time64_t boot_time; 44 44 45 45 /* internal mount of the "nfsd" pseudofilesystem: */ 46 46 struct vfsmount *nfsd_mnt; ··· 92 92 bool in_grace; 93 93 const struct nfsd4_client_tracking_ops *client_tracking_ops; 94 94 95 - time_t nfsd4_lease; 96 - time_t nfsd4_grace; 95 + time64_t nfsd4_lease; 96 + time64_t nfsd4_grace; 97 97 bool somebody_reclaimed; 98 98 99 99 bool track_reclaim_completes;
+3 -2
fs/nfsd/nfs3proc.c
··· 203 203 RETURN_STATUS(nfserr_io); 204 204 nfserr = nfsd_write(rqstp, &resp->fh, argp->offset, 205 205 rqstp->rq_vec, nvecs, &cnt, 206 - resp->committed); 206 + resp->committed, resp->verf); 207 207 resp->count = cnt; 208 208 RETURN_STATUS(nfserr); 209 209 } ··· 683 683 RETURN_STATUS(nfserr_inval); 684 684 685 685 fh_copy(&resp->fh, &argp->fh); 686 - nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count); 686 + nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count, 687 + resp->verf); 687 688 688 689 RETURN_STATUS(nfserr); 689 690 }
+12 -24
fs/nfsd/nfs3xdr.c
··· 32 32 * XDR functions for basic NFS types 33 33 */ 34 34 static __be32 * 35 - encode_time3(__be32 *p, struct timespec *time) 35 + encode_time3(__be32 *p, struct timespec64 *time) 36 36 { 37 37 *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); 38 38 return p; 39 39 } 40 40 41 41 static __be32 * 42 - decode_time3(__be32 *p, struct timespec *time) 42 + decode_time3(__be32 *p, struct timespec64 *time) 43 43 { 44 44 time->tv_sec = ntohl(*p++); 45 45 time->tv_nsec = ntohl(*p++); ··· 167 167 struct kstat *stat) 168 168 { 169 169 struct user_namespace *userns = nfsd_user_namespace(rqstp); 170 - struct timespec ts; 171 170 *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]); 172 171 *p++ = htonl((u32) (stat->mode & S_IALLUGO)); 173 172 *p++ = htonl((u32) stat->nlink); ··· 182 183 *p++ = htonl((u32) MINOR(stat->rdev)); 183 184 p = encode_fsid(p, fhp); 184 185 p = xdr_encode_hyper(p, stat->ino); 185 - ts = timespec64_to_timespec(stat->atime); 186 - p = encode_time3(p, &ts); 187 - ts = timespec64_to_timespec(stat->mtime); 188 - p = encode_time3(p, &ts); 189 - ts = timespec64_to_timespec(stat->ctime); 190 - p = encode_time3(p, &ts); 186 + p = encode_time3(p, &stat->atime); 187 + p = encode_time3(p, &stat->mtime); 188 + p = encode_time3(p, &stat->ctime); 191 189 192 190 return p; 193 191 } ··· 273 277 stat.size = inode->i_size; 274 278 } 275 279 276 - fhp->fh_pre_mtime = timespec64_to_timespec(stat.mtime); 277 - fhp->fh_pre_ctime = timespec64_to_timespec(stat.ctime); 280 + fhp->fh_pre_mtime = stat.mtime; 281 + fhp->fh_pre_ctime = stat.ctime; 278 282 fhp->fh_pre_size = stat.size; 279 283 fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode); 280 284 fhp->fh_pre_saved = true; ··· 326 330 p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp)); 327 331 328 332 if ((args->check_guard = ntohl(*p++)) != 0) { 329 - struct timespec time; 333 + struct timespec64 time; 330 334 p = decode_time3(p, &time); 331 335 args->guardtime = time.tv_sec; 332 336 } ··· 
747 751 nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p) 748 752 { 749 753 struct nfsd3_writeres *resp = rqstp->rq_resp; 750 - struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 751 - __be32 verf[2]; 752 754 753 755 p = encode_wcc_data(rqstp, p, &resp->fh); 754 756 if (resp->status == 0) { 755 757 *p++ = htonl(resp->count); 756 758 *p++ = htonl(resp->committed); 757 - /* unique identifier, y2038 overflow can be ignored */ 758 - nfsd_copy_boot_verifier(verf, nn); 759 - *p++ = verf[0]; 760 - *p++ = verf[1]; 759 + *p++ = resp->verf[0]; 760 + *p++ = resp->verf[1]; 761 761 } 762 762 return xdr_ressize_check(rqstp, p); 763 763 } ··· 1117 1125 nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p) 1118 1126 { 1119 1127 struct nfsd3_commitres *resp = rqstp->rq_resp; 1120 - struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1121 - __be32 verf[2]; 1122 1128 1123 1129 p = encode_wcc_data(rqstp, p, &resp->fh); 1124 1130 /* Write verifier */ 1125 1131 if (resp->status == 0) { 1126 - /* unique identifier, y2038 overflow can be ignored */ 1127 - nfsd_copy_boot_verifier(verf, nn); 1128 - *p++ = verf[0]; 1129 - *p++ = verf[1]; 1132 + *p++ = resp->verf[0]; 1133 + *p++ = resp->verf[1]; 1130 1134 } 1131 1135 return xdr_ressize_check(rqstp, p); 1132 1136 }
+10 -1
fs/nfsd/nfs4callback.c
··· 823 823 static int max_cb_time(struct net *net) 824 824 { 825 825 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 826 - return max(nn->nfsd4_lease/10, (time_t)1) * HZ; 826 + 827 + /* 828 + * nfsd4_lease is set to at most one hour in __nfsd4_write_time, 829 + * so we can use 32-bit math on it. Warn if that assumption 830 + * ever stops being true. 831 + */ 832 + if (WARN_ON_ONCE(nn->nfsd4_lease > 3600)) 833 + return 360 * HZ; 834 + 835 + return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ; 827 836 } 828 837 829 838 static struct workqueue_struct *callback_wq;
+1 -1
fs/nfsd/nfs4layouts.c
··· 675 675 676 676 /* Client gets 2 lease periods to return it */ 677 677 cutoff = ktime_add_ns(task->tk_start, 678 - nn->nfsd4_lease * NSEC_PER_SEC * 2); 678 + (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2); 679 679 680 680 if (ktime_before(now, cutoff)) { 681 681 rpc_delay(task, HZ/100); /* 10 mili-seconds */
+414 -50
fs/nfsd/nfs4proc.c
··· 37 37 #include <linux/falloc.h> 38 38 #include <linux/slab.h> 39 39 #include <linux/kthread.h> 40 + #include <linux/sunrpc/addr.h> 40 41 41 42 #include "idmap.h" 42 43 #include "cache.h" ··· 233 232 if (!*resfh) 234 233 return nfserr_jukebox; 235 234 fh_init(*resfh, NFS4_FHSIZE); 236 - open->op_truncate = 0; 235 + open->op_truncate = false; 237 236 238 237 if (open->op_create) { 239 238 /* FIXME: check session persistence and pnfs flags. ··· 366 365 if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL) 367 366 return nfserr_inval; 368 367 369 - open->op_created = 0; 368 + open->op_created = false; 370 369 /* 371 370 * RFC5661 18.51.3 372 371 * Before RECLAIM_COMPLETE done, server should deny new lock ··· 504 503 union nfsd4_op_u *u) 505 504 { 506 505 struct nfsd4_putfh *putfh = &u->putfh; 506 + __be32 ret; 507 507 508 508 fh_put(&cstate->current_fh); 509 509 cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen; 510 510 memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval, 511 511 putfh->pf_fhlen); 512 - return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS); 512 + ret = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS); 513 + #ifdef CONFIG_NFSD_V4_2_INTER_SSC 514 + if (ret == nfserr_stale && putfh->no_verify) { 515 + SET_FH_FLAG(&cstate->current_fh, NFSD4_FH_FOREIGN); 516 + ret = 0; 517 + } 518 + #endif 519 + return ret; 513 520 } 514 521 515 522 static __be32 ··· 539 530 return nfserr_restorefh; 540 531 541 532 fh_dup2(&cstate->current_fh, &cstate->save_fh); 542 - if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) { 533 + if (HAS_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG)) { 543 534 memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t)); 544 - SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 535 + SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 545 536 } 546 537 return nfs_ok; 547 538 } ··· 551 542 union nfsd4_op_u *u) 552 543 { 553 544 fh_dup2(&cstate->save_fh, &cstate->current_fh); 554 - if 
(HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) { 545 + if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG)) { 555 546 memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t)); 556 - SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG); 547 + SET_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG); 557 548 } 558 549 return nfs_ok; 559 550 } ··· 590 581 { 591 582 struct nfsd4_commit *commit = &u->commit; 592 583 593 - gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp)); 594 584 return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset, 595 - commit->co_count); 585 + commit->co_count, 586 + (__be32 *)commit->co_verf.data); 596 587 } 597 588 598 589 static __be32 ··· 785 776 /* check stateid */ 786 777 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 787 778 &read->rd_stateid, RD_STATE, 788 - &read->rd_nf); 779 + &read->rd_nf, NULL); 789 780 if (status) { 790 781 dprintk("NFSD: nfsd4_read: couldn't process stateid!\n"); 791 782 goto out; ··· 957 948 if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { 958 949 status = nfs4_preprocess_stateid_op(rqstp, cstate, 959 950 &cstate->current_fh, &setattr->sa_stateid, 960 - WR_STATE, NULL); 951 + WR_STATE, NULL, NULL); 961 952 if (status) { 962 953 dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n"); 963 954 return status; ··· 984 975 if (status) 985 976 goto out; 986 977 status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr, 987 - 0, (time_t)0); 978 + 0, (time64_t)0); 988 979 out: 989 980 fh_drop_write(&cstate->current_fh); 990 981 return status; ··· 1008 999 trace_nfsd_write_start(rqstp, &cstate->current_fh, 1009 1000 write->wr_offset, cnt); 1010 1001 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1011 - stateid, WR_STATE, &nf); 1002 + stateid, WR_STATE, &nf, NULL); 1012 1003 if (status) { 1013 1004 dprintk("NFSD: nfsd4_write: couldn't process stateid!\n"); 1014 1005 return status; 1015 1006 } 1016 1007 1017 1008 write->wr_how_written = write->wr_stable_how; 
1018 - gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp)); 1019 1009 1020 1010 nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist, 1021 1011 &write->wr_head, write->wr_buflen); 1022 1012 WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec)); 1023 1013 1024 - status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf->nf_file, 1014 + status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf, 1025 1015 write->wr_offset, rqstp->rq_vec, nvecs, &cnt, 1026 - write->wr_how_written); 1016 + write->wr_how_written, 1017 + (__be32 *)write->wr_verifier.data); 1027 1018 nfsd_file_put(nf); 1028 1019 1029 1020 write->wr_bytes_written = cnt; ··· 1043 1034 return nfserr_nofilehandle; 1044 1035 1045 1036 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh, 1046 - src_stateid, RD_STATE, src); 1037 + src_stateid, RD_STATE, src, NULL); 1047 1038 if (status) { 1048 1039 dprintk("NFSD: %s: couldn't process src stateid!\n", __func__); 1049 1040 goto out; 1050 1041 } 1051 1042 1052 1043 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1053 - dst_stateid, WR_STATE, dst); 1044 + dst_stateid, WR_STATE, dst, NULL); 1054 1045 if (status) { 1055 1046 dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__); 1056 1047 goto out_put_src; ··· 1085 1076 if (status) 1086 1077 goto out; 1087 1078 1088 - status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos, 1089 - dst->nf_file, clone->cl_dst_pos, clone->cl_count, 1079 + status = nfsd4_clone_file_range(src, clone->cl_src_pos, 1080 + dst, clone->cl_dst_pos, clone->cl_count, 1090 1081 EX_ISSYNC(cstate->current_fh.fh_export)); 1091 1082 1092 1083 nfsd_file_put(dst); ··· 1143 1134 1144 1135 while ((copy = nfsd4_get_copy(clp)) != NULL) 1145 1136 nfsd4_stop_copy(copy); 1137 + } 1138 + #ifdef CONFIG_NFSD_V4_2_INTER_SSC 1139 + 1140 + extern struct file *nfs42_ssc_open(struct vfsmount *ss_mnt, 1141 + struct nfs_fh *src_fh, 1142 + nfs4_stateid *stateid); 1143 + extern void nfs42_ssc_close(struct file *filep); 
1144 + 1145 + extern void nfs_sb_deactive(struct super_block *sb); 1146 + 1147 + #define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys" 1148 + 1149 + /** 1150 + * Support one copy source server for now. 1151 + */ 1152 + static __be32 1153 + nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp, 1154 + struct vfsmount **mount) 1155 + { 1156 + struct file_system_type *type; 1157 + struct vfsmount *ss_mnt; 1158 + struct nfs42_netaddr *naddr; 1159 + struct sockaddr_storage tmp_addr; 1160 + size_t tmp_addrlen, match_netid_len = 3; 1161 + char *startsep = "", *endsep = "", *match_netid = "tcp"; 1162 + char *ipaddr, *dev_name, *raw_data; 1163 + int len, raw_len; 1164 + __be32 status = nfserr_inval; 1165 + 1166 + naddr = &nss->u.nl4_addr; 1167 + tmp_addrlen = rpc_uaddr2sockaddr(SVC_NET(rqstp), naddr->addr, 1168 + naddr->addr_len, 1169 + (struct sockaddr *)&tmp_addr, 1170 + sizeof(tmp_addr)); 1171 + if (tmp_addrlen == 0) 1172 + goto out_err; 1173 + 1174 + if (tmp_addr.ss_family == AF_INET6) { 1175 + startsep = "["; 1176 + endsep = "]"; 1177 + match_netid = "tcp6"; 1178 + match_netid_len = 4; 1179 + } 1180 + 1181 + if (naddr->netid_len != match_netid_len || 1182 + strncmp(naddr->netid, match_netid, naddr->netid_len)) 1183 + goto out_err; 1184 + 1185 + /* Construct the raw data for the vfs_kern_mount call */ 1186 + len = RPC_MAX_ADDRBUFLEN + 1; 1187 + ipaddr = kzalloc(len, GFP_KERNEL); 1188 + if (!ipaddr) 1189 + goto out_err; 1190 + 1191 + rpc_ntop((struct sockaddr *)&tmp_addr, ipaddr, len); 1192 + 1193 + /* 2 for ipv6 endsep and startsep. 
3 for ":/" and trailing '/0'*/ 1194 + 1195 + raw_len = strlen(NFSD42_INTERSSC_MOUNTOPS) + strlen(ipaddr); 1196 + raw_data = kzalloc(raw_len, GFP_KERNEL); 1197 + if (!raw_data) 1198 + goto out_free_ipaddr; 1199 + 1200 + snprintf(raw_data, raw_len, NFSD42_INTERSSC_MOUNTOPS, ipaddr); 1201 + 1202 + status = nfserr_nodev; 1203 + type = get_fs_type("nfs"); 1204 + if (!type) 1205 + goto out_free_rawdata; 1206 + 1207 + /* Set the server:<export> for the vfs_kern_mount call */ 1208 + dev_name = kzalloc(len + 5, GFP_KERNEL); 1209 + if (!dev_name) 1210 + goto out_free_rawdata; 1211 + snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep); 1212 + 1213 + /* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */ 1214 + ss_mnt = vfs_kern_mount(type, SB_KERNMOUNT, dev_name, raw_data); 1215 + module_put(type->owner); 1216 + if (IS_ERR(ss_mnt)) 1217 + goto out_free_devname; 1218 + 1219 + status = 0; 1220 + *mount = ss_mnt; 1221 + 1222 + out_free_devname: 1223 + kfree(dev_name); 1224 + out_free_rawdata: 1225 + kfree(raw_data); 1226 + out_free_ipaddr: 1227 + kfree(ipaddr); 1228 + out_err: 1229 + return status; 1230 + } 1231 + 1232 + static void 1233 + nfsd4_interssc_disconnect(struct vfsmount *ss_mnt) 1234 + { 1235 + nfs_sb_deactive(ss_mnt->mnt_sb); 1236 + mntput(ss_mnt); 1237 + } 1238 + 1239 + /** 1240 + * nfsd4_setup_inter_ssc 1241 + * 1242 + * Verify COPY destination stateid. 1243 + * Connect to the source server with NFSv4.1. 1244 + * Create the source struct file for nfsd_copy_range. 
1245 + * Called with COPY cstate: 1246 + * SAVED_FH: source filehandle 1247 + * CURRENT_FH: destination filehandle 1248 + */ 1249 + static __be32 1250 + nfsd4_setup_inter_ssc(struct svc_rqst *rqstp, 1251 + struct nfsd4_compound_state *cstate, 1252 + struct nfsd4_copy *copy, struct vfsmount **mount) 1253 + { 1254 + struct svc_fh *s_fh = NULL; 1255 + stateid_t *s_stid = &copy->cp_src_stateid; 1256 + __be32 status = nfserr_inval; 1257 + 1258 + /* Verify the destination stateid and set dst struct file*/ 1259 + status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1260 + &copy->cp_dst_stateid, 1261 + WR_STATE, &copy->nf_dst, NULL); 1262 + if (status) 1263 + goto out; 1264 + 1265 + status = nfsd4_interssc_connect(&copy->cp_src, rqstp, mount); 1266 + if (status) 1267 + goto out; 1268 + 1269 + s_fh = &cstate->save_fh; 1270 + 1271 + copy->c_fh.size = s_fh->fh_handle.fh_size; 1272 + memcpy(copy->c_fh.data, &s_fh->fh_handle.fh_base, copy->c_fh.size); 1273 + copy->stateid.seqid = cpu_to_be32(s_stid->si_generation); 1274 + memcpy(copy->stateid.other, (void *)&s_stid->si_opaque, 1275 + sizeof(stateid_opaque_t)); 1276 + 1277 + status = 0; 1278 + out: 1279 + return status; 1280 + } 1281 + 1282 + static void 1283 + nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src, 1284 + struct nfsd_file *dst) 1285 + { 1286 + nfs42_ssc_close(src->nf_file); 1287 + nfsd_file_put(src); 1288 + nfsd_file_put(dst); 1289 + mntput(ss_mnt); 1290 + } 1291 + 1292 + #else /* CONFIG_NFSD_V4_2_INTER_SSC */ 1293 + 1294 + static __be32 1295 + nfsd4_setup_inter_ssc(struct svc_rqst *rqstp, 1296 + struct nfsd4_compound_state *cstate, 1297 + struct nfsd4_copy *copy, 1298 + struct vfsmount **mount) 1299 + { 1300 + *mount = NULL; 1301 + return nfserr_inval; 1302 + } 1303 + 1304 + static void 1305 + nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src, 1306 + struct nfsd_file *dst) 1307 + { 1308 + } 1309 + 1310 + static void 1311 + nfsd4_interssc_disconnect(struct 
vfsmount *ss_mnt) 1312 + { 1313 + } 1314 + 1315 + static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt, 1316 + struct nfs_fh *src_fh, 1317 + nfs4_stateid *stateid) 1318 + { 1319 + return NULL; 1320 + } 1321 + #endif /* CONFIG_NFSD_V4_2_INTER_SSC */ 1322 + 1323 + static __be32 1324 + nfsd4_setup_intra_ssc(struct svc_rqst *rqstp, 1325 + struct nfsd4_compound_state *cstate, 1326 + struct nfsd4_copy *copy) 1327 + { 1328 + return nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, 1329 + &copy->nf_src, &copy->cp_dst_stateid, 1330 + &copy->nf_dst); 1331 + } 1332 + 1333 + static void 1334 + nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst) 1335 + { 1336 + nfsd_file_put(src); 1337 + nfsd_file_put(dst); 1146 1338 } 1147 1339 1148 1340 static void nfsd4_cb_offload_release(struct nfsd4_callback *cb) ··· 1410 1200 status = nfs_ok; 1411 1201 } 1412 1202 1413 - nfsd_file_put(copy->nf_src); 1414 - nfsd_file_put(copy->nf_dst); 1203 + if (!copy->cp_intra) /* Inter server SSC */ 1204 + nfsd4_cleanup_inter_ssc(copy->ss_mnt, copy->nf_src, 1205 + copy->nf_dst); 1206 + else 1207 + nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst); 1208 + 1415 1209 return status; 1416 1210 } 1417 1211 1418 - static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst) 1212 + static int dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst) 1419 1213 { 1420 1214 dst->cp_src_pos = src->cp_src_pos; 1421 1215 dst->cp_dst_pos = src->cp_dst_pos; ··· 1429 1215 memcpy(&dst->fh, &src->fh, sizeof(src->fh)); 1430 1216 dst->cp_clp = src->cp_clp; 1431 1217 dst->nf_dst = nfsd_file_get(src->nf_dst); 1432 - dst->nf_src = nfsd_file_get(src->nf_src); 1218 + dst->cp_intra = src->cp_intra; 1219 + if (src->cp_intra) /* for inter, file_src doesn't exist yet */ 1220 + dst->nf_src = nfsd_file_get(src->nf_src); 1221 + 1433 1222 memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid)); 1223 + memcpy(&dst->cp_src, &src->cp_src, sizeof(struct nl4_server)); 1224 + 
memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid)); 1225 + memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh)); 1226 + dst->ss_mnt = src->ss_mnt; 1227 + 1228 + return 0; 1434 1229 } 1435 1230 1436 1231 static void cleanup_async_copy(struct nfsd4_copy *copy) 1437 1232 { 1438 - nfs4_free_cp_state(copy); 1233 + nfs4_free_copy_state(copy); 1439 1234 nfsd_file_put(copy->nf_dst); 1440 - nfsd_file_put(copy->nf_src); 1235 + if (copy->cp_intra) 1236 + nfsd_file_put(copy->nf_src); 1441 1237 spin_lock(&copy->cp_clp->async_lock); 1442 1238 list_del(&copy->copies); 1443 1239 spin_unlock(&copy->cp_clp->async_lock); ··· 1459 1235 struct nfsd4_copy *copy = (struct nfsd4_copy *)data; 1460 1236 struct nfsd4_copy *cb_copy; 1461 1237 1238 + if (!copy->cp_intra) { /* Inter server SSC */ 1239 + copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL); 1240 + if (!copy->nf_src) { 1241 + copy->nfserr = nfserr_serverfault; 1242 + nfsd4_interssc_disconnect(copy->ss_mnt); 1243 + goto do_callback; 1244 + } 1245 + copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh, 1246 + &copy->stateid); 1247 + if (IS_ERR(copy->nf_src->nf_file)) { 1248 + copy->nfserr = nfserr_offload_denied; 1249 + nfsd4_interssc_disconnect(copy->ss_mnt); 1250 + goto do_callback; 1251 + } 1252 + } 1253 + 1462 1254 copy->nfserr = nfsd4_do_copy(copy, 0); 1255 + do_callback: 1463 1256 cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); 1464 1257 if (!cb_copy) 1465 1258 goto out; ··· 1488 1247 &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD); 1489 1248 nfsd4_run_cb(&cb_copy->cp_cb); 1490 1249 out: 1250 + if (!copy->cp_intra) 1251 + kfree(copy->nf_src); 1491 1252 cleanup_async_copy(copy); 1492 1253 return 0; 1493 1254 } ··· 1502 1259 __be32 status; 1503 1260 struct nfsd4_copy *async_copy = NULL; 1504 1261 1505 - status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, 1506 - &copy->nf_src, &copy->cp_dst_stateid, 1507 - &copy->nf_dst); 1508 - if (status) 1509 - goto out; 1262 + if 
(!copy->cp_intra) { /* Inter server SSC */ 1263 + if (!inter_copy_offload_enable || copy->cp_synchronous) { 1264 + status = nfserr_notsupp; 1265 + goto out; 1266 + } 1267 + status = nfsd4_setup_inter_ssc(rqstp, cstate, copy, 1268 + &copy->ss_mnt); 1269 + if (status) 1270 + return nfserr_offload_denied; 1271 + } else { 1272 + status = nfsd4_setup_intra_ssc(rqstp, cstate, copy); 1273 + if (status) 1274 + return status; 1275 + } 1510 1276 1511 1277 copy->cp_clp = cstate->clp; 1512 1278 memcpy(&copy->fh, &cstate->current_fh.fh_handle, ··· 1526 1274 status = nfserrno(-ENOMEM); 1527 1275 async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); 1528 1276 if (!async_copy) 1529 - goto out; 1530 - if (!nfs4_init_cp_state(nn, copy)) { 1531 - kfree(async_copy); 1532 - goto out; 1533 - } 1277 + goto out_err; 1278 + if (!nfs4_init_copy_state(nn, copy)) 1279 + goto out_err; 1534 1280 refcount_set(&async_copy->refcount, 1); 1535 1281 memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid, 1536 1282 sizeof(copy->cp_stateid)); 1537 - dup_copy_fields(copy, async_copy); 1283 + status = dup_copy_fields(copy, async_copy); 1284 + if (status) 1285 + goto out_err; 1538 1286 async_copy->copy_task = kthread_create(nfsd4_do_async_copy, 1539 1287 async_copy, "%s", "copy thread"); 1540 1288 if (IS_ERR(async_copy->copy_task)) ··· 1545 1293 spin_unlock(&async_copy->cp_clp->async_lock); 1546 1294 wake_up_process(async_copy->copy_task); 1547 1295 status = nfs_ok; 1548 - } else 1296 + } else { 1549 1297 status = nfsd4_do_copy(copy, 1); 1298 + } 1550 1299 out: 1551 1300 return status; 1552 1301 out_err: 1553 1302 if (async_copy) 1554 1303 cleanup_async_copy(async_copy); 1304 + status = nfserrno(-ENOMEM); 1305 + if (!copy->cp_intra) 1306 + nfsd4_interssc_disconnect(copy->ss_mnt); 1555 1307 goto out; 1556 1308 } 1557 1309 ··· 1566 1310 1567 1311 spin_lock(&clp->async_lock); 1568 1312 list_for_each_entry(copy, &clp->async_copies, copies) { 1569 - if (memcmp(&copy->cp_stateid, stateid, 
NFS4_STATEID_SIZE)) 1313 + if (memcmp(&copy->cp_stateid.stid, stateid, NFS4_STATEID_SIZE)) 1570 1314 continue; 1571 1315 refcount_inc(&copy->refcount); 1572 1316 spin_unlock(&clp->async_lock); ··· 1582 1326 union nfsd4_op_u *u) 1583 1327 { 1584 1328 struct nfsd4_offload_status *os = &u->offload_status; 1585 - __be32 status = 0; 1586 1329 struct nfsd4_copy *copy; 1587 1330 struct nfs4_client *clp = cstate->clp; 1588 1331 1589 1332 copy = find_async_copy(clp, &os->stateid); 1590 - if (copy) 1591 - nfsd4_stop_copy(copy); 1592 - else 1593 - status = nfserr_bad_stateid; 1333 + if (!copy) { 1334 + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1594 1335 1336 + return manage_cpntf_state(nn, &os->stateid, clp, NULL); 1337 + } else 1338 + nfsd4_stop_copy(copy); 1339 + 1340 + return nfs_ok; 1341 + } 1342 + 1343 + static __be32 1344 + nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 1345 + union nfsd4_op_u *u) 1346 + { 1347 + struct nfsd4_copy_notify *cn = &u->copy_notify; 1348 + __be32 status; 1349 + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1350 + struct nfs4_stid *stid; 1351 + struct nfs4_cpntf_state *cps; 1352 + struct nfs4_client *clp = cstate->clp; 1353 + 1354 + status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1355 + &cn->cpn_src_stateid, RD_STATE, NULL, 1356 + &stid); 1357 + if (status) 1358 + return status; 1359 + 1360 + cn->cpn_sec = nn->nfsd4_lease; 1361 + cn->cpn_nsec = 0; 1362 + 1363 + status = nfserrno(-ENOMEM); 1364 + cps = nfs4_alloc_init_cpntf_state(nn, stid); 1365 + if (!cps) 1366 + goto out; 1367 + memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.stid, sizeof(stateid_t)); 1368 + memcpy(&cps->cp_p_stateid, &stid->sc_stateid, sizeof(stateid_t)); 1369 + memcpy(&cps->cp_p_clid, &clp->cl_clientid, sizeof(clientid_t)); 1370 + 1371 + /* For now, only return one server address in cpn_src, the 1372 + * address used by the client to connect to this server. 
1373 + */ 1374 + cn->cpn_src.nl4_type = NL4_NETADDR; 1375 + status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr, 1376 + &cn->cpn_src.u.nl4_addr); 1377 + WARN_ON_ONCE(status); 1378 + if (status) { 1379 + nfs4_put_cpntf_state(nn, cps); 1380 + goto out; 1381 + } 1382 + out: 1383 + nfs4_put_stid(stid); 1595 1384 return status; 1596 1385 } 1597 1386 ··· 1649 1348 1650 1349 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1651 1350 &fallocate->falloc_stateid, 1652 - WR_STATE, &nf); 1351 + WR_STATE, &nf, NULL); 1653 1352 if (status != nfs_ok) { 1654 1353 dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n"); 1655 1354 return status; ··· 1708 1407 1709 1408 status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, 1710 1409 &seek->seek_stateid, 1711 - RD_STATE, &nf); 1410 + RD_STATE, &nf, NULL); 1712 1411 if (status) { 1713 1412 dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n"); 1714 1413 return status; ··· 2213 1912 - rqstp->rq_auth_slack; 2214 1913 } 2215 1914 1915 + #ifdef CONFIG_NFSD_V4_2_INTER_SSC 1916 + static void 1917 + check_if_stalefh_allowed(struct nfsd4_compoundargs *args) 1918 + { 1919 + struct nfsd4_op *op, *current_op = NULL, *saved_op = NULL; 1920 + struct nfsd4_copy *copy; 1921 + struct nfsd4_putfh *putfh; 1922 + int i; 1923 + 1924 + /* traverse all operation and if it's a COPY compound, mark the 1925 + * source filehandle to skip verification 1926 + */ 1927 + for (i = 0; i < args->opcnt; i++) { 1928 + op = &args->ops[i]; 1929 + if (op->opnum == OP_PUTFH) 1930 + current_op = op; 1931 + else if (op->opnum == OP_SAVEFH) 1932 + saved_op = current_op; 1933 + else if (op->opnum == OP_RESTOREFH) 1934 + current_op = saved_op; 1935 + else if (op->opnum == OP_COPY) { 1936 + copy = (struct nfsd4_copy *)&op->u; 1937 + if (!saved_op) { 1938 + op->status = nfserr_nofilehandle; 1939 + return; 1940 + } 1941 + putfh = (struct nfsd4_putfh *)&saved_op->u; 1942 + if (!copy->cp_intra) 1943 + putfh->no_verify = 
true; 1944 + } 1945 + } 1946 + } 1947 + #else 1948 + static void 1949 + check_if_stalefh_allowed(struct nfsd4_compoundargs *args) 1950 + { 1951 + } 1952 + #endif 1953 + 2216 1954 /* 2217 1955 * COMPOUND call. 2218 1956 */ ··· 2300 1960 resp->opcnt = 1; 2301 1961 goto encode_op; 2302 1962 } 1963 + check_if_stalefh_allowed(args); 2303 1964 2304 1965 trace_nfsd_compound(rqstp, args->opcnt); 2305 1966 while (!status && resp->opcnt < args->opcnt) { ··· 2316 1975 op->status = nfsd4_open_omfg(rqstp, cstate, op); 2317 1976 goto encode_op; 2318 1977 } 2319 - 2320 - if (!current_fh->fh_dentry) { 1978 + if (!current_fh->fh_dentry && 1979 + !HAS_FH_FLAG(current_fh, NFSD4_FH_FOREIGN)) { 2321 1980 if (!(op->opdesc->op_flags & ALLOWED_WITHOUT_FH)) { 2322 1981 op->status = nfserr_nofilehandle; 2323 1982 goto encode_op; 2324 1983 } 2325 - } else if (current_fh->fh_export->ex_fslocs.migrated && 1984 + } else if (current_fh->fh_export && 1985 + current_fh->fh_export->ex_fslocs.migrated && 2326 1986 !(op->opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) { 2327 1987 op->status = nfserr_moved; 2328 1988 goto encode_op; ··· 2367 2025 if (op->opdesc->op_flags & OP_CLEAR_STATEID) 2368 2026 clear_current_stateid(cstate); 2369 2027 2370 - if (need_wrongsec_check(rqstp)) 2028 + if (current_fh->fh_export && 2029 + need_wrongsec_check(rqstp)) 2371 2030 op->status = check_nfsd_access(current_fh->fh_export, rqstp); 2372 2031 } 2373 2032 encode_op: ··· 2633 2290 return (op_encode_hdr_size + 2634 2291 2 /* osr_count */ + 2635 2292 1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32); 2293 + } 2294 + 2295 + static inline u32 nfsd4_copy_notify_rsize(struct svc_rqst *rqstp, 2296 + struct nfsd4_op *op) 2297 + { 2298 + return (op_encode_hdr_size + 2299 + 3 /* cnr_lease_time */ + 2300 + 1 /* We support one cnr_source_server */ + 2301 + 1 /* cnr_stateid seq */ + 2302 + op_encode_stateid_maxsz /* cnr_stateid */ + 2303 + 1 /* num cnr_source_server*/ + 2304 + 1 /* nl4_type */ + 2305 + 1 /* nl4 size */ + 
2306 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) /*nl4_loc + nl4_loc_sz */) 2307 + * sizeof(__be32); 2636 2308 } 2637 2309 2638 2310 #ifdef CONFIG_NFSD_PNFS ··· 3073 2715 .op_flags = OP_MODIFIES_SOMETHING, 3074 2716 .op_name = "OP_OFFLOAD_CANCEL", 3075 2717 .op_rsize_bop = nfsd4_only_status_rsize, 2718 + }, 2719 + [OP_COPY_NOTIFY] = { 2720 + .op_func = nfsd4_copy_notify, 2721 + .op_flags = OP_MODIFIES_SOMETHING, 2722 + .op_name = "OP_COPY_NOTIFY", 2723 + .op_rsize_bop = nfsd4_copy_notify_rsize, 3076 2724 }, 3077 2725 }; 3078 2726
+4 -4
fs/nfsd/nfs4recover.c
··· 1445 1445 } 1446 1446 1447 1447 cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone; 1448 - cup->cu_u.cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time; 1448 + cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time; 1449 1449 ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); 1450 1450 if (!ret) 1451 1451 ret = cup->cu_u.cu_msg.cm_status; ··· 1782 1782 } 1783 1783 1784 1784 static char * 1785 - nfsd4_cltrack_grace_start(time_t grace_start) 1785 + nfsd4_cltrack_grace_start(time64_t grace_start) 1786 1786 { 1787 1787 int copied; 1788 1788 size_t len; ··· 1795 1795 if (!result) 1796 1796 return result; 1797 1797 1798 - copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%ld", 1798 + copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%lld", 1799 1799 grace_start); 1800 1800 if (copied >= len) { 1801 1801 /* just return nothing if output was truncated */ ··· 2004 2004 char *legacy; 2005 2005 char timestr[22]; /* FIXME: better way to determine max size? */ 2006 2006 2007 - sprintf(timestr, "%ld", nn->boot_time); 2007 + sprintf(timestr, "%lld", nn->boot_time); 2008 2008 legacy = nfsd4_cltrack_legacy_topdir(); 2009 2009 nfsd4_umh_cltrack_upcall("gracedone", timestr, legacy, NULL); 2010 2010 kfree(legacy);
+205 -57
fs/nfsd/nfs4state.c
··· 80 80 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); 81 81 static void nfs4_free_ol_stateid(struct nfs4_stid *stid); 82 82 void nfsd4_end_grace(struct nfsd_net *nn); 83 + static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps); 83 84 84 85 /* Locking: */ 85 86 ··· 171 170 clp->cl_clientid.cl_boot, 172 171 clp->cl_clientid.cl_id); 173 172 list_move_tail(&clp->cl_lru, &nn->client_lru); 174 - clp->cl_time = get_seconds(); 173 + clp->cl_time = ktime_get_boottime_seconds(); 175 174 } 176 175 177 176 static void put_client_renew_locked(struct nfs4_client *clp) ··· 723 722 /* Will be incremented before return to client: */ 724 723 refcount_set(&stid->sc_count, 1); 725 724 spin_lock_init(&stid->sc_lock); 725 + INIT_LIST_HEAD(&stid->sc_cp_list); 726 726 727 727 /* 728 728 * It shouldn't be a problem to reuse an opaque stateid value. ··· 743 741 /* 744 742 * Create a unique stateid_t to represent each COPY. 745 743 */ 746 - int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy) 744 + static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid, 745 + unsigned char sc_type) 747 746 { 748 747 int new_id; 749 748 749 + stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; 750 + stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; 751 + stid->sc_type = sc_type; 752 + 750 753 idr_preload(GFP_KERNEL); 751 754 spin_lock(&nn->s2s_cp_lock); 752 - new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT); 755 + new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); 756 + stid->stid.si_opaque.so_id = new_id; 753 757 spin_unlock(&nn->s2s_cp_lock); 754 758 idr_preload_end(); 755 759 if (new_id < 0) 756 760 return 0; 757 - copy->cp_stateid.si_opaque.so_id = new_id; 758 - copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time; 759 - copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; 760 761 return 1; 761 762 } 762 763 763 - void 
nfs4_free_cp_state(struct nfsd4_copy *copy) 764 + int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy) 765 + { 766 + return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID); 767 + } 768 + 769 + struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn, 770 + struct nfs4_stid *p_stid) 771 + { 772 + struct nfs4_cpntf_state *cps; 773 + 774 + cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL); 775 + if (!cps) 776 + return NULL; 777 + cps->cpntf_time = ktime_get_boottime_seconds(); 778 + refcount_set(&cps->cp_stateid.sc_count, 1); 779 + if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) 780 + goto out_free; 781 + spin_lock(&nn->s2s_cp_lock); 782 + list_add(&cps->cp_list, &p_stid->sc_cp_list); 783 + spin_unlock(&nn->s2s_cp_lock); 784 + return cps; 785 + out_free: 786 + kfree(cps); 787 + return NULL; 788 + } 789 + 790 + void nfs4_free_copy_state(struct nfsd4_copy *copy) 764 791 { 765 792 struct nfsd_net *nn; 766 793 794 + WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID); 767 795 nn = net_generic(copy->cp_clp->net, nfsd_net_id); 768 796 spin_lock(&nn->s2s_cp_lock); 769 - idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id); 797 + idr_remove(&nn->s2s_cp_stateids, 798 + copy->cp_stateid.stid.si_opaque.so_id); 799 + spin_unlock(&nn->s2s_cp_lock); 800 + } 801 + 802 + static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid) 803 + { 804 + struct nfs4_cpntf_state *cps; 805 + struct nfsd_net *nn; 806 + 807 + nn = net_generic(net, nfsd_net_id); 808 + spin_lock(&nn->s2s_cp_lock); 809 + while (!list_empty(&stid->sc_cp_list)) { 810 + cps = list_first_entry(&stid->sc_cp_list, 811 + struct nfs4_cpntf_state, cp_list); 812 + _free_cpntf_state_locked(nn, cps); 813 + } 770 814 spin_unlock(&nn->s2s_cp_lock); 771 815 } 772 816 ··· 854 806 static DEFINE_SPINLOCK(blocked_delegations_lock); 855 807 static struct bloom_pair { 856 808 int entries, old_entries; 857 - time_t swap_time; 809 
+ time64_t swap_time; 858 810 int new; /* index into 'set' */ 859 811 DECLARE_BITMAP(set[2], 256); 860 812 } blocked_delegations; ··· 866 818 867 819 if (bd->entries == 0) 868 820 return 0; 869 - if (seconds_since_boot() - bd->swap_time > 30) { 821 + if (ktime_get_seconds() - bd->swap_time > 30) { 870 822 spin_lock(&blocked_delegations_lock); 871 - if (seconds_since_boot() - bd->swap_time > 30) { 823 + if (ktime_get_seconds() - bd->swap_time > 30) { 872 824 bd->entries -= bd->old_entries; 873 825 bd->old_entries = bd->entries; 874 826 memset(bd->set[bd->new], 0, 875 827 sizeof(bd->set[0])); 876 828 bd->new = 1-bd->new; 877 - bd->swap_time = seconds_since_boot(); 829 + bd->swap_time = ktime_get_seconds(); 878 830 } 879 831 spin_unlock(&blocked_delegations_lock); 880 832 } ··· 904 856 __set_bit((hash>>8)&255, bd->set[bd->new]); 905 857 __set_bit((hash>>16)&255, bd->set[bd->new]); 906 858 if (bd->entries == 0) 907 - bd->swap_time = seconds_since_boot(); 859 + bd->swap_time = ktime_get_seconds(); 908 860 bd->entries += 1; 909 861 spin_unlock(&blocked_delegations_lock); 910 862 } ··· 963 915 return; 964 916 } 965 917 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 918 + nfs4_free_cpntf_statelist(clp->net, s); 966 919 spin_unlock(&clp->cl_lock); 967 920 s->sc_free(s); 968 921 if (fp) ··· 1911 1862 */ 1912 1863 if (clid->cl_boot == (u32)nn->boot_time) 1913 1864 return 0; 1914 - dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", 1865 + dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n", 1915 1866 clid->cl_boot, clid->cl_id, nn->boot_time); 1916 1867 return 1; 1917 1868 } ··· 2264 2215 * This is opaque to client, so no need to byte-swap. 
Use 2265 2216 * __force to keep sparse happy 2266 2217 */ 2267 - verf[0] = (__force __be32)get_seconds(); 2218 + verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2268 2219 verf[1] = (__force __be32)nn->clverifier_counter++; 2269 2220 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2270 2221 } 2271 2222 2272 2223 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2273 2224 { 2274 - clp->cl_clientid.cl_boot = nn->boot_time; 2225 + clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2275 2226 clp->cl_clientid.cl_id = nn->clientid_counter++; 2276 2227 gen_confirm(clp, nn); 2277 2228 } ··· 2341 2292 clp->cl_nii_domain.len); 2342 2293 seq_printf(m, "\nImplementation name: "); 2343 2294 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2344 - seq_printf(m, "\nImplementation time: [%ld, %ld]\n", 2295 + seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2345 2296 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2346 2297 } 2347 2298 drop_client(clp); ··· 2661 2612 gen_clid(clp, nn); 2662 2613 kref_init(&clp->cl_nfsdfs.cl_ref); 2663 2614 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 2664 - clp->cl_time = get_seconds(); 2615 + clp->cl_time = ktime_get_boottime_seconds(); 2665 2616 clear_bit(0, &clp->cl_cb_slot_busy); 2666 2617 copy_verf(clp, verf); 2667 2618 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); ··· 2995 2946 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 2996 2947 if (!clp->cl_nii_name.data) 2997 2948 return nfserr_jukebox; 2998 - clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec; 2999 - clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec; 2949 + clp->cl_nii_time = exid->nii_time; 3000 2950 return 0; 3001 2951 } 3002 2952 ··· 3421 3373 case NFS4_CDFC4_BACK_OR_BOTH: 3422 3374 *dir = NFS4_CDFC4_BOTH; 3423 3375 return nfs_ok; 3424 - }; 3376 + } 3425 3377 return nfserr_inval; 3426 3378 } 3427 3379 ··· 4331 4283 last = oo->oo_last_closed_stid; 4332 4284 
oo->oo_last_closed_stid = s; 4333 4285 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 4334 - oo->oo_time = get_seconds(); 4286 + oo->oo_time = ktime_get_boottime_seconds(); 4335 4287 spin_unlock(&nn->client_lock); 4336 4288 if (last) 4337 4289 nfs4_put_stid(&last->st_stid); ··· 4426 4378 */ 4427 4379 spin_lock(&state_lock); 4428 4380 if (dp->dl_time == 0) { 4429 - dp->dl_time = get_seconds(); 4381 + dp->dl_time = ktime_get_boottime_seconds(); 4430 4382 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 4431 4383 } 4432 4384 spin_unlock(&state_lock); ··· 4538 4490 4539 4491 static __be32 lookup_clientid(clientid_t *clid, 4540 4492 struct nfsd4_compound_state *cstate, 4541 - struct nfsd_net *nn) 4493 + struct nfsd_net *nn, 4494 + bool sessions) 4542 4495 { 4543 4496 struct nfs4_client *found; 4544 4497 ··· 4560 4511 */ 4561 4512 WARN_ON_ONCE(cstate->session); 4562 4513 spin_lock(&nn->client_lock); 4563 - found = find_confirmed_client(clid, false, nn); 4514 + found = find_confirmed_client(clid, sessions, nn); 4564 4515 if (!found) { 4565 4516 spin_unlock(&nn->client_lock); 4566 4517 return nfserr_expired; ··· 4593 4544 if (open->op_file == NULL) 4594 4545 return nfserr_jukebox; 4595 4546 4596 - status = lookup_clientid(clientid, cstate, nn); 4547 + status = lookup_clientid(clientid, cstate, nn, false); 4597 4548 if (status) 4598 4549 return status; 4599 4550 clp = cstate->clp; ··· 4721 4672 return 0; 4722 4673 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 4723 4674 return nfserr_inval; 4724 - return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 4675 + return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0); 4725 4676 } 4726 4677 4727 4678 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, ··· 5182 5133 5183 5134 dprintk("process_renew(%08x/%08x): starting\n", 5184 5135 clid->cl_boot, clid->cl_id); 5185 - status = lookup_clientid(clid, cstate, nn); 5136 + status = lookup_clientid(clid, cstate, nn, false); 5186 5137 if 
(status) 5187 5138 goto out; 5188 5139 clp = cstate->clp; ··· 5233 5184 */ 5234 5185 static bool clients_still_reclaiming(struct nfsd_net *nn) 5235 5186 { 5236 - unsigned long now = get_seconds(); 5237 - unsigned long double_grace_period_end = nn->boot_time + 5238 - 2 * nn->nfsd4_lease; 5187 + time64_t double_grace_period_end = nn->boot_time + 5188 + 2 * nn->nfsd4_lease; 5239 5189 5240 5190 if (nn->track_reclaim_completes && 5241 5191 atomic_read(&nn->nr_reclaim_complete) == ··· 5247 5199 * If we've given them *two* lease times to reclaim, and they're 5248 5200 * still not done, give up: 5249 5201 */ 5250 - if (time_after(now, double_grace_period_end)) 5202 + if (ktime_get_boottime_seconds() > double_grace_period_end) 5251 5203 return false; 5252 5204 return true; 5253 5205 } 5254 5206 5255 - static time_t 5207 + static time64_t 5256 5208 nfs4_laundromat(struct nfsd_net *nn) 5257 5209 { 5258 5210 struct nfs4_client *clp; ··· 5261 5213 struct nfs4_ol_stateid *stp; 5262 5214 struct nfsd4_blocked_lock *nbl; 5263 5215 struct list_head *pos, *next, reaplist; 5264 - time_t cutoff = get_seconds() - nn->nfsd4_lease; 5265 - time_t t, new_timeo = nn->nfsd4_lease; 5216 + time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease; 5217 + time64_t t, new_timeo = nn->nfsd4_lease; 5218 + struct nfs4_cpntf_state *cps; 5219 + copy_stateid_t *cps_t; 5220 + int i; 5266 5221 5267 5222 dprintk("NFSD: laundromat service - starting\n"); 5268 5223 ··· 5276 5225 dprintk("NFSD: end of grace period\n"); 5277 5226 nfsd4_end_grace(nn); 5278 5227 INIT_LIST_HEAD(&reaplist); 5228 + 5229 + spin_lock(&nn->s2s_cp_lock); 5230 + idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 5231 + cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 5232 + if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID && 5233 + cps->cpntf_time > cutoff) 5234 + _free_cpntf_state_locked(nn, cps); 5235 + } 5236 + spin_unlock(&nn->s2s_cp_lock); 5237 + 5279 5238 spin_lock(&nn->client_lock); 5280 5239 
list_for_each_safe(pos, next, &nn->client_lru) { 5281 5240 clp = list_entry(pos, struct nfs4_client, cl_lru); 5282 - if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 5241 + if (clp->cl_time > cutoff) { 5283 5242 t = clp->cl_time - cutoff; 5284 5243 new_timeo = min(new_timeo, t); 5285 5244 break; ··· 5312 5251 spin_lock(&state_lock); 5313 5252 list_for_each_safe(pos, next, &nn->del_recall_lru) { 5314 5253 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5315 - if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 5254 + if (dp->dl_time > cutoff) { 5316 5255 t = dp->dl_time - cutoff; 5317 5256 new_timeo = min(new_timeo, t); 5318 5257 break; ··· 5332 5271 while (!list_empty(&nn->close_lru)) { 5333 5272 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 5334 5273 oo_close_lru); 5335 - if (time_after((unsigned long)oo->oo_time, 5336 - (unsigned long)cutoff)) { 5274 + if (oo->oo_time > cutoff) { 5337 5275 t = oo->oo_time - cutoff; 5338 5276 new_timeo = min(new_timeo, t); 5339 5277 break; ··· 5362 5302 while (!list_empty(&nn->blocked_locks_lru)) { 5363 5303 nbl = list_first_entry(&nn->blocked_locks_lru, 5364 5304 struct nfsd4_blocked_lock, nbl_lru); 5365 - if (time_after((unsigned long)nbl->nbl_time, 5366 - (unsigned long)cutoff)) { 5305 + if (nbl->nbl_time > cutoff) { 5367 5306 t = nbl->nbl_time - cutoff; 5368 5307 new_timeo = min(new_timeo, t); 5369 5308 break; ··· 5379 5320 free_blocked_lock(nbl); 5380 5321 } 5381 5322 out: 5382 - new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 5323 + new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 5383 5324 return new_timeo; 5384 5325 } 5385 5326 ··· 5389 5330 static void 5390 5331 laundromat_main(struct work_struct *laundry) 5391 5332 { 5392 - time_t t; 5333 + time64_t t; 5393 5334 struct delayed_work *dwork = to_delayed_work(laundry); 5394 5335 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 5395 5336 laundromat_work); 5396 
5337 5397 5338 t = nfs4_laundromat(nn); 5398 - dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 5339 + dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t); 5399 5340 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 5400 5341 } 5401 5342 ··· 5580 5521 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 5581 5522 CLOSE_STATEID(stateid)) 5582 5523 return nfserr_bad_stateid; 5583 - status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); 5524 + status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn, 5525 + false); 5584 5526 if (status == nfserr_stale_clientid) { 5585 5527 if (cstate->session) 5586 5528 return nfserr_bad_stateid; ··· 5660 5600 out: 5661 5601 return status; 5662 5602 } 5603 + static void 5604 + _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 5605 + { 5606 + WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID); 5607 + if (!refcount_dec_and_test(&cps->cp_stateid.sc_count)) 5608 + return; 5609 + list_del(&cps->cp_list); 5610 + idr_remove(&nn->s2s_cp_stateids, 5611 + cps->cp_stateid.stid.si_opaque.so_id); 5612 + kfree(cps); 5613 + } 5614 + /* 5615 + * A READ from an inter server to server COPY will have a 5616 + * copy stateid. Look up the copy notify stateid from the 5617 + * idr structure and take a reference on it. 
5618 + */ 5619 + __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 5620 + struct nfs4_client *clp, 5621 + struct nfs4_cpntf_state **cps) 5622 + { 5623 + copy_stateid_t *cps_t; 5624 + struct nfs4_cpntf_state *state = NULL; 5625 + 5626 + if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 5627 + return nfserr_bad_stateid; 5628 + spin_lock(&nn->s2s_cp_lock); 5629 + cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 5630 + if (cps_t) { 5631 + state = container_of(cps_t, struct nfs4_cpntf_state, 5632 + cp_stateid); 5633 + if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) { 5634 + state = NULL; 5635 + goto unlock; 5636 + } 5637 + if (!clp) 5638 + refcount_inc(&state->cp_stateid.sc_count); 5639 + else 5640 + _free_cpntf_state_locked(nn, state); 5641 + } 5642 + unlock: 5643 + spin_unlock(&nn->s2s_cp_lock); 5644 + if (!state) 5645 + return nfserr_bad_stateid; 5646 + if (!clp && state) 5647 + *cps = state; 5648 + return 0; 5649 + } 5650 + 5651 + static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 5652 + struct nfs4_stid **stid) 5653 + { 5654 + __be32 status; 5655 + struct nfs4_cpntf_state *cps = NULL; 5656 + struct nfsd4_compound_state cstate; 5657 + 5658 + status = manage_cpntf_state(nn, st, NULL, &cps); 5659 + if (status) 5660 + return status; 5661 + 5662 + cps->cpntf_time = ktime_get_boottime_seconds(); 5663 + memset(&cstate, 0, sizeof(cstate)); 5664 + status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true); 5665 + if (status) 5666 + goto out; 5667 + status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid, 5668 + NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 5669 + stid, nn); 5670 + put_client_renew(cstate.clp); 5671 + out: 5672 + nfs4_put_cpntf_state(nn, cps); 5673 + return status; 5674 + } 5675 + 5676 + void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 5677 + { 5678 + spin_lock(&nn->s2s_cp_lock); 5679 + _free_cpntf_state_locked(nn, cps); 5680 + spin_unlock(&nn->s2s_cp_lock); 5681 + } 5663 5682 
5664 5683 /* 5665 5684 * Checks for stateid operations ··· 5746 5607 __be32 5747 5608 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 5748 5609 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 5749 - stateid_t *stateid, int flags, struct nfsd_file **nfp) 5610 + stateid_t *stateid, int flags, struct nfsd_file **nfp, 5611 + struct nfs4_stid **cstid) 5750 5612 { 5751 5613 struct inode *ino = d_inode(fhp->fh_dentry); 5752 5614 struct net *net = SVC_NET(rqstp); ··· 5769 5629 status = nfsd4_lookup_stateid(cstate, stateid, 5770 5630 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 5771 5631 &s, nn); 5632 + if (status == nfserr_bad_stateid) 5633 + status = find_cpntf_state(nn, stateid, &s); 5772 5634 if (status) 5773 5635 return status; 5774 5636 status = nfsd4_stid_check_stateid_generation(stateid, s, ··· 5798 5656 if (status == nfs_ok && nfp) 5799 5657 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 5800 5658 out: 5801 - if (s) 5802 - nfs4_put_stid(s); 5659 + if (s) { 5660 + if (!status && cstid) 5661 + *cstid = s; 5662 + else 5663 + nfs4_put_stid(s); 5664 + } 5803 5665 return status; 5804 5666 } 5805 5667 ··· 6696 6550 } 6697 6551 6698 6552 if (fl_flags & FL_SLEEP) { 6699 - nbl->nbl_time = jiffies; 6553 + nbl->nbl_time = ktime_get_boottime_seconds(); 6700 6554 spin_lock(&nn->blocked_locks_lock); 6701 6555 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 6702 6556 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); ··· 6803 6657 return nfserr_inval; 6804 6658 6805 6659 if (!nfsd4_has_session(cstate)) { 6806 - status = lookup_clientid(&lockt->lt_clientid, cstate, nn); 6660 + status = lookup_clientid(&lockt->lt_clientid, cstate, nn, 6661 + false); 6807 6662 if (status) 6808 6663 goto out; 6809 6664 } ··· 6988 6841 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 6989 6842 clid->cl_boot, clid->cl_id); 6990 6843 6991 - status = lookup_clientid(clid, cstate, nn); 6844 + status = lookup_clientid(clid, cstate, nn, false); 6992 6845 if (status) 
6993 6846 return status; 6994 6847 ··· 7135 6988 __be32 status; 7136 6989 7137 6990 /* find clientid in conf_id_hashtbl */ 7138 - status = lookup_clientid(clid, cstate, nn); 6991 + status = lookup_clientid(clid, cstate, nn, false); 7139 6992 if (status) 7140 6993 return nfserr_reclaim_bad; 7141 6994 ··· 7788 7641 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 7789 7642 nn->conf_name_tree = RB_ROOT; 7790 7643 nn->unconf_name_tree = RB_ROOT; 7791 - nn->boot_time = get_seconds(); 7644 + nn->boot_time = ktime_get_real_seconds(); 7792 7645 nn->grace_ended = false; 7793 7646 nn->nfsd4_manager.block_opens = true; 7794 7647 INIT_LIST_HEAD(&nn->nfsd4_manager.list); ··· 7857 7710 nfsd4_client_tracking_init(net); 7858 7711 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) 7859 7712 goto skip_grace; 7860 - printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n", 7713 + printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", 7861 7714 nn->nfsd4_grace, net->ns.inum); 7862 7715 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 7863 7716 return 0; ··· 7933 7786 static void 7934 7787 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7935 7788 { 7936 - if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) 7789 + if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && 7790 + CURRENT_STATEID(stateid)) 7937 7791 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 7938 7792 } 7939 7793 ··· 7943 7795 { 7944 7796 if (cstate->minorversion) { 7945 7797 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 7946 - SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 7798 + SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7947 7799 } 7948 7800 } 7949 7801 7950 7802 void 7951 7803 clear_current_stateid(struct nfsd4_compound_state *cstate) 7952 7804 { 7953 - CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 7805 + CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7954 7806 } 7955 
7807 7956 7808 /*
+154 -7
fs/nfsd/nfs4xdr.c
··· 40 40 #include <linux/utsname.h> 41 41 #include <linux/pagemap.h> 42 42 #include <linux/sunrpc/svcauth_gss.h> 43 + #include <linux/sunrpc/addr.h> 43 44 44 45 #include "idmap.h" 45 46 #include "acl.h" ··· 1745 1744 DECODE_TAIL; 1746 1745 } 1747 1746 1747 + static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp, 1748 + struct nl4_server *ns) 1749 + { 1750 + DECODE_HEAD; 1751 + struct nfs42_netaddr *naddr; 1752 + 1753 + READ_BUF(4); 1754 + ns->nl4_type = be32_to_cpup(p++); 1755 + 1756 + /* currently support for 1 inter-server source server */ 1757 + switch (ns->nl4_type) { 1758 + case NL4_NETADDR: 1759 + naddr = &ns->u.nl4_addr; 1760 + 1761 + READ_BUF(4); 1762 + naddr->netid_len = be32_to_cpup(p++); 1763 + if (naddr->netid_len > RPCBIND_MAXNETIDLEN) 1764 + goto xdr_error; 1765 + 1766 + READ_BUF(naddr->netid_len + 4); /* 4 for uaddr len */ 1767 + COPYMEM(naddr->netid, naddr->netid_len); 1768 + 1769 + naddr->addr_len = be32_to_cpup(p++); 1770 + if (naddr->addr_len > RPCBIND_MAXUADDRLEN) 1771 + goto xdr_error; 1772 + 1773 + READ_BUF(naddr->addr_len); 1774 + COPYMEM(naddr->addr, naddr->addr_len); 1775 + break; 1776 + default: 1777 + goto xdr_error; 1778 + } 1779 + DECODE_TAIL; 1780 + } 1781 + 1748 1782 static __be32 1749 1783 nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy) 1750 1784 { 1751 1785 DECODE_HEAD; 1786 + struct nl4_server *ns_dummy; 1787 + int i, count; 1752 1788 1753 1789 status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid); 1754 1790 if (status) ··· 1800 1762 p = xdr_decode_hyper(p, &copy->cp_count); 1801 1763 p++; /* ca_consecutive: we always do consecutive copies */ 1802 1764 copy->cp_synchronous = be32_to_cpup(p++); 1803 - /* tmp = be32_to_cpup(p); Source server list not supported */ 1765 + 1766 + count = be32_to_cpup(p++); 1767 + 1768 + copy->cp_intra = false; 1769 + if (count == 0) { /* intra-server copy */ 1770 + copy->cp_intra = true; 1771 + goto intra; 1772 + } 1773 + 1774 + /* decode all the 
supplied server addresses but use first */ 1775 + status = nfsd4_decode_nl4_server(argp, &copy->cp_src); 1776 + if (status) 1777 + return status; 1778 + 1779 + ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL); 1780 + if (ns_dummy == NULL) 1781 + return nfserrno(-ENOMEM); 1782 + for (i = 0; i < count - 1; i++) { 1783 + status = nfsd4_decode_nl4_server(argp, ns_dummy); 1784 + if (status) { 1785 + kfree(ns_dummy); 1786 + return status; 1787 + } 1788 + } 1789 + kfree(ns_dummy); 1790 + intra: 1804 1791 1805 1792 DECODE_TAIL; 1806 1793 } ··· 1835 1772 struct nfsd4_offload_status *os) 1836 1773 { 1837 1774 return nfsd4_decode_stateid(argp, &os->stateid); 1775 + } 1776 + 1777 + static __be32 1778 + nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp, 1779 + struct nfsd4_copy_notify *cn) 1780 + { 1781 + int status; 1782 + 1783 + status = nfsd4_decode_stateid(argp, &cn->cpn_src_stateid); 1784 + if (status) 1785 + return status; 1786 + return nfsd4_decode_nl4_server(argp, &cn->cpn_dst); 1838 1787 } 1839 1788 1840 1789 static __be32 ··· 1950 1875 /* new operations for NFSv4.2 */ 1951 1876 [OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate, 1952 1877 [OP_COPY] = (nfsd4_dec)nfsd4_decode_copy, 1953 - [OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp, 1878 + [OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_copy_notify, 1954 1879 [OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate, 1955 1880 [OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp, 1956 1881 [OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp, ··· 2099 2024 */ 2100 2025 static __be32 *encode_time_delta(__be32 *p, struct inode *inode) 2101 2026 { 2102 - struct timespec ts; 2027 + struct timespec64 ts; 2103 2028 u32 ns; 2104 2029 2105 2030 ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran); 2106 - ts = ns_to_timespec(ns); 2031 + ts = ns_to_timespec64(ns); 2107 2032 2108 2033 p = xdr_encode_hyper(p, ts.tv_sec); 2109 2034 *p++ = cpu_to_be32(ts.tv_nsec); ··· 4319 4244 } 4320 4245 4321 4246 static __be32 4247 + 
nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns) 4248 + { 4249 + struct xdr_stream *xdr = &resp->xdr; 4250 + struct nfs42_netaddr *addr; 4251 + __be32 *p; 4252 + 4253 + p = xdr_reserve_space(xdr, 4); 4254 + *p++ = cpu_to_be32(ns->nl4_type); 4255 + 4256 + switch (ns->nl4_type) { 4257 + case NL4_NETADDR: 4258 + addr = &ns->u.nl4_addr; 4259 + 4260 + /* netid_len, netid, uaddr_len, uaddr (port included 4261 + * in RPCBIND_MAXUADDRLEN) 4262 + */ 4263 + p = xdr_reserve_space(xdr, 4264 + 4 /* netid len */ + 4265 + (XDR_QUADLEN(addr->netid_len) * 4) + 4266 + 4 /* uaddr len */ + 4267 + (XDR_QUADLEN(addr->addr_len) * 4)); 4268 + if (!p) 4269 + return nfserr_resource; 4270 + 4271 + *p++ = cpu_to_be32(addr->netid_len); 4272 + p = xdr_encode_opaque_fixed(p, addr->netid, 4273 + addr->netid_len); 4274 + *p++ = cpu_to_be32(addr->addr_len); 4275 + p = xdr_encode_opaque_fixed(p, addr->addr, 4276 + addr->addr_len); 4277 + break; 4278 + default: 4279 + WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR); 4280 + return nfserr_inval; 4281 + } 4282 + 4283 + return 0; 4284 + } 4285 + 4286 + static __be32 4322 4287 nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr, 4323 4288 struct nfsd4_copy *copy) 4324 4289 { ··· 4389 4274 *p++ = cpu_to_be32(0); 4390 4275 4391 4276 return nfserr; 4277 + } 4278 + 4279 + static __be32 4280 + nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr, 4281 + struct nfsd4_copy_notify *cn) 4282 + { 4283 + struct xdr_stream *xdr = &resp->xdr; 4284 + __be32 *p; 4285 + 4286 + if (nfserr) 4287 + return nfserr; 4288 + 4289 + /* 8 sec, 4 nsec */ 4290 + p = xdr_reserve_space(xdr, 12); 4291 + if (!p) 4292 + return nfserr_resource; 4293 + 4294 + /* cnr_lease_time */ 4295 + p = xdr_encode_hyper(p, cn->cpn_sec); 4296 + *p++ = cpu_to_be32(cn->cpn_nsec); 4297 + 4298 + /* cnr_stateid */ 4299 + nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid); 4300 + if (nfserr) 4301 + return nfserr; 4302 + 4303 + /* cnr_src.nl_nsvr */ 
4304 + p = xdr_reserve_space(xdr, 4); 4305 + if (!p) 4306 + return nfserr_resource; 4307 + 4308 + *p++ = cpu_to_be32(1); 4309 + 4310 + return nfsd42_encode_nl4_server(resp, &cn->cpn_src); 4392 4311 } 4393 4312 4394 4313 static __be32 ··· 4522 4373 /* NFSv4.2 operations */ 4523 4374 [OP_ALLOCATE] = (nfsd4_enc)nfsd4_encode_noop, 4524 4375 [OP_COPY] = (nfsd4_enc)nfsd4_encode_copy, 4525 - [OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_noop, 4376 + [OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_copy_notify, 4526 4377 [OP_DEALLOCATE] = (nfsd4_enc)nfsd4_encode_noop, 4527 4378 [OP_IO_ADVISE] = (nfsd4_enc)nfsd4_encode_noop, 4528 4379 [OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop, ··· 4648 4499 { 4649 4500 __be32 *p; 4650 4501 struct nfs4_replay *rp = op->replay; 4651 - 4652 - BUG_ON(!rp); 4653 4502 4654 4503 p = xdr_reserve_space(xdr, 8 + rp->rp_buflen); 4655 4504 if (!p) {
+3 -3
fs/nfsd/nfsctl.c
··· 956 956 957 957 #ifdef CONFIG_NFSD_V4 958 958 static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, 959 - time_t *time, struct nfsd_net *nn) 959 + time64_t *time, struct nfsd_net *nn) 960 960 { 961 961 char *mesg = buf; 962 962 int rv, i; ··· 984 984 *time = i; 985 985 } 986 986 987 - return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time); 987 + return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%lld\n", *time); 988 988 } 989 989 990 990 static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, 991 - time_t *time, struct nfsd_net *nn) 991 + time64_t *time, struct nfsd_net *nn) 992 992 { 993 993 ssize_t rv; 994 994
+32 -2
fs/nfsd/nfsd.h
··· 19 19 #include <linux/sunrpc/svc.h> 20 20 #include <linux/sunrpc/svc_xprt.h> 21 21 #include <linux/sunrpc/msg_prot.h> 22 + #include <linux/sunrpc/addr.h> 22 23 23 24 #include <uapi/linux/nfsd/debug.h> 24 25 ··· 143 142 int nfs4_state_start_net(struct net *net); 144 143 void nfs4_state_shutdown(void); 145 144 void nfs4_state_shutdown_net(struct net *net); 146 - void nfs4_reset_lease(time_t leasetime); 147 145 int nfs4_reset_recoverydir(char *recdir); 148 146 char * nfs4_recoverydir(void); 149 147 bool nfsd4_spo_must_allow(struct svc_rqst *rqstp); ··· 153 153 static inline int nfs4_state_start_net(struct net *net) { return 0; } 154 154 static inline void nfs4_state_shutdown(void) { } 155 155 static inline void nfs4_state_shutdown_net(struct net *net) { } 156 - static inline void nfs4_reset_lease(time_t leasetime) { } 157 156 static inline int nfs4_reset_recoverydir(char *recdir) { return 0; } 158 157 static inline char * nfs4_recoverydir(void) {return NULL; } 159 158 static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp) ··· 385 386 NFSD4_2_SECURITY_ATTRS) 386 387 387 388 extern const u32 nfsd_suppattrs[3][3]; 389 + 390 + static inline __be32 nfsd4_set_netaddr(struct sockaddr *addr, 391 + struct nfs42_netaddr *netaddr) 392 + { 393 + struct sockaddr_in *sin = (struct sockaddr_in *)addr; 394 + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 395 + unsigned int port; 396 + size_t ret_addr, ret_port; 397 + 398 + switch (addr->sa_family) { 399 + case AF_INET: 400 + port = ntohs(sin->sin_port); 401 + sprintf(netaddr->netid, "tcp"); 402 + netaddr->netid_len = 3; 403 + break; 404 + case AF_INET6: 405 + port = ntohs(sin6->sin6_port); 406 + sprintf(netaddr->netid, "tcp6"); 407 + netaddr->netid_len = 4; 408 + break; 409 + default: 410 + return nfserr_inval; 411 + } 412 + ret_addr = rpc_ntop(addr, netaddr->addr, sizeof(netaddr->addr)); 413 + ret_port = snprintf(netaddr->addr + ret_addr, 414 + RPCBIND_MAXUADDRLEN + 1 - ret_addr, 415 + ".%u.%u", port >> 8, 
port & 0xff); 416 + WARN_ON(ret_port >= RPCBIND_MAXUADDRLEN + 1 - ret_addr); 417 + netaddr->addr_len = ret_addr + ret_port; 418 + return 0; 419 + } 388 420 389 421 static inline bool bmval_is_subset(const u32 *bm1, const u32 *bm2) 390 422 {
+6 -3
fs/nfsd/nfsfh.h
··· 35 35 36 36 bool fh_locked; /* inode locked by us */ 37 37 bool fh_want_write; /* remount protection taken */ 38 - 38 + int fh_flags; /* FH flags */ 39 39 #ifdef CONFIG_NFSD_V3 40 40 bool fh_post_saved; /* post-op attrs saved */ 41 41 bool fh_pre_saved; /* pre-op attrs saved */ 42 42 43 43 /* Pre-op attributes saved during fh_lock */ 44 44 __u64 fh_pre_size; /* size before operation */ 45 - struct timespec fh_pre_mtime; /* mtime before oper */ 46 - struct timespec fh_pre_ctime; /* ctime before oper */ 45 + struct timespec64 fh_pre_mtime; /* mtime before oper */ 46 + struct timespec64 fh_pre_ctime; /* ctime before oper */ 47 47 /* 48 48 * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode) 49 49 * to find out if it is valid. ··· 56 56 #endif /* CONFIG_NFSD_V3 */ 57 57 58 58 } svc_fh; 59 + #define NFSD4_FH_FOREIGN (1<<0) 60 + #define SET_FH_FLAG(c, f) ((c)->fh_flags |= (f)) 61 + #define HAS_FH_FLAG(c, f) ((c)->fh_flags & (f)) 59 62 60 63 enum nfsd_fsid { 61 64 FSID_DEV = 0,
+4 -4
fs/nfsd/nfsproc.c
··· 94 94 * Solaris, at least, doesn't seem to care what the time 95 95 * request is. We require it be within 30 minutes of now. 96 96 */ 97 - time_t delta = iap->ia_atime.tv_sec - get_seconds(); 97 + time64_t delta = iap->ia_atime.tv_sec - ktime_get_real_seconds(); 98 98 99 99 nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP); 100 100 if (nfserr) ··· 113 113 } 114 114 } 115 115 116 - nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time_t)0); 116 + nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time64_t)0); 117 117 done: 118 118 return nfsd_return_attrs(nfserr, resp); 119 119 } ··· 226 226 return nfserr_io; 227 227 nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), 228 228 argp->offset, rqstp->rq_vec, nvecs, 229 - &cnt, NFS_DATA_SYNC); 229 + &cnt, NFS_DATA_SYNC, NULL); 230 230 return nfsd_return_attrs(nfserr, resp); 231 231 } 232 232 ··· 380 380 */ 381 381 attr->ia_valid &= ATTR_SIZE; 382 382 if (attr->ia_valid) 383 - nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time_t)0); 383 + nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time64_t)0); 384 384 } 385 385 386 386 out_unlock:
+16 -5
fs/nfsd/nfssvc.c
··· 31 31 32 32 #define NFSDDBG_FACILITY NFSDDBG_SVC 33 33 34 + bool inter_copy_offload_enable; 35 + EXPORT_SYMBOL_GPL(inter_copy_offload_enable); 36 + module_param(inter_copy_offload_enable, bool, 0644); 37 + MODULE_PARM_DESC(inter_copy_offload_enable, 38 + "Enable inter server to server copy offload. Default: false"); 39 + 34 40 extern struct svc_program nfsd_program; 35 41 static int nfsd(void *vrqstp); 36 42 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) ··· 397 391 ret = lockd_up(net, cred); 398 392 if (ret) 399 393 goto out_socks; 400 - nn->lockd_up = 1; 394 + nn->lockd_up = true; 401 395 } 402 396 403 - ret = nfs4_state_start_net(net); 397 + ret = nfsd_file_cache_start_net(net); 404 398 if (ret) 405 399 goto out_lockd; 400 + ret = nfs4_state_start_net(net); 401 + if (ret) 402 + goto out_filecache; 406 403 407 404 nn->nfsd_net_up = true; 408 405 return 0; 409 406 407 + out_filecache: 408 + nfsd_file_cache_shutdown_net(net); 410 409 out_lockd: 411 410 if (nn->lockd_up) { 412 411 lockd_down(net); 413 - nn->lockd_up = 0; 412 + nn->lockd_up = false; 414 413 } 415 414 out_socks: 416 415 nfsd_shutdown_generic(); ··· 426 415 { 427 416 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 428 417 429 - nfsd_file_cache_purge(net); 418 + nfsd_file_cache_shutdown_net(net); 430 419 nfs4_state_shutdown_net(net); 431 420 if (nn->lockd_up) { 432 421 lockd_down(net); 433 - nn->lockd_up = 0; 422 + nn->lockd_up = false; 434 423 } 435 424 nn->nfsd_net_up = false; 436 425 nfsd_shutdown_generic();
+36 -8
fs/nfsd/state.h
··· 56 56 stateid_opaque_t si_opaque; 57 57 } stateid_t; 58 58 59 + typedef struct { 60 + stateid_t stid; 61 + #define NFS4_COPY_STID 1 62 + #define NFS4_COPYNOTIFY_STID 2 63 + unsigned char sc_type; 64 + refcount_t sc_count; 65 + } copy_stateid_t; 66 + 59 67 #define STATEID_FMT "(%08x/%08x/%08x/%08x)" 60 68 #define STATEID_VAL(s) \ 61 69 (s)->si_opaque.so_clid.cl_boot, \ ··· 104 96 #define NFS4_REVOKED_DELEG_STID 16 105 97 #define NFS4_CLOSED_DELEG_STID 32 106 98 #define NFS4_LAYOUT_STID 64 99 + struct list_head sc_cp_list; 107 100 unsigned char sc_type; 108 101 stateid_t sc_stateid; 109 102 spinlock_t sc_lock; 110 103 struct nfs4_client *sc_client; 111 104 struct nfs4_file *sc_file; 112 105 void (*sc_free)(struct nfs4_stid *); 106 + }; 107 + 108 + /* Keep a list of stateids issued by the COPY_NOTIFY, associate it with the 109 + * parent OPEN/LOCK/DELEG stateid. 110 + */ 111 + struct nfs4_cpntf_state { 112 + copy_stateid_t cp_stateid; 113 + struct list_head cp_list; /* per parent nfs4_stid */ 114 + stateid_t cp_p_stateid; /* copy of parent's stateid */ 115 + clientid_t cp_p_clid; /* copy of parent's clid */ 116 + time64_t cpntf_time; /* last time stateid used */ 113 117 }; 114 118 115 119 /* ··· 152 132 struct list_head dl_recall_lru; /* delegation recalled */ 153 133 struct nfs4_clnt_odstate *dl_clnt_odstate; 154 134 u32 dl_type; 155 - time_t dl_time; 135 + time64_t dl_time; 156 136 /* For recall: */ 157 137 int dl_retries; 158 138 struct nfsd4_callback dl_recall; ··· 330 310 #endif 331 311 struct xdr_netobj cl_name; /* id generated by client */ 332 312 nfs4_verifier cl_verifier; /* generated by client */ 333 - time_t cl_time; /* time of last lease renewal */ 313 + time64_t cl_time; /* time of last lease renewal */ 334 314 struct sockaddr_storage cl_addr; /* client ipaddress */ 335 315 bool cl_mach_cred; /* SP4_MACH_CRED in force */ 336 316 struct svc_cred cl_cred; /* setclientid principal */ ··· 340 320 /* NFSv4.1 client implementation id: */ 341 321 struct 
xdr_netobj cl_nii_domain; 342 322 struct xdr_netobj cl_nii_name; 343 - struct timespec cl_nii_time; 323 + struct timespec64 cl_nii_time; 344 324 345 325 /* for v4.0 and v4.1 callbacks: */ 346 326 struct nfs4_cb_conn cl_cb_conn; ··· 469 449 */ 470 450 struct list_head oo_close_lru; 471 451 struct nfs4_ol_stateid *oo_last_closed_stid; 472 - time_t oo_time; /* time of placement on so_close_lru */ 452 + time64_t oo_time; /* time of placement on so_close_lru */ 473 453 #define NFS4_OO_CONFIRMED 1 474 454 unsigned char oo_flags; 475 455 }; ··· 626 606 struct nfsd4_blocked_lock { 627 607 struct list_head nbl_list; 628 608 struct list_head nbl_lru; 629 - unsigned long nbl_time; 609 + time64_t nbl_time; 630 610 struct file_lock nbl_lock; 631 611 struct knfsd_fh nbl_fh; 632 612 struct nfsd4_callback nbl_cb; ··· 638 618 639 619 extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 640 620 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 641 - stateid_t *stateid, int flags, struct nfsd_file **filp); 621 + stateid_t *stateid, int flags, struct nfsd_file **filp, 622 + struct nfs4_stid **cstid); 642 623 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 643 624 stateid_t *stateid, unsigned char typemask, 644 625 struct nfs4_stid **s, struct nfsd_net *nn); 645 626 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, 646 627 void (*sc_free)(struct nfs4_stid *)); 647 - int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy); 648 - void nfs4_free_cp_state(struct nfsd4_copy *copy); 628 + int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy); 629 + void nfs4_free_copy_state(struct nfsd4_copy *copy); 630 + struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn, 631 + struct nfs4_stid *p_stid); 649 632 void nfs4_unhash_stid(struct nfs4_stid *s); 650 633 void nfs4_put_stid(struct nfs4_stid *s); 651 634 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); ··· 
678 655 extern void nfs4_put_copy(struct nfsd4_copy *copy); 679 656 extern struct nfsd4_copy * 680 657 find_async_copy(struct nfs4_client *clp, stateid_t *staetid); 658 + extern void nfs4_put_cpntf_state(struct nfsd_net *nn, 659 + struct nfs4_cpntf_state *cps); 660 + extern __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 661 + struct nfs4_client *clp, 662 + struct nfs4_cpntf_state **cps); 681 663 static inline void get_nfs4_file(struct nfs4_file *fi) 682 664 { 683 665 refcount_inc(&fi->fi_ref);
+14 -8
fs/nfsd/trace.h
··· 166 166 DEFINE_STATEID_EVENT(layout_recall_fail); 167 167 DEFINE_STATEID_EVENT(layout_recall_release); 168 168 169 + TRACE_DEFINE_ENUM(NFSD_FILE_HASHED); 170 + TRACE_DEFINE_ENUM(NFSD_FILE_PENDING); 171 + TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ); 172 + TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE); 173 + TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED); 174 + 169 175 #define show_nf_flags(val) \ 170 176 __print_flags(val, "|", \ 171 177 { 1 << NFSD_FILE_HASHED, "HASHED" }, \ ··· 201 195 TP_fast_assign( 202 196 __entry->nf_hashval = nf->nf_hashval; 203 197 __entry->nf_inode = nf->nf_inode; 204 - __entry->nf_ref = atomic_read(&nf->nf_ref); 198 + __entry->nf_ref = refcount_read(&nf->nf_ref); 205 199 __entry->nf_flags = nf->nf_flags; 206 200 __entry->nf_may = nf->nf_may; 207 201 __entry->nf_file = nf->nf_file; ··· 234 228 TP_ARGS(rqstp, hash, inode, may_flags, nf, status), 235 229 236 230 TP_STRUCT__entry( 237 - __field(__be32, xid) 231 + __field(u32, xid) 238 232 __field(unsigned int, hash) 239 233 __field(void *, inode) 240 234 __field(unsigned int, may_flags) ··· 242 236 __field(unsigned long, nf_flags) 243 237 __field(unsigned char, nf_may) 244 238 __field(struct file *, nf_file) 245 - __field(__be32, status) 239 + __field(u32, status) 246 240 ), 247 241 248 242 TP_fast_assign( 249 - __entry->xid = rqstp->rq_xid; 243 + __entry->xid = be32_to_cpu(rqstp->rq_xid); 250 244 __entry->hash = hash; 251 245 __entry->inode = inode; 252 246 __entry->may_flags = may_flags; 253 - __entry->nf_ref = nf ? atomic_read(&nf->nf_ref) : 0; 247 + __entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0; 254 248 __entry->nf_flags = nf ? nf->nf_flags : 0; 255 249 __entry->nf_may = nf ? nf->nf_may : 0; 256 250 __entry->nf_file = nf ? 
nf->nf_file : NULL; 257 - __entry->status = status; 251 + __entry->status = be32_to_cpu(status); 258 252 ), 259 253 260 254 TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u", 261 - be32_to_cpu(__entry->xid), __entry->hash, __entry->inode, 255 + __entry->xid, __entry->hash, __entry->inode, 262 256 show_nf_may(__entry->may_flags), __entry->nf_ref, 263 257 show_nf_flags(__entry->nf_flags), 264 258 show_nf_may(__entry->nf_may), __entry->nf_file, 265 - be32_to_cpu(__entry->status)) 259 + __entry->status) 266 260 ); 267 261 268 262 DECLARE_EVENT_CLASS(nfsd_file_search_class,
+80 -29
fs/nfsd/vfs.c
··· 280 280 * Commit metadata changes to stable storage. 281 281 */ 282 282 static int 283 - commit_metadata(struct svc_fh *fhp) 283 + commit_inode_metadata(struct inode *inode) 284 284 { 285 - struct inode *inode = d_inode(fhp->fh_dentry); 286 285 const struct export_operations *export_ops = inode->i_sb->s_export_op; 287 - 288 - if (!EX_ISSYNC(fhp->fh_export)) 289 - return 0; 290 286 291 287 if (export_ops->commit_metadata) 292 288 return export_ops->commit_metadata(inode); 293 289 return sync_inode_metadata(inode, 1); 290 + } 291 + 292 + static int 293 + commit_metadata(struct svc_fh *fhp) 294 + { 295 + struct inode *inode = d_inode(fhp->fh_dentry); 296 + 297 + if (!EX_ISSYNC(fhp->fh_export)) 298 + return 0; 299 + return commit_inode_metadata(inode); 294 300 } 295 301 296 302 /* ··· 364 358 */ 365 359 __be32 366 360 nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, 367 - int check_guard, time_t guardtime) 361 + int check_guard, time64_t guardtime) 368 362 { 369 363 struct dentry *dentry; 370 364 struct inode *inode; ··· 530 524 } 531 525 #endif 532 526 533 - __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, 534 - u64 dst_pos, u64 count, bool sync) 527 + __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, 528 + struct nfsd_file *nf_dst, u64 dst_pos, u64 count, bool sync) 535 529 { 530 + struct file *src = nf_src->nf_file; 531 + struct file *dst = nf_dst->nf_file; 536 532 loff_t cloned; 533 + __be32 ret = 0; 537 534 535 + down_write(&nf_dst->nf_rwsem); 538 536 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); 539 - if (cloned < 0) 540 - return nfserrno(cloned); 541 - if (count && cloned != count) 542 - return nfserrno(-EINVAL); 537 + if (cloned < 0) { 538 + ret = nfserrno(cloned); 539 + goto out_err; 540 + } 541 + if (count && cloned != count) { 542 + ret = nfserrno(-EINVAL); 543 + goto out_err; 544 + } 543 545 if (sync) { 544 546 loff_t dst_end = count ? 
dst_pos + count - 1 : LLONG_MAX; 545 547 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0); 546 - if (status < 0) 547 - return nfserrno(status); 548 + 549 + if (!status) 550 + status = commit_inode_metadata(file_inode(src)); 551 + if (status < 0) { 552 + nfsd_reset_boot_verifier(net_generic(nf_dst->nf_net, 553 + nfsd_net_id)); 554 + ret = nfserrno(status); 555 + } 548 556 } 549 - return 0; 557 + out_err: 558 + up_write(&nf_dst->nf_rwsem); 559 + return ret; 550 560 } 551 561 552 562 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, ··· 960 938 } 961 939 962 940 __be32 963 - nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, 941 + nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, 964 942 loff_t offset, struct kvec *vec, int vlen, 965 - unsigned long *cnt, int stable) 943 + unsigned long *cnt, int stable, 944 + __be32 *verf) 966 945 { 946 + struct file *file = nf->nf_file; 967 947 struct svc_export *exp; 968 948 struct iov_iter iter; 969 949 __be32 nfserr; ··· 996 972 flags |= RWF_SYNC; 997 973 998 974 iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt); 999 - host_err = vfs_iter_write(file, &iter, &pos, flags); 1000 - if (host_err < 0) 975 + if (flags & RWF_SYNC) { 976 + down_write(&nf->nf_rwsem); 977 + host_err = vfs_iter_write(file, &iter, &pos, flags); 978 + if (host_err < 0) 979 + nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), 980 + nfsd_net_id)); 981 + up_write(&nf->nf_rwsem); 982 + } else { 983 + down_read(&nf->nf_rwsem); 984 + if (verf) 985 + nfsd_copy_boot_verifier(verf, 986 + net_generic(SVC_NET(rqstp), 987 + nfsd_net_id)); 988 + host_err = vfs_iter_write(file, &iter, &pos, flags); 989 + up_read(&nf->nf_rwsem); 990 + } 991 + if (host_err < 0) { 992 + nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), 993 + nfsd_net_id)); 1001 994 goto out_nfserr; 995 + } 996 + *cnt = host_err; 1002 997 nfsdstats.io_write += *cnt; 1003 998 fsnotify_modify(file); 1004 999 ··· 1079 1036 
*/ 1080 1037 __be32 1081 1038 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, 1082 - struct kvec *vec, int vlen, unsigned long *cnt, int stable) 1039 + struct kvec *vec, int vlen, unsigned long *cnt, int stable, 1040 + __be32 *verf) 1083 1041 { 1084 1042 struct nfsd_file *nf; 1085 1043 __be32 err; ··· 1091 1047 if (err) 1092 1048 goto out; 1093 1049 1094 - err = nfsd_vfs_write(rqstp, fhp, nf->nf_file, offset, vec, 1095 - vlen, cnt, stable); 1050 + err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec, 1051 + vlen, cnt, stable, verf); 1096 1052 nfsd_file_put(nf); 1097 1053 out: 1098 1054 trace_nfsd_write_done(rqstp, fhp, offset, *cnt); ··· 1111 1067 */ 1112 1068 __be32 1113 1069 nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, 1114 - loff_t offset, unsigned long count) 1070 + loff_t offset, unsigned long count, __be32 *verf) 1115 1071 { 1116 1072 struct nfsd_file *nf; 1117 1073 loff_t end = LLONG_MAX; ··· 1130 1086 if (err) 1131 1087 goto out; 1132 1088 if (EX_ISSYNC(fhp->fh_export)) { 1133 - int err2 = vfs_fsync_range(nf->nf_file, offset, end, 0); 1089 + int err2; 1134 1090 1091 + down_write(&nf->nf_rwsem); 1092 + err2 = vfs_fsync_range(nf->nf_file, offset, end, 0); 1135 1093 switch (err2) { 1136 1094 case 0: 1095 + nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net, 1096 + nfsd_net_id)); 1137 1097 break; 1138 1098 case -EINVAL: 1139 1099 err = nfserr_notsupp; ··· 1147 1099 nfsd_reset_boot_verifier(net_generic(nf->nf_net, 1148 1100 nfsd_net_id)); 1149 1101 } 1150 - } 1102 + up_write(&nf->nf_rwsem); 1103 + } else 1104 + nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net, 1105 + nfsd_net_id)); 1151 1106 1152 1107 nfsd_file_put(nf); 1153 1108 out: ··· 1174 1123 if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) 1175 1124 iap->ia_valid &= ~(ATTR_UID|ATTR_GID); 1176 1125 if (iap->ia_valid) 1177 - return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); 1126 + return nfsd_setattr(rqstp, resfhp, iap, 0, (time64_t)0); 1178 1127 /* Callers expect file 
metadata to be committed here */ 1179 1128 return nfserrno(commit_metadata(resfhp)); 1180 1129 } ··· 1437 1386 && d_inode(dchild)->i_atime.tv_sec == v_atime 1438 1387 && d_inode(dchild)->i_size == 0 ) { 1439 1388 if (created) 1440 - *created = 1; 1389 + *created = true; 1441 1390 break; 1442 1391 } 1443 1392 /* fall through */ ··· 1446 1395 && d_inode(dchild)->i_atime.tv_sec == v_atime 1447 1396 && d_inode(dchild)->i_size == 0 ) { 1448 1397 if (created) 1449 - *created = 1; 1398 + *created = true; 1450 1399 goto set_attr; 1451 1400 } 1452 1401 /* fall through */ ··· 1463 1412 goto out_nfserr; 1464 1413 } 1465 1414 if (created) 1466 - *created = 1; 1415 + *created = true; 1467 1416 1468 1417 nfsd_check_ignore_resizing(iap); 1469 1418
+11 -7
fs/nfsd/vfs.h
··· 34 34 #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) 35 35 #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) 36 36 37 + struct nfsd_file; 38 + 37 39 /* 38 40 * Callback function for readdir 39 41 */ ··· 50 48 const char *, unsigned int, 51 49 struct svc_export **, struct dentry **); 52 50 __be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, 53 - struct iattr *, int, time_t); 51 + struct iattr *, int, time64_t); 54 52 int nfsd_mountpoint(struct dentry *, struct svc_export *); 55 53 #ifdef CONFIG_NFSD_V4 56 54 __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *, 57 55 struct xdr_netobj *); 58 56 __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *, 59 57 struct file *, loff_t, loff_t, int); 60 - __be32 nfsd4_clone_file_range(struct file *, u64, struct file *, 61 - u64, u64, bool); 58 + __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, 59 + struct nfsd_file *nf_dst, u64 dst_pos, 60 + u64 count, bool sync); 62 61 #endif /* CONFIG_NFSD_V4 */ 63 62 __be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *, 64 63 char *name, int len, struct iattr *attrs, ··· 74 71 struct svc_fh *res, int createmode, 75 72 u32 *verifier, bool *truncp, bool *created); 76 73 __be32 nfsd_commit(struct svc_rqst *, struct svc_fh *, 77 - loff_t, unsigned long); 74 + loff_t, unsigned long, __be32 *verf); 78 75 #endif /* CONFIG_NFSD_V3 */ 79 76 int nfsd_open_break_lease(struct inode *, int); 80 77 __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t, ··· 94 91 loff_t, struct kvec *, int, unsigned long *, 95 92 u32 *eof); 96 93 __be32 nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t, 97 - struct kvec *, int, unsigned long *, int); 94 + struct kvec *, int, unsigned long *, 95 + int stable, __be32 *verf); 98 96 __be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, 99 - struct file *file, loff_t offset, 97 + struct nfsd_file *nf, loff_t offset, 100 98 struct kvec *vec, int vlen, unsigned long *cnt, 
101 - int stable); 99 + int stable, __be32 *verf); 102 100 __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, 103 101 char *, int *); 104 102 __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
+3 -1
fs/nfsd/xdr3.h
··· 14 14 struct svc_fh fh; 15 15 struct iattr attrs; 16 16 int check_guard; 17 - time_t guardtime; 17 + time64_t guardtime; 18 18 }; 19 19 20 20 struct nfsd3_diropargs { ··· 159 159 struct svc_fh fh; 160 160 unsigned long count; 161 161 int committed; 162 + __be32 verf[2]; 162 163 }; 163 164 164 165 struct nfsd3_renameres { ··· 224 223 struct nfsd3_commitres { 225 224 __be32 status; 226 225 struct svc_fh fh; 226 + __be32 verf[2]; 227 227 }; 228 228 229 229 struct nfsd3_getaclres {
+30 -9
fs/nfsd/xdr4.h
··· 46 46 #define CURRENT_STATE_ID_FLAG (1<<0) 47 47 #define SAVED_STATE_ID_FLAG (1<<1) 48 48 49 - #define SET_STATE_ID(c, f) ((c)->sid_flags |= (f)) 50 - #define HAS_STATE_ID(c, f) ((c)->sid_flags & (f)) 51 - #define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f)) 49 + #define SET_CSTATE_FLAG(c, f) ((c)->sid_flags |= (f)) 50 + #define HAS_CSTATE_FLAG(c, f) ((c)->sid_flags & (f)) 51 + #define CLEAR_CSTATE_FLAG(c, f) ((c)->sid_flags &= ~(f)) 52 52 53 53 struct nfsd4_compound_state { 54 54 struct svc_fh current_fh; ··· 221 221 struct nfsd4_putfh { 222 222 u32 pf_fhlen; /* request */ 223 223 char *pf_fhval; /* request */ 224 + bool no_verify; /* represents foreigh fh */ 224 225 }; 225 226 226 227 struct nfsd4_open { ··· 519 518 520 519 struct nfsd4_copy { 521 520 /* request */ 522 - stateid_t cp_src_stateid; 523 - stateid_t cp_dst_stateid; 524 - u64 cp_src_pos; 525 - u64 cp_dst_pos; 526 - u64 cp_count; 521 + stateid_t cp_src_stateid; 522 + stateid_t cp_dst_stateid; 523 + u64 cp_src_pos; 524 + u64 cp_dst_pos; 525 + u64 cp_count; 526 + struct nl4_server cp_src; 527 + bool cp_intra; 527 528 528 529 /* both */ 529 530 bool cp_synchronous; ··· 543 540 struct nfsd_file *nf_src; 544 541 struct nfsd_file *nf_dst; 545 542 546 - stateid_t cp_stateid; 543 + copy_stateid_t cp_stateid; 547 544 548 545 struct list_head copies; 549 546 struct task_struct *copy_task; 550 547 refcount_t refcount; 551 548 bool stopped; 549 + 550 + struct vfsmount *ss_mnt; 551 + struct nfs_fh c_fh; 552 + nfs4_stateid stateid; 552 553 }; 554 + extern bool inter_copy_offload_enable; 553 555 554 556 struct nfsd4_seek { 555 557 /* request */ ··· 574 566 /* response */ 575 567 u64 count; 576 568 u32 status; 569 + }; 570 + 571 + struct nfsd4_copy_notify { 572 + /* request */ 573 + stateid_t cpn_src_stateid; 574 + struct nl4_server cpn_dst; 575 + 576 + /* response */ 577 + stateid_t cpn_cnr_stateid; 578 + u64 cpn_sec; 579 + u32 cpn_nsec; 580 + struct nl4_server cpn_src; 577 581 }; 578 582 579 583 struct nfsd4_op 
{ ··· 647 627 struct nfsd4_clone clone; 648 628 struct nfsd4_copy copy; 649 629 struct nfsd4_offload_status offload_status; 630 + struct nfsd4_copy_notify copy_notify; 650 631 struct nfsd4_seek seek; 651 632 } u; 652 633 struct nfs4_replay * replay;
+4
net/sunrpc/auth_gss/svcauth_gss.c
··· 1248 1248 dprintk("RPC: No creds found!\n"); 1249 1249 goto out; 1250 1250 } else { 1251 + struct timespec64 boot; 1251 1252 1252 1253 /* steal creds */ 1253 1254 rsci.cred = ud->creds; ··· 1269 1268 &expiry, GFP_KERNEL); 1270 1269 if (status) 1271 1270 goto out; 1271 + 1272 + getboottime64(&boot); 1273 + expiry -= boot.tv_sec; 1272 1274 } 1273 1275 1274 1276 rsci.h.expiry_time = expiry;
+26 -22
net/sunrpc/cache.c
··· 77 77 return NULL; 78 78 } 79 79 80 + static void sunrpc_begin_cache_remove_entry(struct cache_head *ch, 81 + struct cache_detail *cd) 82 + { 83 + /* Must be called under cd->hash_lock */ 84 + hlist_del_init_rcu(&ch->cache_list); 85 + set_bit(CACHE_CLEANED, &ch->flags); 86 + cd->entries --; 87 + } 88 + 89 + static void sunrpc_end_cache_remove_entry(struct cache_head *ch, 90 + struct cache_detail *cd) 91 + { 92 + cache_fresh_unlocked(ch, cd); 93 + cache_put(ch, cd); 94 + } 95 + 80 96 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, 81 97 struct cache_head *key, 82 98 int hash) ··· 116 100 hlist_for_each_entry_rcu(tmp, head, cache_list) { 117 101 if (detail->match(tmp, key)) { 118 102 if (cache_is_expired(detail, tmp)) { 119 - hlist_del_init_rcu(&tmp->cache_list); 120 - detail->entries --; 103 + sunrpc_begin_cache_remove_entry(tmp, detail); 121 104 freeme = tmp; 122 105 break; 123 106 } ··· 132 117 cache_get(new); 133 118 spin_unlock(&detail->hash_lock); 134 119 135 - if (freeme) { 136 - cache_fresh_unlocked(freeme, detail); 137 - cache_put(freeme, detail); 138 - } 120 + if (freeme) 121 + sunrpc_end_cache_remove_entry(freeme, detail); 139 122 return new; 140 123 } 141 124 ··· 467 454 if (!cache_is_expired(current_detail, ch)) 468 455 continue; 469 456 470 - hlist_del_init_rcu(&ch->cache_list); 471 - current_detail->entries--; 457 + sunrpc_begin_cache_remove_entry(ch, current_detail); 472 458 rv = 1; 473 459 break; 474 460 } ··· 477 465 if (!ch) 478 466 current_index ++; 479 467 spin_unlock(&cache_list_lock); 480 - if (ch) { 481 - set_bit(CACHE_CLEANED, &ch->flags); 482 - cache_fresh_unlocked(ch, d); 483 - cache_put(ch, d); 484 - } 468 + if (ch) 469 + sunrpc_end_cache_remove_entry(ch, d); 485 470 } else 486 471 spin_unlock(&cache_list_lock); 487 472 ··· 534 525 for (i = 0; i < detail->hash_size; i++) { 535 526 head = &detail->hash_table[i]; 536 527 hlist_for_each_entry_safe(ch, tmp, head, cache_list) { 537 - 
hlist_del_init_rcu(&ch->cache_list); 538 - detail->entries--; 539 - 540 - set_bit(CACHE_CLEANED, &ch->flags); 528 + sunrpc_begin_cache_remove_entry(ch, detail); 541 529 spin_unlock(&detail->hash_lock); 542 - cache_fresh_unlocked(ch, detail); 543 - cache_put(ch, detail); 530 + sunrpc_end_cache_remove_entry(ch, detail); 544 531 spin_lock(&detail->hash_lock); 545 532 } 546 533 } ··· 1890 1885 { 1891 1886 spin_lock(&cd->hash_lock); 1892 1887 if (!hlist_unhashed(&h->cache_list)){ 1893 - hlist_del_init_rcu(&h->cache_list); 1894 - cd->entries--; 1888 + sunrpc_begin_cache_remove_entry(h, cd); 1895 1889 spin_unlock(&cd->hash_lock); 1896 - cache_put(h, cd); 1890 + sunrpc_end_cache_remove_entry(h, cd); 1897 1891 } else 1898 1892 spin_unlock(&cd->hash_lock); 1899 1893 }