Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dax: relocate some dax functions

dax_load_hole() will soon need to call dax_insert_mapping_entry(), so it
needs to be moved lower in dax.c so the definition exists.

dax_wake_mapping_entry_waiter() will soon be removed from dax.h and be
made static to dax.c, so we need to move its definition above all its
callers.

Link: http://lkml.kernel.org/r/20170724170616.25810-3-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Ross Zwisler and committed by Linus Torvalds
e30331ff b2770da6

+69 -69
fs/dax.c
··· 121 121 } 122 122 123 123 /* 124 + * We do not necessarily hold the mapping->tree_lock when we call this 125 + * function so it is possible that 'entry' is no longer a valid item in the 126 + * radix tree. This is okay because all we really need to do is to find the 127 + * correct waitqueue where tasks might be waiting for that old 'entry' and 128 + * wake them. 129 + */ 130 + void dax_wake_mapping_entry_waiter(struct address_space *mapping, 131 + pgoff_t index, void *entry, bool wake_all) 132 + { 133 + struct exceptional_entry_key key; 134 + wait_queue_head_t *wq; 135 + 136 + wq = dax_entry_waitqueue(mapping, index, entry, &key); 137 + 138 + /* 139 + * Checking for locked entry and prepare_to_wait_exclusive() happens 140 + * under mapping->tree_lock, ditto for entry handling in our callers. 141 + * So at this point all tasks that could have seen our entry locked 142 + * must be in the waitqueue and the following check will see them. 143 + */ 144 + if (waitqueue_active(wq)) 145 + __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 146 + } 147 + 148 + /* 124 149 * Check whether the given slot is locked. The function must be called with 125 150 * mapping->tree_lock held 126 151 */ ··· 417 392 return entry; 418 393 } 419 394 420 - /* 421 - * We do not necessarily hold the mapping->tree_lock when we call this 422 - * function so it is possible that 'entry' is no longer a valid item in the 423 - * radix tree. This is okay because all we really need to do is to find the 424 - * correct waitqueue where tasks might be waiting for that old 'entry' and 425 - * wake them. 
426 - */ 427 - void dax_wake_mapping_entry_waiter(struct address_space *mapping, 428 - pgoff_t index, void *entry, bool wake_all) 429 - { 430 - struct exceptional_entry_key key; 431 - wait_queue_head_t *wq; 432 - 433 - wq = dax_entry_waitqueue(mapping, index, entry, &key); 434 - 435 - /* 436 - * Checking for locked entry and prepare_to_wait_exclusive() happens 437 - * under mapping->tree_lock, ditto for entry handling in our callers. 438 - * So at this point all tasks that could have seen our entry locked 439 - * must be in the waitqueue and the following check will see them. 440 - */ 441 - if (waitqueue_active(wq)) 442 - __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 443 - } 444 - 445 395 static int __dax_invalidate_mapping_entry(struct address_space *mapping, 446 396 pgoff_t index, bool trunc) 447 397 { ··· 466 466 pgoff_t index) 467 467 { 468 468 return __dax_invalidate_mapping_entry(mapping, index, false); 469 - } 470 - 471 - /* 472 - * The user has performed a load from a hole in the file. Allocating 473 - * a new page in the file would cause excessive storage usage for 474 - * workloads with sparse files. We allocate a page cache page instead. 475 - * We'll kick it out of the page cache if it's ever written to, 476 - * otherwise it will simply fall out of the page cache under memory 477 - * pressure without ever having been dirtied. 478 - */ 479 - static int dax_load_hole(struct address_space *mapping, void **entry, 480 - struct vm_fault *vmf) 481 - { 482 - struct inode *inode = mapping->host; 483 - struct page *page; 484 - int ret; 485 - 486 - /* Hole page already exists? Return it... 
*/ 487 - if (!radix_tree_exceptional_entry(*entry)) { 488 - page = *entry; 489 - goto finish_fault; 490 - } 491 - 492 - /* This will replace locked radix tree entry with a hole page */ 493 - page = find_or_create_page(mapping, vmf->pgoff, 494 - vmf->gfp_mask | __GFP_ZERO); 495 - if (!page) { 496 - ret = VM_FAULT_OOM; 497 - goto out; 498 - } 499 - 500 - finish_fault: 501 - vmf->page = page; 502 - ret = finish_fault(vmf); 503 - vmf->page = NULL; 504 - *entry = page; 505 - if (!ret) { 506 - /* Grab reference for PTE that is now referencing the page */ 507 - get_page(page); 508 - ret = VM_FAULT_NOPAGE; 509 - } 510 - out: 511 - trace_dax_load_hole(inode, vmf, ret); 512 - return ret; 513 469 } 514 470 515 471 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, ··· 896 940 return VM_FAULT_NOPAGE; 897 941 } 898 942 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); 943 + 944 + /* 945 + * The user has performed a load from a hole in the file. Allocating 946 + * a new page in the file would cause excessive storage usage for 947 + * workloads with sparse files. We allocate a page cache page instead. 948 + * We'll kick it out of the page cache if it's ever written to, 949 + * otherwise it will simply fall out of the page cache under memory 950 + * pressure without ever having been dirtied. 951 + */ 952 + static int dax_load_hole(struct address_space *mapping, void **entry, 953 + struct vm_fault *vmf) 954 + { 955 + struct inode *inode = mapping->host; 956 + struct page *page; 957 + int ret; 958 + 959 + /* Hole page already exists? Return it... 
*/ 960 + if (!radix_tree_exceptional_entry(*entry)) { 961 + page = *entry; 962 + goto finish_fault; 963 + } 964 + 965 + /* This will replace locked radix tree entry with a hole page */ 966 + page = find_or_create_page(mapping, vmf->pgoff, 967 + vmf->gfp_mask | __GFP_ZERO); 968 + if (!page) { 969 + ret = VM_FAULT_OOM; 970 + goto out; 971 + } 972 + 973 + finish_fault: 974 + vmf->page = page; 975 + ret = finish_fault(vmf); 976 + vmf->page = NULL; 977 + *entry = page; 978 + if (!ret) { 979 + /* Grab reference for PTE that is now referencing the page */ 980 + get_page(page); 981 + ret = VM_FAULT_NOPAGE; 982 + } 983 + out: 984 + trace_dax_load_hole(inode, vmf, ret); 985 + return ret; 986 + } 899 987 900 988 static bool dax_range_is_aligned(struct block_device *bdev, 901 989 unsigned int offset, unsigned int length)