Merge tag 'for-linus-5.7-ofs1' of git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux

Pull orangefs updates from Mike Marshall:
"A fix and two cleanups.

Fix:

- Christoph Hellwig noticed that some logic I added to
  orangefs_file_read_iter introduced a race condition, so he sent a
  reversion patch. I had to modify his patch since reverting at this
  point broke Orangefs.

Cleanups:

- Christoph Hellwig noticed that we were doing some unnecessary work
  in orangefs_flush, so he sent in a patch that removed the unneeded
  code.

- Al Viro told me he had trouble building Orangefs. Orangefs should
  be easy to build, even for Al :-).

  I looked back at the test server build notes in orangefs.txt, just
  in case that's where the trouble really is, and found a couple of
  typos and made a couple of clarifications"

* tag 'for-linus-5.7-ofs1' of git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux:
  orangefs: clarify build steps for test server in orangefs.txt
  orangefs: don't mess with I_DIRTY_TIMES in orangefs_flush
  orangefs: get rid of knob code...

4 files changed, 26 insertions(+), 85 deletions(-)
+19 -15
Documentation/filesystems/orangefs.rst
http://www.orangefs.org/documentation/

-
- Userspace Filesystem Source
- ===========================
-
- http://www.orangefs.org/download
-
- Orangefs versions prior to 2.9.3 would not be compatible with the
- upstream version of the kernel client.
-
-
Running ORANGEFS On a Single Server
===================================

···

mount -t pvfs2 tcp://localhost:3334/orangefs /pvfsmnt

+ Userspace Filesystem Source
+ ===========================
+
+ http://www.orangefs.org/download
+
+ Orangefs versions prior to 2.9.3 would not be compatible with the
+ upstream version of the kernel client.
+

Building ORANGEFS on a Single Server
====================================

···

::

- ./configure --prefix=/opt/ofs --with-db-backend=lmdb
+ ./configure --prefix=/opt/ofs --with-db-backend=lmdb --disable-usrint

make

make install

- Create an orangefs config file::
+ Create an orangefs config file by running pvfs2-genconfig and
+ specifying a target config file. Pvfs2-genconfig will prompt you
+ through. Generally it works fine to take the defaults, but you
+ should use your server's hostname, rather than "localhost" when
+ it comes to that question::

/opt/ofs/bin/pvfs2-genconfig /etc/pvfs2.conf

Create an /etc/pvfs2tab file::
+
+ Localhost is fine for your pvfs2tab file:

echo tcp://localhost:3334/orangefs /pvfsmnt pvfs2 defaults,noauto 0 0 > \
/etc/pvfs2tab

···

Start the server::

- /opt/osf/sbin/pvfs2-server /etc/pvfs2.conf
+ /opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf

Now the server should be running. Pvfs2-ls is a simple
test to verify that the server is running::

···

If stuff seems to be working, load the kernel module and
turn on the client core::

- /opt/ofs/sbin/pvfs2-client -p /opt/osf/sbin/pvfs2-client-core
+ /opt/ofs/sbin/pvfs2-client -p /opt/ofs/sbin/pvfs2-client-core

Mount your filesystem::

- mount -t pvfs2 tcp://localhost:3334/orangefs /pvfsmnt
+ mount -t pvfs2 tcp://`hostname`:3334/orangefs /pvfsmnt


Running xfstests
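Taken together, the documentation changes move the "Userspace Filesystem Source"
section below the single-server run example, add --disable-usrint to the
configure line, expand the pvfs2-genconfig step (take the defaults, but give the
server's hostname rather than "localhost"), note that localhost remains fine in
/etc/pvfs2tab, fix two /opt/osf typos to /opt/ofs, and switch the final mount
example to tcp://`hostname`:3334.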
+1 -33
fs/orangefs/file.c
				     struct iov_iter *iter)
{
	int ret;
-	struct orangefs_read_options *ro;
-
	orangefs_stats.reads++;
-
-	/*
-	 * Remember how they set "count" in read(2) or pread(2) or whatever -
-	 * users can use count as a knob to control orangefs io size and later
-	 * we can try to help them fill as many pages as possible in readpage.
-	 */
-	if (!iocb->ki_filp->private_data) {
-		iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL);
-		if (!iocb->ki_filp->private_data)
-			return(ENOMEM);
-		ro = iocb->ki_filp->private_data;
-		ro->blksiz = iter->count;
-	}

	down_read(&file_inode(iocb->ki_filp)->i_rwsem);
	ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));

···

	return rc;
}

-static int orangefs_file_open(struct inode * inode, struct file *file)
-{
-	file->private_data = NULL;
-	return generic_file_open(inode, file);
-}
-
static int orangefs_flush(struct file *file, fl_owner_t id)
{
	/*

···

	 * on an explicit fsync call. This duplicates historical OrangeFS
	 * behavior.
	 */
-	struct inode *inode = file->f_mapping->host;
	int r;
-
-	kfree(file->private_data);
-	file->private_data = NULL;
-
-	if (inode->i_state & I_DIRTY_TIME) {
-		spin_lock(&inode->i_lock);
-		inode->i_state &= ~I_DIRTY_TIME;
-		spin_unlock(&inode->i_lock);
-		mark_inode_dirty_sync(inode);
-	}

	r = filemap_write_and_wait_range(file->f_mapping, 0, LLONG_MAX);
	if (r > 0)

···

	.lock = orangefs_lock,
	.unlocked_ioctl = orangefs_ioctl,
	.mmap = orangefs_file_mmap,
-	.open = orangefs_file_open,
+	.open = generic_file_open,
	.flush = orangefs_flush,
	.release = orangefs_file_release,
	.fsync = orangefs_fsync,
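For illustration: the logic removed from orangefs_file_read_iter above filled in
file->private_data with an unlocked check-then-allocate, so two readers sharing
the same open file could both see NULL and both allocate. The following is a
minimal userspace sketch of that pattern only, not OrangeFS code; fake_file,
reader and the pthread harness are invented for the example.

/*
 * Standalone sketch of an unlocked check-then-allocate race.
 * Build with: gcc -pthread race-sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_file {
	void *private_data;		/* shared, never locked */
};

static struct fake_file shared_file;

static void *reader(void *arg)
{
	(void)arg;
	if (!shared_file.private_data) {	/* check ...             */
		void *p = malloc(64);		/* ... allocate ...      */
		shared_file.private_data = p;	/* ... then publish last */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, reader, NULL);
	pthread_create(&b, NULL, reader, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Whichever thread stored last wins; the other buffer leaks. */
	printf("private_data = %p\n", shared_file.private_data);
	free(shared_file.private_data);
	return 0;
}

The same file.c diff also drops the manual I_DIRTY_TIME handling from
orangefs_flush, which now just writes back the file's data range on close.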
+6 -33
fs/orangefs/inode.c
	pgoff_t index; /* which page */
	struct page *next_page;
	char *kaddr;
-	struct orangefs_read_options *ro = file->private_data;
	loff_t read_size;
-	loff_t roundedup;
	int buffer_index = -1; /* orangefs shared memory slot */
	int slot_index; /* index into slot */
	int remaining;

	/*
-	 * If they set some miniscule size for "count" in read(2)
-	 * (for example) then let's try to read a page, or the whole file
-	 * if it is smaller than a page. Once "count" goes over a page
-	 * then lets round up to the highest page size multiple that is
-	 * less than or equal to "count" and do that much orangefs IO and
-	 * try to fill as many pages as we can from it.
-	 *
-	 * "count" should be represented in ro->blksiz.
-	 *
-	 * inode->i_size = file size.
+	 * Get up to this many bytes from Orangefs at a time and try
+	 * to fill them into the page cache at once. Tests with dd made
+	 * this seem like a reasonable static number, if there was
+	 * interest perhaps this number could be made setable through
+	 * sysfs...
	 */
-	if (ro) {
-		if (ro->blksiz < PAGE_SIZE) {
-			if (inode->i_size < PAGE_SIZE)
-				read_size = inode->i_size;
-			else
-				read_size = PAGE_SIZE;
-		} else {
-			roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ?
-				((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) :
-				ro->blksiz;
-			if (roundedup > inode->i_size)
-				read_size = inode->i_size;
-			else
-				read_size = roundedup;
-
-		}
-	} else {
-		read_size = PAGE_SIZE;
-	}
-	if (!read_size)
-		read_size = PAGE_SIZE;
+	read_size = 524288;

	if (PageDirty(page))
		orangefs_launder_page(page);
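For scale: the fixed read_size of 524288 bytes is 512 KiB, or 128 pages at the
common 4 KiB PAGE_SIZE, so each readpage round trip to the server can fill up to
128 page-cache pages at once rather than sizing the I/O from whatever count the
application last passed to read(2).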
-4
fs/orangefs/orangefs-kernel.h
	kgid_t gid;
};

-struct orangefs_read_options {
-	ssize_t blksiz;
-};
-
extern struct orangefs_stats orangefs_stats;

/*