Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfs-for-3.18-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
"Highlights include:

Stable fixes:
- fix an NFSv4.1 state renewal regression
- fix open/lock state recovery error handling
- fix lock recovery when CREATE_SESSION/SETCLIENTID_CONFIRM fails
- fix statd when reconnection fails
- don't wake tasks during connection abort
- don't start reboot recovery if lease check fails
- fix duplicate proc entries

Features:
- pNFS block driver fixes and clean ups from Christoph
- More code cleanups from Anna
- Improve mmap() writeback performance
- Replace use of PF_TRANS with a more generic mechanism for avoiding
deadlocks in nfs_release_page"

* tag 'nfs-for-3.18-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (66 commits)
NFSv4.1: Fix an NFSv4.1 state renewal regression
NFSv4: fix open/lock state recovery error handling
NFSv4: Fix lock recovery when CREATE_SESSION/SETCLIENTID_CONFIRM fails
NFS: Fabricate fscache server index key correctly
SUNRPC: Add missing support for RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT
NFSv3: Fix missing includes of nfs3_fs.h
NFS/SUNRPC: Remove other deadlock-avoidance mechanisms in nfs_release_page()
NFS: avoid waiting at all in nfs_release_page when congested.
NFS: avoid deadlocks with loop-back mounted NFS filesystems.
MM: export page_wakeup functions
SCHED: add some "wait..on_bit...timeout()" interfaces.
NFS: don't use STABLE writes during writeback.
NFSv4: use exponential retry on NFS4ERR_DELAY for async requests.
rpc: Add -EPERM processing for xs_udp_send_request()
rpc: return sent and err from xs_sendpages()
lockd: Try to reconnect if statd has moved
SUNRPC: Don't wake tasks during connection abort
Fixing lease renewal
nfs: fix duplicate proc entries
pnfs/blocklayout: Fix a 64-bit division/remainder issue in bl_map_stripe
...

+2529 -3315
+6
fs/lockd/mon.c
··· 159 159 160 160 msg.rpc_proc = &clnt->cl_procinfo[proc]; 161 161 status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); 162 + if (status == -ECONNREFUSED) { 163 + dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n", 164 + status); 165 + rpc_force_rebind(clnt); 166 + status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); 167 + } 162 168 if (status < 0) 163 169 dprintk("lockd: NSM upcall RPC failed, status=%d\n", 164 170 status);
+2 -1
fs/nfs/blocklayout/Makefile
··· 2 2 # Makefile for the pNFS block layout driver kernel module 3 3 # 4 4 obj-$(CONFIG_PNFS_BLOCK) += blocklayoutdriver.o 5 - blocklayoutdriver-objs := blocklayout.o extents.o blocklayoutdev.o blocklayoutdm.o 5 + 6 + blocklayoutdriver-y += blocklayout.o dev.o extent_tree.o rpc_pipefs.o
+438 -970
fs/nfs/blocklayout/blocklayout.c
··· 35 35 #include <linux/mount.h> 36 36 #include <linux/namei.h> 37 37 #include <linux/bio.h> /* struct bio */ 38 - #include <linux/buffer_head.h> /* various write calls */ 39 38 #include <linux/prefetch.h> 40 39 #include <linux/pagevec.h> 41 40 ··· 49 50 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); 50 51 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); 51 52 52 - static void print_page(struct page *page) 53 + static bool is_hole(struct pnfs_block_extent *be) 53 54 { 54 - dprintk("PRINTPAGE page %p\n", page); 55 - dprintk(" PagePrivate %d\n", PagePrivate(page)); 56 - dprintk(" PageUptodate %d\n", PageUptodate(page)); 57 - dprintk(" PageError %d\n", PageError(page)); 58 - dprintk(" PageDirty %d\n", PageDirty(page)); 59 - dprintk(" PageReferenced %d\n", PageReferenced(page)); 60 - dprintk(" PageLocked %d\n", PageLocked(page)); 61 - dprintk(" PageWriteback %d\n", PageWriteback(page)); 62 - dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page)); 63 - dprintk("\n"); 64 - } 65 - 66 - /* Given the be associated with isect, determine if page data needs to be 67 - * initialized. 68 - */ 69 - static int is_hole(struct pnfs_block_extent *be, sector_t isect) 70 - { 71 - if (be->be_state == PNFS_BLOCK_NONE_DATA) 72 - return 1; 73 - else if (be->be_state != PNFS_BLOCK_INVALID_DATA) 74 - return 0; 75 - else 76 - return !bl_is_sector_init(be->be_inval, isect); 77 - } 78 - 79 - /* Given the be associated with isect, determine if page data can be 80 - * written to disk. 81 - */ 82 - static int is_writable(struct pnfs_block_extent *be, sector_t isect) 83 - { 84 - return (be->be_state == PNFS_BLOCK_READWRITE_DATA || 85 - be->be_state == PNFS_BLOCK_INVALID_DATA); 55 + switch (be->be_state) { 56 + case PNFS_BLOCK_NONE_DATA: 57 + return true; 58 + case PNFS_BLOCK_INVALID_DATA: 59 + return be->be_tag ? false : true; 60 + default: 61 + return false; 62 + } 86 63 } 87 64 88 65 /* The data we are handed might be spread across several bios. 
We need ··· 66 91 */ 67 92 struct parallel_io { 68 93 struct kref refcnt; 69 - void (*pnfs_callback) (void *data, int num_se); 94 + void (*pnfs_callback) (void *data); 70 95 void *data; 71 - int bse_count; 72 96 }; 73 97 74 98 static inline struct parallel_io *alloc_parallel(void *data) ··· 78 104 if (rv) { 79 105 rv->data = data; 80 106 kref_init(&rv->refcnt); 81 - rv->bse_count = 0; 82 107 } 83 108 return rv; 84 109 } ··· 92 119 struct parallel_io *p = container_of(kref, struct parallel_io, refcnt); 93 120 94 121 dprintk("%s enter\n", __func__); 95 - p->pnfs_callback(p->data, p->bse_count); 122 + p->pnfs_callback(p->data); 96 123 kfree(p); 97 124 } 98 125 ··· 114 141 return NULL; 115 142 } 116 143 117 - static struct bio *bl_alloc_init_bio(int npg, sector_t isect, 118 - struct pnfs_block_extent *be, 119 - void (*end_io)(struct bio *, int err), 120 - struct parallel_io *par) 144 + static struct bio * 145 + bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector, 146 + void (*end_io)(struct bio *, int err), struct parallel_io *par) 121 147 { 122 148 struct bio *bio; 123 149 ··· 128 156 } 129 157 130 158 if (bio) { 131 - bio->bi_iter.bi_sector = isect - be->be_f_offset + 132 - be->be_v_offset; 133 - bio->bi_bdev = be->be_mdev; 159 + bio->bi_iter.bi_sector = disk_sector; 160 + bio->bi_bdev = bdev; 134 161 bio->bi_end_io = end_io; 135 162 bio->bi_private = par; 136 163 } 137 164 return bio; 138 165 } 139 166 140 - static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw, 141 - sector_t isect, struct page *page, 142 - struct pnfs_block_extent *be, 143 - void (*end_io)(struct bio *, int err), 144 - struct parallel_io *par, 145 - unsigned int offset, int len) 167 + static struct bio * 168 + do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, 169 + struct page *page, struct pnfs_block_dev_map *map, 170 + struct pnfs_block_extent *be, 171 + void (*end_io)(struct bio *, int err), 172 + struct parallel_io *par, unsigned int 
offset, int *len) 146 173 { 147 - isect = isect + (offset >> SECTOR_SHIFT); 174 + struct pnfs_block_dev *dev = 175 + container_of(be->be_device, struct pnfs_block_dev, node); 176 + u64 disk_addr, end; 177 + 148 178 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, 149 - npg, rw, (unsigned long long)isect, offset, len); 179 + npg, rw, (unsigned long long)isect, offset, *len); 180 + 181 + /* translate to device offset */ 182 + isect += be->be_v_offset; 183 + isect -= be->be_f_offset; 184 + 185 + /* translate to physical disk offset */ 186 + disk_addr = (u64)isect << SECTOR_SHIFT; 187 + if (disk_addr < map->start || disk_addr >= map->start + map->len) { 188 + if (!dev->map(dev, disk_addr, map)) 189 + return ERR_PTR(-EIO); 190 + bio = bl_submit_bio(rw, bio); 191 + } 192 + disk_addr += map->disk_offset; 193 + disk_addr -= map->start; 194 + 195 + /* limit length to what the device mapping allows */ 196 + end = disk_addr + *len; 197 + if (end >= map->start + map->len) 198 + *len = map->start + map->len - disk_addr; 199 + 150 200 retry: 151 201 if (!bio) { 152 - bio = bl_alloc_init_bio(npg, isect, be, end_io, par); 202 + bio = bl_alloc_init_bio(npg, map->bdev, 203 + disk_addr >> SECTOR_SHIFT, end_io, par); 153 204 if (!bio) 154 205 return ERR_PTR(-ENOMEM); 155 206 } 156 - if (bio_add_page(bio, page, len, offset) < len) { 207 + if (bio_add_page(bio, page, *len, offset) < *len) { 157 208 bio = bl_submit_bio(rw, bio); 158 209 goto retry; 159 210 } 160 211 return bio; 161 212 } 162 213 163 - static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, 164 - sector_t isect, struct page *page, 165 - struct pnfs_block_extent *be, 166 - void (*end_io)(struct bio *, int err), 167 - struct parallel_io *par) 168 - { 169 - return do_add_page_to_bio(bio, npg, rw, isect, page, be, 170 - end_io, par, 0, PAGE_CACHE_SIZE); 171 - } 172 - 173 - /* This is basically copied from mpage_end_io_read */ 174 214 static void bl_end_io_read(struct bio *bio, int err) 175 
215 { 176 216 struct parallel_io *par = bio->bi_private; 177 - struct bio_vec *bvec; 178 - int i; 179 - 180 - if (!err) 181 - bio_for_each_segment_all(bvec, bio, i) 182 - SetPageUptodate(bvec->bv_page); 183 217 184 218 if (err) { 185 219 struct nfs_pgio_header *header = par->data; ··· 194 216 header->pnfs_error = -EIO; 195 217 pnfs_set_lo_fail(header->lseg); 196 218 } 219 + 197 220 bio_put(bio); 198 221 put_parallel(par); 199 222 } ··· 210 231 } 211 232 212 233 static void 213 - bl_end_par_io_read(void *data, int unused) 234 + bl_end_par_io_read(void *data) 214 235 { 215 236 struct nfs_pgio_header *hdr = data; 216 237 ··· 220 241 } 221 242 222 243 static enum pnfs_try_status 223 - bl_read_pagelist(struct nfs_pgio_header *hdr) 244 + bl_read_pagelist(struct nfs_pgio_header *header) 224 245 { 225 - struct nfs_pgio_header *header = hdr; 226 - int i, hole; 246 + struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); 247 + struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; 227 248 struct bio *bio = NULL; 228 - struct pnfs_block_extent *be = NULL, *cow_read = NULL; 249 + struct pnfs_block_extent be; 229 250 sector_t isect, extent_length = 0; 230 251 struct parallel_io *par; 231 - loff_t f_offset = hdr->args.offset; 232 - size_t bytes_left = hdr->args.count; 252 + loff_t f_offset = header->args.offset; 253 + size_t bytes_left = header->args.count; 233 254 unsigned int pg_offset, pg_len; 234 - struct page **pages = hdr->args.pages; 235 - int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT; 255 + struct page **pages = header->args.pages; 256 + int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 236 257 const bool is_dio = (header->dreq != NULL); 258 + struct blk_plug plug; 259 + int i; 237 260 238 261 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, 239 - hdr->page_array.npages, f_offset, 240 - (unsigned int)hdr->args.count); 262 + header->page_array.npages, f_offset, 263 + (unsigned int)header->args.count); 241 264 242 - par = 
alloc_parallel(hdr); 265 + par = alloc_parallel(header); 243 266 if (!par) 244 - goto use_mds; 267 + return PNFS_NOT_ATTEMPTED; 245 268 par->pnfs_callback = bl_end_par_io_read; 246 - /* At this point, we can no longer jump to use_mds */ 269 + 270 + blk_start_plug(&plug); 247 271 248 272 isect = (sector_t) (f_offset >> SECTOR_SHIFT); 249 273 /* Code assumes extents are page-aligned */ 250 - for (i = pg_index; i < hdr->page_array.npages; i++) { 251 - if (!extent_length) { 274 + for (i = pg_index; i < header->page_array.npages; i++) { 275 + if (extent_length <= 0) { 252 276 /* We've used up the previous extent */ 253 - bl_put_extent(be); 254 - bl_put_extent(cow_read); 255 277 bio = bl_submit_bio(READ, bio); 278 + 256 279 /* Get the next one */ 257 - be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), 258 - isect, &cow_read); 259 - if (!be) { 280 + if (!ext_tree_lookup(bl, isect, &be, false)) { 260 281 header->pnfs_error = -EIO; 261 282 goto out; 262 283 } 263 - extent_length = be->be_length - 264 - (isect - be->be_f_offset); 265 - if (cow_read) { 266 - sector_t cow_length = cow_read->be_length - 267 - (isect - cow_read->be_f_offset); 268 - extent_length = min(extent_length, cow_length); 269 - } 284 + extent_length = be.be_length - (isect - be.be_f_offset); 270 285 } 271 286 287 + pg_offset = f_offset & ~PAGE_CACHE_MASK; 272 288 if (is_dio) { 273 - pg_offset = f_offset & ~PAGE_CACHE_MASK; 274 289 if (pg_offset + bytes_left > PAGE_CACHE_SIZE) 275 290 pg_len = PAGE_CACHE_SIZE - pg_offset; 276 291 else 277 292 pg_len = bytes_left; 278 - 279 - f_offset += pg_len; 280 - bytes_left -= pg_len; 281 - isect += (pg_offset >> SECTOR_SHIFT); 282 293 } else { 283 - pg_offset = 0; 294 + BUG_ON(pg_offset != 0); 284 295 pg_len = PAGE_CACHE_SIZE; 285 296 } 286 297 287 - hole = is_hole(be, isect); 288 - if (hole && !cow_read) { 298 + isect += (pg_offset >> SECTOR_SHIFT); 299 + extent_length -= (pg_offset >> SECTOR_SHIFT); 300 + 301 + if (is_hole(&be)) { 289 302 bio = bl_submit_bio(READ, 
bio); 290 303 /* Fill hole w/ zeroes w/o accessing device */ 291 304 dprintk("%s Zeroing page for hole\n", __func__); 292 305 zero_user_segment(pages[i], pg_offset, pg_len); 293 - print_page(pages[i]); 294 - SetPageUptodate(pages[i]); 295 - } else { 296 - struct pnfs_block_extent *be_read; 297 306 298 - be_read = (hole && cow_read) ? cow_read : be; 307 + /* invalidate map */ 308 + map.start = NFS4_MAX_UINT64; 309 + } else { 299 310 bio = do_add_page_to_bio(bio, 300 - hdr->page_array.npages - i, 311 + header->page_array.npages - i, 301 312 READ, 302 - isect, pages[i], be_read, 313 + isect, pages[i], &map, &be, 303 314 bl_end_io_read, par, 304 - pg_offset, pg_len); 315 + pg_offset, &pg_len); 305 316 if (IS_ERR(bio)) { 306 317 header->pnfs_error = PTR_ERR(bio); 307 318 bio = NULL; ··· 299 330 } 300 331 } 301 332 isect += (pg_len >> SECTOR_SHIFT); 302 - extent_length -= PAGE_CACHE_SECTORS; 333 + extent_length -= (pg_len >> SECTOR_SHIFT); 334 + f_offset += pg_len; 335 + bytes_left -= pg_len; 303 336 } 304 337 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { 305 - hdr->res.eof = 1; 306 - hdr->res.count = header->inode->i_size - hdr->args.offset; 338 + header->res.eof = 1; 339 + header->res.count = header->inode->i_size - header->args.offset; 307 340 } else { 308 - hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset; 341 + header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; 309 342 } 310 343 out: 311 - bl_put_extent(be); 312 - bl_put_extent(cow_read); 313 344 bl_submit_bio(READ, bio); 345 + blk_finish_plug(&plug); 314 346 put_parallel(par); 315 347 return PNFS_ATTEMPTED; 316 - 317 - use_mds: 318 - dprintk("Giving up and using normal NFS\n"); 319 - return PNFS_NOT_ATTEMPTED; 320 - } 321 - 322 - static void mark_extents_written(struct pnfs_block_layout *bl, 323 - __u64 offset, __u32 count) 324 - { 325 - sector_t isect, end; 326 - struct pnfs_block_extent *be; 327 - struct pnfs_block_short_extent *se; 328 - 329 - dprintk("%s(%llu, %u)\n", 
__func__, offset, count); 330 - if (count == 0) 331 - return; 332 - isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT; 333 - end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK); 334 - end >>= SECTOR_SHIFT; 335 - while (isect < end) { 336 - sector_t len; 337 - be = bl_find_get_extent(bl, isect, NULL); 338 - BUG_ON(!be); /* FIXME */ 339 - len = min(end, be->be_f_offset + be->be_length) - isect; 340 - if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 341 - se = bl_pop_one_short_extent(be->be_inval); 342 - BUG_ON(!se); 343 - bl_mark_for_commit(be, isect, len, se); 344 - } 345 - isect += len; 346 - bl_put_extent(be); 347 - } 348 - } 349 - 350 - static void bl_end_io_write_zero(struct bio *bio, int err) 351 - { 352 - struct parallel_io *par = bio->bi_private; 353 - struct bio_vec *bvec; 354 - int i; 355 - 356 - bio_for_each_segment_all(bvec, bio, i) { 357 - /* This is the zeroing page we added */ 358 - end_page_writeback(bvec->bv_page); 359 - page_cache_release(bvec->bv_page); 360 - } 361 - 362 - if (unlikely(err)) { 363 - struct nfs_pgio_header *header = par->data; 364 - 365 - if (!header->pnfs_error) 366 - header->pnfs_error = -EIO; 367 - pnfs_set_lo_fail(header->lseg); 368 - } 369 - bio_put(bio); 370 - put_parallel(par); 371 348 } 372 349 373 350 static void bl_end_io_write(struct bio *bio, int err) ··· 336 421 */ 337 422 static void bl_write_cleanup(struct work_struct *work) 338 423 { 339 - struct rpc_task *task; 340 - struct nfs_pgio_header *hdr; 424 + struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work); 425 + struct nfs_pgio_header *hdr = 426 + container_of(task, struct nfs_pgio_header, task); 427 + 341 428 dprintk("%s enter\n", __func__); 342 - task = container_of(work, struct rpc_task, u.tk_work); 343 - hdr = container_of(task, struct nfs_pgio_header, task); 429 + 344 430 if (likely(!hdr->pnfs_error)) { 345 - /* Marks for LAYOUTCOMMIT */ 346 - mark_extents_written(BLK_LSEG2EXT(hdr->lseg), 347 - hdr->args.offset, 
hdr->args.count); 431 + struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); 432 + u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK; 433 + u64 end = (hdr->args.offset + hdr->args.count + 434 + PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK; 435 + 436 + ext_tree_mark_written(bl, start >> SECTOR_SHIFT, 437 + (end - start) >> SECTOR_SHIFT); 348 438 } 439 + 349 440 pnfs_ld_write_done(hdr); 350 441 } 351 442 352 443 /* Called when last of bios associated with a bl_write_pagelist call finishes */ 353 - static void bl_end_par_io_write(void *data, int num_se) 444 + static void bl_end_par_io_write(void *data) 354 445 { 355 446 struct nfs_pgio_header *hdr = data; 356 - 357 - if (unlikely(hdr->pnfs_error)) { 358 - bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval, 359 - num_se); 360 - } 361 447 362 448 hdr->task.tk_status = hdr->pnfs_error; 363 449 hdr->verf.committed = NFS_FILE_SYNC; ··· 366 450 schedule_work(&hdr->task.u.tk_work); 367 451 } 368 452 369 - /* FIXME STUB - mark intersection of layout and page as bad, so is not 370 - * used again. 
371 - */ 372 - static void mark_bad_read(void) 373 - { 374 - return; 375 - } 376 - 377 - /* 378 - * map_block: map a requested I/0 block (isect) into an offset in the LVM 379 - * block_device 380 - */ 381 - static void 382 - map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be) 383 - { 384 - dprintk("%s enter be=%p\n", __func__, be); 385 - 386 - set_buffer_mapped(bh); 387 - bh->b_bdev = be->be_mdev; 388 - bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >> 389 - (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT); 390 - 391 - dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n", 392 - __func__, (unsigned long long)isect, (long)bh->b_blocknr, 393 - bh->b_size); 394 - return; 395 - } 396 - 397 - static void 398 - bl_read_single_end_io(struct bio *bio, int error) 399 - { 400 - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 401 - struct page *page = bvec->bv_page; 402 - 403 - /* Only one page in bvec */ 404 - unlock_page(page); 405 - } 406 - 407 - static int 408 - bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be, 409 - unsigned int offset, unsigned int len) 410 - { 411 - struct bio *bio; 412 - struct page *shadow_page; 413 - sector_t isect; 414 - char *kaddr, *kshadow_addr; 415 - int ret = 0; 416 - 417 - dprintk("%s: offset %u len %u\n", __func__, offset, len); 418 - 419 - shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 420 - if (shadow_page == NULL) 421 - return -ENOMEM; 422 - 423 - bio = bio_alloc(GFP_NOIO, 1); 424 - if (bio == NULL) 425 - return -ENOMEM; 426 - 427 - isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + 428 - (offset / SECTOR_SIZE); 429 - 430 - bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset; 431 - bio->bi_bdev = be->be_mdev; 432 - bio->bi_end_io = bl_read_single_end_io; 433 - 434 - lock_page(shadow_page); 435 - if (bio_add_page(bio, shadow_page, 436 - SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) { 437 - unlock_page(shadow_page); 438 - bio_put(bio); 
439 - return -EIO; 440 - } 441 - 442 - submit_bio(READ, bio); 443 - wait_on_page_locked(shadow_page); 444 - if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) { 445 - ret = -EIO; 446 - } else { 447 - kaddr = kmap_atomic(page); 448 - kshadow_addr = kmap_atomic(shadow_page); 449 - memcpy(kaddr + offset, kshadow_addr + offset, len); 450 - kunmap_atomic(kshadow_addr); 451 - kunmap_atomic(kaddr); 452 - } 453 - __free_page(shadow_page); 454 - bio_put(bio); 455 - 456 - return ret; 457 - } 458 - 459 - static int 460 - bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be, 461 - unsigned int dirty_offset, unsigned int dirty_len, 462 - bool full_page) 463 - { 464 - int ret = 0; 465 - unsigned int start, end; 466 - 467 - if (full_page) { 468 - start = 0; 469 - end = PAGE_CACHE_SIZE; 470 - } else { 471 - start = round_down(dirty_offset, SECTOR_SIZE); 472 - end = round_up(dirty_offset + dirty_len, SECTOR_SIZE); 473 - } 474 - 475 - dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len); 476 - if (!be) { 477 - zero_user_segments(page, start, dirty_offset, 478 - dirty_offset + dirty_len, end); 479 - if (start == 0 && end == PAGE_CACHE_SIZE && 480 - trylock_page(page)) { 481 - SetPageUptodate(page); 482 - unlock_page(page); 483 - } 484 - return ret; 485 - } 486 - 487 - if (start != dirty_offset) 488 - ret = bl_do_readpage_sync(page, be, start, dirty_offset - start); 489 - 490 - if (!ret && (dirty_offset + dirty_len < end)) 491 - ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len, 492 - end - dirty_offset - dirty_len); 493 - 494 - return ret; 495 - } 496 - 497 - /* Given an unmapped page, zero it or read in page for COW, page is locked 498 - * by caller. 
499 - */ 500 - static int 501 - init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read) 502 - { 503 - struct buffer_head *bh = NULL; 504 - int ret = 0; 505 - sector_t isect; 506 - 507 - dprintk("%s enter, %p\n", __func__, page); 508 - BUG_ON(PageUptodate(page)); 509 - if (!cow_read) { 510 - zero_user_segment(page, 0, PAGE_SIZE); 511 - SetPageUptodate(page); 512 - goto cleanup; 513 - } 514 - 515 - bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0); 516 - if (!bh) { 517 - ret = -ENOMEM; 518 - goto cleanup; 519 - } 520 - 521 - isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT; 522 - map_block(bh, isect, cow_read); 523 - if (!bh_uptodate_or_lock(bh)) 524 - ret = bh_submit_read(bh); 525 - if (ret) 526 - goto cleanup; 527 - SetPageUptodate(page); 528 - 529 - cleanup: 530 - if (bh) 531 - free_buffer_head(bh); 532 - if (ret) { 533 - /* Need to mark layout with bad read...should now 534 - * just use nfs4 for reads and writes. 535 - */ 536 - mark_bad_read(); 537 - } 538 - return ret; 539 - } 540 - 541 - /* Find or create a zeroing page marked being writeback. 542 - * Return ERR_PTR on error, NULL to indicate skip this page and page itself 543 - * to indicate write out. 
544 - */ 545 - static struct page * 546 - bl_find_get_zeroing_page(struct inode *inode, pgoff_t index, 547 - struct pnfs_block_extent *cow_read) 548 - { 549 - struct page *page; 550 - int locked = 0; 551 - page = find_get_page(inode->i_mapping, index); 552 - if (page) 553 - goto check_page; 554 - 555 - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 556 - if (unlikely(!page)) { 557 - dprintk("%s oom\n", __func__); 558 - return ERR_PTR(-ENOMEM); 559 - } 560 - locked = 1; 561 - 562 - check_page: 563 - /* PageDirty: Other will write this out 564 - * PageWriteback: Other is writing this out 565 - * PageUptodate: It was read before 566 - */ 567 - if (PageDirty(page) || PageWriteback(page)) { 568 - print_page(page); 569 - if (locked) 570 - unlock_page(page); 571 - page_cache_release(page); 572 - return NULL; 573 - } 574 - 575 - if (!locked) { 576 - lock_page(page); 577 - locked = 1; 578 - goto check_page; 579 - } 580 - if (!PageUptodate(page)) { 581 - /* New page, readin or zero it */ 582 - init_page_for_write(page, cow_read); 583 - } 584 - set_page_writeback(page); 585 - unlock_page(page); 586 - 587 - return page; 588 - } 589 - 590 453 static enum pnfs_try_status 591 454 bl_write_pagelist(struct nfs_pgio_header *header, int sync) 592 455 { 593 - int i, ret, npg_zero, pg_index, last = 0; 456 + struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg); 457 + struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; 594 458 struct bio *bio = NULL; 595 - struct pnfs_block_extent *be = NULL, *cow_read = NULL; 596 - sector_t isect, last_isect = 0, extent_length = 0; 459 + struct pnfs_block_extent be; 460 + sector_t isect, extent_length = 0; 597 461 struct parallel_io *par = NULL; 598 462 loff_t offset = header->args.offset; 599 463 size_t count = header->args.count; 600 - unsigned int pg_offset, pg_len, saved_len; 601 464 struct page **pages = header->args.pages; 602 - struct page *page; 603 - pgoff_t index; 604 - u64 temp; 605 - int npg_per_block = 606 - 
NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT; 465 + int pg_index = pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 466 + unsigned int pg_len; 467 + struct blk_plug plug; 468 + int i; 607 469 608 470 dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); 609 471 610 - if (header->dreq != NULL && 611 - (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) || 612 - !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) { 613 - dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n"); 614 - goto out_mds; 615 - } 616 472 /* At this point, header->page_aray is a (sequential) list of nfs_pages. 617 473 * We want to write each, and if there is an error set pnfs_error 618 474 * to have it redone using nfs. 619 475 */ 620 476 par = alloc_parallel(header); 621 477 if (!par) 622 - goto out_mds; 478 + return PNFS_NOT_ATTEMPTED; 623 479 par->pnfs_callback = bl_end_par_io_write; 624 - /* At this point, have to be more careful with error handling */ 625 480 626 - isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); 627 - be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read); 628 - if (!be || !is_writable(be, isect)) { 629 - dprintk("%s no matching extents!\n", __func__); 630 - goto out_mds; 631 - } 481 + blk_start_plug(&plug); 632 482 633 - /* First page inside INVALID extent */ 634 - if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 635 - if (likely(!bl_push_one_short_extent(be->be_inval))) 636 - par->bse_count++; 637 - else 638 - goto out_mds; 639 - temp = offset >> PAGE_CACHE_SHIFT; 640 - npg_zero = do_div(temp, npg_per_block); 641 - isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) & 642 - (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); 643 - extent_length = be->be_length - (isect - be->be_f_offset); 483 + /* we always write out the whole page */ 484 + offset = offset & (loff_t)PAGE_CACHE_MASK; 485 + isect = offset >> SECTOR_SHIFT; 644 486 645 - fill_invalid_ext: 646 - dprintk("%s need to zero %d 
pages\n", __func__, npg_zero); 647 - for (;npg_zero > 0; npg_zero--) { 648 - if (bl_is_sector_init(be->be_inval, isect)) { 649 - dprintk("isect %llu already init\n", 650 - (unsigned long long)isect); 651 - goto next_page; 652 - } 653 - /* page ref released in bl_end_io_write_zero */ 654 - index = isect >> PAGE_CACHE_SECTOR_SHIFT; 655 - dprintk("%s zero %dth page: index %lu isect %llu\n", 656 - __func__, npg_zero, index, 657 - (unsigned long long)isect); 658 - page = bl_find_get_zeroing_page(header->inode, index, 659 - cow_read); 660 - if (unlikely(IS_ERR(page))) { 661 - header->pnfs_error = PTR_ERR(page); 662 - goto out; 663 - } else if (page == NULL) 664 - goto next_page; 665 - 666 - ret = bl_mark_sectors_init(be->be_inval, isect, 667 - PAGE_CACHE_SECTORS); 668 - if (unlikely(ret)) { 669 - dprintk("%s bl_mark_sectors_init fail %d\n", 670 - __func__, ret); 671 - end_page_writeback(page); 672 - page_cache_release(page); 673 - header->pnfs_error = ret; 674 - goto out; 675 - } 676 - if (likely(!bl_push_one_short_extent(be->be_inval))) 677 - par->bse_count++; 678 - else { 679 - end_page_writeback(page); 680 - page_cache_release(page); 681 - header->pnfs_error = -ENOMEM; 682 - goto out; 683 - } 684 - /* FIXME: This should be done in bi_end_io */ 685 - mark_extents_written(BLK_LSEG2EXT(header->lseg), 686 - page->index << PAGE_CACHE_SHIFT, 687 - PAGE_CACHE_SIZE); 688 - 689 - bio = bl_add_page_to_bio(bio, npg_zero, WRITE, 690 - isect, page, be, 691 - bl_end_io_write_zero, par); 692 - if (IS_ERR(bio)) { 693 - header->pnfs_error = PTR_ERR(bio); 694 - bio = NULL; 695 - goto out; 696 - } 697 - next_page: 698 - isect += PAGE_CACHE_SECTORS; 699 - extent_length -= PAGE_CACHE_SECTORS; 700 - } 701 - if (last) 702 - goto write_done; 703 - } 704 - bio = bl_submit_bio(WRITE, bio); 705 - 706 - /* Middle pages */ 707 - pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 708 487 for (i = pg_index; i < header->page_array.npages; i++) { 709 - if (!extent_length) { 488 + if (extent_length 
<= 0) { 710 489 /* We've used up the previous extent */ 711 - bl_put_extent(be); 712 - bl_put_extent(cow_read); 713 490 bio = bl_submit_bio(WRITE, bio); 714 491 /* Get the next one */ 715 - be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), 716 - isect, &cow_read); 717 - if (!be || !is_writable(be, isect)) { 492 + if (!ext_tree_lookup(bl, isect, &be, true)) { 718 493 header->pnfs_error = -EINVAL; 719 494 goto out; 720 495 } 721 - if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 722 - if (likely(!bl_push_one_short_extent( 723 - be->be_inval))) 724 - par->bse_count++; 725 - else { 726 - header->pnfs_error = -ENOMEM; 727 - goto out; 728 - } 729 - } 730 - extent_length = be->be_length - 731 - (isect - be->be_f_offset); 496 + 497 + extent_length = be.be_length - (isect - be.be_f_offset); 732 498 } 733 499 734 - dprintk("%s offset %lld count %Zu\n", __func__, offset, count); 735 - pg_offset = offset & ~PAGE_CACHE_MASK; 736 - if (pg_offset + count > PAGE_CACHE_SIZE) 737 - pg_len = PAGE_CACHE_SIZE - pg_offset; 738 - else 739 - pg_len = count; 740 - 741 - saved_len = pg_len; 742 - if (be->be_state == PNFS_BLOCK_INVALID_DATA && 743 - !bl_is_sector_init(be->be_inval, isect)) { 744 - ret = bl_read_partial_page_sync(pages[i], cow_read, 745 - pg_offset, pg_len, true); 746 - if (ret) { 747 - dprintk("%s bl_read_partial_page_sync fail %d\n", 748 - __func__, ret); 749 - header->pnfs_error = ret; 750 - goto out; 751 - } 752 - 753 - ret = bl_mark_sectors_init(be->be_inval, isect, 754 - PAGE_CACHE_SECTORS); 755 - if (unlikely(ret)) { 756 - dprintk("%s bl_mark_sectors_init fail %d\n", 757 - __func__, ret); 758 - header->pnfs_error = ret; 759 - goto out; 760 - } 761 - 762 - /* Expand to full page write */ 763 - pg_offset = 0; 764 - pg_len = PAGE_CACHE_SIZE; 765 - } else if ((pg_offset & (SECTOR_SIZE - 1)) || 766 - (pg_len & (SECTOR_SIZE - 1))){ 767 - /* ahh, nasty case. We have to do sync full sector 768 - * read-modify-write cycles. 
769 - */ 770 - unsigned int saved_offset = pg_offset; 771 - ret = bl_read_partial_page_sync(pages[i], be, pg_offset, 772 - pg_len, false); 773 - pg_offset = round_down(pg_offset, SECTOR_SIZE); 774 - pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE) 775 - - pg_offset; 776 - } 777 - 778 - 500 + pg_len = PAGE_CACHE_SIZE; 779 501 bio = do_add_page_to_bio(bio, header->page_array.npages - i, 780 - WRITE, 781 - isect, pages[i], be, 502 + WRITE, isect, pages[i], &map, &be, 782 503 bl_end_io_write, par, 783 - pg_offset, pg_len); 504 + 0, &pg_len); 784 505 if (IS_ERR(bio)) { 785 506 header->pnfs_error = PTR_ERR(bio); 786 507 bio = NULL; 787 508 goto out; 788 509 } 789 - offset += saved_len; 790 - count -= saved_len; 791 - isect += PAGE_CACHE_SECTORS; 792 - last_isect = isect; 793 - extent_length -= PAGE_CACHE_SECTORS; 510 + 511 + offset += pg_len; 512 + count -= pg_len; 513 + isect += (pg_len >> SECTOR_SHIFT); 514 + extent_length -= (pg_len >> SECTOR_SHIFT); 794 515 } 795 516 796 - /* Last page inside INVALID extent */ 797 - if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 798 - bio = bl_submit_bio(WRITE, bio); 799 - temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT; 800 - npg_zero = npg_per_block - do_div(temp, npg_per_block); 801 - if (npg_zero < npg_per_block) { 802 - last = 1; 803 - goto fill_invalid_ext; 804 - } 805 - } 806 - 807 - write_done: 808 517 header->res.count = header->args.count; 809 518 out: 810 - bl_put_extent(be); 811 - bl_put_extent(cow_read); 812 519 bl_submit_bio(WRITE, bio); 520 + blk_finish_plug(&plug); 813 521 put_parallel(par); 814 522 return PNFS_ATTEMPTED; 815 - out_mds: 816 - bl_put_extent(be); 817 - bl_put_extent(cow_read); 818 - kfree(par); 819 - return PNFS_NOT_ATTEMPTED; 820 - } 821 - 822 - /* FIXME - range ignored */ 823 - static void 824 - release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range) 825 - { 826 - int i; 827 - struct pnfs_block_extent *be; 828 - 829 - spin_lock(&bl->bl_ext_lock); 830 - for (i = 0; i < 
EXTENT_LISTS; i++) { 831 - while (!list_empty(&bl->bl_extents[i])) { 832 - be = list_first_entry(&bl->bl_extents[i], 833 - struct pnfs_block_extent, 834 - be_node); 835 - list_del(&be->be_node); 836 - bl_put_extent(be); 837 - } 838 - } 839 - spin_unlock(&bl->bl_ext_lock); 840 - } 841 - 842 - static void 843 - release_inval_marks(struct pnfs_inval_markings *marks) 844 - { 845 - struct pnfs_inval_tracking *pos, *temp; 846 - struct pnfs_block_short_extent *se, *stemp; 847 - 848 - list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) { 849 - list_del(&pos->it_link); 850 - kfree(pos); 851 - } 852 - 853 - list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) { 854 - list_del(&se->bse_node); 855 - kfree(se); 856 - } 857 - return; 858 523 } 859 524 860 525 static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) 861 526 { 862 527 struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 528 + int err; 863 529 864 530 dprintk("%s enter\n", __func__); 865 - release_extents(bl, NULL); 866 - release_inval_marks(&bl->bl_inval); 531 + 532 + err = ext_tree_remove(bl, true, 0, LLONG_MAX); 533 + WARN_ON(err); 534 + 867 535 kfree(bl); 868 536 } 869 537 ··· 460 960 bl = kzalloc(sizeof(*bl), gfp_flags); 461 961 if (!bl) 462 962 return NULL; 963 + 964 + bl->bl_ext_rw = RB_ROOT; 965 + bl->bl_ext_ro = RB_ROOT; 463 966 spin_lock_init(&bl->bl_ext_lock); 464 - INIT_LIST_HEAD(&bl->bl_extents[0]); 465 - INIT_LIST_HEAD(&bl->bl_extents[1]); 466 - INIT_LIST_HEAD(&bl->bl_commit); 467 - INIT_LIST_HEAD(&bl->bl_committing); 468 - bl->bl_count = 0; 469 - bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT; 470 - BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize); 967 + 471 968 return &bl->bl_layout; 472 969 } 473 970 ··· 474 977 kfree(lseg); 475 978 } 476 979 477 - /* We pretty much ignore lseg, and store all data layout wide, so we 478 - * can correctly merge. 
479 - */ 480 - static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo, 481 - struct nfs4_layoutget_res *lgr, 482 - gfp_t gfp_flags) 483 - { 484 - struct pnfs_layout_segment *lseg; 485 - int status; 980 + /* Tracks info needed to ensure extents in layout obey constraints of spec */ 981 + struct layout_verification { 982 + u32 mode; /* R or RW */ 983 + u64 start; /* Expected start of next non-COW extent */ 984 + u64 inval; /* Start of INVAL coverage */ 985 + u64 cowread; /* End of COW read coverage */ 986 + }; 486 987 487 - dprintk("%s enter\n", __func__); 488 - lseg = kzalloc(sizeof(*lseg), gfp_flags); 988 + /* Verify the extent meets the layout requirements of the pnfs-block draft, 989 + * section 2.3.1. 990 + */ 991 + static int verify_extent(struct pnfs_block_extent *be, 992 + struct layout_verification *lv) 993 + { 994 + if (lv->mode == IOMODE_READ) { 995 + if (be->be_state == PNFS_BLOCK_READWRITE_DATA || 996 + be->be_state == PNFS_BLOCK_INVALID_DATA) 997 + return -EIO; 998 + if (be->be_f_offset != lv->start) 999 + return -EIO; 1000 + lv->start += be->be_length; 1001 + return 0; 1002 + } 1003 + /* lv->mode == IOMODE_RW */ 1004 + if (be->be_state == PNFS_BLOCK_READWRITE_DATA) { 1005 + if (be->be_f_offset != lv->start) 1006 + return -EIO; 1007 + if (lv->cowread > lv->start) 1008 + return -EIO; 1009 + lv->start += be->be_length; 1010 + lv->inval = lv->start; 1011 + return 0; 1012 + } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 1013 + if (be->be_f_offset != lv->start) 1014 + return -EIO; 1015 + lv->start += be->be_length; 1016 + return 0; 1017 + } else if (be->be_state == PNFS_BLOCK_READ_DATA) { 1018 + if (be->be_f_offset > lv->start) 1019 + return -EIO; 1020 + if (be->be_f_offset < lv->inval) 1021 + return -EIO; 1022 + if (be->be_f_offset < lv->cowread) 1023 + return -EIO; 1024 + /* It looks like you might want to min this with lv->start, 1025 + * but you really don't. 
1026 + */ 1027 + lv->inval = lv->inval + be->be_length; 1028 + lv->cowread = be->be_f_offset + be->be_length; 1029 + return 0; 1030 + } else 1031 + return -EIO; 1032 + } 1033 + 1034 + static int decode_sector_number(__be32 **rp, sector_t *sp) 1035 + { 1036 + uint64_t s; 1037 + 1038 + *rp = xdr_decode_hyper(*rp, &s); 1039 + if (s & 0x1ff) { 1040 + printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__); 1041 + return -1; 1042 + } 1043 + *sp = s >> SECTOR_SHIFT; 1044 + return 0; 1045 + } 1046 + 1047 + static int 1048 + bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo, 1049 + struct layout_verification *lv, struct list_head *extents, 1050 + gfp_t gfp_mask) 1051 + { 1052 + struct pnfs_block_extent *be; 1053 + struct nfs4_deviceid id; 1054 + int error; 1055 + __be32 *p; 1056 + 1057 + p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE); 1058 + if (!p) 1059 + return -EIO; 1060 + 1061 + be = kzalloc(sizeof(*be), GFP_NOFS); 1062 + if (!be) 1063 + return -ENOMEM; 1064 + 1065 + memcpy(&id, p, NFS4_DEVICEID4_SIZE); 1066 + p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 1067 + 1068 + error = -EIO; 1069 + be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id, 1070 + lo->plh_lc_cred, gfp_mask); 1071 + if (!be->be_device) 1072 + goto out_free_be; 1073 + 1074 + /* 1075 + * The next three values are read in as bytes, but stored in the 1076 + * extent structure in 512-byte granularity. 
1077 + */ 1078 + if (decode_sector_number(&p, &be->be_f_offset) < 0) 1079 + goto out_put_deviceid; 1080 + if (decode_sector_number(&p, &be->be_length) < 0) 1081 + goto out_put_deviceid; 1082 + if (decode_sector_number(&p, &be->be_v_offset) < 0) 1083 + goto out_put_deviceid; 1084 + be->be_state = be32_to_cpup(p++); 1085 + 1086 + error = verify_extent(be, lv); 1087 + if (error) { 1088 + dprintk("%s: extent verification failed\n", __func__); 1089 + goto out_put_deviceid; 1090 + } 1091 + 1092 + list_add_tail(&be->be_list, extents); 1093 + return 0; 1094 + 1095 + out_put_deviceid: 1096 + nfs4_put_deviceid_node(be->be_device); 1097 + out_free_be: 1098 + kfree(be); 1099 + return error; 1100 + } 1101 + 1102 + static struct pnfs_layout_segment * 1103 + bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr, 1104 + gfp_t gfp_mask) 1105 + { 1106 + struct layout_verification lv = { 1107 + .mode = lgr->range.iomode, 1108 + .start = lgr->range.offset >> SECTOR_SHIFT, 1109 + .inval = lgr->range.offset >> SECTOR_SHIFT, 1110 + .cowread = lgr->range.offset >> SECTOR_SHIFT, 1111 + }; 1112 + struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 1113 + struct pnfs_layout_segment *lseg; 1114 + struct xdr_buf buf; 1115 + struct xdr_stream xdr; 1116 + struct page *scratch; 1117 + int status, i; 1118 + uint32_t count; 1119 + __be32 *p; 1120 + LIST_HEAD(extents); 1121 + 1122 + dprintk("---> %s\n", __func__); 1123 + 1124 + lseg = kzalloc(sizeof(*lseg), gfp_mask); 489 1125 if (!lseg) 490 1126 return ERR_PTR(-ENOMEM); 491 - status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags); 1127 + 1128 + status = -ENOMEM; 1129 + scratch = alloc_page(gfp_mask); 1130 + if (!scratch) 1131 + goto out; 1132 + 1133 + xdr_init_decode_pages(&xdr, &buf, 1134 + lgr->layoutp->pages, lgr->layoutp->len); 1135 + xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE); 1136 + 1137 + status = -EIO; 1138 + p = xdr_inline_decode(&xdr, 4); 1139 + if (unlikely(!p)) 1140 + goto out_free_scratch; 1141 + 1142 
+ count = be32_to_cpup(p++); 1143 + dprintk("%s: number of extents %d\n", __func__, count); 1144 + 1145 + /* 1146 + * Decode individual extents, putting them in temporary staging area 1147 + * until whole layout is decoded to make error recovery easier. 1148 + */ 1149 + for (i = 0; i < count; i++) { 1150 + status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask); 1151 + if (status) 1152 + goto process_extents; 1153 + } 1154 + 1155 + if (lgr->range.offset + lgr->range.length != 1156 + lv.start << SECTOR_SHIFT) { 1157 + dprintk("%s Final length mismatch\n", __func__); 1158 + status = -EIO; 1159 + goto process_extents; 1160 + } 1161 + 1162 + if (lv.start < lv.cowread) { 1163 + dprintk("%s Final uncovered COW extent\n", __func__); 1164 + status = -EIO; 1165 + } 1166 + 1167 + process_extents: 1168 + while (!list_empty(&extents)) { 1169 + struct pnfs_block_extent *be = 1170 + list_first_entry(&extents, struct pnfs_block_extent, 1171 + be_list); 1172 + list_del(&be->be_list); 1173 + 1174 + if (!status) 1175 + status = ext_tree_insert(bl, be); 1176 + 1177 + if (status) { 1178 + nfs4_put_deviceid_node(be->be_device); 1179 + kfree(be); 1180 + } 1181 + } 1182 + 1183 + out_free_scratch: 1184 + __free_page(scratch); 1185 + out: 1186 + dprintk("%s returns %d\n", __func__, status); 492 1187 if (status) { 493 - /* We don't want to call the full-blown bl_free_lseg, 494 - * since on error extents were not touched. 
495 - */ 496 1188 kfree(lseg); 497 1189 return ERR_PTR(status); 498 1190 } ··· 689 1003 } 690 1004 691 1005 static void 692 - bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr, 693 - const struct nfs4_layoutcommit_args *arg) 1006 + bl_return_range(struct pnfs_layout_hdr *lo, 1007 + struct pnfs_layout_range *range) 694 1008 { 695 - dprintk("%s enter\n", __func__); 696 - encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg); 1009 + struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 1010 + sector_t offset = range->offset >> SECTOR_SHIFT, end; 1011 + 1012 + if (range->offset % 8) { 1013 + dprintk("%s: offset %lld not block size aligned\n", 1014 + __func__, range->offset); 1015 + return; 1016 + } 1017 + 1018 + if (range->length != NFS4_MAX_UINT64) { 1019 + if (range->length % 8) { 1020 + dprintk("%s: length %lld not block size aligned\n", 1021 + __func__, range->length); 1022 + return; 1023 + } 1024 + 1025 + end = offset + (range->length >> SECTOR_SHIFT); 1026 + } else { 1027 + end = round_down(NFS4_MAX_UINT64, PAGE_SIZE); 1028 + } 1029 + 1030 + ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end); 1031 + } 1032 + 1033 + static int 1034 + bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg) 1035 + { 1036 + return ext_tree_prepare_commit(arg); 697 1037 } 698 1038 699 1039 static void 700 1040 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata) 701 1041 { 702 - struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout; 703 - 704 - dprintk("%s enter\n", __func__); 705 - clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status); 706 - } 707 - 708 - static void free_blk_mountid(struct block_mount_id *mid) 709 - { 710 - if (mid) { 711 - struct pnfs_block_dev *dev, *tmp; 712 - 713 - /* No need to take bm_lock as we are last user freeing bm_devlist */ 714 - list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) { 715 - list_del(&dev->bm_node); 716 - bl_free_block_dev(dev); 717 - } 718 - 
kfree(mid); 719 - } 720 - } 721 - 722 - /* This is mostly copied from the filelayout_get_device_info function. 723 - * It seems much of this should be at the generic pnfs level. 724 - */ 725 - static struct pnfs_block_dev * 726 - nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, 727 - struct nfs4_deviceid *d_id) 728 - { 729 - struct pnfs_device *dev; 730 - struct pnfs_block_dev *rv; 731 - u32 max_resp_sz; 732 - int max_pages; 733 - struct page **pages = NULL; 734 - int i, rc; 735 - 736 - /* 737 - * Use the session max response size as the basis for setting 738 - * GETDEVICEINFO's maxcount 739 - */ 740 - max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 741 - max_pages = nfs_page_array_len(0, max_resp_sz); 742 - dprintk("%s max_resp_sz %u max_pages %d\n", 743 - __func__, max_resp_sz, max_pages); 744 - 745 - dev = kmalloc(sizeof(*dev), GFP_NOFS); 746 - if (!dev) { 747 - dprintk("%s kmalloc failed\n", __func__); 748 - return ERR_PTR(-ENOMEM); 749 - } 750 - 751 - pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS); 752 - if (pages == NULL) { 753 - kfree(dev); 754 - return ERR_PTR(-ENOMEM); 755 - } 756 - for (i = 0; i < max_pages; i++) { 757 - pages[i] = alloc_page(GFP_NOFS); 758 - if (!pages[i]) { 759 - rv = ERR_PTR(-ENOMEM); 760 - goto out_free; 761 - } 762 - } 763 - 764 - memcpy(&dev->dev_id, d_id, sizeof(*d_id)); 765 - dev->layout_type = LAYOUT_BLOCK_VOLUME; 766 - dev->pages = pages; 767 - dev->pgbase = 0; 768 - dev->pglen = PAGE_SIZE * max_pages; 769 - dev->mincount = 0; 770 - dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead; 771 - 772 - dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data); 773 - rc = nfs4_proc_getdeviceinfo(server, dev, NULL); 774 - dprintk("%s getdevice info returns %d\n", __func__, rc); 775 - if (rc) { 776 - rv = ERR_PTR(rc); 777 - goto out_free; 778 - } 779 - 780 - rv = nfs4_blk_decode_device(server, dev); 781 - out_free: 782 - for (i = 0; i < max_pages; i++) 783 - 
__free_page(pages[i]); 784 - kfree(pages); 785 - kfree(dev); 786 - return rv; 1042 + ext_tree_mark_committed(&lcdata->args, lcdata->res.status); 787 1043 } 788 1044 789 1045 static int 790 1046 bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) 791 1047 { 792 - struct block_mount_id *b_mt_id = NULL; 793 - struct pnfs_devicelist *dlist = NULL; 794 - struct pnfs_block_dev *bdev; 795 - LIST_HEAD(block_disklist); 796 - int status, i; 797 - 798 1048 dprintk("%s enter\n", __func__); 799 1049 800 1050 if (server->pnfs_blksize == 0) { 801 1051 dprintk("%s Server did not return blksize\n", __func__); 802 1052 return -EINVAL; 803 1053 } 804 - b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS); 805 - if (!b_mt_id) { 806 - status = -ENOMEM; 807 - goto out_error; 1054 + if (server->pnfs_blksize > PAGE_SIZE) { 1055 + printk(KERN_ERR "%s: pNFS blksize %d not supported.\n", 1056 + __func__, server->pnfs_blksize); 1057 + return -EINVAL; 808 1058 } 809 - /* Initialize nfs4 block layout mount id */ 810 - spin_lock_init(&b_mt_id->bm_lock); 811 - INIT_LIST_HEAD(&b_mt_id->bm_devlist); 812 1059 813 - dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS); 814 - if (!dlist) { 815 - status = -ENOMEM; 816 - goto out_error; 817 - } 818 - dlist->eof = 0; 819 - while (!dlist->eof) { 820 - status = nfs4_proc_getdevicelist(server, fh, dlist); 821 - if (status) 822 - goto out_error; 823 - dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n", 824 - __func__, dlist->num_devs, dlist->eof); 825 - for (i = 0; i < dlist->num_devs; i++) { 826 - bdev = nfs4_blk_get_deviceinfo(server, fh, 827 - &dlist->dev_id[i]); 828 - if (IS_ERR(bdev)) { 829 - status = PTR_ERR(bdev); 830 - goto out_error; 831 - } 832 - spin_lock(&b_mt_id->bm_lock); 833 - list_add(&bdev->bm_node, &b_mt_id->bm_devlist); 834 - spin_unlock(&b_mt_id->bm_lock); 835 - } 836 - } 837 - dprintk("%s SUCCESS\n", __func__); 838 - server->pnfs_ld_data = b_mt_id; 839 - 840 - out_return: 841 - kfree(dlist); 842 - return 
status; 843 - 844 - out_error: 845 - free_blk_mountid(b_mt_id); 846 - goto out_return; 847 - } 848 - 849 - static int 850 - bl_clear_layoutdriver(struct nfs_server *server) 851 - { 852 - struct block_mount_id *b_mt_id = server->pnfs_ld_data; 853 - 854 - dprintk("%s enter\n", __func__); 855 - free_blk_mountid(b_mt_id); 856 - dprintk("%s RETURNS\n", __func__); 857 1060 return 0; 858 1061 } 859 1062 860 1063 static bool 861 - is_aligned_req(struct nfs_page *req, unsigned int alignment) 1064 + is_aligned_req(struct nfs_pageio_descriptor *pgio, 1065 + struct nfs_page *req, unsigned int alignment) 862 1066 { 863 - return IS_ALIGNED(req->wb_offset, alignment) && 864 - IS_ALIGNED(req->wb_bytes, alignment); 1067 + /* 1068 + * Always accept buffered writes, higher layers take care of the 1069 + * right alignment. 1070 + */ 1071 + if (pgio->pg_dreq == NULL) 1072 + return true; 1073 + 1074 + if (!IS_ALIGNED(req->wb_offset, alignment)) 1075 + return false; 1076 + 1077 + if (IS_ALIGNED(req->wb_bytes, alignment)) 1078 + return true; 1079 + 1080 + if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) { 1081 + /* 1082 + * If the write goes up to the inode size, just write 1083 + * the full page. Data past the inode size is 1084 + * guaranteed to be zeroed by the higher level client 1085 + * code, and this behaviour is mandated by RFC 5663 1086 + * section 2.3.2. 
1087 + */ 1088 + return true; 1089 + } 1090 + 1091 + return false; 865 1092 } 866 1093 867 1094 static void 868 1095 bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 869 1096 { 870 - if (pgio->pg_dreq != NULL && 871 - !is_aligned_req(req, SECTOR_SIZE)) 1097 + if (!is_aligned_req(pgio, req, SECTOR_SIZE)) { 872 1098 nfs_pageio_reset_read_mds(pgio); 873 - else 874 - pnfs_generic_pg_init_read(pgio, req); 1099 + return; 1100 + } 1101 + 1102 + pnfs_generic_pg_init_read(pgio, req); 875 1103 } 876 1104 877 1105 /* ··· 796 1196 bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 797 1197 struct nfs_page *req) 798 1198 { 799 - if (pgio->pg_dreq != NULL && 800 - !is_aligned_req(req, SECTOR_SIZE)) 1199 + if (!is_aligned_req(pgio, req, SECTOR_SIZE)) 801 1200 return 0; 802 - 803 1201 return pnfs_generic_pg_test(pgio, prev, req); 804 1202 } 805 1203 ··· 827 1229 static void 828 1230 bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 829 1231 { 830 - if (pgio->pg_dreq != NULL && 831 - !is_aligned_req(req, PAGE_CACHE_SIZE)) { 832 - nfs_pageio_reset_write_mds(pgio); 833 - } else { 834 - u64 wb_size; 835 - if (pgio->pg_dreq == NULL) 836 - wb_size = pnfs_num_cont_bytes(pgio->pg_inode, 837 - req->wb_index); 838 - else 839 - wb_size = nfs_dreq_bytes_left(pgio->pg_dreq); 1232 + u64 wb_size; 840 1233 841 - pnfs_generic_pg_init_write(pgio, req, wb_size); 1234 + if (!is_aligned_req(pgio, req, PAGE_SIZE)) { 1235 + nfs_pageio_reset_write_mds(pgio); 1236 + return; 842 1237 } 1238 + 1239 + if (pgio->pg_dreq == NULL) 1240 + wb_size = pnfs_num_cont_bytes(pgio->pg_inode, 1241 + req->wb_index); 1242 + else 1243 + wb_size = nfs_dreq_bytes_left(pgio->pg_dreq); 1244 + 1245 + pnfs_generic_pg_init_write(pgio, req, wb_size); 843 1246 } 844 1247 845 1248 /* ··· 851 1252 bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, 852 1253 struct nfs_page *req) 853 1254 { 854 - if (pgio->pg_dreq != NULL && 855 - 
!is_aligned_req(req, PAGE_CACHE_SIZE)) 1255 + if (!is_aligned_req(pgio, req, PAGE_SIZE)) 856 1256 return 0; 857 - 858 1257 return pnfs_generic_pg_test(pgio, prev, req); 859 1258 } 860 1259 ··· 872 1275 .id = LAYOUT_BLOCK_VOLUME, 873 1276 .name = "LAYOUT_BLOCK_VOLUME", 874 1277 .owner = THIS_MODULE, 1278 + .flags = PNFS_LAYOUTRET_ON_SETATTR | 1279 + PNFS_READ_WHOLE_PAGE, 875 1280 .read_pagelist = bl_read_pagelist, 876 1281 .write_pagelist = bl_write_pagelist, 877 1282 .alloc_layout_hdr = bl_alloc_layout_hdr, 878 1283 .free_layout_hdr = bl_free_layout_hdr, 879 1284 .alloc_lseg = bl_alloc_lseg, 880 1285 .free_lseg = bl_free_lseg, 881 - .encode_layoutcommit = bl_encode_layoutcommit, 1286 + .return_range = bl_return_range, 1287 + .prepare_layoutcommit = bl_prepare_layoutcommit, 882 1288 .cleanup_layoutcommit = bl_cleanup_layoutcommit, 883 1289 .set_layoutdriver = bl_set_layoutdriver, 884 - .clear_layoutdriver = bl_clear_layoutdriver, 1290 + .alloc_deviceid_node = bl_alloc_deviceid_node, 1291 + .free_deviceid_node = bl_free_deviceid_node, 885 1292 .pg_read_ops = &bl_pg_read_ops, 886 1293 .pg_write_ops = &bl_pg_write_ops, 887 - }; 888 - 889 - static const struct rpc_pipe_ops bl_upcall_ops = { 890 - .upcall = rpc_pipe_generic_upcall, 891 - .downcall = bl_pipe_downcall, 892 - .destroy_msg = bl_pipe_destroy_msg, 893 - }; 894 - 895 - static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb, 896 - struct rpc_pipe *pipe) 897 - { 898 - struct dentry *dir, *dentry; 899 - 900 - dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME); 901 - if (dir == NULL) 902 - return ERR_PTR(-ENOENT); 903 - dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); 904 - dput(dir); 905 - return dentry; 906 - } 907 - 908 - static void nfs4blocklayout_unregister_sb(struct super_block *sb, 909 - struct rpc_pipe *pipe) 910 - { 911 - if (pipe->dentry) 912 - rpc_unlink(pipe->dentry); 913 - } 914 - 915 - static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, 916 - void *ptr) 
917 - { 918 - struct super_block *sb = ptr; 919 - struct net *net = sb->s_fs_info; 920 - struct nfs_net *nn = net_generic(net, nfs_net_id); 921 - struct dentry *dentry; 922 - int ret = 0; 923 - 924 - if (!try_module_get(THIS_MODULE)) 925 - return 0; 926 - 927 - if (nn->bl_device_pipe == NULL) { 928 - module_put(THIS_MODULE); 929 - return 0; 930 - } 931 - 932 - switch (event) { 933 - case RPC_PIPEFS_MOUNT: 934 - dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); 935 - if (IS_ERR(dentry)) { 936 - ret = PTR_ERR(dentry); 937 - break; 938 - } 939 - nn->bl_device_pipe->dentry = dentry; 940 - break; 941 - case RPC_PIPEFS_UMOUNT: 942 - if (nn->bl_device_pipe->dentry) 943 - nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe); 944 - break; 945 - default: 946 - ret = -ENOTSUPP; 947 - break; 948 - } 949 - module_put(THIS_MODULE); 950 - return ret; 951 - } 952 - 953 - static struct notifier_block nfs4blocklayout_block = { 954 - .notifier_call = rpc_pipefs_event, 955 - }; 956 - 957 - static struct dentry *nfs4blocklayout_register_net(struct net *net, 958 - struct rpc_pipe *pipe) 959 - { 960 - struct super_block *pipefs_sb; 961 - struct dentry *dentry; 962 - 963 - pipefs_sb = rpc_get_sb_net(net); 964 - if (!pipefs_sb) 965 - return NULL; 966 - dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); 967 - rpc_put_sb_net(net); 968 - return dentry; 969 - } 970 - 971 - static void nfs4blocklayout_unregister_net(struct net *net, 972 - struct rpc_pipe *pipe) 973 - { 974 - struct super_block *pipefs_sb; 975 - 976 - pipefs_sb = rpc_get_sb_net(net); 977 - if (pipefs_sb) { 978 - nfs4blocklayout_unregister_sb(pipefs_sb, pipe); 979 - rpc_put_sb_net(net); 980 - } 981 - } 982 - 983 - static int nfs4blocklayout_net_init(struct net *net) 984 - { 985 - struct nfs_net *nn = net_generic(net, nfs_net_id); 986 - struct dentry *dentry; 987 - 988 - init_waitqueue_head(&nn->bl_wq); 989 - nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); 990 - if (IS_ERR(nn->bl_device_pipe)) 991 - 
return PTR_ERR(nn->bl_device_pipe); 992 - dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe); 993 - if (IS_ERR(dentry)) { 994 - rpc_destroy_pipe_data(nn->bl_device_pipe); 995 - return PTR_ERR(dentry); 996 - } 997 - nn->bl_device_pipe->dentry = dentry; 998 - return 0; 999 - } 1000 - 1001 - static void nfs4blocklayout_net_exit(struct net *net) 1002 - { 1003 - struct nfs_net *nn = net_generic(net, nfs_net_id); 1004 - 1005 - nfs4blocklayout_unregister_net(net, nn->bl_device_pipe); 1006 - rpc_destroy_pipe_data(nn->bl_device_pipe); 1007 - nn->bl_device_pipe = NULL; 1008 - } 1009 - 1010 - static struct pernet_operations nfs4blocklayout_net_ops = { 1011 - .init = nfs4blocklayout_net_init, 1012 - .exit = nfs4blocklayout_net_exit, 1013 1294 }; 1014 1295 1015 1296 static int __init nfs4blocklayout_init(void) ··· 899 1424 ret = pnfs_register_layoutdriver(&blocklayout_type); 900 1425 if (ret) 901 1426 goto out; 902 - 903 - ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); 1427 + ret = bl_init_pipefs(); 904 1428 if (ret) 905 - goto out_remove; 906 - ret = register_pernet_subsys(&nfs4blocklayout_net_ops); 907 - if (ret) 908 - goto out_notifier; 909 - out: 910 - return ret; 1429 + goto out_unregister; 1430 + return 0; 911 1431 912 - out_notifier: 913 - rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); 914 - out_remove: 1432 + out_unregister: 915 1433 pnfs_unregister_layoutdriver(&blocklayout_type); 1434 + out: 916 1435 return ret; 917 1436 } 918 1437 ··· 915 1446 dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", 916 1447 __func__); 917 1448 918 - rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); 919 - unregister_pernet_subsys(&nfs4blocklayout_net_ops); 1449 + bl_cleanup_pipefs(); 920 1450 pnfs_unregister_layoutdriver(&blocklayout_type); 921 1451 } 922 1452
+101 -108
fs/nfs/blocklayout/blocklayout.h
··· 44 44 #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) 45 45 #define SECTOR_SIZE (1 << SECTOR_SHIFT) 46 46 47 - struct block_mount_id { 48 - spinlock_t bm_lock; /* protects list */ 49 - struct list_head bm_devlist; /* holds pnfs_block_dev */ 47 + struct pnfs_block_dev; 48 + 49 + enum pnfs_block_volume_type { 50 + PNFS_BLOCK_VOLUME_SIMPLE = 0, 51 + PNFS_BLOCK_VOLUME_SLICE = 1, 52 + PNFS_BLOCK_VOLUME_CONCAT = 2, 53 + PNFS_BLOCK_VOLUME_STRIPE = 3, 54 + }; 55 + 56 + #define PNFS_BLOCK_MAX_UUIDS 4 57 + #define PNFS_BLOCK_MAX_DEVICES 64 58 + 59 + /* 60 + * Random upper cap for the uuid length to avoid unbounded allocation. 61 + * Not actually limited by the protocol. 62 + */ 63 + #define PNFS_BLOCK_UUID_LEN 128 64 + 65 + 66 + struct pnfs_block_volume { 67 + enum pnfs_block_volume_type type; 68 + union { 69 + struct { 70 + int len; 71 + int nr_sigs; 72 + struct { 73 + u64 offset; 74 + u32 sig_len; 75 + u8 sig[PNFS_BLOCK_UUID_LEN]; 76 + } sigs[PNFS_BLOCK_MAX_UUIDS]; 77 + } simple; 78 + struct { 79 + u64 start; 80 + u64 len; 81 + u32 volume; 82 + } slice; 83 + struct { 84 + u32 volumes_count; 85 + u32 volumes[PNFS_BLOCK_MAX_DEVICES]; 86 + } concat; 87 + struct { 88 + u64 chunk_size; 89 + u32 volumes_count; 90 + u32 volumes[PNFS_BLOCK_MAX_DEVICES]; 91 + } stripe; 92 + }; 93 + }; 94 + 95 + struct pnfs_block_dev_map { 96 + sector_t start; 97 + sector_t len; 98 + 99 + sector_t disk_offset; 100 + struct block_device *bdev; 50 101 }; 51 102 52 103 struct pnfs_block_dev { 53 - struct list_head bm_node; 54 - struct nfs4_deviceid bm_mdevid; /* associated devid */ 55 - struct block_device *bm_mdev; /* meta device itself */ 56 - struct net *net; 104 + struct nfs4_deviceid_node node; 105 + 106 + u64 start; 107 + u64 len; 108 + 109 + u32 nr_children; 110 + struct pnfs_block_dev *children; 111 + u64 chunk_size; 112 + 113 + struct block_device *bdev; 114 + u64 disk_offset; 115 + 116 + bool (*map)(struct pnfs_block_dev *dev, u64 offset, 117 + struct pnfs_block_dev_map 
*map); 57 118 }; 58 119 59 120 enum exstate4 { ··· 124 63 PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */ 125 64 }; 126 65 127 - #define MY_MAX_TAGS (15) /* tag bitnums used must be less than this */ 128 - 129 - struct my_tree { 130 - sector_t mtt_step_size; /* Internal sector alignment */ 131 - struct list_head mtt_stub; /* Should be a radix tree */ 132 - }; 133 - 134 - struct pnfs_inval_markings { 135 - spinlock_t im_lock; 136 - struct my_tree im_tree; /* Sectors that need LAYOUTCOMMIT */ 137 - sector_t im_block_size; /* Server blocksize in sectors */ 138 - struct list_head im_extents; /* Short extents for INVAL->RW conversion */ 139 - }; 140 - 141 - struct pnfs_inval_tracking { 142 - struct list_head it_link; 143 - int it_sector; 144 - int it_tags; 145 - }; 146 - 147 66 /* sector_t fields are all in 512-byte sectors */ 148 67 struct pnfs_block_extent { 149 - struct kref be_refcnt; 150 - struct list_head be_node; /* link into lseg list */ 151 - struct nfs4_deviceid be_devid; /* FIXME: could use device cache instead */ 152 - struct block_device *be_mdev; 68 + union { 69 + struct rb_node be_node; 70 + struct list_head be_list; 71 + }; 72 + struct nfs4_deviceid_node *be_device; 153 73 sector_t be_f_offset; /* the starting offset in the file */ 154 74 sector_t be_length; /* the size of the extent */ 155 75 sector_t be_v_offset; /* the starting offset in the volume */ 156 76 enum exstate4 be_state; /* the state of this extent */ 157 - struct pnfs_inval_markings *be_inval; /* tracks INVAL->RW transition */ 77 + #define EXTENT_WRITTEN 1 78 + #define EXTENT_COMMITTING 2 79 + unsigned int be_tag; 158 80 }; 159 81 160 - /* Shortened extent used by LAYOUTCOMMIT */ 161 - struct pnfs_block_short_extent { 162 - struct list_head bse_node; 163 - struct nfs4_deviceid bse_devid; 164 - struct block_device *bse_mdev; 165 - sector_t bse_f_offset; /* the starting offset in the file */ 166 - sector_t bse_length; /* the size of the extent */ 167 - }; 168 - 169 - static inline void 
170 - BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize) 171 - { 172 - spin_lock_init(&marks->im_lock); 173 - INIT_LIST_HEAD(&marks->im_tree.mtt_stub); 174 - INIT_LIST_HEAD(&marks->im_extents); 175 - marks->im_block_size = blocksize; 176 - marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS, 177 - blocksize); 178 - } 179 - 180 - enum extentclass4 { 181 - RW_EXTENT = 0, /* READWRTE and INVAL */ 182 - RO_EXTENT = 1, /* READ and NONE */ 183 - EXTENT_LISTS = 2, 184 - }; 185 - 186 - static inline int bl_choose_list(enum exstate4 state) 187 - { 188 - if (state == PNFS_BLOCK_READ_DATA || state == PNFS_BLOCK_NONE_DATA) 189 - return RO_EXTENT; 190 - else 191 - return RW_EXTENT; 192 - } 82 + /* on the wire size of the extent */ 83 + #define BL_EXTENT_SIZE (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE) 193 84 194 85 struct pnfs_block_layout { 195 - struct pnfs_layout_hdr bl_layout; 196 - struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */ 86 + struct pnfs_layout_hdr bl_layout; 87 + struct rb_root bl_ext_rw; 88 + struct rb_root bl_ext_ro; 197 89 spinlock_t bl_ext_lock; /* Protects list manipulation */ 198 - struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */ 199 - struct list_head bl_commit; /* Needs layout commit */ 200 - struct list_head bl_committing; /* Layout committing */ 201 - unsigned int bl_count; /* entries in bl_commit */ 202 - sector_t bl_blocksize; /* Server blocksize in sectors */ 203 90 }; 204 - 205 - #define BLK_ID(lo) ((struct block_mount_id *)(NFS_SERVER(lo->plh_inode)->pnfs_ld_data)) 206 91 207 92 static inline struct pnfs_block_layout * 208 93 BLK_LO2EXT(struct pnfs_layout_hdr *lo) ··· 178 171 #define BL_DEVICE_REQUEST_PROC 0x1 /* User level process succeeds */ 179 172 #define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */ 180 173 181 - /* blocklayoutdev.c */ 182 - ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t); 183 - void bl_pipe_destroy_msg(struct rpc_pipe_msg 
*); 184 - void nfs4_blkdev_put(struct block_device *bdev); 185 - struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server, 186 - struct pnfs_device *dev); 187 - int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo, 188 - struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); 174 + /* dev.c */ 175 + struct nfs4_deviceid_node *bl_alloc_deviceid_node(struct nfs_server *server, 176 + struct pnfs_device *pdev, gfp_t gfp_mask); 177 + void bl_free_deviceid_node(struct nfs4_deviceid_node *d); 189 178 190 - /* blocklayoutdm.c */ 191 - void bl_free_block_dev(struct pnfs_block_dev *bdev); 179 + /* extent_tree.c */ 180 + int ext_tree_insert(struct pnfs_block_layout *bl, 181 + struct pnfs_block_extent *new); 182 + int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start, 183 + sector_t end); 184 + int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start, 185 + sector_t len); 186 + bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect, 187 + struct pnfs_block_extent *ret, bool rw); 188 + int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg); 189 + void ext_tree_mark_committed(struct nfs4_layoutcommit_args *arg, int status); 192 190 193 - /* extents.c */ 194 - struct pnfs_block_extent * 195 - bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect, 196 - struct pnfs_block_extent **cow_read); 197 - int bl_mark_sectors_init(struct pnfs_inval_markings *marks, 198 - sector_t offset, sector_t length); 199 - void bl_put_extent(struct pnfs_block_extent *be); 200 - struct pnfs_block_extent *bl_alloc_extent(void); 201 - int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect); 202 - int encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, 203 - struct xdr_stream *xdr, 204 - const struct nfs4_layoutcommit_args *arg); 205 - void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, 206 - const struct nfs4_layoutcommit_args *arg, 207 - int status); 208 - int bl_add_merge_extent(struct 
pnfs_block_layout *bl, 209 - struct pnfs_block_extent *new); 210 - int bl_mark_for_commit(struct pnfs_block_extent *be, 211 - sector_t offset, sector_t length, 212 - struct pnfs_block_short_extent *new); 213 - int bl_push_one_short_extent(struct pnfs_inval_markings *marks); 214 - struct pnfs_block_short_extent * 215 - bl_pop_one_short_extent(struct pnfs_inval_markings *marks); 216 - void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free); 191 + /* rpc_pipefs.c */ 192 + dev_t bl_resolve_deviceid(struct nfs_server *server, 193 + struct pnfs_block_volume *b, gfp_t gfp_mask); 194 + int __init bl_init_pipefs(void); 195 + void __exit bl_cleanup_pipefs(void); 217 196 218 197 #endif /* FS_NFS_NFS4BLOCKLAYOUT_H */
-384
fs/nfs/blocklayout/blocklayoutdev.c
··· 1 - /* 2 - * linux/fs/nfs/blocklayout/blocklayoutdev.c 3 - * 4 - * Device operations for the pnfs nfs4 file layout driver. 5 - * 6 - * Copyright (c) 2006 The Regents of the University of Michigan. 7 - * All rights reserved. 8 - * 9 - * Andy Adamson <andros@citi.umich.edu> 10 - * Fred Isaman <iisaman@umich.edu> 11 - * 12 - * permission is granted to use, copy, create derivative works and 13 - * redistribute this software and such derivative works for any purpose, 14 - * so long as the name of the university of michigan is not used in 15 - * any advertising or publicity pertaining to the use or distribution 16 - * of this software without specific, written prior authorization. if 17 - * the above copyright notice or any other identification of the 18 - * university of michigan is included in any copy of any portion of 19 - * this software, then the disclaimer below must also be included. 20 - * 21 - * this software is provided as is, without representation from the 22 - * university of michigan as to its fitness for any purpose, and without 23 - * warranty by the university of michigan of any kind, either express 24 - * or implied, including without limitation the implied warranties of 25 - * merchantability and fitness for a particular purpose. the regents 26 - * of the university of michigan shall not be liable for any damages, 27 - * including special, indirect, incidental, or consequential damages, 28 - * with respect to any claim arising out or in connection with the use 29 - * of the software, even if it has been or is hereafter advised of the 30 - * possibility of such damages. 
31 - */ 32 - #include <linux/module.h> 33 - #include <linux/buffer_head.h> /* __bread */ 34 - 35 - #include <linux/genhd.h> 36 - #include <linux/blkdev.h> 37 - #include <linux/hash.h> 38 - 39 - #include "blocklayout.h" 40 - 41 - #define NFSDBG_FACILITY NFSDBG_PNFS_LD 42 - 43 - static int decode_sector_number(__be32 **rp, sector_t *sp) 44 - { 45 - uint64_t s; 46 - 47 - *rp = xdr_decode_hyper(*rp, &s); 48 - if (s & 0x1ff) { 49 - printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__); 50 - return -1; 51 - } 52 - *sp = s >> SECTOR_SHIFT; 53 - return 0; 54 - } 55 - 56 - /* 57 - * Release the block device 58 - */ 59 - void nfs4_blkdev_put(struct block_device *bdev) 60 - { 61 - dprintk("%s for device %d:%d\n", __func__, MAJOR(bdev->bd_dev), 62 - MINOR(bdev->bd_dev)); 63 - blkdev_put(bdev, FMODE_READ); 64 - } 65 - 66 - ssize_t bl_pipe_downcall(struct file *filp, const char __user *src, 67 - size_t mlen) 68 - { 69 - struct nfs_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info, 70 - nfs_net_id); 71 - 72 - if (mlen != sizeof (struct bl_dev_msg)) 73 - return -EINVAL; 74 - 75 - if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0) 76 - return -EFAULT; 77 - 78 - wake_up(&nn->bl_wq); 79 - 80 - return mlen; 81 - } 82 - 83 - void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg) 84 - { 85 - struct bl_pipe_msg *bl_pipe_msg = container_of(msg, struct bl_pipe_msg, msg); 86 - 87 - if (msg->errno >= 0) 88 - return; 89 - wake_up(bl_pipe_msg->bl_wq); 90 - } 91 - 92 - /* 93 - * Decodes pnfs_block_deviceaddr4 which is XDR encoded in dev->dev_addr_buf. 
94 - */ 95 - struct pnfs_block_dev * 96 - nfs4_blk_decode_device(struct nfs_server *server, 97 - struct pnfs_device *dev) 98 - { 99 - struct pnfs_block_dev *rv; 100 - struct block_device *bd = NULL; 101 - struct bl_pipe_msg bl_pipe_msg; 102 - struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; 103 - struct bl_msg_hdr bl_msg = { 104 - .type = BL_DEVICE_MOUNT, 105 - .totallen = dev->mincount, 106 - }; 107 - uint8_t *dataptr; 108 - DECLARE_WAITQUEUE(wq, current); 109 - int offset, len, i, rc; 110 - struct net *net = server->nfs_client->cl_net; 111 - struct nfs_net *nn = net_generic(net, nfs_net_id); 112 - struct bl_dev_msg *reply = &nn->bl_mount_reply; 113 - 114 - dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); 115 - dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data, 116 - dev->mincount); 117 - 118 - bl_pipe_msg.bl_wq = &nn->bl_wq; 119 - memset(msg, 0, sizeof(*msg)); 120 - msg->data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS); 121 - if (!msg->data) { 122 - rv = ERR_PTR(-ENOMEM); 123 - goto out; 124 - } 125 - 126 - memcpy(msg->data, &bl_msg, sizeof(bl_msg)); 127 - dataptr = (uint8_t *) msg->data; 128 - len = dev->mincount; 129 - offset = sizeof(bl_msg); 130 - for (i = 0; len > 0; i++) { 131 - memcpy(&dataptr[offset], page_address(dev->pages[i]), 132 - len < PAGE_CACHE_SIZE ? 
len : PAGE_CACHE_SIZE); 133 - len -= PAGE_CACHE_SIZE; 134 - offset += PAGE_CACHE_SIZE; 135 - } 136 - msg->len = sizeof(bl_msg) + dev->mincount; 137 - 138 - dprintk("%s CALLING USERSPACE DAEMON\n", __func__); 139 - add_wait_queue(&nn->bl_wq, &wq); 140 - rc = rpc_queue_upcall(nn->bl_device_pipe, msg); 141 - if (rc < 0) { 142 - remove_wait_queue(&nn->bl_wq, &wq); 143 - rv = ERR_PTR(rc); 144 - goto out; 145 - } 146 - 147 - set_current_state(TASK_UNINTERRUPTIBLE); 148 - schedule(); 149 - __set_current_state(TASK_RUNNING); 150 - remove_wait_queue(&nn->bl_wq, &wq); 151 - 152 - if (reply->status != BL_DEVICE_REQUEST_PROC) { 153 - dprintk("%s failed to open device: %d\n", 154 - __func__, reply->status); 155 - rv = ERR_PTR(-EINVAL); 156 - goto out; 157 - } 158 - 159 - bd = blkdev_get_by_dev(MKDEV(reply->major, reply->minor), 160 - FMODE_READ, NULL); 161 - if (IS_ERR(bd)) { 162 - dprintk("%s failed to open device : %ld\n", __func__, 163 - PTR_ERR(bd)); 164 - rv = ERR_CAST(bd); 165 - goto out; 166 - } 167 - 168 - rv = kzalloc(sizeof(*rv), GFP_NOFS); 169 - if (!rv) { 170 - rv = ERR_PTR(-ENOMEM); 171 - goto out; 172 - } 173 - 174 - rv->bm_mdev = bd; 175 - memcpy(&rv->bm_mdevid, &dev->dev_id, sizeof(struct nfs4_deviceid)); 176 - rv->net = net; 177 - dprintk("%s Created device %s with bd_block_size %u\n", 178 - __func__, 179 - bd->bd_disk->disk_name, 180 - bd->bd_block_size); 181 - 182 - out: 183 - kfree(msg->data); 184 - return rv; 185 - } 186 - 187 - /* Map deviceid returned by the server to constructed block_device */ 188 - static struct block_device *translate_devid(struct pnfs_layout_hdr *lo, 189 - struct nfs4_deviceid *id) 190 - { 191 - struct block_device *rv = NULL; 192 - struct block_mount_id *mid; 193 - struct pnfs_block_dev *dev; 194 - 195 - dprintk("%s enter, lo=%p, id=%p\n", __func__, lo, id); 196 - mid = BLK_ID(lo); 197 - spin_lock(&mid->bm_lock); 198 - list_for_each_entry(dev, &mid->bm_devlist, bm_node) { 199 - if (memcmp(id->data, dev->bm_mdevid.data, 200 - 
NFS4_DEVICEID4_SIZE) == 0) { 201 - rv = dev->bm_mdev; 202 - goto out; 203 - } 204 - } 205 - out: 206 - spin_unlock(&mid->bm_lock); 207 - dprintk("%s returning %p\n", __func__, rv); 208 - return rv; 209 - } 210 - 211 - /* Tracks info needed to ensure extents in layout obey constraints of spec */ 212 - struct layout_verification { 213 - u32 mode; /* R or RW */ 214 - u64 start; /* Expected start of next non-COW extent */ 215 - u64 inval; /* Start of INVAL coverage */ 216 - u64 cowread; /* End of COW read coverage */ 217 - }; 218 - 219 - /* Verify the extent meets the layout requirements of the pnfs-block draft, 220 - * section 2.3.1. 221 - */ 222 - static int verify_extent(struct pnfs_block_extent *be, 223 - struct layout_verification *lv) 224 - { 225 - if (lv->mode == IOMODE_READ) { 226 - if (be->be_state == PNFS_BLOCK_READWRITE_DATA || 227 - be->be_state == PNFS_BLOCK_INVALID_DATA) 228 - return -EIO; 229 - if (be->be_f_offset != lv->start) 230 - return -EIO; 231 - lv->start += be->be_length; 232 - return 0; 233 - } 234 - /* lv->mode == IOMODE_RW */ 235 - if (be->be_state == PNFS_BLOCK_READWRITE_DATA) { 236 - if (be->be_f_offset != lv->start) 237 - return -EIO; 238 - if (lv->cowread > lv->start) 239 - return -EIO; 240 - lv->start += be->be_length; 241 - lv->inval = lv->start; 242 - return 0; 243 - } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 244 - if (be->be_f_offset != lv->start) 245 - return -EIO; 246 - lv->start += be->be_length; 247 - return 0; 248 - } else if (be->be_state == PNFS_BLOCK_READ_DATA) { 249 - if (be->be_f_offset > lv->start) 250 - return -EIO; 251 - if (be->be_f_offset < lv->inval) 252 - return -EIO; 253 - if (be->be_f_offset < lv->cowread) 254 - return -EIO; 255 - /* It looks like you might want to min this with lv->start, 256 - * but you really don't. 
257 - */ 258 - lv->inval = lv->inval + be->be_length; 259 - lv->cowread = be->be_f_offset + be->be_length; 260 - return 0; 261 - } else 262 - return -EIO; 263 - } 264 - 265 - /* XDR decode pnfs_block_layout4 structure */ 266 - int 267 - nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo, 268 - struct nfs4_layoutget_res *lgr, gfp_t gfp_flags) 269 - { 270 - struct pnfs_block_layout *bl = BLK_LO2EXT(lo); 271 - int i, status = -EIO; 272 - uint32_t count; 273 - struct pnfs_block_extent *be = NULL, *save; 274 - struct xdr_stream stream; 275 - struct xdr_buf buf; 276 - struct page *scratch; 277 - __be32 *p; 278 - struct layout_verification lv = { 279 - .mode = lgr->range.iomode, 280 - .start = lgr->range.offset >> SECTOR_SHIFT, 281 - .inval = lgr->range.offset >> SECTOR_SHIFT, 282 - .cowread = lgr->range.offset >> SECTOR_SHIFT, 283 - }; 284 - LIST_HEAD(extents); 285 - 286 - dprintk("---> %s\n", __func__); 287 - 288 - scratch = alloc_page(gfp_flags); 289 - if (!scratch) 290 - return -ENOMEM; 291 - 292 - xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len); 293 - xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); 294 - 295 - p = xdr_inline_decode(&stream, 4); 296 - if (unlikely(!p)) 297 - goto out_err; 298 - 299 - count = be32_to_cpup(p++); 300 - 301 - dprintk("%s enter, number of extents %i\n", __func__, count); 302 - p = xdr_inline_decode(&stream, (28 + NFS4_DEVICEID4_SIZE) * count); 303 - if (unlikely(!p)) 304 - goto out_err; 305 - 306 - /* Decode individual extents, putting them in temporary 307 - * staging area until whole layout is decoded to make error 308 - * recovery easier. 
309 - */ 310 - for (i = 0; i < count; i++) { 311 - be = bl_alloc_extent(); 312 - if (!be) { 313 - status = -ENOMEM; 314 - goto out_err; 315 - } 316 - memcpy(&be->be_devid, p, NFS4_DEVICEID4_SIZE); 317 - p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 318 - be->be_mdev = translate_devid(lo, &be->be_devid); 319 - if (!be->be_mdev) 320 - goto out_err; 321 - 322 - /* The next three values are read in as bytes, 323 - * but stored as 512-byte sector lengths 324 - */ 325 - if (decode_sector_number(&p, &be->be_f_offset) < 0) 326 - goto out_err; 327 - if (decode_sector_number(&p, &be->be_length) < 0) 328 - goto out_err; 329 - if (decode_sector_number(&p, &be->be_v_offset) < 0) 330 - goto out_err; 331 - be->be_state = be32_to_cpup(p++); 332 - if (be->be_state == PNFS_BLOCK_INVALID_DATA) 333 - be->be_inval = &bl->bl_inval; 334 - if (verify_extent(be, &lv)) { 335 - dprintk("%s verify failed\n", __func__); 336 - goto out_err; 337 - } 338 - list_add_tail(&be->be_node, &extents); 339 - } 340 - if (lgr->range.offset + lgr->range.length != 341 - lv.start << SECTOR_SHIFT) { 342 - dprintk("%s Final length mismatch\n", __func__); 343 - be = NULL; 344 - goto out_err; 345 - } 346 - if (lv.start < lv.cowread) { 347 - dprintk("%s Final uncovered COW extent\n", __func__); 348 - be = NULL; 349 - goto out_err; 350 - } 351 - /* Extents decoded properly, now try to merge them in to 352 - * existing layout extents. 353 - */ 354 - spin_lock(&bl->bl_ext_lock); 355 - list_for_each_entry_safe(be, save, &extents, be_node) { 356 - list_del(&be->be_node); 357 - status = bl_add_merge_extent(bl, be); 358 - if (status) { 359 - spin_unlock(&bl->bl_ext_lock); 360 - /* This is a fairly catastrophic error, as the 361 - * entire layout extent lists are now corrupted. 362 - * We should have some way to distinguish this. 
363 - */ 364 - be = NULL; 365 - goto out_err; 366 - } 367 - } 368 - spin_unlock(&bl->bl_ext_lock); 369 - status = 0; 370 - out: 371 - __free_page(scratch); 372 - dprintk("%s returns %i\n", __func__, status); 373 - return status; 374 - 375 - out_err: 376 - bl_put_extent(be); 377 - while (!list_empty(&extents)) { 378 - be = list_first_entry(&extents, struct pnfs_block_extent, 379 - be_node); 380 - list_del(&be->be_node); 381 - bl_put_extent(be); 382 - } 383 - goto out; 384 - }
-108
fs/nfs/blocklayout/blocklayoutdm.c
··· 1 - /* 2 - * linux/fs/nfs/blocklayout/blocklayoutdm.c 3 - * 4 - * Module for the NFSv4.1 pNFS block layout driver. 5 - * 6 - * Copyright (c) 2007 The Regents of the University of Michigan. 7 - * All rights reserved. 8 - * 9 - * Fred Isaman <iisaman@umich.edu> 10 - * Andy Adamson <andros@citi.umich.edu> 11 - * 12 - * permission is granted to use, copy, create derivative works and 13 - * redistribute this software and such derivative works for any purpose, 14 - * so long as the name of the university of michigan is not used in 15 - * any advertising or publicity pertaining to the use or distribution 16 - * of this software without specific, written prior authorization. if 17 - * the above copyright notice or any other identification of the 18 - * university of michigan is included in any copy of any portion of 19 - * this software, then the disclaimer below must also be included. 20 - * 21 - * this software is provided as is, without representation from the 22 - * university of michigan as to its fitness for any purpose, and without 23 - * warranty by the university of michigan of any kind, either express 24 - * or implied, including without limitation the implied warranties of 25 - * merchantability and fitness for a particular purpose. the regents 26 - * of the university of michigan shall not be liable for any damages, 27 - * including special, indirect, incidental, or consequential damages, 28 - * with respect to any claim arising out or in connection with the use 29 - * of the software, even if it has been or is hereafter advised of the 30 - * possibility of such damages. 
31 - */ 32 - 33 - #include <linux/genhd.h> /* gendisk - used in a dprintk*/ 34 - #include <linux/sched.h> 35 - #include <linux/hash.h> 36 - 37 - #include "blocklayout.h" 38 - 39 - #define NFSDBG_FACILITY NFSDBG_PNFS_LD 40 - 41 - static void dev_remove(struct net *net, dev_t dev) 42 - { 43 - struct bl_pipe_msg bl_pipe_msg; 44 - struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; 45 - struct bl_dev_msg bl_umount_request; 46 - struct bl_msg_hdr bl_msg = { 47 - .type = BL_DEVICE_UMOUNT, 48 - .totallen = sizeof(bl_umount_request), 49 - }; 50 - uint8_t *dataptr; 51 - DECLARE_WAITQUEUE(wq, current); 52 - struct nfs_net *nn = net_generic(net, nfs_net_id); 53 - 54 - dprintk("Entering %s\n", __func__); 55 - 56 - bl_pipe_msg.bl_wq = &nn->bl_wq; 57 - memset(msg, 0, sizeof(*msg)); 58 - msg->len = sizeof(bl_msg) + bl_msg.totallen; 59 - msg->data = kzalloc(msg->len, GFP_NOFS); 60 - if (!msg->data) 61 - goto out; 62 - 63 - memset(&bl_umount_request, 0, sizeof(bl_umount_request)); 64 - bl_umount_request.major = MAJOR(dev); 65 - bl_umount_request.minor = MINOR(dev); 66 - 67 - memcpy(msg->data, &bl_msg, sizeof(bl_msg)); 68 - dataptr = (uint8_t *) msg->data; 69 - memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); 70 - 71 - add_wait_queue(&nn->bl_wq, &wq); 72 - if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { 73 - remove_wait_queue(&nn->bl_wq, &wq); 74 - goto out; 75 - } 76 - 77 - set_current_state(TASK_UNINTERRUPTIBLE); 78 - schedule(); 79 - __set_current_state(TASK_RUNNING); 80 - remove_wait_queue(&nn->bl_wq, &wq); 81 - 82 - out: 83 - kfree(msg->data); 84 - } 85 - 86 - /* 87 - * Release meta device 88 - */ 89 - static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev) 90 - { 91 - dprintk("%s Releasing\n", __func__); 92 - nfs4_blkdev_put(bdev->bm_mdev); 93 - dev_remove(bdev->net, bdev->bm_mdev->bd_dev); 94 - } 95 - 96 - void bl_free_block_dev(struct pnfs_block_dev *bdev) 97 - { 98 - if (bdev) { 99 - if (bdev->bm_mdev) { 100 - dprintk("%s Removing DM 
device: %d:%d\n", 101 - __func__, 102 - MAJOR(bdev->bm_mdev->bd_dev), 103 - MINOR(bdev->bm_mdev->bd_dev)); 104 - nfs4_blk_metadev_release(bdev); 105 - } 106 - kfree(bdev); 107 - } 108 - }
+363
fs/nfs/blocklayout/dev.c
··· 1 + /* 2 + * Copyright (c) 2014 Christoph Hellwig. 3 + */ 4 + #include <linux/sunrpc/svc.h> 5 + #include <linux/blkdev.h> 6 + #include <linux/nfs4.h> 7 + #include <linux/nfs_fs.h> 8 + #include <linux/nfs_xdr.h> 9 + 10 + #include "blocklayout.h" 11 + 12 + #define NFSDBG_FACILITY NFSDBG_PNFS_LD 13 + 14 + static void 15 + bl_free_device(struct pnfs_block_dev *dev) 16 + { 17 + if (dev->nr_children) { 18 + int i; 19 + 20 + for (i = 0; i < dev->nr_children; i++) 21 + bl_free_device(&dev->children[i]); 22 + kfree(dev->children); 23 + } else { 24 + if (dev->bdev) 25 + blkdev_put(dev->bdev, FMODE_READ); 26 + } 27 + } 28 + 29 + void 30 + bl_free_deviceid_node(struct nfs4_deviceid_node *d) 31 + { 32 + struct pnfs_block_dev *dev = 33 + container_of(d, struct pnfs_block_dev, node); 34 + 35 + bl_free_device(dev); 36 + kfree(dev); 37 + } 38 + 39 + static int 40 + nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b) 41 + { 42 + __be32 *p; 43 + int i; 44 + 45 + p = xdr_inline_decode(xdr, 4); 46 + if (!p) 47 + return -EIO; 48 + b->type = be32_to_cpup(p++); 49 + 50 + switch (b->type) { 51 + case PNFS_BLOCK_VOLUME_SIMPLE: 52 + p = xdr_inline_decode(xdr, 4); 53 + if (!p) 54 + return -EIO; 55 + b->simple.nr_sigs = be32_to_cpup(p++); 56 + if (!b->simple.nr_sigs) { 57 + dprintk("no signature\n"); 58 + return -EIO; 59 + } 60 + 61 + b->simple.len = 4 + 4; 62 + for (i = 0; i < b->simple.nr_sigs; i++) { 63 + p = xdr_inline_decode(xdr, 8 + 4); 64 + if (!p) 65 + return -EIO; 66 + p = xdr_decode_hyper(p, &b->simple.sigs[i].offset); 67 + b->simple.sigs[i].sig_len = be32_to_cpup(p++); 68 + 69 + p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len); 70 + if (!p) 71 + return -EIO; 72 + memcpy(&b->simple.sigs[i].sig, p, 73 + b->simple.sigs[i].sig_len); 74 + 75 + b->simple.len += 8 + 4 + b->simple.sigs[i].sig_len; 76 + } 77 + break; 78 + case PNFS_BLOCK_VOLUME_SLICE: 79 + p = xdr_inline_decode(xdr, 8 + 8 + 4); 80 + if (!p) 81 + return -EIO; 82 + p = xdr_decode_hyper(p, 
&b->slice.start); 83 + p = xdr_decode_hyper(p, &b->slice.len); 84 + b->slice.volume = be32_to_cpup(p++); 85 + break; 86 + case PNFS_BLOCK_VOLUME_CONCAT: 87 + p = xdr_inline_decode(xdr, 4); 88 + if (!p) 89 + return -EIO; 90 + b->concat.volumes_count = be32_to_cpup(p++); 91 + 92 + p = xdr_inline_decode(xdr, b->concat.volumes_count * 4); 93 + if (!p) 94 + return -EIO; 95 + for (i = 0; i < b->concat.volumes_count; i++) 96 + b->concat.volumes[i] = be32_to_cpup(p++); 97 + break; 98 + case PNFS_BLOCK_VOLUME_STRIPE: 99 + p = xdr_inline_decode(xdr, 8 + 4); 100 + if (!p) 101 + return -EIO; 102 + p = xdr_decode_hyper(p, &b->stripe.chunk_size); 103 + b->stripe.volumes_count = be32_to_cpup(p++); 104 + 105 + p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4); 106 + if (!p) 107 + return -EIO; 108 + for (i = 0; i < b->stripe.volumes_count; i++) 109 + b->stripe.volumes[i] = be32_to_cpup(p++); 110 + break; 111 + default: 112 + dprintk("unknown volume type!\n"); 113 + return -EIO; 114 + } 115 + 116 + return 0; 117 + } 118 + 119 + static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset, 120 + struct pnfs_block_dev_map *map) 121 + { 122 + map->start = dev->start; 123 + map->len = dev->len; 124 + map->disk_offset = dev->disk_offset; 125 + map->bdev = dev->bdev; 126 + return true; 127 + } 128 + 129 + static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset, 130 + struct pnfs_block_dev_map *map) 131 + { 132 + int i; 133 + 134 + for (i = 0; i < dev->nr_children; i++) { 135 + struct pnfs_block_dev *child = &dev->children[i]; 136 + 137 + if (child->start > offset || 138 + child->start + child->len <= offset) 139 + continue; 140 + 141 + child->map(child, offset - child->start, map); 142 + return true; 143 + } 144 + 145 + dprintk("%s: ran off loop!\n", __func__); 146 + return false; 147 + } 148 + 149 + static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, 150 + struct pnfs_block_dev_map *map) 151 + { 152 + struct pnfs_block_dev *child; 153 + u64 chunk; 154 + 
u32 chunk_idx; 155 + u64 disk_offset; 156 + 157 + chunk = div_u64(offset, dev->chunk_size); 158 + div_u64_rem(chunk, dev->nr_children, &chunk_idx); 159 + 160 + if (chunk_idx > dev->nr_children) { 161 + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", 162 + __func__, chunk_idx, offset, dev->chunk_size); 163 + /* error, should not happen */ 164 + return false; 165 + } 166 + 167 + /* truncate offset to the beginning of the stripe */ 168 + offset = chunk * dev->chunk_size; 169 + 170 + /* disk offset of the stripe */ 171 + disk_offset = div_u64(offset, dev->nr_children); 172 + 173 + child = &dev->children[chunk_idx]; 174 + child->map(child, disk_offset, map); 175 + 176 + map->start += offset; 177 + map->disk_offset += disk_offset; 178 + map->len = dev->chunk_size; 179 + return true; 180 + } 181 + 182 + static int 183 + bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d, 184 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask); 185 + 186 + 187 + static int 188 + bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, 189 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) 190 + { 191 + struct pnfs_block_volume *v = &volumes[idx]; 192 + dev_t dev; 193 + 194 + dev = bl_resolve_deviceid(server, v, gfp_mask); 195 + if (!dev) 196 + return -EIO; 197 + 198 + d->bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); 199 + if (IS_ERR(d->bdev)) { 200 + printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n", 201 + MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev)); 202 + return PTR_ERR(d->bdev); 203 + } 204 + 205 + 206 + d->len = i_size_read(d->bdev->bd_inode); 207 + d->map = bl_map_simple; 208 + 209 + printk(KERN_INFO "pNFS: using block device %s\n", 210 + d->bdev->bd_disk->disk_name); 211 + return 0; 212 + } 213 + 214 + static int 215 + bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d, 216 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) 217 + { 218 + struct pnfs_block_volume *v = &volumes[idx]; 219 + 
int ret; 220 + 221 + ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); 222 + if (ret) 223 + return ret; 224 + 225 + d->disk_offset = v->slice.start; 226 + d->len = v->slice.len; 227 + return 0; 228 + } 229 + 230 + static int 231 + bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d, 232 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) 233 + { 234 + struct pnfs_block_volume *v = &volumes[idx]; 235 + u64 len = 0; 236 + int ret, i; 237 + 238 + d->children = kcalloc(v->concat.volumes_count, 239 + sizeof(struct pnfs_block_dev), GFP_KERNEL); 240 + if (!d->children) 241 + return -ENOMEM; 242 + 243 + for (i = 0; i < v->concat.volumes_count; i++) { 244 + ret = bl_parse_deviceid(server, &d->children[i], 245 + volumes, v->concat.volumes[i], gfp_mask); 246 + if (ret) 247 + return ret; 248 + 249 + d->nr_children++; 250 + d->children[i].start += len; 251 + len += d->children[i].len; 252 + } 253 + 254 + d->len = len; 255 + d->map = bl_map_concat; 256 + return 0; 257 + } 258 + 259 + static int 260 + bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d, 261 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) 262 + { 263 + struct pnfs_block_volume *v = &volumes[idx]; 264 + u64 len = 0; 265 + int ret, i; 266 + 267 + d->children = kcalloc(v->stripe.volumes_count, 268 + sizeof(struct pnfs_block_dev), GFP_KERNEL); 269 + if (!d->children) 270 + return -ENOMEM; 271 + 272 + for (i = 0; i < v->stripe.volumes_count; i++) { 273 + ret = bl_parse_deviceid(server, &d->children[i], 274 + volumes, v->stripe.volumes[i], gfp_mask); 275 + if (ret) 276 + return ret; 277 + 278 + d->nr_children++; 279 + len += d->children[i].len; 280 + } 281 + 282 + d->len = len; 283 + d->chunk_size = v->stripe.chunk_size; 284 + d->map = bl_map_stripe; 285 + return 0; 286 + } 287 + 288 + static int 289 + bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d, 290 + struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) 291 + 
{ 292 + switch (volumes[idx].type) { 293 + case PNFS_BLOCK_VOLUME_SIMPLE: 294 + return bl_parse_simple(server, d, volumes, idx, gfp_mask); 295 + case PNFS_BLOCK_VOLUME_SLICE: 296 + return bl_parse_slice(server, d, volumes, idx, gfp_mask); 297 + case PNFS_BLOCK_VOLUME_CONCAT: 298 + return bl_parse_concat(server, d, volumes, idx, gfp_mask); 299 + case PNFS_BLOCK_VOLUME_STRIPE: 300 + return bl_parse_stripe(server, d, volumes, idx, gfp_mask); 301 + default: 302 + dprintk("unsupported volume type: %d\n", volumes[idx].type); 303 + return -EIO; 304 + } 305 + } 306 + 307 + struct nfs4_deviceid_node * 308 + bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, 309 + gfp_t gfp_mask) 310 + { 311 + struct nfs4_deviceid_node *node = NULL; 312 + struct pnfs_block_volume *volumes; 313 + struct pnfs_block_dev *top; 314 + struct xdr_stream xdr; 315 + struct xdr_buf buf; 316 + struct page *scratch; 317 + int nr_volumes, ret, i; 318 + __be32 *p; 319 + 320 + scratch = alloc_page(gfp_mask); 321 + if (!scratch) 322 + goto out; 323 + 324 + xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen); 325 + xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE); 326 + 327 + p = xdr_inline_decode(&xdr, sizeof(__be32)); 328 + if (!p) 329 + goto out_free_scratch; 330 + nr_volumes = be32_to_cpup(p++); 331 + 332 + volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume), 333 + gfp_mask); 334 + if (!volumes) 335 + goto out_free_scratch; 336 + 337 + for (i = 0; i < nr_volumes; i++) { 338 + ret = nfs4_block_decode_volume(&xdr, &volumes[i]); 339 + if (ret < 0) 340 + goto out_free_volumes; 341 + } 342 + 343 + top = kzalloc(sizeof(*top), gfp_mask); 344 + if (!top) 345 + goto out_free_volumes; 346 + 347 + ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask); 348 + if (ret) { 349 + bl_free_device(top); 350 + kfree(top); 351 + goto out_free_volumes; 352 + } 353 + 354 + node = &top->node; 355 + nfs4_init_deviceid_node(node, server, &pdev->dev_id); 
356 + 357 + out_free_volumes: 358 + kfree(volumes); 359 + out_free_scratch: 360 + __free_page(scratch); 361 + out: 362 + return node; 363 + }
+602
fs/nfs/blocklayout/extent_tree.c
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */

#include <linux/vmalloc.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

/* Convert the rb_node embedded in a pnfs_block_extent back to the extent. */
static inline struct pnfs_block_extent *
ext_node(struct rb_node *node)
{
	return rb_entry(node, struct pnfs_block_extent, be_node);
}

/* Lowest-offset extent in the tree, or NULL if the tree is empty. */
static struct pnfs_block_extent *
ext_tree_first(struct rb_root *root)
{
	struct rb_node *node = rb_first(root);
	return node ? ext_node(node) : NULL;
}

/* In-order predecessor of @be, or NULL. */
static struct pnfs_block_extent *
ext_tree_prev(struct pnfs_block_extent *be)
{
	struct rb_node *node = rb_prev(&be->be_node);
	return node ? ext_node(node) : NULL;
}

/* In-order successor of @be, or NULL. */
static struct pnfs_block_extent *
ext_tree_next(struct pnfs_block_extent *be)
{
	struct rb_node *node = rb_next(&be->be_node);
	return node ? ext_node(node) : NULL;
}

/* First file sector past the end of @be. */
static inline sector_t
ext_f_end(struct pnfs_block_extent *be)
{
	return be->be_f_offset + be->be_length;
}

/*
 * Find the extent containing @start.  If no extent contains it, return
 * the first extent beginning after @start (so callers can scan forward
 * from the lookup point), or NULL if none exists.
 */
static struct pnfs_block_extent *
__ext_tree_search(struct rb_root *root, sector_t start)
{
	struct rb_node *node = root->rb_node;
	struct pnfs_block_extent *be = NULL;

	while (node) {
		be = ext_node(node);
		if (start < be->be_f_offset)
			node = node->rb_left;
		else if (start >= ext_f_end(be))
			node = node->rb_right;
		else
			return be;
	}

	/* @be is the last node visited: the nearest miss on either side. */
	if (be) {
		if (start < be->be_f_offset)
			return be;

		if (start >= ext_f_end(be))
			return ext_tree_next(be);
	}

	return NULL;
}

/*
 * Two extents may be merged when they share state and device, are
 * file-contiguous, are physically contiguous (unless the state carries
 * no backing data), and - for invalid-data extents - carry the same tag.
 */
static bool
ext_can_merge(struct pnfs_block_extent *be1, struct pnfs_block_extent *be2)
{
	if (be1->be_state != be2->be_state)
		return false;
	if (be1->be_device != be2->be_device)
		return false;

	if (be1->be_f_offset + be1->be_length != be2->be_f_offset)
		return false;

	if (be1->be_state != PNFS_BLOCK_NONE_DATA &&
	    (be1->be_v_offset + be1->be_length != be2->be_v_offset))
		return false;

	if (be1->be_state == PNFS_BLOCK_INVALID_DATA &&
	    be1->be_tag != be2->be_tag)
		return false;

	return true;
}

/*
 * Fold @be into its predecessor when mergeable.  On merge, @be is
 * erased and freed and the grown predecessor returned; otherwise @be
 * itself is returned.
 */
static struct pnfs_block_extent *
ext_try_to_merge_left(struct rb_root *root, struct pnfs_block_extent *be)
{
	struct pnfs_block_extent *left = ext_tree_prev(be);

	if (left && ext_can_merge(left, be)) {
		left->be_length += be->be_length;
		rb_erase(&be->be_node, root);
		nfs4_put_deviceid_node(be->be_device);
		kfree(be);
		return left;
	}

	return be;
}

/*
 * Fold @be's successor into @be when mergeable; the successor is erased
 * and freed.  Always returns @be.
 */
static struct pnfs_block_extent *
ext_try_to_merge_right(struct rb_root *root, struct pnfs_block_extent *be)
{
	struct pnfs_block_extent *right = ext_tree_next(be);

	if (right && ext_can_merge(be, right)) {
		be->be_length += right->be_length;
		rb_erase(&right->be_node, root);
		nfs4_put_deviceid_node(right->be_device);
		kfree(right);
	}

	return be;
}

/*
 * Insert @new into @root.  @new must not overlap any existing extent
 * (overlap is a caller bug and triggers BUG()).  With @merge_ok, the
 * range may instead be folded into an adjacent mergeable extent, in
 * which case @new is freed.  Consumes @new either way.
 */
static void
__ext_tree_insert(struct rb_root *root,
		struct pnfs_block_extent *new, bool merge_ok)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct pnfs_block_extent *be;

	while (*p) {
		parent = *p;
		be = ext_node(parent);

		if (new->be_f_offset < be->be_f_offset) {
			if (merge_ok && ext_can_merge(new, be)) {
				/* extend @be downward to absorb @new */
				be->be_f_offset = new->be_f_offset;
				if (be->be_state != PNFS_BLOCK_NONE_DATA)
					be->be_v_offset = new->be_v_offset;
				be->be_length += new->be_length;
				be = ext_try_to_merge_left(root, be);
				goto free_new;
			}
			p = &(*p)->rb_left;
		} else if (new->be_f_offset >= ext_f_end(be)) {
			if (merge_ok && ext_can_merge(be, new)) {
				/* extend @be upward to absorb @new */
				be->be_length += new->be_length;
				be = ext_try_to_merge_right(root, be);
				goto free_new;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
		}
	}

	rb_link_node(&new->be_node, parent, p);
	rb_insert_color(&new->be_node, root);
	return;
free_new:
	nfs4_put_deviceid_node(new->be_device);
	kfree(new);
}

/*
 * Remove the range [start, end) from @root, trimming or splitting any
 * extent that partially overlaps it.  Allocates only when a middle
 * split is needed; returns 0 or -ENOMEM.
 */
static int
__ext_tree_remove(struct rb_root *root, sector_t start, sector_t end)
{
	struct pnfs_block_extent *be;
	sector_t len1 = 0, len2 = 0;
	sector_t orig_v_offset;
	sector_t orig_len;

	be = __ext_tree_search(root, start);
	if (!be)
		return 0;
	if (be->be_f_offset >= end)
		return 0;

	orig_v_offset = be->be_v_offset;
	orig_len = be->be_length;

	/* len1/len2: surviving head/tail of the first overlapping extent */
	if (start > be->be_f_offset)
		len1 = start - be->be_f_offset;
	if (ext_f_end(be) > end)
		len2 = ext_f_end(be) - end;

	if (len2 > 0) {
		if (len1 > 0) {
			/* hole punched in the middle: keep head, add tail */
			struct pnfs_block_extent *new;

			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			be->be_length = len1;

			new->be_f_offset = end;
			if (be->be_state != PNFS_BLOCK_NONE_DATA) {
				new->be_v_offset =
					orig_v_offset + orig_len - len2;
			}
			new->be_length = len2;
			new->be_state = be->be_state;
			new->be_tag = be->be_tag;
			new->be_device = nfs4_get_deviceid(be->be_device);

			__ext_tree_insert(root, new, true);
		} else {
			/* only the front removed: shift the extent up */
			be->be_f_offset = end;
			if (be->be_state != PNFS_BLOCK_NONE_DATA) {
				be->be_v_offset =
					orig_v_offset + orig_len - len2;
			}
			be->be_length = len2;
		}
	} else {
		/* removal extends past this extent: trim, then sweep */
		if (len1 > 0) {
			be->be_length = len1;
			be = ext_tree_next(be);
		}

		/* drop every extent wholly inside [start, end) */
		while (be && ext_f_end(be) <= end) {
			struct pnfs_block_extent *next = ext_tree_next(be);

			rb_erase(&be->be_node, root);
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
			be = next;
		}

		/* trim the front of the last partially covered extent */
		if (be && be->be_f_offset < end) {
			len1 = ext_f_end(be) - end;
			be->be_f_offset = end;
			if (be->be_state != PNFS_BLOCK_NONE_DATA)
				be->be_v_offset += be->be_length - len1;
			be->be_length = len1;
		}
	}

	return 0;
}

/*
 * Insert @new into the layout's read-write or read-only tree according
 * to its state.  Ranges already covered by existing extents win: the
 * overlapping parts of @new are discarded and only uncovered pieces are
 * inserted, retrying with the remainder.  Consumes @new.
 */
int
ext_tree_insert(struct pnfs_block_layout *bl, struct pnfs_block_extent *new)
{
	struct pnfs_block_extent *be;
	struct rb_root *root;
	int err = 0;

	switch (new->be_state) {
	case PNFS_BLOCK_READWRITE_DATA:
	case PNFS_BLOCK_INVALID_DATA:
		root = &bl->bl_ext_rw;
		break;
	case PNFS_BLOCK_READ_DATA:
	case PNFS_BLOCK_NONE_DATA:
		root = &bl->bl_ext_ro;
		break;
	default:
		dprintk("invalid extent type\n");
		return -EINVAL;
	}

	spin_lock(&bl->bl_ext_lock);
retry:
	be = __ext_tree_search(root, new->be_f_offset);
	if (!be || be->be_f_offset >= ext_f_end(new)) {
		/* no overlap at all */
		__ext_tree_insert(root, new, true);
	} else if (new->be_f_offset >= be->be_f_offset) {
		if (ext_f_end(new) <= ext_f_end(be)) {
			/* new is fully covered by an existing extent */
			nfs4_put_deviceid_node(new->be_device);
			kfree(new);
		} else {
			/* keep only the part of new past the overlap */
			sector_t new_len = ext_f_end(new) - ext_f_end(be);
			sector_t diff = new->be_length - new_len;

			new->be_f_offset += diff;
			new->be_v_offset += diff;
			new->be_length = new_len;
			goto retry;
		}
	} else if (ext_f_end(new) <= ext_f_end(be)) {
		/* only the front of new is uncovered */
		new->be_length = be->be_f_offset - new->be_f_offset;
		__ext_tree_insert(root, new, true);
	} else {
		/* new straddles be: insert the front, retry with the tail */
		struct pnfs_block_extent *split;
		sector_t new_len = ext_f_end(new) - ext_f_end(be);
		sector_t diff = new->be_length - new_len;

		split = kmemdup(new, sizeof(*new), GFP_ATOMIC);
		if (!split) {
			/* NOTE(review): -ENOMEM would better reflect the
			 * kmemdup failure here — confirm against callers. */
			err = -EINVAL;
			goto out;
		}

		split->be_length = be->be_f_offset - split->be_f_offset;
		split->be_device = nfs4_get_deviceid(new->be_device);
		__ext_tree_insert(root, split, true);

		new->be_f_offset += diff;
		new->be_v_offset += diff;
		new->be_length = new_len;
		goto retry;
	}
out:
	spin_unlock(&bl->bl_ext_lock);
	return err;
}

/*
 * Copy the extent containing @isect into *@ret.  Returns false when no
 * extent covers @isect.  Caller must hold bl_ext_lock.
 */
static bool
__ext_tree_lookup(struct rb_root *root, sector_t isect,
		struct pnfs_block_extent *ret)
{
	struct rb_node *node;
	struct pnfs_block_extent *be;

	node = root->rb_node;
	while (node) {
		be = ext_node(node);
		if (isect < be->be_f_offset)
			node = node->rb_left;
		else if (isect >= ext_f_end(be))
			node = node->rb_right;
		else {
			*ret = *be;
			return true;
		}
	}

	return false;
}

/*
 * Look up the extent covering @isect, checking the read-only tree first
 * for reads and falling back to the read-write tree.  The extent is
 * copied out under the lock, so *@ret stays valid after return.
 */
bool
ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
	    struct pnfs_block_extent *ret, bool rw)
{
	bool found = false;

	spin_lock(&bl->bl_ext_lock);
	if (!rw)
		found = __ext_tree_lookup(&bl->bl_ext_ro, isect, ret);
	if (!found)
		found = __ext_tree_lookup(&bl->bl_ext_rw, isect, ret);
	spin_unlock(&bl->bl_ext_lock);

	return found;
}

/*
 * Remove [start, end) from the read-only tree, and from the read-write
 * tree too when @rw is set.  Returns the first error encountered.
 */
int ext_tree_remove(struct pnfs_block_layout *bl, bool rw,
		sector_t start, sector_t end)
{
	int err, err2;

	spin_lock(&bl->bl_ext_lock);
	err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
	if (rw) {
		err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end);
		if (!err)
			err = err2;
	}
	spin_unlock(&bl->bl_ext_lock);

	return err;
}

/*
 * Split @be at sector @split: @be keeps the lower half, a newly
 * allocated extent holds the upper half and is inserted without
 * merging (the caller is about to retag one of the halves).
 */
static int
ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
		sector_t split)
{
	struct pnfs_block_extent *new;
	sector_t orig_len = be->be_length;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	be->be_length = split - be->be_f_offset;

	new->be_f_offset = split;
	if (be->be_state != PNFS_BLOCK_NONE_DATA)
		new->be_v_offset = be->be_v_offset + be->be_length;
	new->be_length = orig_len - be->be_length;
	new->be_state = be->be_state;
	new->be_tag = be->be_tag;
	new->be_device = nfs4_get_deviceid(be->be_device);

	__ext_tree_insert(root, new, false);
	return 0;
}

/*
 * Mark [start, start + len) as written: drop any COW/hole extents from
 * the read-only tree, then tag every covered invalid-data extent as
 * EXTENT_WRITTEN, splitting extents that straddle the range boundary.
 */
int
ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
		sector_t len)
{
	struct rb_root *root = &bl->bl_ext_rw;
	sector_t end = start + len;
	struct pnfs_block_extent *be;
	int err = 0;

	spin_lock(&bl->bl_ext_lock);
	/*
	 * First remove all COW extents or holes from written to range.
	 */
	err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
	if (err)
		goto out;

	/*
	 * Then mark all invalid extents in the range as written to.
	 */
	for (be = __ext_tree_search(root, start); be; be = ext_tree_next(be)) {
		if (be->be_f_offset >= end)
			break;

		if (be->be_state != PNFS_BLOCK_INVALID_DATA || be->be_tag)
			continue;

		if (be->be_f_offset < start) {
			struct pnfs_block_extent *left = ext_tree_prev(be);

			if (left && ext_can_merge(left, be)) {
				/* shift the head into the mergeable neighbor
				 * instead of allocating a split */
				sector_t diff = start - be->be_f_offset;

				left->be_length += diff;

				be->be_f_offset += diff;
				be->be_v_offset += diff;
				be->be_length -= diff;
			} else {
				err = ext_tree_split(root, be, start);
				if (err)
					goto out;
			}
		}

		if (ext_f_end(be) > end) {
			struct pnfs_block_extent *right = ext_tree_next(be);

			if (right && ext_can_merge(be, right)) {
				/* shift the tail into the mergeable neighbor */
				sector_t diff = end - be->be_f_offset;

				be->be_length -= diff;

				right->be_f_offset -= diff;
				right->be_v_offset -= diff;
				right->be_length += diff;
			} else {
				err = ext_tree_split(root, be, end);
				if (err)
					goto out;
			}
		}

		if (be->be_f_offset >= start && ext_f_end(be) <= end) {
			be->be_tag = EXTENT_WRITTEN;
			be = ext_try_to_merge_left(root, be);
			be = ext_try_to_merge_right(root, be);
		}
	}
out:
	spin_unlock(&bl->bl_ext_lock);
	return err;
}

/*
 * Release the layoutupdate buffer built by ext_tree_prepare_commit().
 * NOTE(review): in the multi-page case the pages come from
 * vmalloc_to_page() without a matching get_page(), and the vmalloc
 * area itself is never vfreed here — confirm buffer lifetime.
 */
static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
		size_t buffer_size)
{
	if (arg->layoutupdate_pages != &arg->layoutupdate_page) {
		int nr_pages = DIV_ROUND_UP(buffer_size, PAGE_SIZE), i;

		for (i = 0; i < nr_pages; i++)
			put_page(arg->layoutupdate_pages[i]);
		kfree(arg->layoutupdate_pages);
	} else {
		put_page(arg->layoutupdate_page);
	}
}

/*
 * Encode every written invalid-data extent into the buffer at @p,
 * tagging each as EXTENT_COMMITTING.  When the buffer is too small,
 * finishes counting and returns -ENOSPC so the caller can size a
 * bigger buffer from *@count.
 */
static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
		size_t buffer_size, size_t *count)
{
	struct pnfs_block_extent *be;
	int ret = 0;

	spin_lock(&bl->bl_ext_lock);
	for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
		if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
		    be->be_tag != EXTENT_WRITTEN)
			continue;

		(*count)++;
		if (*count * BL_EXTENT_SIZE > buffer_size) {
			/* keep counting..
*/ 494 + ret = -ENOSPC; 495 + continue; 496 + } 497 + 498 + p = xdr_encode_opaque_fixed(p, be->be_device->deviceid.data, 499 + NFS4_DEVICEID4_SIZE); 500 + p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT); 501 + p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT); 502 + p = xdr_encode_hyper(p, 0LL); 503 + *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA); 504 + 505 + be->be_tag = EXTENT_COMMITTING; 506 + } 507 + spin_unlock(&bl->bl_ext_lock); 508 + 509 + return ret; 510 + } 511 + 512 + int 513 + ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) 514 + { 515 + struct pnfs_block_layout *bl = BLK_LO2EXT(NFS_I(arg->inode)->layout); 516 + size_t count = 0, buffer_size = PAGE_SIZE; 517 + __be32 *start_p; 518 + int ret; 519 + 520 + dprintk("%s enter\n", __func__); 521 + 522 + arg->layoutupdate_page = alloc_page(GFP_NOFS); 523 + if (!arg->layoutupdate_page) 524 + return -ENOMEM; 525 + start_p = page_address(arg->layoutupdate_page); 526 + arg->layoutupdate_pages = &arg->layoutupdate_page; 527 + 528 + retry: 529 + ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count); 530 + if (unlikely(ret)) { 531 + ext_tree_free_commitdata(arg, buffer_size); 532 + 533 + buffer_size = sizeof(__be32) + BL_EXTENT_SIZE * count; 534 + count = 0; 535 + 536 + arg->layoutupdate_pages = 537 + kcalloc(DIV_ROUND_UP(buffer_size, PAGE_SIZE), 538 + sizeof(struct page *), GFP_NOFS); 539 + if (!arg->layoutupdate_pages) 540 + return -ENOMEM; 541 + 542 + start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL); 543 + if (!start_p) { 544 + kfree(arg->layoutupdate_pages); 545 + return -ENOMEM; 546 + } 547 + 548 + goto retry; 549 + } 550 + 551 + *start_p = cpu_to_be32(count); 552 + arg->layoutupdate_len = sizeof(__be32) + BL_EXTENT_SIZE * count; 553 + 554 + if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) { 555 + __be32 *p = start_p; 556 + int i = 0; 557 + 558 + for (p = start_p; 559 + p < start_p + arg->layoutupdate_len; 560 + p += PAGE_SIZE) { 561 + 
arg->layoutupdate_pages[i++] = vmalloc_to_page(p); 562 + } 563 + } 564 + 565 + dprintk("%s found %zu ranges\n", __func__, count); 566 + return 0; 567 + } 568 + 569 + void 570 + ext_tree_mark_committed(struct nfs4_layoutcommit_args *arg, int status) 571 + { 572 + struct pnfs_block_layout *bl = BLK_LO2EXT(NFS_I(arg->inode)->layout); 573 + struct rb_root *root = &bl->bl_ext_rw; 574 + struct pnfs_block_extent *be; 575 + 576 + dprintk("%s status %d\n", __func__, status); 577 + 578 + ext_tree_free_commitdata(arg, arg->layoutupdate_len); 579 + 580 + spin_lock(&bl->bl_ext_lock); 581 + for (be = ext_tree_first(root); be; be = ext_tree_next(be)) { 582 + if (be->be_state != PNFS_BLOCK_INVALID_DATA || 583 + be->be_tag != EXTENT_COMMITTING) 584 + continue; 585 + 586 + if (status) { 587 + /* 588 + * Mark as written and try again. 589 + * 590 + * XXX: some real error handling here wouldn't hurt.. 591 + */ 592 + be->be_tag = EXTENT_WRITTEN; 593 + } else { 594 + be->be_state = PNFS_BLOCK_READWRITE_DATA; 595 + be->be_tag = 0; 596 + } 597 + 598 + be = ext_try_to_merge_left(root, be); 599 + be = ext_try_to_merge_right(root, be); 600 + } 601 + spin_unlock(&bl->bl_ext_lock); 602 + }
-908
fs/nfs/blocklayout/extents.c
··· 1 - /* 2 - * linux/fs/nfs/blocklayout/blocklayout.h 3 - * 4 - * Module for the NFSv4.1 pNFS block layout driver. 5 - * 6 - * Copyright (c) 2006 The Regents of the University of Michigan. 7 - * All rights reserved. 8 - * 9 - * Andy Adamson <andros@citi.umich.edu> 10 - * Fred Isaman <iisaman@umich.edu> 11 - * 12 - * permission is granted to use, copy, create derivative works and 13 - * redistribute this software and such derivative works for any purpose, 14 - * so long as the name of the university of michigan is not used in 15 - * any advertising or publicity pertaining to the use or distribution 16 - * of this software without specific, written prior authorization. if 17 - * the above copyright notice or any other identification of the 18 - * university of michigan is included in any copy of any portion of 19 - * this software, then the disclaimer below must also be included. 20 - * 21 - * this software is provided as is, without representation from the 22 - * university of michigan as to its fitness for any purpose, and without 23 - * warranty by the university of michigan of any kind, either express 24 - * or implied, including without limitation the implied warranties of 25 - * merchantability and fitness for a particular purpose. the regents 26 - * of the university of michigan shall not be liable for any damages, 27 - * including special, indirect, incidental, or consequential damages, 28 - * with respect to any claim arising out or in connection with the use 29 - * of the software, even if it has been or is hereafter advised of the 30 - * possibility of such damages. 31 - */ 32 - 33 - #include "blocklayout.h" 34 - #define NFSDBG_FACILITY NFSDBG_PNFS_LD 35 - 36 - /* Bit numbers */ 37 - #define EXTENT_INITIALIZED 0 38 - #define EXTENT_WRITTEN 1 39 - #define EXTENT_IN_COMMIT 2 40 - #define INTERNAL_EXISTS MY_MAX_TAGS 41 - #define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1) 42 - 43 - /* Returns largest t<=s s.t. 
t%base==0 */ 44 - static inline sector_t normalize(sector_t s, int base) 45 - { 46 - sector_t tmp = s; /* Since do_div modifies its argument */ 47 - return s - sector_div(tmp, base); 48 - } 49 - 50 - static inline sector_t normalize_up(sector_t s, int base) 51 - { 52 - return normalize(s + base - 1, base); 53 - } 54 - 55 - /* Complete stub using list while determine API wanted */ 56 - 57 - /* Returns tags, or negative */ 58 - static int32_t _find_entry(struct my_tree *tree, u64 s) 59 - { 60 - struct pnfs_inval_tracking *pos; 61 - 62 - dprintk("%s(%llu) enter\n", __func__, s); 63 - list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { 64 - if (pos->it_sector > s) 65 - continue; 66 - else if (pos->it_sector == s) 67 - return pos->it_tags & INTERNAL_MASK; 68 - else 69 - break; 70 - } 71 - return -ENOENT; 72 - } 73 - 74 - static inline 75 - int _has_tag(struct my_tree *tree, u64 s, int32_t tag) 76 - { 77 - int32_t tags; 78 - 79 - dprintk("%s(%llu, %i) enter\n", __func__, s, tag); 80 - s = normalize(s, tree->mtt_step_size); 81 - tags = _find_entry(tree, s); 82 - if ((tags < 0) || !(tags & (1 << tag))) 83 - return 0; 84 - else 85 - return 1; 86 - } 87 - 88 - /* Creates entry with tag, or if entry already exists, unions tag to it. 89 - * If storage is not NULL, newly created entry will use it. 90 - * Returns number of entries added, or negative on error. 
91 - */ 92 - static int _add_entry(struct my_tree *tree, u64 s, int32_t tag, 93 - struct pnfs_inval_tracking *storage) 94 - { 95 - int found = 0; 96 - struct pnfs_inval_tracking *pos; 97 - 98 - dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage); 99 - list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { 100 - if (pos->it_sector > s) 101 - continue; 102 - else if (pos->it_sector == s) { 103 - found = 1; 104 - break; 105 - } else 106 - break; 107 - } 108 - if (found) { 109 - pos->it_tags |= (1 << tag); 110 - return 0; 111 - } else { 112 - struct pnfs_inval_tracking *new; 113 - new = storage; 114 - new->it_sector = s; 115 - new->it_tags = (1 << tag); 116 - list_add(&new->it_link, &pos->it_link); 117 - return 1; 118 - } 119 - } 120 - 121 - /* XXXX Really want option to not create */ 122 - /* Over range, unions tag with existing entries, else creates entry with tag */ 123 - static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length) 124 - { 125 - u64 i; 126 - 127 - dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length); 128 - for (i = normalize(s, tree->mtt_step_size); i < s + length; 129 - i += tree->mtt_step_size) 130 - if (_add_entry(tree, i, tag, NULL)) 131 - return -ENOMEM; 132 - return 0; 133 - } 134 - 135 - /* Ensure that future operations on given range of tree will not malloc */ 136 - static int _preload_range(struct pnfs_inval_markings *marks, 137 - u64 offset, u64 length) 138 - { 139 - u64 start, end, s; 140 - int count, i, used = 0, status = -ENOMEM; 141 - struct pnfs_inval_tracking **storage; 142 - struct my_tree *tree = &marks->im_tree; 143 - 144 - dprintk("%s(%llu, %llu) enter\n", __func__, offset, length); 145 - start = normalize(offset, tree->mtt_step_size); 146 - end = normalize_up(offset + length, tree->mtt_step_size); 147 - count = (int)(end - start) / (int)tree->mtt_step_size; 148 - 149 - /* Pre-malloc what memory we might need */ 150 - storage = kcalloc(count, sizeof(*storage), GFP_NOFS); 151 - if 
(!storage) 152 - return -ENOMEM; 153 - for (i = 0; i < count; i++) { 154 - storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking), 155 - GFP_NOFS); 156 - if (!storage[i]) 157 - goto out_cleanup; 158 - } 159 - 160 - spin_lock_bh(&marks->im_lock); 161 - for (s = start; s < end; s += tree->mtt_step_size) 162 - used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]); 163 - spin_unlock_bh(&marks->im_lock); 164 - 165 - status = 0; 166 - 167 - out_cleanup: 168 - for (i = used; i < count; i++) { 169 - if (!storage[i]) 170 - break; 171 - kfree(storage[i]); 172 - } 173 - kfree(storage); 174 - return status; 175 - } 176 - 177 - /* We are relying on page lock to serialize this */ 178 - int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect) 179 - { 180 - int rv; 181 - 182 - spin_lock_bh(&marks->im_lock); 183 - rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED); 184 - spin_unlock_bh(&marks->im_lock); 185 - return rv; 186 - } 187 - 188 - /* Assume start, end already sector aligned */ 189 - static int 190 - _range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag) 191 - { 192 - struct pnfs_inval_tracking *pos; 193 - u64 expect = 0; 194 - 195 - dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag); 196 - list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) { 197 - if (pos->it_sector >= end) 198 - continue; 199 - if (!expect) { 200 - if ((pos->it_sector == end - tree->mtt_step_size) && 201 - (pos->it_tags & (1 << tag))) { 202 - expect = pos->it_sector - tree->mtt_step_size; 203 - if (pos->it_sector < tree->mtt_step_size || expect < start) 204 - return 1; 205 - continue; 206 - } else { 207 - return 0; 208 - } 209 - } 210 - if (pos->it_sector != expect || !(pos->it_tags & (1 << tag))) 211 - return 0; 212 - expect -= tree->mtt_step_size; 213 - if (expect < start) 214 - return 1; 215 - } 216 - return 0; 217 - } 218 - 219 - static int is_range_written(struct pnfs_inval_markings *marks, 220 - sector_t start, sector_t end) 221 - { 
222 - int rv; 223 - 224 - spin_lock_bh(&marks->im_lock); 225 - rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN); 226 - spin_unlock_bh(&marks->im_lock); 227 - return rv; 228 - } 229 - 230 - /* Marks sectors in [offest, offset_length) as having been initialized. 231 - * All lengths are step-aligned, where step is min(pagesize, blocksize). 232 - * Currently assumes offset is page-aligned 233 - */ 234 - int bl_mark_sectors_init(struct pnfs_inval_markings *marks, 235 - sector_t offset, sector_t length) 236 - { 237 - sector_t start, end; 238 - 239 - dprintk("%s(offset=%llu,len=%llu) enter\n", 240 - __func__, (u64)offset, (u64)length); 241 - 242 - start = normalize(offset, marks->im_block_size); 243 - end = normalize_up(offset + length, marks->im_block_size); 244 - if (_preload_range(marks, start, end - start)) 245 - goto outerr; 246 - 247 - spin_lock_bh(&marks->im_lock); 248 - if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length)) 249 - goto out_unlock; 250 - spin_unlock_bh(&marks->im_lock); 251 - 252 - return 0; 253 - 254 - out_unlock: 255 - spin_unlock_bh(&marks->im_lock); 256 - outerr: 257 - return -ENOMEM; 258 - } 259 - 260 - /* Marks sectors in [offest, offset+length) as having been written to disk. 261 - * All lengths should be block aligned. 
262 - */ 263 - static int mark_written_sectors(struct pnfs_inval_markings *marks, 264 - sector_t offset, sector_t length) 265 - { 266 - int status; 267 - 268 - dprintk("%s(offset=%llu,len=%llu) enter\n", __func__, 269 - (u64)offset, (u64)length); 270 - spin_lock_bh(&marks->im_lock); 271 - status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length); 272 - spin_unlock_bh(&marks->im_lock); 273 - return status; 274 - } 275 - 276 - static void print_short_extent(struct pnfs_block_short_extent *be) 277 - { 278 - dprintk("PRINT SHORT EXTENT extent %p\n", be); 279 - if (be) { 280 - dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset); 281 - dprintk(" be_length %llu\n", (u64)be->bse_length); 282 - } 283 - } 284 - 285 - static void print_clist(struct list_head *list, unsigned int count) 286 - { 287 - struct pnfs_block_short_extent *be; 288 - unsigned int i = 0; 289 - 290 - ifdebug(FACILITY) { 291 - printk(KERN_DEBUG "****************\n"); 292 - printk(KERN_DEBUG "Extent list looks like:\n"); 293 - list_for_each_entry(be, list, bse_node) { 294 - i++; 295 - print_short_extent(be); 296 - } 297 - if (i != count) 298 - printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count); 299 - printk(KERN_DEBUG "****************\n"); 300 - } 301 - } 302 - 303 - /* Note: In theory, we should do more checking that devid's match between 304 - * old and new, but if they don't, the lists are too corrupt to salvage anyway. 
305 - */ 306 - /* Note this is very similar to bl_add_merge_extent */ 307 - static void add_to_commitlist(struct pnfs_block_layout *bl, 308 - struct pnfs_block_short_extent *new) 309 - { 310 - struct list_head *clist = &bl->bl_commit; 311 - struct pnfs_block_short_extent *old, *save; 312 - sector_t end = new->bse_f_offset + new->bse_length; 313 - 314 - dprintk("%s enter\n", __func__); 315 - print_short_extent(new); 316 - print_clist(clist, bl->bl_count); 317 - bl->bl_count++; 318 - /* Scan for proper place to insert, extending new to the left 319 - * as much as possible. 320 - */ 321 - list_for_each_entry_safe(old, save, clist, bse_node) { 322 - if (new->bse_f_offset < old->bse_f_offset) 323 - break; 324 - if (end <= old->bse_f_offset + old->bse_length) { 325 - /* Range is already in list */ 326 - bl->bl_count--; 327 - kfree(new); 328 - return; 329 - } else if (new->bse_f_offset <= 330 - old->bse_f_offset + old->bse_length) { 331 - /* new overlaps or abuts existing be */ 332 - if (new->bse_mdev == old->bse_mdev) { 333 - /* extend new to fully replace old */ 334 - new->bse_length += new->bse_f_offset - 335 - old->bse_f_offset; 336 - new->bse_f_offset = old->bse_f_offset; 337 - list_del(&old->bse_node); 338 - bl->bl_count--; 339 - kfree(old); 340 - } 341 - } 342 - } 343 - /* Note that if we never hit the above break, old will not point to a 344 - * valid extent. However, in that case &old->bse_node==list. 345 - */ 346 - list_add_tail(&new->bse_node, &old->bse_node); 347 - /* Scan forward for overlaps. If we find any, extend new and 348 - * remove the overlapped extent. 
349 - */ 350 - old = list_prepare_entry(new, clist, bse_node); 351 - list_for_each_entry_safe_continue(old, save, clist, bse_node) { 352 - if (end < old->bse_f_offset) 353 - break; 354 - /* new overlaps or abuts old */ 355 - if (new->bse_mdev == old->bse_mdev) { 356 - if (end < old->bse_f_offset + old->bse_length) { 357 - /* extend new to fully cover old */ 358 - end = old->bse_f_offset + old->bse_length; 359 - new->bse_length = end - new->bse_f_offset; 360 - } 361 - list_del(&old->bse_node); 362 - bl->bl_count--; 363 - kfree(old); 364 - } 365 - } 366 - dprintk("%s: after merging\n", __func__); 367 - print_clist(clist, bl->bl_count); 368 - } 369 - 370 - /* Note the range described by offset, length is guaranteed to be contained 371 - * within be. 372 - * new will be freed, either by this function or add_to_commitlist if they 373 - * decide not to use it, or after LAYOUTCOMMIT uses it in the commitlist. 374 - */ 375 - int bl_mark_for_commit(struct pnfs_block_extent *be, 376 - sector_t offset, sector_t length, 377 - struct pnfs_block_short_extent *new) 378 - { 379 - sector_t new_end, end = offset + length; 380 - struct pnfs_block_layout *bl = container_of(be->be_inval, 381 - struct pnfs_block_layout, 382 - bl_inval); 383 - 384 - mark_written_sectors(be->be_inval, offset, length); 385 - /* We want to add the range to commit list, but it must be 386 - * block-normalized, and verified that the normalized range has 387 - * been entirely written to disk. 
388 - */ 389 - new->bse_f_offset = offset; 390 - offset = normalize(offset, bl->bl_blocksize); 391 - if (offset < new->bse_f_offset) { 392 - if (is_range_written(be->be_inval, offset, new->bse_f_offset)) 393 - new->bse_f_offset = offset; 394 - else 395 - new->bse_f_offset = offset + bl->bl_blocksize; 396 - } 397 - new_end = normalize_up(end, bl->bl_blocksize); 398 - if (end < new_end) { 399 - if (is_range_written(be->be_inval, end, new_end)) 400 - end = new_end; 401 - else 402 - end = new_end - bl->bl_blocksize; 403 - } 404 - if (end <= new->bse_f_offset) { 405 - kfree(new); 406 - return 0; 407 - } 408 - new->bse_length = end - new->bse_f_offset; 409 - new->bse_devid = be->be_devid; 410 - new->bse_mdev = be->be_mdev; 411 - 412 - spin_lock(&bl->bl_ext_lock); 413 - add_to_commitlist(bl, new); 414 - spin_unlock(&bl->bl_ext_lock); 415 - return 0; 416 - } 417 - 418 - static void print_bl_extent(struct pnfs_block_extent *be) 419 - { 420 - dprintk("PRINT EXTENT extent %p\n", be); 421 - if (be) { 422 - dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset); 423 - dprintk(" be_length %llu\n", (u64)be->be_length); 424 - dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset); 425 - dprintk(" be_state %d\n", be->be_state); 426 - } 427 - } 428 - 429 - static void 430 - destroy_extent(struct kref *kref) 431 - { 432 - struct pnfs_block_extent *be; 433 - 434 - be = container_of(kref, struct pnfs_block_extent, be_refcnt); 435 - dprintk("%s be=%p\n", __func__, be); 436 - kfree(be); 437 - } 438 - 439 - void 440 - bl_put_extent(struct pnfs_block_extent *be) 441 - { 442 - if (be) { 443 - dprintk("%s enter %p (%i)\n", __func__, be, 444 - atomic_read(&be->be_refcnt.refcount)); 445 - kref_put(&be->be_refcnt, destroy_extent); 446 - } 447 - } 448 - 449 - struct pnfs_block_extent *bl_alloc_extent(void) 450 - { 451 - struct pnfs_block_extent *be; 452 - 453 - be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS); 454 - if (!be) 455 - return NULL; 456 - INIT_LIST_HEAD(&be->be_node); 457 - 
kref_init(&be->be_refcnt); 458 - be->be_inval = NULL; 459 - return be; 460 - } 461 - 462 - static void print_elist(struct list_head *list) 463 - { 464 - struct pnfs_block_extent *be; 465 - dprintk("****************\n"); 466 - dprintk("Extent list looks like:\n"); 467 - list_for_each_entry(be, list, be_node) { 468 - print_bl_extent(be); 469 - } 470 - dprintk("****************\n"); 471 - } 472 - 473 - static inline int 474 - extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new) 475 - { 476 - /* Note this assumes new->be_f_offset >= old->be_f_offset */ 477 - return (new->be_state == old->be_state) && 478 - ((new->be_state == PNFS_BLOCK_NONE_DATA) || 479 - ((new->be_v_offset - old->be_v_offset == 480 - new->be_f_offset - old->be_f_offset) && 481 - new->be_mdev == old->be_mdev)); 482 - } 483 - 484 - /* Adds new to appropriate list in bl, modifying new and removing existing 485 - * extents as appropriate to deal with overlaps. 486 - * 487 - * See bl_find_get_extent for list constraints. 488 - * 489 - * Refcount on new is already set. If end up not using it, or error out, 490 - * need to put the reference. 491 - * 492 - * bl->bl_ext_lock is held by caller. 493 - */ 494 - int 495 - bl_add_merge_extent(struct pnfs_block_layout *bl, 496 - struct pnfs_block_extent *new) 497 - { 498 - struct pnfs_block_extent *be, *tmp; 499 - sector_t end = new->be_f_offset + new->be_length; 500 - struct list_head *list; 501 - 502 - dprintk("%s enter with be=%p\n", __func__, new); 503 - print_bl_extent(new); 504 - list = &bl->bl_extents[bl_choose_list(new->be_state)]; 505 - print_elist(list); 506 - 507 - /* Scan for proper place to insert, extending new to the left 508 - * as much as possible. 
509 - */ 510 - list_for_each_entry_safe_reverse(be, tmp, list, be_node) { 511 - if (new->be_f_offset >= be->be_f_offset + be->be_length) 512 - break; 513 - if (new->be_f_offset >= be->be_f_offset) { 514 - if (end <= be->be_f_offset + be->be_length) { 515 - /* new is a subset of existing be*/ 516 - if (extents_consistent(be, new)) { 517 - dprintk("%s: new is subset, ignoring\n", 518 - __func__); 519 - bl_put_extent(new); 520 - return 0; 521 - } else { 522 - goto out_err; 523 - } 524 - } else { 525 - /* |<-- be -->| 526 - * |<-- new -->| */ 527 - if (extents_consistent(be, new)) { 528 - /* extend new to fully replace be */ 529 - new->be_length += new->be_f_offset - 530 - be->be_f_offset; 531 - new->be_f_offset = be->be_f_offset; 532 - new->be_v_offset = be->be_v_offset; 533 - dprintk("%s: removing %p\n", __func__, be); 534 - list_del(&be->be_node); 535 - bl_put_extent(be); 536 - } else { 537 - goto out_err; 538 - } 539 - } 540 - } else if (end >= be->be_f_offset + be->be_length) { 541 - /* new extent overlap existing be */ 542 - if (extents_consistent(be, new)) { 543 - /* extend new to fully replace be */ 544 - dprintk("%s: removing %p\n", __func__, be); 545 - list_del(&be->be_node); 546 - bl_put_extent(be); 547 - } else { 548 - goto out_err; 549 - } 550 - } else if (end > be->be_f_offset) { 551 - /* |<-- be -->| 552 - *|<-- new -->| */ 553 - if (extents_consistent(new, be)) { 554 - /* extend new to fully replace be */ 555 - new->be_length += be->be_f_offset + be->be_length - 556 - new->be_f_offset - new->be_length; 557 - dprintk("%s: removing %p\n", __func__, be); 558 - list_del(&be->be_node); 559 - bl_put_extent(be); 560 - } else { 561 - goto out_err; 562 - } 563 - } 564 - } 565 - /* Note that if we never hit the above break, be will not point to a 566 - * valid extent. However, in that case &be->be_node==list. 
567 - */ 568 - list_add(&new->be_node, &be->be_node); 569 - dprintk("%s: inserting new\n", __func__); 570 - print_elist(list); 571 - /* FIXME - The per-list consistency checks have all been done, 572 - * should now check cross-list consistency. 573 - */ 574 - return 0; 575 - 576 - out_err: 577 - bl_put_extent(new); 578 - return -EIO; 579 - } 580 - 581 - /* Returns extent, or NULL. If a second READ extent exists, it is returned 582 - * in cow_read, if given. 583 - * 584 - * The extents are kept in two seperate ordered lists, one for READ and NONE, 585 - * one for READWRITE and INVALID. Within each list, we assume: 586 - * 1. Extents are ordered by file offset. 587 - * 2. For any given isect, there is at most one extents that matches. 588 - */ 589 - struct pnfs_block_extent * 590 - bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect, 591 - struct pnfs_block_extent **cow_read) 592 - { 593 - struct pnfs_block_extent *be, *cow, *ret; 594 - int i; 595 - 596 - dprintk("%s enter with isect %llu\n", __func__, (u64)isect); 597 - cow = ret = NULL; 598 - spin_lock(&bl->bl_ext_lock); 599 - for (i = 0; i < EXTENT_LISTS; i++) { 600 - list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) { 601 - if (isect >= be->be_f_offset + be->be_length) 602 - break; 603 - if (isect >= be->be_f_offset) { 604 - /* We have found an extent */ 605 - dprintk("%s Get %p (%i)\n", __func__, be, 606 - atomic_read(&be->be_refcnt.refcount)); 607 - kref_get(&be->be_refcnt); 608 - if (!ret) 609 - ret = be; 610 - else if (be->be_state != PNFS_BLOCK_READ_DATA) 611 - bl_put_extent(be); 612 - else 613 - cow = be; 614 - break; 615 - } 616 - } 617 - if (ret && 618 - (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA)) 619 - break; 620 - } 621 - spin_unlock(&bl->bl_ext_lock); 622 - if (cow_read) 623 - *cow_read = cow; 624 - print_bl_extent(ret); 625 - return ret; 626 - } 627 - 628 - /* Similar to bl_find_get_extent, but called with lock held, and ignores cow */ 629 - static struct 
pnfs_block_extent * 630 - bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect) 631 - { 632 - struct pnfs_block_extent *be, *ret = NULL; 633 - int i; 634 - 635 - dprintk("%s enter with isect %llu\n", __func__, (u64)isect); 636 - for (i = 0; i < EXTENT_LISTS; i++) { 637 - if (ret) 638 - break; 639 - list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) { 640 - if (isect >= be->be_f_offset + be->be_length) 641 - break; 642 - if (isect >= be->be_f_offset) { 643 - /* We have found an extent */ 644 - dprintk("%s Get %p (%i)\n", __func__, be, 645 - atomic_read(&be->be_refcnt.refcount)); 646 - kref_get(&be->be_refcnt); 647 - ret = be; 648 - break; 649 - } 650 - } 651 - } 652 - print_bl_extent(ret); 653 - return ret; 654 - } 655 - 656 - int 657 - encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, 658 - struct xdr_stream *xdr, 659 - const struct nfs4_layoutcommit_args *arg) 660 - { 661 - struct pnfs_block_short_extent *lce, *save; 662 - unsigned int count = 0; 663 - __be32 *p, *xdr_start; 664 - 665 - dprintk("%s enter\n", __func__); 666 - /* BUG - creation of bl_commit is buggy - need to wait for 667 - * entire block to be marked WRITTEN before it can be added. 
668 - */ 669 - spin_lock(&bl->bl_ext_lock); 670 - /* Want to adjust for possible truncate */ 671 - /* We now want to adjust argument range */ 672 - 673 - /* XDR encode the ranges found */ 674 - xdr_start = xdr_reserve_space(xdr, 8); 675 - if (!xdr_start) 676 - goto out; 677 - list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) { 678 - p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data)); 679 - if (!p) 680 - break; 681 - p = xdr_encode_opaque_fixed(p, lce->bse_devid.data, NFS4_DEVICEID4_SIZE); 682 - p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT); 683 - p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT); 684 - p = xdr_encode_hyper(p, 0LL); 685 - *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA); 686 - list_move_tail(&lce->bse_node, &bl->bl_committing); 687 - bl->bl_count--; 688 - count++; 689 - } 690 - xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4); 691 - xdr_start[1] = cpu_to_be32(count); 692 - out: 693 - spin_unlock(&bl->bl_ext_lock); 694 - dprintk("%s found %i ranges\n", __func__, count); 695 - return 0; 696 - } 697 - 698 - /* Helper function to set_to_rw that initialize a new extent */ 699 - static void 700 - _prep_new_extent(struct pnfs_block_extent *new, 701 - struct pnfs_block_extent *orig, 702 - sector_t offset, sector_t length, int state) 703 - { 704 - kref_init(&new->be_refcnt); 705 - /* don't need to INIT_LIST_HEAD(&new->be_node) */ 706 - memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid)); 707 - new->be_mdev = orig->be_mdev; 708 - new->be_f_offset = offset; 709 - new->be_length = length; 710 - new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset; 711 - new->be_state = state; 712 - new->be_inval = orig->be_inval; 713 - } 714 - 715 - /* Tries to merge be with extent in front of it in list. 716 - * Frees storage if not used. 
717 - */ 718 - static struct pnfs_block_extent * 719 - _front_merge(struct pnfs_block_extent *be, struct list_head *head, 720 - struct pnfs_block_extent *storage) 721 - { 722 - struct pnfs_block_extent *prev; 723 - 724 - if (!storage) 725 - goto no_merge; 726 - if (&be->be_node == head || be->be_node.prev == head) 727 - goto no_merge; 728 - prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node); 729 - if ((prev->be_f_offset + prev->be_length != be->be_f_offset) || 730 - !extents_consistent(prev, be)) 731 - goto no_merge; 732 - _prep_new_extent(storage, prev, prev->be_f_offset, 733 - prev->be_length + be->be_length, prev->be_state); 734 - list_replace(&prev->be_node, &storage->be_node); 735 - bl_put_extent(prev); 736 - list_del(&be->be_node); 737 - bl_put_extent(be); 738 - return storage; 739 - 740 - no_merge: 741 - kfree(storage); 742 - return be; 743 - } 744 - 745 - static u64 746 - set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length) 747 - { 748 - u64 rv = offset + length; 749 - struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old; 750 - struct pnfs_block_extent *children[3]; 751 - struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL; 752 - int i = 0, j; 753 - 754 - dprintk("%s(%llu, %llu)\n", __func__, offset, length); 755 - /* Create storage for up to three new extents e1, e2, e3 */ 756 - e1 = kmalloc(sizeof(*e1), GFP_ATOMIC); 757 - e2 = kmalloc(sizeof(*e2), GFP_ATOMIC); 758 - e3 = kmalloc(sizeof(*e3), GFP_ATOMIC); 759 - /* BUG - we are ignoring any failure */ 760 - if (!e1 || !e2 || !e3) 761 - goto out_nosplit; 762 - 763 - spin_lock(&bl->bl_ext_lock); 764 - be = bl_find_get_extent_locked(bl, offset); 765 - rv = be->be_f_offset + be->be_length; 766 - if (be->be_state != PNFS_BLOCK_INVALID_DATA) { 767 - spin_unlock(&bl->bl_ext_lock); 768 - goto out_nosplit; 769 - } 770 - /* Add e* to children, bumping e*'s krefs */ 771 - if (be->be_f_offset != offset) { 772 - _prep_new_extent(e1, be, be->be_f_offset, 773 - offset - 
be->be_f_offset, 774 - PNFS_BLOCK_INVALID_DATA); 775 - children[i++] = e1; 776 - print_bl_extent(e1); 777 - } else 778 - merge1 = e1; 779 - _prep_new_extent(e2, be, offset, 780 - min(length, be->be_f_offset + be->be_length - offset), 781 - PNFS_BLOCK_READWRITE_DATA); 782 - children[i++] = e2; 783 - print_bl_extent(e2); 784 - if (offset + length < be->be_f_offset + be->be_length) { 785 - _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length, 786 - be->be_f_offset + be->be_length - 787 - offset - length, 788 - PNFS_BLOCK_INVALID_DATA); 789 - children[i++] = e3; 790 - print_bl_extent(e3); 791 - } else 792 - merge2 = e3; 793 - 794 - /* Remove be from list, and insert the e* */ 795 - /* We don't get refs on e*, since this list is the base reference 796 - * set when init'ed. 797 - */ 798 - if (i < 3) 799 - children[i] = NULL; 800 - new = children[0]; 801 - list_replace(&be->be_node, &new->be_node); 802 - bl_put_extent(be); 803 - new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1); 804 - for (j = 1; j < i; j++) { 805 - old = new; 806 - new = children[j]; 807 - list_add(&new->be_node, &old->be_node); 808 - } 809 - if (merge2) { 810 - /* This is a HACK, should just create a _back_merge function */ 811 - new = list_entry(new->be_node.next, 812 - struct pnfs_block_extent, be_node); 813 - new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2); 814 - } 815 - spin_unlock(&bl->bl_ext_lock); 816 - 817 - /* Since we removed the base reference above, be is now scheduled for 818 - * destruction. 
819 - */ 820 - bl_put_extent(be); 821 - dprintk("%s returns %llu after split\n", __func__, rv); 822 - return rv; 823 - 824 - out_nosplit: 825 - kfree(e1); 826 - kfree(e2); 827 - kfree(e3); 828 - dprintk("%s returns %llu without splitting\n", __func__, rv); 829 - return rv; 830 - } 831 - 832 - void 833 - clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl, 834 - const struct nfs4_layoutcommit_args *arg, 835 - int status) 836 - { 837 - struct pnfs_block_short_extent *lce, *save; 838 - 839 - dprintk("%s status %d\n", __func__, status); 840 - list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) { 841 - if (likely(!status)) { 842 - u64 offset = lce->bse_f_offset; 843 - u64 end = offset + lce->bse_length; 844 - 845 - do { 846 - offset = set_to_rw(bl, offset, end - offset); 847 - } while (offset < end); 848 - list_del(&lce->bse_node); 849 - 850 - kfree(lce); 851 - } else { 852 - list_del(&lce->bse_node); 853 - spin_lock(&bl->bl_ext_lock); 854 - add_to_commitlist(bl, lce); 855 - spin_unlock(&bl->bl_ext_lock); 856 - } 857 - } 858 - } 859 - 860 - int bl_push_one_short_extent(struct pnfs_inval_markings *marks) 861 - { 862 - struct pnfs_block_short_extent *new; 863 - 864 - new = kmalloc(sizeof(*new), GFP_NOFS); 865 - if (unlikely(!new)) 866 - return -ENOMEM; 867 - 868 - spin_lock_bh(&marks->im_lock); 869 - list_add(&new->bse_node, &marks->im_extents); 870 - spin_unlock_bh(&marks->im_lock); 871 - 872 - return 0; 873 - } 874 - 875 - struct pnfs_block_short_extent * 876 - bl_pop_one_short_extent(struct pnfs_inval_markings *marks) 877 - { 878 - struct pnfs_block_short_extent *rv = NULL; 879 - 880 - spin_lock_bh(&marks->im_lock); 881 - if (!list_empty(&marks->im_extents)) { 882 - rv = list_entry((&marks->im_extents)->next, 883 - struct pnfs_block_short_extent, bse_node); 884 - list_del_init(&rv->bse_node); 885 - } 886 - spin_unlock_bh(&marks->im_lock); 887 - 888 - return rv; 889 - } 890 - 891 - void bl_free_short_extents(struct pnfs_inval_markings *marks, int 
num_to_free) 892 - { 893 - struct pnfs_block_short_extent *se = NULL, *tmp; 894 - 895 - if (num_to_free <= 0) 896 - return; 897 - 898 - spin_lock(&marks->im_lock); 899 - list_for_each_entry_safe(se, tmp, &marks->im_extents, bse_node) { 900 - list_del(&se->bse_node); 901 - kfree(se); 902 - if (--num_to_free == 0) 903 - break; 904 - } 905 - spin_unlock(&marks->im_lock); 906 - 907 - BUG_ON(num_to_free > 0); 908 - }
+285
fs/nfs/blocklayout/rpc_pipefs.c
··· 1 + /* 2 + * Copyright (c) 2006,2007 The Regents of the University of Michigan. 3 + * All rights reserved. 4 + * 5 + * Andy Adamson <andros@citi.umich.edu> 6 + * Fred Isaman <iisaman@umich.edu> 7 + * 8 + * permission is granted to use, copy, create derivative works and 9 + * redistribute this software and such derivative works for any purpose, 10 + * so long as the name of the university of michigan is not used in 11 + * any advertising or publicity pertaining to the use or distribution 12 + * of this software without specific, written prior authorization. if 13 + * the above copyright notice or any other identification of the 14 + * university of michigan is included in any copy of any portion of 15 + * this software, then the disclaimer below must also be included. 16 + * 17 + * this software is provided as is, without representation from the 18 + * university of michigan as to its fitness for any purpose, and without 19 + * warranty by the university of michigan of any kind, either express 20 + * or implied, including without limitation the implied warranties of 21 + * merchantability and fitness for a particular purpose. the regents 22 + * of the university of michigan shall not be liable for any damages, 23 + * including special, indirect, incidental, or consequential damages, 24 + * with respect to any claim arising out or in connection with the use 25 + * of the software, even if it has been or is hereafter advised of the 26 + * possibility of such damages. 
27 + */ 28 + 29 + #include <linux/module.h> 30 + #include <linux/genhd.h> 31 + #include <linux/blkdev.h> 32 + 33 + #include "blocklayout.h" 34 + 35 + #define NFSDBG_FACILITY NFSDBG_PNFS_LD 36 + 37 + static void 38 + nfs4_encode_simple(__be32 *p, struct pnfs_block_volume *b) 39 + { 40 + int i; 41 + 42 + *p++ = cpu_to_be32(1); 43 + *p++ = cpu_to_be32(b->type); 44 + *p++ = cpu_to_be32(b->simple.nr_sigs); 45 + for (i = 0; i < b->simple.nr_sigs; i++) { 46 + p = xdr_encode_hyper(p, b->simple.sigs[i].offset); 47 + p = xdr_encode_opaque(p, b->simple.sigs[i].sig, 48 + b->simple.sigs[i].sig_len); 49 + } 50 + } 51 + 52 + dev_t 53 + bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, 54 + gfp_t gfp_mask) 55 + { 56 + struct net *net = server->nfs_client->cl_net; 57 + struct nfs_net *nn = net_generic(net, nfs_net_id); 58 + struct bl_dev_msg *reply = &nn->bl_mount_reply; 59 + struct bl_pipe_msg bl_pipe_msg; 60 + struct rpc_pipe_msg *msg = &bl_pipe_msg.msg; 61 + struct bl_msg_hdr *bl_msg; 62 + DECLARE_WAITQUEUE(wq, current); 63 + dev_t dev = 0; 64 + int rc; 65 + 66 + dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); 67 + 68 + bl_pipe_msg.bl_wq = &nn->bl_wq; 69 + 70 + b->simple.len += 4; /* single volume */ 71 + if (b->simple.len > PAGE_SIZE) 72 + return -EIO; 73 + 74 + memset(msg, 0, sizeof(*msg)); 75 + msg->len = sizeof(*bl_msg) + b->simple.len; 76 + msg->data = kzalloc(msg->len, gfp_mask); 77 + if (!msg->data) 78 + goto out; 79 + 80 + bl_msg = msg->data; 81 + bl_msg->type = BL_DEVICE_MOUNT, 82 + bl_msg->totallen = b->simple.len; 83 + nfs4_encode_simple(msg->data + sizeof(*bl_msg), b); 84 + 85 + dprintk("%s CALLING USERSPACE DAEMON\n", __func__); 86 + add_wait_queue(&nn->bl_wq, &wq); 87 + rc = rpc_queue_upcall(nn->bl_device_pipe, msg); 88 + if (rc < 0) { 89 + remove_wait_queue(&nn->bl_wq, &wq); 90 + goto out; 91 + } 92 + 93 + set_current_state(TASK_UNINTERRUPTIBLE); 94 + schedule(); 95 + __set_current_state(TASK_RUNNING); 96 + 
remove_wait_queue(&nn->bl_wq, &wq); 97 + 98 + if (reply->status != BL_DEVICE_REQUEST_PROC) { 99 + printk(KERN_WARNING "%s failed to decode device: %d\n", 100 + __func__, reply->status); 101 + goto out; 102 + } 103 + 104 + dev = MKDEV(reply->major, reply->minor); 105 + out: 106 + kfree(msg->data); 107 + return dev; 108 + } 109 + 110 + static ssize_t bl_pipe_downcall(struct file *filp, const char __user *src, 111 + size_t mlen) 112 + { 113 + struct nfs_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info, 114 + nfs_net_id); 115 + 116 + if (mlen != sizeof (struct bl_dev_msg)) 117 + return -EINVAL; 118 + 119 + if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0) 120 + return -EFAULT; 121 + 122 + wake_up(&nn->bl_wq); 123 + 124 + return mlen; 125 + } 126 + 127 + static void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg) 128 + { 129 + struct bl_pipe_msg *bl_pipe_msg = 130 + container_of(msg, struct bl_pipe_msg, msg); 131 + 132 + if (msg->errno >= 0) 133 + return; 134 + wake_up(bl_pipe_msg->bl_wq); 135 + } 136 + 137 + static const struct rpc_pipe_ops bl_upcall_ops = { 138 + .upcall = rpc_pipe_generic_upcall, 139 + .downcall = bl_pipe_downcall, 140 + .destroy_msg = bl_pipe_destroy_msg, 141 + }; 142 + 143 + static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb, 144 + struct rpc_pipe *pipe) 145 + { 146 + struct dentry *dir, *dentry; 147 + 148 + dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME); 149 + if (dir == NULL) 150 + return ERR_PTR(-ENOENT); 151 + dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); 152 + dput(dir); 153 + return dentry; 154 + } 155 + 156 + static void nfs4blocklayout_unregister_sb(struct super_block *sb, 157 + struct rpc_pipe *pipe) 158 + { 159 + if (pipe->dentry) 160 + rpc_unlink(pipe->dentry); 161 + } 162 + 163 + static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, 164 + void *ptr) 165 + { 166 + struct super_block *sb = ptr; 167 + struct net *net = sb->s_fs_info; 168 + struct nfs_net *nn = 
net_generic(net, nfs_net_id); 169 + struct dentry *dentry; 170 + int ret = 0; 171 + 172 + if (!try_module_get(THIS_MODULE)) 173 + return 0; 174 + 175 + if (nn->bl_device_pipe == NULL) { 176 + module_put(THIS_MODULE); 177 + return 0; 178 + } 179 + 180 + switch (event) { 181 + case RPC_PIPEFS_MOUNT: 182 + dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); 183 + if (IS_ERR(dentry)) { 184 + ret = PTR_ERR(dentry); 185 + break; 186 + } 187 + nn->bl_device_pipe->dentry = dentry; 188 + break; 189 + case RPC_PIPEFS_UMOUNT: 190 + if (nn->bl_device_pipe->dentry) 191 + nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe); 192 + break; 193 + default: 194 + ret = -ENOTSUPP; 195 + break; 196 + } 197 + module_put(THIS_MODULE); 198 + return ret; 199 + } 200 + 201 + static struct notifier_block nfs4blocklayout_block = { 202 + .notifier_call = rpc_pipefs_event, 203 + }; 204 + 205 + static struct dentry *nfs4blocklayout_register_net(struct net *net, 206 + struct rpc_pipe *pipe) 207 + { 208 + struct super_block *pipefs_sb; 209 + struct dentry *dentry; 210 + 211 + pipefs_sb = rpc_get_sb_net(net); 212 + if (!pipefs_sb) 213 + return NULL; 214 + dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); 215 + rpc_put_sb_net(net); 216 + return dentry; 217 + } 218 + 219 + static void nfs4blocklayout_unregister_net(struct net *net, 220 + struct rpc_pipe *pipe) 221 + { 222 + struct super_block *pipefs_sb; 223 + 224 + pipefs_sb = rpc_get_sb_net(net); 225 + if (pipefs_sb) { 226 + nfs4blocklayout_unregister_sb(pipefs_sb, pipe); 227 + rpc_put_sb_net(net); 228 + } 229 + } 230 + 231 + static int nfs4blocklayout_net_init(struct net *net) 232 + { 233 + struct nfs_net *nn = net_generic(net, nfs_net_id); 234 + struct dentry *dentry; 235 + 236 + init_waitqueue_head(&nn->bl_wq); 237 + nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); 238 + if (IS_ERR(nn->bl_device_pipe)) 239 + return PTR_ERR(nn->bl_device_pipe); 240 + dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe); 241 + if 
(IS_ERR(dentry)) { 242 + rpc_destroy_pipe_data(nn->bl_device_pipe); 243 + return PTR_ERR(dentry); 244 + } 245 + nn->bl_device_pipe->dentry = dentry; 246 + return 0; 247 + } 248 + 249 + static void nfs4blocklayout_net_exit(struct net *net) 250 + { 251 + struct nfs_net *nn = net_generic(net, nfs_net_id); 252 + 253 + nfs4blocklayout_unregister_net(net, nn->bl_device_pipe); 254 + rpc_destroy_pipe_data(nn->bl_device_pipe); 255 + nn->bl_device_pipe = NULL; 256 + } 257 + 258 + static struct pernet_operations nfs4blocklayout_net_ops = { 259 + .init = nfs4blocklayout_net_init, 260 + .exit = nfs4blocklayout_net_exit, 261 + }; 262 + 263 + int __init bl_init_pipefs(void) 264 + { 265 + int ret; 266 + 267 + ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); 268 + if (ret) 269 + goto out; 270 + ret = register_pernet_subsys(&nfs4blocklayout_net_ops); 271 + if (ret) 272 + goto out_unregister_notifier; 273 + return 0; 274 + 275 + out_unregister_notifier: 276 + rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); 277 + out: 278 + return ret; 279 + } 280 + 281 + void __exit bl_cleanup_pipefs(void) 282 + { 283 + rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); 284 + unregister_pernet_subsys(&nfs4blocklayout_net_ops); 285 + }
+16 -7
fs/nfs/callback_proc.c
··· 171 171 goto out; 172 172 173 173 ino = lo->plh_inode; 174 + 175 + spin_lock(&ino->i_lock); 176 + pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); 177 + spin_unlock(&ino->i_lock); 178 + 179 + pnfs_layoutcommit_inode(ino, false); 180 + 174 181 spin_lock(&ino->i_lock); 175 182 if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || 176 183 pnfs_mark_matching_lsegs_invalid(lo, &free_me_list, 177 - &args->cbl_range)) 184 + &args->cbl_range)) { 178 185 rv = NFS4ERR_DELAY; 179 - else 180 - rv = NFS4ERR_NOMATCHING_LAYOUT; 181 - pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); 186 + goto unlock; 187 + } 188 + 189 + if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { 190 + NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, 191 + &args->cbl_range); 192 + } 193 + unlock: 182 194 spin_unlock(&ino->i_lock); 183 195 pnfs_free_lseg_list(&free_me_list); 184 196 pnfs_put_layout_hdr(lo); ··· 289 277 } 290 278 291 279 found: 292 - if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) 293 - dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, " 294 - "deleting instead\n", __func__); 295 280 nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id); 296 281 } 297 282
+5 -1
fs/nfs/client.c
··· 1252 1252 * set up the iterator to start reading from the server list and return the first item 1253 1253 */ 1254 1254 static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos) 1255 + __acquires(&nn->nfs_client_lock) 1255 1256 { 1256 1257 struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); 1257 1258 ··· 1275 1274 * clean up after reading from the transports list 1276 1275 */ 1277 1276 static void nfs_server_list_stop(struct seq_file *p, void *v) 1277 + __releases(&nn->nfs_client_lock) 1278 1278 { 1279 1279 struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); 1280 1280 ··· 1320 1318 */ 1321 1319 static int nfs_volume_list_open(struct inode *inode, struct file *file) 1322 1320 { 1323 - return seq_open_net(inode, file, &nfs_server_list_ops, 1321 + return seq_open_net(inode, file, &nfs_volume_list_ops, 1324 1322 sizeof(struct seq_net_private)); 1325 1323 } 1326 1324 ··· 1328 1326 * set up the iterator to start reading from the volume list and return the first item 1329 1327 */ 1330 1328 static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos) 1329 + __acquires(&nn->nfs_client_lock) 1331 1330 { 1332 1331 struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); 1333 1332 ··· 1351 1348 * clean up after reading from the transports list 1352 1349 */ 1353 1350 static void nfs_volume_list_stop(struct seq_file *p, void *v) 1351 + __releases(&nn->nfs_client_lock) 1354 1352 { 1355 1353 struct nfs_net *nn = net_generic(seq_file_net(p), nfs_net_id); 1356 1354
-14
fs/nfs/direct.c
··· 178 178 return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf)); 179 179 } 180 180 181 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 182 181 /* 183 182 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data 184 183 * @dreq - direct request possibly spanning multiple servers ··· 196 197 WARN_ON_ONCE(verfp->committed < 0); 197 198 return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf)); 198 199 } 199 - #endif 200 200 201 201 /** 202 202 * nfs_direct_IO - NFS address space operation for direct I/O ··· 574 576 return result; 575 577 } 576 578 577 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 578 579 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 579 580 { 580 581 struct nfs_pageio_descriptor desc; ··· 696 699 { 697 700 schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */ 698 701 } 699 - 700 - #else 701 - static void nfs_direct_write_schedule_work(struct work_struct *work) 702 - { 703 - } 704 - 705 - static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) 706 - { 707 - nfs_direct_complete(dreq, true); 708 - } 709 - #endif 710 702 711 703 static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) 712 704 {
+40 -12
fs/nfs/file.c
··· 36 36 #include "internal.h" 37 37 #include "iostat.h" 38 38 #include "fscache.h" 39 + #include "pnfs.h" 39 40 40 41 #include "nfstrace.h" 41 42 ··· 328 327 unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); 329 328 unsigned int end = offset + len; 330 329 330 + if (pnfs_ld_read_whole_page(file->f_mapping->host)) { 331 + if (!PageUptodate(page)) 332 + return 1; 333 + return 0; 334 + } 335 + 331 336 if ((file->f_mode & FMODE_READ) && /* open for read? */ 332 337 !PageUptodate(page) && /* Uptodate? */ 333 338 !PagePrivate(page) && /* i/o request already? */ ··· 475 468 476 469 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); 477 470 478 - /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not 479 - * doing this memory reclaim for a fs-related allocation. 471 + /* Always try to initiate a 'commit' if relevant, but only 472 + * wait for it if __GFP_WAIT is set. Even then, only wait 1 473 + * second and only if the 'bdi' is not congested. 474 + * Waiting indefinitely can cause deadlocks when the NFS 475 + * server is on this machine, when a new TCP connection is 476 + * needed and in other rare cases. There is no particular 477 + * need to wait extensively here. A short wait has the 478 + * benefit that someone else can worry about the freezer. 
480 479 */ 481 - if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL && 482 - !(current->flags & PF_FSTRANS)) { 483 - int how = FLUSH_SYNC; 484 - 485 - /* Don't let kswapd deadlock waiting for OOM RPC calls */ 486 - if (current_is_kswapd()) 487 - how = 0; 488 - nfs_commit_inode(mapping->host, how); 480 + if (mapping) { 481 + struct nfs_server *nfss = NFS_SERVER(mapping->host); 482 + nfs_commit_inode(mapping->host, 0); 483 + if ((gfp & __GFP_WAIT) && 484 + !bdi_write_congested(&nfss->backing_dev_info)) { 485 + wait_on_page_bit_killable_timeout(page, PG_private, 486 + HZ); 487 + if (PagePrivate(page)) 488 + set_bdi_congested(&nfss->backing_dev_info, 489 + BLK_RW_ASYNC); 490 + } 489 491 } 490 492 /* If PagePrivate() is set, then the page is not freeable */ 491 493 if (PagePrivate(page)) ··· 555 539 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, 556 540 sector_t *span) 557 541 { 542 + int ret; 543 + struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); 544 + 558 545 *span = sis->pages; 559 - return xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 1); 546 + 547 + rcu_read_lock(); 548 + ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1); 549 + rcu_read_unlock(); 550 + 551 + return ret; 560 552 } 561 553 562 554 static void nfs_swap_deactivate(struct file *file) 563 555 { 564 - xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 0); 556 + struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); 557 + 558 + rcu_read_lock(); 559 + xs_swapper(rcu_dereference(clnt->cl_xprt), 0); 560 + rcu_read_unlock(); 565 561 } 566 562 #endif 567 563
+23 -11
fs/nfs/filelayout/filelayout.c
··· 265 265 { 266 266 267 267 if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds || 268 - hdr->res.verf->committed == NFS_FILE_SYNC) 268 + hdr->res.verf->committed != NFS_DATA_SYNC) 269 269 return; 270 270 271 271 pnfs_set_layoutcommit(hdr); ··· 402 402 rpc_restart_call_prepare(task); 403 403 return -EAGAIN; 404 404 } 405 + 406 + if (data->verf.committed == NFS_UNSTABLE) 407 + pnfs_commit_set_layoutcommit(data); 405 408 406 409 return 0; 407 410 } ··· 649 646 } 650 647 651 648 /* find and reference the deviceid */ 652 - d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld, 653 - NFS_SERVER(lo->plh_inode)->nfs_client, id); 654 - if (d == NULL) { 655 - dsaddr = filelayout_get_device_info(lo->plh_inode, id, 656 - lo->plh_lc_cred, gfp_flags); 657 - if (dsaddr == NULL) 658 - goto out; 659 - } else 660 - dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); 649 + d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id, 650 + lo->plh_lc_cred, gfp_flags); 651 + if (d == NULL) 652 + goto out; 653 + 654 + dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node); 661 655 /* Found deviceid is unavailable */ 662 656 if (filelayout_test_devid_unavailable(&dsaddr->id_node)) 663 - goto out_put; 657 + goto out_put; 664 658 665 659 fl->dsaddr = dsaddr; 666 660 ··· 1368 1368 cinfo->ds->ncommitting = 0; 1369 1369 return PNFS_ATTEMPTED; 1370 1370 } 1371 + static struct nfs4_deviceid_node * 1372 + filelayout_alloc_deviceid_node(struct nfs_server *server, 1373 + struct pnfs_device *pdev, gfp_t gfp_flags) 1374 + { 1375 + struct nfs4_file_layout_dsaddr *dsaddr; 1376 + 1377 + dsaddr = nfs4_fl_alloc_deviceid_node(server, pdev, gfp_flags); 1378 + if (!dsaddr) 1379 + return NULL; 1380 + return &dsaddr->id_node; 1381 + } 1371 1382 1372 1383 static void 1373 1384 filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d) ··· 1431 1420 .commit_pagelist = filelayout_commit_pagelist, 1432 1421 .read_pagelist = filelayout_read_pagelist, 1433 1422 
.write_pagelist = filelayout_write_pagelist, 1423 + .alloc_deviceid_node = filelayout_alloc_deviceid_node, 1434 1424 .free_deviceid_node = filelayout_free_deveiceid_node, 1435 1425 }; 1436 1426
+4 -3
fs/nfs/filelayout/filelayout.h
··· 147 147 u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j); 148 148 struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, 149 149 u32 ds_idx); 150 + 151 + extern struct nfs4_file_layout_dsaddr * 152 + nfs4_fl_alloc_deviceid_node(struct nfs_server *server, 153 + struct pnfs_device *pdev, gfp_t gfp_flags); 150 154 extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 151 155 extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 152 - struct nfs4_file_layout_dsaddr * 153 - filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, 154 - struct rpc_cred *cred, gfp_t gfp_flags); 155 156 156 157 #endif /* FS_NFS_NFS4FILELAYOUT_H */
+5 -103
fs/nfs/filelayout/filelayoutdev.c
··· 484 484 } 485 485 486 486 /* Decode opaque device data and return the result */ 487 - static struct nfs4_file_layout_dsaddr* 488 - decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) 487 + struct nfs4_file_layout_dsaddr * 488 + nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, 489 + gfp_t gfp_flags) 489 490 { 490 491 int i; 491 492 u32 cnt, num; ··· 571 570 dsaddr->stripe_indices = stripe_indices; 572 571 stripe_indices = NULL; 573 572 dsaddr->ds_num = num; 574 - nfs4_init_deviceid_node(&dsaddr->id_node, 575 - NFS_SERVER(ino)->pnfs_curr_ld, 576 - NFS_SERVER(ino)->nfs_client, 577 - &pdev->dev_id); 573 + nfs4_init_deviceid_node(&dsaddr->id_node, server, &pdev->dev_id); 578 574 579 575 INIT_LIST_HEAD(&dsaddrs); 580 576 ··· 585 587 586 588 mp_count = be32_to_cpup(p); /* multipath count */ 587 589 for (j = 0; j < mp_count; j++) { 588 - da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net, 590 + da = decode_ds_addr(server->nfs_client->cl_net, 589 591 &stream, gfp_flags); 590 592 if (da) 591 593 list_add_tail(&da->da_node, &dsaddrs); ··· 633 635 out_err: 634 636 dprintk("%s ERROR: returning NULL\n", __func__); 635 637 return NULL; 636 - } 637 - 638 - /* 639 - * Decode the opaque device specified in 'dev' and add it to the cache of 640 - * available devices. 
641 - */ 642 - static struct nfs4_file_layout_dsaddr * 643 - decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) 644 - { 645 - struct nfs4_deviceid_node *d; 646 - struct nfs4_file_layout_dsaddr *n, *new; 647 - 648 - new = decode_device(inode, dev, gfp_flags); 649 - if (!new) { 650 - printk(KERN_WARNING "NFS: %s: Could not decode or add device\n", 651 - __func__); 652 - return NULL; 653 - } 654 - 655 - d = nfs4_insert_deviceid_node(&new->id_node); 656 - n = container_of(d, struct nfs4_file_layout_dsaddr, id_node); 657 - if (n != new) { 658 - nfs4_fl_free_deviceid(new); 659 - return n; 660 - } 661 - 662 - return new; 663 - } 664 - 665 - /* 666 - * Retrieve the information for dev_id, add it to the list 667 - * of available devices, and return it. 668 - */ 669 - struct nfs4_file_layout_dsaddr * 670 - filelayout_get_device_info(struct inode *inode, 671 - struct nfs4_deviceid *dev_id, 672 - struct rpc_cred *cred, 673 - gfp_t gfp_flags) 674 - { 675 - struct pnfs_device *pdev = NULL; 676 - u32 max_resp_sz; 677 - int max_pages; 678 - struct page **pages = NULL; 679 - struct nfs4_file_layout_dsaddr *dsaddr = NULL; 680 - int rc, i; 681 - struct nfs_server *server = NFS_SERVER(inode); 682 - 683 - /* 684 - * Use the session max response size as the basis for setting 685 - * GETDEVICEINFO's maxcount 686 - */ 687 - max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 688 - max_pages = nfs_page_array_len(0, max_resp_sz); 689 - dprintk("%s inode %p max_resp_sz %u max_pages %d\n", 690 - __func__, inode, max_resp_sz, max_pages); 691 - 692 - pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); 693 - if (pdev == NULL) 694 - return NULL; 695 - 696 - pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); 697 - if (pages == NULL) { 698 - kfree(pdev); 699 - return NULL; 700 - } 701 - for (i = 0; i < max_pages; i++) { 702 - pages[i] = alloc_page(gfp_flags); 703 - if (!pages[i]) 704 - goto out_free; 705 - } 706 - 707 - 
memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id)); 708 - pdev->layout_type = LAYOUT_NFSV4_1_FILES; 709 - pdev->pages = pages; 710 - pdev->pgbase = 0; 711 - pdev->pglen = max_resp_sz; 712 - pdev->mincount = 0; 713 - pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead; 714 - 715 - rc = nfs4_proc_getdeviceinfo(server, pdev, cred); 716 - dprintk("%s getdevice info returns %d\n", __func__, rc); 717 - if (rc) 718 - goto out_free; 719 - 720 - /* 721 - * Found new device, need to decode it and then add it to the 722 - * list of known devices for this mountpoint. 723 - */ 724 - dsaddr = decode_and_add_device(inode, pdev, gfp_flags); 725 - out_free: 726 - for (i = 0; i < max_pages; i++) 727 - __free_page(pages[i]); 728 - kfree(pages); 729 - kfree(pdev); 730 - dprintk("<-- %s dsaddr %p\n", __func__, dsaddr); 731 - return dsaddr; 732 638 } 733 639 734 640 void
+1 -2
fs/nfs/fscache-index.c
··· 74 74 struct nfs_server_key *key = buffer; 75 75 uint16_t len = sizeof(struct nfs_server_key); 76 76 77 + memset(key, 0, len); 77 78 key->nfsversion = clp->rpc_ops->version; 78 79 key->family = clp->cl_addr.ss_family; 79 - 80 - memset(key, 0, len); 81 80 82 81 switch (clp->cl_addr.ss_family) { 83 82 case AF_INET:
+3 -1
fs/nfs/inode.c
··· 505 505 attr->ia_valid &= ~ATTR_MODE; 506 506 507 507 if (attr->ia_valid & ATTR_SIZE) { 508 - if (!S_ISREG(inode->i_mode) || attr->ia_size == i_size_read(inode)) 508 + BUG_ON(!S_ISREG(inode->i_mode)); 509 + 510 + if (attr->ia_size == i_size_read(inode)) 509 511 attr->ia_valid &= ~ATTR_SIZE; 510 512 } 511 513
-7
fs/nfs/internal.h
··· 218 218 int nfs_sockaddr_match_ipaddr(const struct sockaddr *, const struct sockaddr *); 219 219 #endif 220 220 221 - /* nfs3client.c */ 222 - #if IS_ENABLED(CONFIG_NFS_V3) 223 - struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subversion *); 224 - struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *, 225 - struct nfs_fattr *, rpc_authflavor_t); 226 - #endif 227 - 228 221 /* callback_xdr.c */ 229 222 extern struct svc_version nfs4_callback_version1; 230 223 extern struct svc_version nfs4_callback_version4;
+34
fs/nfs/nfs3_fs.h
··· 1 + /* 2 + * Copyright (C) 2014 Anna Schumaker. 3 + * 4 + * NFSv3-specific filesystem definitions and declarations 5 + */ 6 + #ifndef __LINUX_FS_NFS_NFS3_FS_H 7 + #define __LINUX_FS_NFS_NFS3_FS_H 8 + 9 + /* 10 + * nfs3acl.c 11 + */ 12 + #ifdef CONFIG_NFS_V3_ACL 13 + extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type); 14 + extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type); 15 + extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 16 + struct posix_acl *dfacl); 17 + extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); 18 + extern const struct xattr_handler *nfs3_xattr_handlers[]; 19 + #else 20 + static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 21 + struct posix_acl *dfacl) 22 + { 23 + return 0; 24 + } 25 + #define nfs3_listxattr NULL 26 + #endif /* CONFIG_NFS_V3_ACL */ 27 + 28 + /* nfs3client.c */ 29 + struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subversion *); 30 + struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *, 31 + struct nfs_fattr *, rpc_authflavor_t); 32 + 33 + 34 + #endif /* __LINUX_FS_NFS_NFS3_FS_H */
+1
fs/nfs/nfs3acl.c
··· 7 7 #include <linux/nfsacl.h> 8 8 9 9 #include "internal.h" 10 + #include "nfs3_fs.h" 10 11 11 12 #define NFSDBG_FACILITY NFSDBG_PROC 12 13
+1
fs/nfs/nfs3client.c
··· 1 1 #include <linux/nfs_fs.h> 2 2 #include <linux/nfs_mount.h> 3 3 #include "internal.h" 4 + #include "nfs3_fs.h" 4 5 5 6 #ifdef CONFIG_NFS_V3_ACL 6 7 static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
+1
fs/nfs/nfs3proc.c
··· 22 22 23 23 #include "iostat.h" 24 24 #include "internal.h" 25 + #include "nfs3_fs.h" 25 26 26 27 #define NFSDBG_FACILITY NFSDBG_PROC 27 28
+1
fs/nfs/nfs3super.c
··· 4 4 #include <linux/module.h> 5 5 #include <linux/nfs_fs.h> 6 6 #include "internal.h" 7 + #include "nfs3_fs.h" 7 8 #include "nfs.h" 8 9 9 10 static struct nfs_subversion nfs_v3 = {
+57 -81
fs/nfs/nfs4proc.c
··· 77 77 static int _nfs4_proc_open(struct nfs4_opendata *data); 78 78 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 79 79 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 80 - static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); 80 + static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *); 81 81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 82 82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label); 83 83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label); ··· 314 314 kunmap_atomic(start); 315 315 } 316 316 317 + static long nfs4_update_delay(long *timeout) 318 + { 319 + long ret; 320 + if (!timeout) 321 + return NFS4_POLL_RETRY_MAX; 322 + if (*timeout <= 0) 323 + *timeout = NFS4_POLL_RETRY_MIN; 324 + if (*timeout > NFS4_POLL_RETRY_MAX) 325 + *timeout = NFS4_POLL_RETRY_MAX; 326 + ret = *timeout; 327 + *timeout <<= 1; 328 + return ret; 329 + } 330 + 317 331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) 318 332 { 319 333 int res = 0; 320 334 321 335 might_sleep(); 322 336 323 - if (*timeout <= 0) 324 - *timeout = NFS4_POLL_RETRY_MIN; 325 - if (*timeout > NFS4_POLL_RETRY_MAX) 326 - *timeout = NFS4_POLL_RETRY_MAX; 327 - freezable_schedule_timeout_killable_unsafe(*timeout); 337 + freezable_schedule_timeout_killable_unsafe( 338 + nfs4_update_delay(timeout)); 328 339 if (fatal_signal_pending(current)) 329 340 res = -ERESTARTSYS; 330 - *timeout <<= 1; 331 341 return res; 332 342 } 333 343 ··· 1317 1307 int ret = -EAGAIN; 1318 1308 1319 1309 for (;;) { 1310 + spin_lock(&state->owner->so_lock); 1320 1311 if (can_open_cached(state, fmode, open_mode)) { 1321 - spin_lock(&state->owner->so_lock); 1322 - if (can_open_cached(state, fmode, 
open_mode)) { 1323 - update_open_stateflags(state, fmode); 1324 - spin_unlock(&state->owner->so_lock); 1325 - goto out_return_state; 1326 - } 1312 + update_open_stateflags(state, fmode); 1327 1313 spin_unlock(&state->owner->so_lock); 1314 + goto out_return_state; 1328 1315 } 1316 + spin_unlock(&state->owner->so_lock); 1329 1317 rcu_read_lock(); 1330 1318 delegation = rcu_dereference(nfsi->delegation); 1331 1319 if (!can_open_delegated(delegation, fmode)) { ··· 2597 2589 if (calldata->arg.fmode == 0) 2598 2590 break; 2599 2591 default: 2600 - if (nfs4_async_handle_error(task, server, state) == -EAGAIN) { 2592 + if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2601 2593 rpc_restart_call_prepare(task); 2602 2594 goto out_release; 2603 2595 } ··· 3225 3217 struct nfs4_label *label = NULL; 3226 3218 int status; 3227 3219 3228 - if (pnfs_ld_layoutret_on_setattr(inode)) 3220 + if (pnfs_ld_layoutret_on_setattr(inode) && 3221 + sattr->ia_valid & ATTR_SIZE && 3222 + sattr->ia_size < i_size_read(inode)) 3229 3223 pnfs_commit_and_return_layout(inode); 3230 3224 3231 3225 nfs_fattr_init(fattr); ··· 3586 3576 3587 3577 if (!nfs4_sequence_done(task, &res->seq_res)) 3588 3578 return 0; 3589 - if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3579 + if (nfs4_async_handle_error(task, res->server, NULL, 3580 + &data->timeout) == -EAGAIN) 3590 3581 return 0; 3591 3582 update_changeattr(dir, &res->cinfo); 3592 3583 return 1; ··· 3620 3609 3621 3610 if (!nfs4_sequence_done(task, &res->seq_res)) 3622 3611 return 0; 3623 - if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3612 + if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3624 3613 return 0; 3625 3614 3626 3615 update_changeattr(old_dir, &res->old_cinfo); ··· 4124 4113 4125 4114 trace_nfs4_read(hdr, task->tk_status); 4126 4115 if (nfs4_async_handle_error(task, server, 4127 - hdr->args.context->state) == -EAGAIN) { 4116 + hdr->args.context->state, 4117 + 
NULL) == -EAGAIN) { 4128 4118 rpc_restart_call_prepare(task); 4129 4119 return -EAGAIN; 4130 4120 } ··· 4193 4181 struct nfs_pgio_header *hdr) 4194 4182 { 4195 4183 struct inode *inode = hdr->inode; 4196 - 4184 + 4197 4185 trace_nfs4_write(hdr, task->tk_status); 4198 4186 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4199 - hdr->args.context->state) == -EAGAIN) { 4187 + hdr->args.context->state, 4188 + NULL) == -EAGAIN) { 4200 4189 rpc_restart_call_prepare(task); 4201 4190 return -EAGAIN; 4202 4191 } ··· 4277 4264 struct inode *inode = data->inode; 4278 4265 4279 4266 trace_nfs4_commit(data, task->tk_status); 4280 - if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 4267 + if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4268 + NULL, NULL) == -EAGAIN) { 4281 4269 rpc_restart_call_prepare(task); 4282 4270 return -EAGAIN; 4283 4271 } ··· 4831 4817 4832 4818 4833 4819 static int 4834 - nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 4820 + nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, 4821 + struct nfs4_state *state, long *timeout) 4835 4822 { 4836 4823 struct nfs_client *clp = server->nfs_client; 4837 4824 ··· 4882 4867 #endif /* CONFIG_NFS_V4_1 */ 4883 4868 case -NFS4ERR_DELAY: 4884 4869 nfs_inc_server_stats(server, NFSIOS_DELAY); 4870 + rpc_delay(task, nfs4_update_delay(timeout)); 4871 + goto restart_call; 4885 4872 case -NFS4ERR_GRACE: 4886 4873 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4887 4874 case -NFS4ERR_RETRY_UNCACHED_REP: ··· 5124 5107 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5125 5108 break; 5126 5109 default: 5127 - if (nfs4_async_handle_error(task, data->res.server, NULL) == 5128 - -EAGAIN) { 5110 + if (nfs4_async_handle_error(task, data->res.server, 5111 + NULL, NULL) == -EAGAIN) { 5129 5112 rpc_restart_call_prepare(task); 5130 5113 return; 5131 5114 } ··· 5389 5372 case -NFS4ERR_EXPIRED: 5390 5373 break; 5391 5374 
default: 5392 - if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 5375 + if (nfs4_async_handle_error(task, calldata->server, 5376 + NULL, NULL) == -EAGAIN) 5393 5377 rpc_restart_call_prepare(task); 5394 5378 } 5395 5379 nfs_release_seqid(calldata->arg.seqid); ··· 5996 5978 break; 5997 5979 case -NFS4ERR_LEASE_MOVED: 5998 5980 case -NFS4ERR_DELAY: 5999 - if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) 5981 + if (nfs4_async_handle_error(task, server, 5982 + NULL, NULL) == -EAGAIN) 6000 5983 rpc_restart_call_prepare(task); 6001 5984 } 6002 5985 } ··· 7372 7353 int ret = 0; 7373 7354 7374 7355 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7375 - return 0; 7356 + return -EAGAIN; 7376 7357 task = _nfs41_proc_sequence(clp, cred, false); 7377 7358 if (IS_ERR(task)) 7378 7359 ret = PTR_ERR(task); ··· 7602 7583 } else { 7603 7584 LIST_HEAD(head); 7604 7585 7586 + /* 7587 + * Mark the bad layout state as invalid, then retry 7588 + * with the current stateid. 7589 + */ 7605 7590 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7606 7591 spin_unlock(&inode->i_lock); 7607 - /* Mark the bad layout state as invalid, then 7608 - * retry using the open stateid. 
*/ 7609 7592 pnfs_free_lseg_list(&head); 7593 + 7594 + task->tk_status = 0; 7595 + rpc_restart_call_prepare(task); 7610 7596 } 7611 7597 } 7612 - if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 7598 + if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7613 7599 rpc_restart_call_prepare(task); 7614 7600 out: 7615 7601 dprintk("<-- %s\n", __func__); ··· 7774 7750 case 0: 7775 7751 break; 7776 7752 case -NFS4ERR_DELAY: 7777 - if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN) 7753 + if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 7778 7754 break; 7779 7755 rpc_restart_call_prepare(task); 7780 7756 return; ··· 7832 7808 rpc_put_task(task); 7833 7809 return status; 7834 7810 } 7835 - 7836 - /* 7837 - * Retrieve the list of Data Server devices from the MDS. 7838 - */ 7839 - static int _nfs4_getdevicelist(struct nfs_server *server, 7840 - const struct nfs_fh *fh, 7841 - struct pnfs_devicelist *devlist) 7842 - { 7843 - struct nfs4_getdevicelist_args args = { 7844 - .fh = fh, 7845 - .layoutclass = server->pnfs_curr_ld->id, 7846 - }; 7847 - struct nfs4_getdevicelist_res res = { 7848 - .devlist = devlist, 7849 - }; 7850 - struct rpc_message msg = { 7851 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST], 7852 - .rpc_argp = &args, 7853 - .rpc_resp = &res, 7854 - }; 7855 - int status; 7856 - 7857 - dprintk("--> %s\n", __func__); 7858 - status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 7859 - &res.seq_res, 0); 7860 - dprintk("<-- %s status=%d\n", __func__, status); 7861 - return status; 7862 - } 7863 - 7864 - int nfs4_proc_getdevicelist(struct nfs_server *server, 7865 - const struct nfs_fh *fh, 7866 - struct pnfs_devicelist *devlist) 7867 - { 7868 - struct nfs4_exception exception = { }; 7869 - int err; 7870 - 7871 - do { 7872 - err = nfs4_handle_exception(server, 7873 - _nfs4_getdevicelist(server, fh, devlist), 7874 - &exception); 7875 - } while (exception.retry); 7876 - 7877 - dprintk("%s: 
err=%d, num_devs=%u\n", __func__, 7878 - err, devlist->num_devs); 7879 - 7880 - return err; 7881 - } 7882 - EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); 7883 7811 7884 7812 static int 7885 7813 _nfs4_proc_getdeviceinfo(struct nfs_server *server, ··· 7905 7929 case 0: 7906 7930 break; 7907 7931 default: 7908 - if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 7932 + if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 7909 7933 rpc_restart_call_prepare(task); 7910 7934 return; 7911 7935 } ··· 8201 8225 8202 8226 switch (task->tk_status) { 8203 8227 case -NFS4ERR_DELAY: 8204 - if (nfs4_async_handle_error(task, data->server, NULL) == -EAGAIN) 8228 + if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8205 8229 rpc_restart_call_prepare(task); 8206 8230 } 8207 8231 }
+10 -2
fs/nfs/nfs4renewd.c
··· 88 88 } 89 89 nfs_expire_all_delegations(clp); 90 90 } else { 91 + int ret; 92 + 91 93 /* Queue an asynchronous RENEW. */ 92 - ops->sched_state_renewal(clp, cred, renew_flags); 94 + ret = ops->sched_state_renewal(clp, cred, renew_flags); 93 95 put_rpccred(cred); 94 - goto out_exp; 96 + switch (ret) { 97 + default: 98 + goto out_exp; 99 + case -EAGAIN: 100 + case -ENOMEM: 101 + break; 102 + } 95 103 } 96 104 } else { 97 105 dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+7 -11
fs/nfs/nfs4state.c
··· 1705 1705 if (status < 0) { 1706 1706 set_bit(ops->owner_flag_bit, &sp->so_flags); 1707 1707 nfs4_put_state_owner(sp); 1708 - return nfs4_recovery_handle_error(clp, status); 1708 + status = nfs4_recovery_handle_error(clp, status); 1709 + return (status != 0) ? status : -EAGAIN; 1709 1710 } 1710 1711 1711 1712 nfs4_put_state_owner(sp); ··· 1715 1714 spin_unlock(&clp->cl_lock); 1716 1715 } 1717 1716 rcu_read_unlock(); 1718 - return status; 1717 + return 0; 1719 1718 } 1720 1719 1721 1720 static int nfs4_check_lease(struct nfs_client *clp) ··· 1762 1761 break; 1763 1762 case -NFS4ERR_STALE_CLIENTID: 1764 1763 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1765 - nfs4_state_clear_reclaim_reboot(clp); 1766 1764 nfs4_state_start_reclaim_reboot(clp); 1767 1765 break; 1768 1766 case -NFS4ERR_CLID_INUSE: ··· 2345 2345 status = nfs4_check_lease(clp); 2346 2346 if (status < 0) 2347 2347 goto out_error; 2348 + continue; 2348 2349 } 2349 2350 2350 2351 if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) { ··· 2367 2366 section = "reclaim reboot"; 2368 2367 status = nfs4_do_reclaim(clp, 2369 2368 clp->cl_mvops->reboot_recovery_ops); 2370 - if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 2371 - test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) 2372 - continue; 2373 - nfs4_state_end_reclaim_reboot(clp); 2374 - if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) 2369 + if (status == -EAGAIN) 2375 2370 continue; 2376 2371 if (status < 0) 2377 2372 goto out_error; 2373 + nfs4_state_end_reclaim_reboot(clp); 2378 2374 } 2379 2375 2380 2376 /* Now recover expired state... 
*/ ··· 2379 2381 section = "reclaim nograce"; 2380 2382 status = nfs4_do_reclaim(clp, 2381 2383 clp->cl_mvops->nograce_recovery_ops); 2382 - if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 2383 - test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) || 2384 - test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 2384 + if (status == -EAGAIN) 2385 2385 continue; 2386 2386 if (status < 0) 2387 2387 goto out_error;
+36 -143
fs/nfs/nfs4xdr.c
··· 362 362 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) 363 363 #define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) 364 364 #define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) 365 - #define encode_getdevicelist_maxsz (op_encode_hdr_maxsz + 4 + \ 366 - encode_verifier_maxsz) 367 - #define decode_getdevicelist_maxsz (op_decode_hdr_maxsz + \ 368 - 2 /* nfs_cookie4 gdlr_cookie */ + \ 369 - decode_verifier_maxsz \ 370 - /* verifier4 gdlr_verifier */ + \ 371 - 1 /* gdlr_deviceid_list count */ + \ 372 - XDR_QUADLEN(NFS4_PNFS_GETDEVLIST_MAXNUM * \ 373 - NFS4_DEVICEID4_SIZE) \ 374 - /* gdlr_deviceid_list */ + \ 375 - 1 /* bool gdlr_eof */) 376 - #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \ 377 - XDR_QUADLEN(NFS4_DEVICEID4_SIZE)) 365 + #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + \ 366 + XDR_QUADLEN(NFS4_DEVICEID4_SIZE) + \ 367 + 1 /* layout type */ + \ 368 + 1 /* maxcount */ + \ 369 + 1 /* bitmap size */ + \ 370 + 1 /* notification bitmap length */ + \ 371 + 1 /* notification bitmap, word 0 */) 378 372 #define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \ 379 373 1 /* layout type */ + \ 380 374 1 /* opaque devaddr4 length */ + \ 381 375 /* devaddr4 payload is read into page */ \ 382 376 1 /* notification bitmap length */ + \ 383 - 1 /* notification bitmap */) 377 + 1 /* notification bitmap, word 0 */) 384 378 #define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \ 385 379 encode_stateid_maxsz) 386 380 #define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \ ··· 389 395 2 /* last byte written */ + \ 390 396 1 /* nt_timechanged (false) */ + \ 391 397 1 /* layoutupdate4 layout type */ + \ 392 - 1 /* NULL filelayout layoutupdate4 payload */) 398 + 1 /* layoutupdate4 opaqueue len */) 399 + /* the actual content of layoutupdate4 should 400 + be allocated by drivers and spliced in 401 + using xdr_write_pages */ 393 402 #define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3) 394 403 #define 
encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \ 395 404 encode_stateid_maxsz + \ ··· 806 809 #define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \ 807 810 decode_sequence_maxsz + \ 808 811 decode_reclaim_complete_maxsz) 809 - #define NFS4_enc_getdevicelist_sz (compound_encode_hdr_maxsz + \ 810 - encode_sequence_maxsz + \ 811 - encode_putfh_maxsz + \ 812 - encode_getdevicelist_maxsz) 813 - #define NFS4_dec_getdevicelist_sz (compound_decode_hdr_maxsz + \ 814 - decode_sequence_maxsz + \ 815 - decode_putfh_maxsz + \ 816 - decode_getdevicelist_maxsz) 817 812 #define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \ 818 813 encode_sequence_maxsz +\ 819 814 encode_getdeviceinfo_maxsz) ··· 1916 1927 1917 1928 #ifdef CONFIG_NFS_V4_1 1918 1929 static void 1919 - encode_getdevicelist(struct xdr_stream *xdr, 1920 - const struct nfs4_getdevicelist_args *args, 1921 - struct compound_hdr *hdr) 1922 - { 1923 - __be32 *p; 1924 - nfs4_verifier dummy = { 1925 - .data = "dummmmmy", 1926 - }; 1927 - 1928 - encode_op_hdr(xdr, OP_GETDEVICELIST, decode_getdevicelist_maxsz, hdr); 1929 - p = reserve_space(xdr, 16); 1930 - *p++ = cpu_to_be32(args->layoutclass); 1931 - *p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM); 1932 - xdr_encode_hyper(p, 0ULL); /* cookie */ 1933 - encode_nfs4_verifier(xdr, &dummy); 1934 - } 1935 - 1936 - static void 1937 1930 encode_getdeviceinfo(struct xdr_stream *xdr, 1938 1931 const struct nfs4_getdeviceinfo_args *args, 1939 1932 struct compound_hdr *hdr) ··· 1923 1952 __be32 *p; 1924 1953 1925 1954 encode_op_hdr(xdr, OP_GETDEVICEINFO, decode_getdeviceinfo_maxsz, hdr); 1926 - p = reserve_space(xdr, 12 + NFS4_DEVICEID4_SIZE); 1955 + p = reserve_space(xdr, NFS4_DEVICEID4_SIZE + 4 + 4); 1927 1956 p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data, 1928 1957 NFS4_DEVICEID4_SIZE); 1929 1958 *p++ = cpu_to_be32(args->pdev->layout_type); 1930 1959 *p++ = cpu_to_be32(args->pdev->maxcount); /* gdia_maxcount */ 1931 - *p++ = cpu_to_be32(0); /* 
bitmap length 0 */ 1960 + 1961 + p = reserve_space(xdr, 4 + 4); 1962 + *p++ = cpu_to_be32(1); /* bitmap length */ 1963 + *p++ = cpu_to_be32(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE); 1932 1964 } 1933 1965 1934 1966 static void ··· 1964 1990 static int 1965 1991 encode_layoutcommit(struct xdr_stream *xdr, 1966 1992 struct inode *inode, 1967 - const struct nfs4_layoutcommit_args *args, 1993 + struct nfs4_layoutcommit_args *args, 1968 1994 struct compound_hdr *hdr) 1969 1995 { 1970 1996 __be32 *p; ··· 1985 2011 *p++ = cpu_to_be32(0); /* Never send time_modify_changed */ 1986 2012 *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */ 1987 2013 1988 - if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit) 2014 + if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit) { 1989 2015 NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit( 1990 2016 NFS_I(inode)->layout, xdr, args); 1991 - else 1992 - encode_uint32(xdr, 0); /* no layout-type payload */ 2017 + } else { 2018 + encode_uint32(xdr, args->layoutupdate_len); 2019 + if (args->layoutupdate_pages) { 2020 + xdr_write_pages(xdr, args->layoutupdate_pages, 0, 2021 + args->layoutupdate_len); 2022 + } 2023 + } 1993 2024 1994 2025 return 0; 1995 2026 } ··· 2868 2889 encode_compound_hdr(xdr, req, &hdr); 2869 2890 encode_sequence(xdr, &args->seq_args, &hdr); 2870 2891 encode_reclaim_complete(xdr, args, &hdr); 2871 - encode_nops(&hdr); 2872 - } 2873 - 2874 - /* 2875 - * Encode GETDEVICELIST request 2876 - */ 2877 - static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req, 2878 - struct xdr_stream *xdr, 2879 - struct nfs4_getdevicelist_args *args) 2880 - { 2881 - struct compound_hdr hdr = { 2882 - .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2883 - }; 2884 - 2885 - encode_compound_hdr(xdr, req, &hdr); 2886 - encode_sequence(xdr, &args->seq_args, &hdr); 2887 - encode_putfh(xdr, args->fh, &hdr); 2888 - encode_getdevicelist(xdr, args, &hdr); 2889 2892 encode_nops(&hdr); 2890 2893 } 2891 
2894 ··· 5726 5765 } 5727 5766 5728 5767 #if defined(CONFIG_NFS_V4_1) 5729 - /* 5730 - * TODO: Need to handle case when EOF != true; 5731 - */ 5732 - static int decode_getdevicelist(struct xdr_stream *xdr, 5733 - struct pnfs_devicelist *res) 5734 - { 5735 - __be32 *p; 5736 - int status, i; 5737 - nfs4_verifier verftemp; 5738 - 5739 - status = decode_op_hdr(xdr, OP_GETDEVICELIST); 5740 - if (status) 5741 - return status; 5742 - 5743 - p = xdr_inline_decode(xdr, 8 + 8 + 4); 5744 - if (unlikely(!p)) 5745 - goto out_overflow; 5746 - 5747 - /* TODO: Skip cookie for now */ 5748 - p += 2; 5749 - 5750 - /* Read verifier */ 5751 - p = xdr_decode_opaque_fixed(p, verftemp.data, NFS4_VERIFIER_SIZE); 5752 - 5753 - res->num_devs = be32_to_cpup(p); 5754 - 5755 - dprintk("%s: num_dev %d\n", __func__, res->num_devs); 5756 - 5757 - if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) { 5758 - printk(KERN_ERR "NFS: %s too many result dev_num %u\n", 5759 - __func__, res->num_devs); 5760 - return -EIO; 5761 - } 5762 - 5763 - p = xdr_inline_decode(xdr, 5764 - res->num_devs * NFS4_DEVICEID4_SIZE + 4); 5765 - if (unlikely(!p)) 5766 - goto out_overflow; 5767 - for (i = 0; i < res->num_devs; i++) 5768 - p = xdr_decode_opaque_fixed(p, res->dev_id[i].data, 5769 - NFS4_DEVICEID4_SIZE); 5770 - res->eof = be32_to_cpup(p); 5771 - return 0; 5772 - out_overflow: 5773 - print_overflow_msg(__func__, xdr); 5774 - return -EIO; 5775 - } 5776 - 5777 5768 static int decode_getdeviceinfo(struct xdr_stream *xdr, 5778 5769 struct pnfs_device *pdev) 5779 5770 { ··· 5775 5862 p = xdr_inline_decode(xdr, 4 * len); 5776 5863 if (unlikely(!p)) 5777 5864 goto out_overflow; 5778 - for (i = 0; i < len; i++, p++) { 5779 - if (be32_to_cpup(p)) { 5780 - dprintk("%s: notifications not supported\n", 5865 + 5866 + if (be32_to_cpup(p++) & 5867 + ~(NOTIFY_DEVICEID4_CHANGE | NOTIFY_DEVICEID4_DELETE)) { 5868 + dprintk("%s: unsupported notification\n", 5869 + __func__); 5870 + } 5871 + 5872 + for (i = 1; i < len; i++) { 5873 + if 
(be32_to_cpup(p++)) { 5874 + dprintk("%s: unsupported notification\n", 5781 5875 __func__); 5782 5876 return -EIO; 5783 5877 } ··· 7017 7097 } 7018 7098 7019 7099 /* 7020 - * Decode GETDEVICELIST response 7021 - */ 7022 - static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp, 7023 - struct xdr_stream *xdr, 7024 - struct nfs4_getdevicelist_res *res) 7025 - { 7026 - struct compound_hdr hdr; 7027 - int status; 7028 - 7029 - dprintk("encoding getdevicelist!\n"); 7030 - 7031 - status = decode_compound_hdr(xdr, &hdr); 7032 - if (status != 0) 7033 - goto out; 7034 - status = decode_sequence(xdr, &res->seq_res, rqstp); 7035 - if (status != 0) 7036 - goto out; 7037 - status = decode_putfh(xdr); 7038 - if (status != 0) 7039 - goto out; 7040 - status = decode_getdevicelist(xdr, res->devlist); 7041 - out: 7042 - return status; 7043 - } 7044 - 7045 - /* 7046 7100 * Decode GETDEVINFO response 7047 7101 */ 7048 7102 static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, ··· 7384 7490 PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), 7385 7491 PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), 7386 7492 PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), 7387 - PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist), 7388 7493 PROC(BIND_CONN_TO_SESSION, 7389 7494 enc_bind_conn_to_session, dec_bind_conn_to_session), 7390 7495 PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
+41 -72
fs/nfs/objlayout/objio_osd.c
··· 60 60 kfree(de); 61 61 } 62 62 63 - static struct objio_dev_ent *_dev_list_find(const struct nfs_server *nfss, 64 - const struct nfs4_deviceid *d_id) 65 - { 66 - struct nfs4_deviceid_node *d; 67 - struct objio_dev_ent *de; 68 - 69 - d = nfs4_find_get_deviceid(nfss->pnfs_curr_ld, nfss->nfs_client, d_id); 70 - if (!d) 71 - return NULL; 72 - 73 - de = container_of(d, struct objio_dev_ent, id_node); 74 - return de; 75 - } 76 - 77 - static struct objio_dev_ent * 78 - _dev_list_add(const struct nfs_server *nfss, 79 - const struct nfs4_deviceid *d_id, struct osd_dev *od, 80 - gfp_t gfp_flags) 81 - { 82 - struct nfs4_deviceid_node *d; 83 - struct objio_dev_ent *de = kzalloc(sizeof(*de), gfp_flags); 84 - struct objio_dev_ent *n; 85 - 86 - if (!de) { 87 - dprintk("%s: -ENOMEM od=%p\n", __func__, od); 88 - return NULL; 89 - } 90 - 91 - dprintk("%s: Adding od=%p\n", __func__, od); 92 - nfs4_init_deviceid_node(&de->id_node, 93 - nfss->pnfs_curr_ld, 94 - nfss->nfs_client, 95 - d_id); 96 - de->od.od = od; 97 - 98 - d = nfs4_insert_deviceid_node(&de->id_node); 99 - n = container_of(d, struct objio_dev_ent, id_node); 100 - if (n != de) { 101 - dprintk("%s: Race with other n->od=%p\n", __func__, n->od.od); 102 - objio_free_deviceid_node(&de->id_node); 103 - de = n; 104 - } 105 - 106 - return de; 107 - } 108 - 109 63 struct objio_segment { 110 64 struct pnfs_layout_segment lseg; 111 65 ··· 84 130 85 131 /* Send and wait for a get_device_info of devices in the layout, 86 132 then look them up with the osd_initiator library */ 87 - static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay, 88 - struct objio_segment *objio_seg, unsigned c, struct nfs4_deviceid *d_id, 89 - gfp_t gfp_flags) 133 + struct nfs4_deviceid_node * 134 + objio_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, 135 + gfp_t gfp_flags) 90 136 { 91 137 struct pnfs_osd_deviceaddr *deviceaddr; 92 - struct objio_dev_ent *ode; 138 + struct objio_dev_ent *ode = NULL; 93 139 struct osd_dev 
*od; 94 140 struct osd_dev_info odi; 95 141 bool retry_flag = true; 142 + __be32 *p; 96 143 int err; 97 144 98 - ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id); 99 - if (ode) { 100 - objio_seg->oc.ods[c] = &ode->od; /* must use container_of */ 101 - return 0; 102 - } 145 + deviceaddr = kzalloc(sizeof(*deviceaddr), gfp_flags); 146 + if (!deviceaddr) 147 + return NULL; 103 148 104 - err = objlayout_get_deviceinfo(pnfslay, d_id, &deviceaddr, gfp_flags); 105 - if (unlikely(err)) { 106 - dprintk("%s: objlayout_get_deviceinfo dev(%llx:%llx) =>%d\n", 107 - __func__, _DEVID_LO(d_id), _DEVID_HI(d_id), err); 108 - return err; 109 - } 149 + p = page_address(pdev->pages[0]); 150 + pnfs_osd_xdr_decode_deviceaddr(deviceaddr, p); 110 151 111 152 odi.systemid_len = deviceaddr->oda_systemid.len; 112 153 if (odi.systemid_len > sizeof(odi.systemid)) { ··· 137 188 goto out; 138 189 } 139 190 140 - ode = _dev_list_add(NFS_SERVER(pnfslay->plh_inode), d_id, od, 141 - gfp_flags); 142 - objio_seg->oc.ods[c] = &ode->od; /* must use container_of */ 143 191 dprintk("Adding new dev_id(%llx:%llx)\n", 144 - _DEVID_LO(d_id), _DEVID_HI(d_id)); 192 + _DEVID_LO(&pdev->dev_id), _DEVID_HI(&pdev->dev_id)); 193 + 194 + ode = kzalloc(sizeof(*ode), gfp_flags); 195 + if (!ode) { 196 + dprintk("%s: -ENOMEM od=%p\n", __func__, od); 197 + goto out; 198 + } 199 + 200 + nfs4_init_deviceid_node(&ode->id_node, server, &pdev->dev_id); 201 + kfree(deviceaddr); 202 + 203 + ode->od.od = od; 204 + return &ode->id_node; 205 + 145 206 out: 146 - objlayout_put_deviceinfo(deviceaddr); 147 - return err; 207 + kfree(deviceaddr); 208 + return NULL; 148 209 } 149 210 150 211 static void copy_single_comp(struct ore_components *oc, unsigned c, ··· 213 254 struct xdr_stream *xdr, 214 255 gfp_t gfp_flags) 215 256 { 257 + struct nfs_server *server = NFS_SERVER(pnfslay->plh_inode); 216 258 struct objio_segment *objio_seg; 217 259 struct pnfs_osd_xdr_decode_layout_iter iter; 218 260 struct pnfs_osd_layout layout; ··· 243 
283 objio_seg->oc.first_dev = layout.olo_comps_index; 244 284 cur_comp = 0; 245 285 while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err)) { 286 + struct nfs4_deviceid_node *d; 287 + struct objio_dev_ent *ode; 288 + 246 289 copy_single_comp(&objio_seg->oc, cur_comp, &src_comp); 247 - err = objio_devices_lookup(pnfslay, objio_seg, cur_comp, 248 - &src_comp.oc_object_id.oid_device_id, 249 - gfp_flags); 250 - if (err) 290 + 291 + d = nfs4_find_get_deviceid(server, 292 + &src_comp.oc_object_id.oid_device_id, 293 + pnfslay->plh_lc_cred, gfp_flags); 294 + if (!d) { 295 + err = -ENXIO; 251 296 goto err; 252 - ++cur_comp; 297 + } 298 + 299 + ode = container_of(d, struct objio_dev_ent, id_node); 300 + objio_seg->oc.ods[cur_comp++] = &ode->od; 253 301 } 254 302 /* pnfs_osd_xdr_decode_layout_comp returns false on error */ 255 303 if (unlikely(err)) ··· 621 653 .flags = PNFS_LAYOUTRET_ON_SETATTR | 622 654 PNFS_LAYOUTRET_ON_ERROR, 623 655 656 + .max_deviceinfo_size = PAGE_SIZE, 624 657 .owner = THIS_MODULE, 625 658 .alloc_layout_hdr = objlayout_alloc_layout_hdr, 626 659 .free_layout_hdr = objlayout_free_layout_hdr,
-70
fs/nfs/objlayout/objlayout.c
··· 574 574 dprintk("%s: Return\n", __func__); 575 575 } 576 576 577 - 578 - /* 579 - * Get Device Info API for io engines 580 - */ 581 - struct objlayout_deviceinfo { 582 - struct page *page; 583 - struct pnfs_osd_deviceaddr da; /* This must be last */ 584 - }; 585 - 586 - /* Initialize and call nfs_getdeviceinfo, then decode and return a 587 - * "struct pnfs_osd_deviceaddr *" Eventually objlayout_put_deviceinfo() 588 - * should be called. 589 - */ 590 - int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay, 591 - struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr, 592 - gfp_t gfp_flags) 593 - { 594 - struct objlayout_deviceinfo *odi; 595 - struct pnfs_device pd; 596 - struct page *page, **pages; 597 - u32 *p; 598 - int err; 599 - 600 - page = alloc_page(gfp_flags); 601 - if (!page) 602 - return -ENOMEM; 603 - 604 - pages = &page; 605 - pd.pages = pages; 606 - 607 - memcpy(&pd.dev_id, d_id, sizeof(*d_id)); 608 - pd.layout_type = LAYOUT_OSD2_OBJECTS; 609 - pd.pages = &page; 610 - pd.pgbase = 0; 611 - pd.pglen = PAGE_SIZE; 612 - pd.mincount = 0; 613 - pd.maxcount = PAGE_SIZE; 614 - 615 - err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd, 616 - pnfslay->plh_lc_cred); 617 - dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err); 618 - if (err) 619 - goto err_out; 620 - 621 - p = page_address(page); 622 - odi = kzalloc(sizeof(*odi), gfp_flags); 623 - if (!odi) { 624 - err = -ENOMEM; 625 - goto err_out; 626 - } 627 - pnfs_osd_xdr_decode_deviceaddr(&odi->da, p); 628 - odi->page = page; 629 - *deviceaddr = &odi->da; 630 - return 0; 631 - 632 - err_out: 633 - __free_page(page); 634 - return err; 635 - } 636 - 637 - void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr) 638 - { 639 - struct objlayout_deviceinfo *odi = container_of(deviceaddr, 640 - struct objlayout_deviceinfo, 641 - da); 642 - 643 - __free_page(odi->page); 644 - kfree(odi); 645 - } 646 - 647 577 enum { 648 578 OBJLAYOUT_MAX_URI_LEN = 256, 
OBJLAYOUT_MAX_OSDNAME_LEN = 64, 649 579 OBJLAYOUT_MAX_SYSID_HEX_LEN = OSD_SYSTEMID_LEN * 2 + 1,
-5
fs/nfs/objlayout/objlayout.h
··· 149 149 extern void objlayout_write_done(struct objlayout_io_res *oir, 150 150 ssize_t status, bool sync); 151 151 152 - extern int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay, 153 - struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr, 154 - gfp_t gfp_flags); 155 - extern void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr); 156 - 157 152 /* 158 153 * exported generic objects function vectors 159 154 */
+8
fs/nfs/pagelist.c
··· 481 481 return 0; 482 482 } 483 483 484 + /* 485 + * Limit the request size so that we can still allocate a page array 486 + * for it without upsetting the slab allocator. 487 + */ 488 + if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) * 489 + sizeof(struct page) > PAGE_SIZE) 490 + return 0; 491 + 484 492 return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes); 485 493 } 486 494 EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
+85 -20
fs/nfs/pnfs.c
··· 594 594 dprintk("%s freeing layout for inode %lu\n", __func__, 595 595 lo->plh_inode->i_ino); 596 596 inode = lo->plh_inode; 597 + 598 + pnfs_layoutcommit_inode(inode, false); 599 + 597 600 spin_lock(&inode->i_lock); 598 601 list_del_init(&lo->plh_bulk_destroy); 599 602 lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */ ··· 685 682 return (s32)(s1 - s2) > 0; 686 683 } 687 684 688 - static void 689 - pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo, 690 - const nfs4_stateid *new, 691 - struct list_head *free_me_list) 692 - { 693 - if (nfs4_stateid_match_other(&lo->plh_stateid, new)) 694 - return; 695 - /* Layout is new! Kill existing layout segments */ 696 - pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL); 697 - } 698 - 699 685 /* update lo->plh_stateid with new if is more recent */ 700 686 void 701 687 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, ··· 741 749 status = -EAGAIN; 742 750 } else if (!nfs4_valid_open_stateid(open_state)) { 743 751 status = -EBADF; 744 - } else if (list_empty(&lo->plh_segs)) { 752 + } else if (list_empty(&lo->plh_segs) || 753 + test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) { 745 754 int seq; 746 755 747 756 do { ··· 857 864 empty = list_empty(&lo->plh_segs); 858 865 pnfs_clear_layoutcommit(ino, &tmp_list); 859 866 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); 867 + 868 + if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { 869 + struct pnfs_layout_range range = { 870 + .iomode = IOMODE_ANY, 871 + .offset = 0, 872 + .length = NFS4_MAX_UINT64, 873 + }; 874 + NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range); 875 + } 876 + 860 877 /* Don't send a LAYOUTRETURN if list was initially empty */ 861 878 if (empty) { 862 879 spin_unlock(&ino->i_lock); ··· 874 871 dprintk("NFS: %s no layout segments to return\n", __func__); 875 872 goto out; 876 873 } 874 + 875 + set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 877 876 lo->plh_block_lgets++; 878 877 
spin_unlock(&ino->i_lock); 879 878 pnfs_free_lseg_list(&tmp_list); ··· 1363 1358 goto out; 1364 1359 } 1365 1360 1361 + init_lseg(lo, lseg); 1362 + lseg->pls_range = res->range; 1363 + 1366 1364 spin_lock(&ino->i_lock); 1367 1365 if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { 1368 1366 dprintk("%s forget reply due to recall\n", __func__); 1369 1367 goto out_forget_reply; 1370 1368 } 1371 1369 1372 - if (pnfs_layoutgets_blocked(lo, 1) || 1373 - pnfs_layout_stateid_blocked(lo, &res->stateid)) { 1370 + if (pnfs_layoutgets_blocked(lo, 1)) { 1374 1371 dprintk("%s forget reply due to state\n", __func__); 1375 1372 goto out_forget_reply; 1376 1373 } 1377 1374 1378 - /* Check that the new stateid matches the old stateid */ 1379 - pnfs_verify_layout_stateid(lo, &res->stateid, &free_me); 1380 - /* Done processing layoutget. Set the layout stateid */ 1381 - pnfs_set_layout_stateid(lo, &res->stateid, false); 1375 + if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { 1376 + /* existing state ID, make sure the sequence number matches. */ 1377 + if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { 1378 + dprintk("%s forget reply due to sequence\n", __func__); 1379 + goto out_forget_reply; 1380 + } 1381 + pnfs_set_layout_stateid(lo, &res->stateid, false); 1382 + } else { 1383 + /* 1384 + * We got an entirely new state ID. Mark all segments for the 1385 + * inode invalid, and don't bother validating the stateid 1386 + * sequence number. 
1387 + */ 1388 + pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL); 1382 1389 1383 - init_lseg(lo, lseg); 1384 - lseg->pls_range = res->range; 1390 + nfs4_stateid_copy(&lo->plh_stateid, &res->stateid); 1391 + lo->plh_barrier = be32_to_cpu(res->stateid.seqid); 1392 + } 1393 + 1394 + clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1395 + 1385 1396 pnfs_get_lseg(lseg); 1386 1397 pnfs_layout_insert_lseg(lo, lseg); 1387 1398 ··· 1818 1797 } 1819 1798 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); 1820 1799 1800 + void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data) 1801 + { 1802 + struct inode *inode = data->inode; 1803 + struct nfs_inode *nfsi = NFS_I(inode); 1804 + bool mark_as_dirty = false; 1805 + 1806 + spin_lock(&inode->i_lock); 1807 + if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { 1808 + mark_as_dirty = true; 1809 + dprintk("%s: Set layoutcommit for inode %lu ", 1810 + __func__, inode->i_ino); 1811 + } 1812 + if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) { 1813 + /* references matched in nfs4_layoutcommit_release */ 1814 + pnfs_get_lseg(data->lseg); 1815 + } 1816 + if (data->lwb > nfsi->layout->plh_lwb) 1817 + nfsi->layout->plh_lwb = data->lwb; 1818 + spin_unlock(&inode->i_lock); 1819 + dprintk("%s: lseg %p end_pos %llu\n", 1820 + __func__, data->lseg, nfsi->layout->plh_lwb); 1821 + 1822 + /* if pnfs_layoutcommit_inode() runs between inode locks, the next one 1823 + * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ 1824 + if (mark_as_dirty) 1825 + mark_inode_dirty_sync(inode); 1826 + } 1827 + EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit); 1828 + 1821 1829 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) 1822 1830 { 1823 1831 struct nfs_server *nfss = NFS_SERVER(data->args.inode); ··· 1867 1817 int 1868 1818 pnfs_layoutcommit_inode(struct inode *inode, bool sync) 1869 1819 { 1820 + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 1870 1821 struct 
nfs4_layoutcommit_data *data; 1871 1822 struct nfs_inode *nfsi = NFS_I(inode); 1872 1823 loff_t end_pos; ··· 1917 1866 data->res.fattr = &data->fattr; 1918 1867 data->args.lastbytewritten = end_pos - 1; 1919 1868 data->res.server = NFS_SERVER(inode); 1869 + 1870 + if (ld->prepare_layoutcommit) { 1871 + status = ld->prepare_layoutcommit(&data->args); 1872 + if (status) { 1873 + spin_lock(&inode->i_lock); 1874 + if (end_pos < nfsi->layout->plh_lwb) 1875 + nfsi->layout->plh_lwb = end_pos; 1876 + spin_unlock(&inode->i_lock); 1877 + put_rpccred(data->cred); 1878 + set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 1879 + goto clear_layoutcommitting; 1880 + } 1881 + } 1882 + 1920 1883 1921 1884 status = nfs4_proc_layoutcommit(data, sync); 1922 1885 out:
+40 -10
fs/nfs/pnfs.h
··· 65 65 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ 66 66 NFS_LAYOUT_ROC, /* some lseg had roc bit set */ 67 67 NFS_LAYOUT_RETURN, /* Return this layout ASAP */ 68 + NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ 68 69 }; 69 70 70 71 enum layoutdriver_policy_flags { 71 - /* Should the pNFS client commit and return the layout upon a setattr */ 72 + /* Should the pNFS client commit and return the layout upon truncate to 73 + * a smaller size */ 72 74 PNFS_LAYOUTRET_ON_SETATTR = 1 << 0, 73 75 PNFS_LAYOUTRET_ON_ERROR = 1 << 1, 76 + PNFS_READ_WHOLE_PAGE = 1 << 2, 74 77 }; 75 78 76 79 struct nfs4_deviceid_node; ··· 85 82 const char *name; 86 83 struct module *owner; 87 84 unsigned flags; 85 + unsigned max_deviceinfo_size; 88 86 89 87 int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *); 90 88 int (*clear_layoutdriver) (struct nfs_server *); ··· 95 91 96 92 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); 97 93 void (*free_lseg) (struct pnfs_layout_segment *lseg); 94 + 95 + void (*return_range) (struct pnfs_layout_hdr *lo, 96 + struct pnfs_layout_range *range); 98 97 99 98 /* test for nfs page cache coalescing */ 100 99 const struct nfs_pageio_ops *pg_read_ops; ··· 128 121 enum pnfs_try_status (*write_pagelist)(struct nfs_pgio_header *, int); 129 122 130 123 void (*free_deviceid_node) (struct nfs4_deviceid_node *); 124 + struct nfs4_deviceid_node * (*alloc_deviceid_node) 125 + (struct nfs_server *server, struct pnfs_device *pdev, 126 + gfp_t gfp_flags); 131 127 132 128 void (*encode_layoutreturn) (struct pnfs_layout_hdr *layoutid, 133 129 struct xdr_stream *xdr, 134 130 const struct nfs4_layoutreturn_args *args); 135 131 136 132 void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data); 137 - 138 - void (*encode_layoutcommit) (struct pnfs_layout_hdr *layoutid, 133 + int (*prepare_layoutcommit) (struct nfs4_layoutcommit_args *args); 134 + 
void (*encode_layoutcommit) (struct pnfs_layout_hdr *lo, 139 135 struct xdr_stream *xdr, 140 136 const struct nfs4_layoutcommit_args *args); 141 137 }; ··· 181 171 extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *); 182 172 183 173 /* nfs4proc.c */ 184 - extern int nfs4_proc_getdevicelist(struct nfs_server *server, 185 - const struct nfs_fh *fh, 186 - struct pnfs_devicelist *devlist); 187 174 extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, 188 175 struct pnfs_device *dev, 189 176 struct rpc_cred *cred); ··· 226 219 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); 227 220 bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task); 228 221 void pnfs_set_layoutcommit(struct nfs_pgio_header *); 222 + void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data); 229 223 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); 230 224 int pnfs_layoutcommit_inode(struct inode *inode, bool sync); 231 225 int _pnfs_return_layout(struct inode *); ··· 263 255 atomic_t ref; 264 256 }; 265 257 266 - struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); 258 + struct nfs4_deviceid_node * 259 + nfs4_find_get_deviceid(struct nfs_server *server, 260 + const struct nfs4_deviceid *id, struct rpc_cred *cred, 261 + gfp_t gfp_mask); 267 262 void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *); 268 - void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, 269 - const struct pnfs_layoutdriver_type *, 270 - const struct nfs_client *, 263 + void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, struct nfs_server *, 271 264 const struct nfs4_deviceid *); 272 265 struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *); 273 266 bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *); 274 267 void 
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node); 275 268 bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node); 276 269 void nfs4_deviceid_purge_client(const struct nfs_client *); 270 + 271 + static inline struct nfs4_deviceid_node * 272 + nfs4_get_deviceid(struct nfs4_deviceid_node *d) 273 + { 274 + atomic_inc(&d->ref); 275 + return d; 276 + } 277 277 278 278 static inline struct pnfs_layout_segment * 279 279 pnfs_get_lseg(struct pnfs_layout_segment *lseg) ··· 384 368 } 385 369 386 370 static inline bool 371 + pnfs_ld_read_whole_page(struct inode *inode) 372 + { 373 + if (!pnfs_enabled_sb(NFS_SERVER(inode))) 374 + return false; 375 + return NFS_SERVER(inode)->pnfs_curr_ld->flags & PNFS_READ_WHOLE_PAGE; 376 + } 377 + 378 + static inline bool 387 379 pnfs_layoutcommit_outstanding(struct inode *inode) 388 380 { 389 381 struct nfs_inode *nfsi = NFS_I(inode); ··· 462 438 463 439 static inline bool 464 440 pnfs_ld_layoutret_on_setattr(struct inode *inode) 441 + { 442 + return false; 443 + } 444 + 445 + static inline bool 446 + pnfs_ld_read_whole_page(struct inode *inode) 465 447 { 466 448 return false; 467 449 }
+104 -46
fs/nfs/pnfs_dev.c
··· 29 29 */ 30 30 31 31 #include <linux/export.h> 32 + #include <linux/nfs_fs.h> 33 + #include "nfs4session.h" 34 + #include "internal.h" 32 35 #include "pnfs.h" 33 36 34 37 #define NFSDBG_FACILITY NFSDBG_PNFS ··· 92 89 return NULL; 93 90 } 94 91 92 + static struct nfs4_deviceid_node * 93 + nfs4_get_device_info(struct nfs_server *server, 94 + const struct nfs4_deviceid *dev_id, 95 + struct rpc_cred *cred, gfp_t gfp_flags) 96 + { 97 + struct nfs4_deviceid_node *d = NULL; 98 + struct pnfs_device *pdev = NULL; 99 + struct page **pages = NULL; 100 + u32 max_resp_sz; 101 + int max_pages; 102 + int rc, i; 103 + 104 + /* 105 + * Use the session max response size as the basis for setting 106 + * GETDEVICEINFO's maxcount 107 + */ 108 + max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 109 + if (server->pnfs_curr_ld->max_deviceinfo_size && 110 + server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz) 111 + max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size; 112 + max_pages = nfs_page_array_len(0, max_resp_sz); 113 + dprintk("%s: server %p max_resp_sz %u max_pages %d\n", 114 + __func__, server, max_resp_sz, max_pages); 115 + 116 + pdev = kzalloc(sizeof(*pdev), gfp_flags); 117 + if (!pdev) 118 + return NULL; 119 + 120 + pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); 121 + if (!pages) 122 + goto out_free_pdev; 123 + 124 + for (i = 0; i < max_pages; i++) { 125 + pages[i] = alloc_page(gfp_flags); 126 + if (!pages[i]) 127 + goto out_free_pages; 128 + } 129 + 130 + memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id)); 131 + pdev->layout_type = server->pnfs_curr_ld->id; 132 + pdev->pages = pages; 133 + pdev->pgbase = 0; 134 + pdev->pglen = max_resp_sz; 135 + pdev->mincount = 0; 136 + pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead; 137 + 138 + rc = nfs4_proc_getdeviceinfo(server, pdev, cred); 139 + dprintk("%s getdevice info returns %d\n", __func__, rc); 140 + if (rc) 141 + goto out_free_pages; 142 + 143 + /* 144 + * Found new device, 
need to decode it and then add it to the 145 + * list of known devices for this mountpoint. 146 + */ 147 + d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev, 148 + gfp_flags); 149 + 150 + out_free_pages: 151 + for (i = 0; i < max_pages; i++) 152 + __free_page(pages[i]); 153 + kfree(pages); 154 + out_free_pdev: 155 + kfree(pdev); 156 + dprintk("<-- %s d %p\n", __func__, d); 157 + return d; 158 + } 159 + 95 160 /* 96 161 * Lookup a deviceid in cache and get a reference count on it if found 97 162 * ··· 167 96 * @id deviceid to look up 168 97 */ 169 98 static struct nfs4_deviceid_node * 170 - _find_get_deviceid(const struct pnfs_layoutdriver_type *ld, 171 - const struct nfs_client *clp, const struct nfs4_deviceid *id, 172 - long hash) 99 + __nfs4_find_get_deviceid(struct nfs_server *server, 100 + const struct nfs4_deviceid *id, long hash) 173 101 { 174 102 struct nfs4_deviceid_node *d; 175 103 176 104 rcu_read_lock(); 177 - d = _lookup_deviceid(ld, clp, id, hash); 105 + d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id, 106 + hash); 178 107 if (d != NULL) 179 108 atomic_inc(&d->ref); 180 109 rcu_read_unlock(); ··· 182 111 } 183 112 184 113 struct nfs4_deviceid_node * 185 - nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld, 186 - const struct nfs_client *clp, const struct nfs4_deviceid *id) 114 + nfs4_find_get_deviceid(struct nfs_server *server, 115 + const struct nfs4_deviceid *id, struct rpc_cred *cred, 116 + gfp_t gfp_mask) 187 117 { 188 - return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id)); 118 + long hash = nfs4_deviceid_hash(id); 119 + struct nfs4_deviceid_node *d, *new; 120 + 121 + d = __nfs4_find_get_deviceid(server, id, hash); 122 + if (d) 123 + return d; 124 + 125 + new = nfs4_get_device_info(server, id, cred, gfp_mask); 126 + if (!new) 127 + return new; 128 + 129 + spin_lock(&nfs4_deviceid_lock); 130 + d = __nfs4_find_get_deviceid(server, id, hash); 131 + if (d) { 132 + spin_unlock(&nfs4_deviceid_lock); 133 + 
server->pnfs_curr_ld->free_deviceid_node(new); 134 + return d; 135 + } 136 + hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); 137 + atomic_inc(&new->ref); 138 + spin_unlock(&nfs4_deviceid_lock); 139 + 140 + return new; 189 141 } 190 142 EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid); 191 143 ··· 245 151 EXPORT_SYMBOL_GPL(nfs4_delete_deviceid); 246 152 247 153 void 248 - nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, 249 - const struct pnfs_layoutdriver_type *ld, 250 - const struct nfs_client *nfs_client, 154 + nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server, 251 155 const struct nfs4_deviceid *id) 252 156 { 253 157 INIT_HLIST_NODE(&d->node); 254 158 INIT_HLIST_NODE(&d->tmpnode); 255 - d->ld = ld; 256 - d->nfs_client = nfs_client; 159 + d->ld = server->pnfs_curr_ld; 160 + d->nfs_client = server->nfs_client; 257 161 d->flags = 0; 258 162 d->deviceid = *id; 259 163 atomic_set(&d->ref, 1); 260 164 } 261 165 EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node); 262 - 263 - /* 264 - * Uniquely initialize and insert a deviceid node into cache 265 - * 266 - * @new new deviceid node 267 - * Note that the caller must set up the following members: 268 - * new->ld 269 - * new->nfs_client 270 - * new->deviceid 271 - * 272 - * @ret the inserted node, if none found, otherwise, the found entry. 
273 - */ 274 - struct nfs4_deviceid_node * 275 - nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new) 276 - { 277 - struct nfs4_deviceid_node *d; 278 - long hash; 279 - 280 - spin_lock(&nfs4_deviceid_lock); 281 - hash = nfs4_deviceid_hash(&new->deviceid); 282 - d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash); 283 - if (d) { 284 - spin_unlock(&nfs4_deviceid_lock); 285 - return d; 286 - } 287 - 288 - hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); 289 - spin_unlock(&nfs4_deviceid_lock); 290 - atomic_inc(&new->ref); 291 - 292 - return new; 293 - } 294 - EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node); 295 166 296 167 /* 297 168 * Dereference a deviceid node and delete it when its reference count drops ··· 358 299 } 359 300 rcu_read_unlock(); 360 301 } 361 -
-11
fs/nfs/super.c
··· 2065 2065 return NFS_TEXT_DATA; 2066 2066 } 2067 2067 2068 - #if !IS_ENABLED(CONFIG_NFS_V3) 2069 - if (args->version == 3) 2070 - goto out_v3_not_compiled; 2071 - #endif /* !CONFIG_NFS_V3 */ 2072 - 2073 2068 return 0; 2074 2069 2075 2070 out_no_data: ··· 2079 2084 out_no_sec: 2080 2085 dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n"); 2081 2086 return -EINVAL; 2082 - 2083 - #if !IS_ENABLED(CONFIG_NFS_V3) 2084 - out_v3_not_compiled: 2085 - dfprintk(MOUNT, "NFS: NFSv3 is not compiled into kernel\n"); 2086 - return -EPROTONOSUPPORT; 2087 - #endif /* !CONFIG_NFS_V3 */ 2088 2087 2089 2088 out_nomem: 2090 2089 dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
+62 -88
fs/nfs/write.c
··· 49 49 static void nfs_clear_request_commit(struct nfs_page *req); 50 50 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 51 51 struct inode *inode); 52 + static struct nfs_page * 53 + nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, 54 + struct page *page); 52 55 53 56 static struct kmem_cache *nfs_wdata_cachep; 54 57 static mempool_t *nfs_wdata_mempool; ··· 95 92 ctx->error = error; 96 93 smp_wmb(); 97 94 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 98 - } 99 - 100 - /* 101 - * nfs_page_search_commits_for_head_request_locked 102 - * 103 - * Search through commit lists on @inode for the head request for @page. 104 - * Must be called while holding the inode (which is cinfo) lock. 105 - * 106 - * Returns the head request if found, or NULL if not found. 107 - */ 108 - static struct nfs_page * 109 - nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, 110 - struct page *page) 111 - { 112 - struct nfs_page *freq, *t; 113 - struct nfs_commit_info cinfo; 114 - struct inode *inode = &nfsi->vfs_inode; 115 - 116 - nfs_init_cinfo_from_inode(&cinfo, inode); 117 - 118 - /* search through pnfs commit lists */ 119 - freq = pnfs_search_commit_reqs(inode, &cinfo, page); 120 - if (freq) 121 - return freq->wb_head; 122 - 123 - /* Linearly search the commit list for the correct request */ 124 - list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { 125 - if (freq->wb_page == page) 126 - return freq->wb_head; 127 - } 128 - 129 - return NULL; 130 95 } 131 96 132 97 /* ··· 242 271 243 272 static int wb_priority(struct writeback_control *wbc) 244 273 { 274 + int ret = 0; 245 275 if (wbc->for_reclaim) 246 276 return FLUSH_HIGHPRI | FLUSH_STABLE; 277 + if (wbc->sync_mode == WB_SYNC_ALL) 278 + ret = FLUSH_COND_STABLE; 247 279 if (wbc->for_kupdate || wbc->for_background) 248 - return FLUSH_LOWPRI | FLUSH_COND_STABLE; 249 - return FLUSH_COND_STABLE; 280 + ret |= FLUSH_LOWPRI; 281 + return ret; 250 282 } 251 283 252 284 
/* ··· 705 731 if (likely(!PageSwapCache(head->wb_page))) { 706 732 set_page_private(head->wb_page, 0); 707 733 ClearPagePrivate(head->wb_page); 734 + smp_mb__after_atomic(); 735 + wake_up_page(head->wb_page, PG_private); 708 736 clear_bit(PG_MAPPED, &head->wb_flags); 709 737 } 710 738 nfsi->npages--; ··· 725 749 __set_page_dirty_nobuffers(req->wb_page); 726 750 } 727 751 728 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 752 + /* 753 + * nfs_page_search_commits_for_head_request_locked 754 + * 755 + * Search through commit lists on @inode for the head request for @page. 756 + * Must be called while holding the inode (which is cinfo) lock. 757 + * 758 + * Returns the head request if found, or NULL if not found. 759 + */ 760 + static struct nfs_page * 761 + nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, 762 + struct page *page) 763 + { 764 + struct nfs_page *freq, *t; 765 + struct nfs_commit_info cinfo; 766 + struct inode *inode = &nfsi->vfs_inode; 767 + 768 + nfs_init_cinfo_from_inode(&cinfo, inode); 769 + 770 + /* search through pnfs commit lists */ 771 + freq = pnfs_search_commit_reqs(inode, &cinfo, page); 772 + if (freq) 773 + return freq->wb_head; 774 + 775 + /* Linearly search the commit list for the correct request */ 776 + list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { 777 + if (freq->wb_page == page) 778 + return freq->wb_head; 779 + } 780 + 781 + return NULL; 782 + } 783 + 729 784 /** 730 785 * nfs_request_add_commit_list - add request to a commit list 731 786 * @req: pointer to a struct nfs_page ··· 874 867 return hdr->verf.committed != NFS_FILE_SYNC; 875 868 } 876 869 877 - #else 878 - static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 879 - struct inode *inode) 880 - { 881 - } 882 - 883 - void nfs_init_cinfo(struct nfs_commit_info *cinfo, 884 - struct inode *inode, 885 - struct nfs_direct_req *dreq) 886 - { 887 - } 888 - 889 - void 890 - nfs_mark_request_commit(struct nfs_page *req, 
struct pnfs_layout_segment *lseg, 891 - struct nfs_commit_info *cinfo) 892 - { 893 - } 894 - 895 - static void 896 - nfs_clear_request_commit(struct nfs_page *req) 897 - { 898 - } 899 - 900 - int nfs_write_need_commit(struct nfs_pgio_header *hdr) 901 - { 902 - return 0; 903 - } 904 - 905 - #endif 906 - 907 870 static void nfs_write_completion(struct nfs_pgio_header *hdr) 908 871 { 909 872 struct nfs_commit_info cinfo; ··· 909 932 hdr->release(hdr); 910 933 } 911 934 912 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 913 935 unsigned long 914 936 nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 915 937 { ··· 964 988 spin_unlock(cinfo->lock); 965 989 return ret; 966 990 } 967 - 968 - #else 969 - unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 970 - { 971 - return 0; 972 - } 973 - 974 - int nfs_scan_commit(struct inode *inode, struct list_head *dst, 975 - struct nfs_commit_info *cinfo) 976 - { 977 - return 0; 978 - } 979 - #endif 980 991 981 992 /* 982 993 * Search for an existing write request, and attempt to update ··· 1357 1394 return status; 1358 1395 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); 1359 1396 1360 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 1361 1397 if (hdr->res.verf->committed < hdr->args.stable && 1362 1398 task->tk_status >= 0) { 1363 1399 /* We tried a write call, but the server did not ··· 1378 1416 complain = jiffies + 300 * HZ; 1379 1417 } 1380 1418 } 1381 - #endif 1382 1419 1383 1420 /* Deal with the suid/sgid bit corner case */ 1384 1421 if (nfs_should_remove_suid(inode)) ··· 1430 1469 } 1431 1470 1432 1471 1433 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 1434 1472 static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) 1435 1473 { 1436 1474 int ret; ··· 1498 1538 } 1499 1539 EXPORT_SYMBOL_GPL(nfs_initiate_commit); 1500 1540 1541 + static loff_t nfs_get_lwb(struct list_head *head) 1542 + { 1543 + loff_t lwb = 0; 1544 + struct nfs_page *req; 1545 
+ 1546 + list_for_each_entry(req, head, wb_list) 1547 + if (lwb < (req_offset(req) + req->wb_bytes)) 1548 + lwb = req_offset(req) + req->wb_bytes; 1549 + 1550 + return lwb; 1551 + } 1552 + 1501 1553 /* 1502 1554 * Set up the argument/result storage required for the RPC call. 1503 1555 */ ··· 1529 1557 data->inode = inode; 1530 1558 data->cred = first->wb_context->cred; 1531 1559 data->lseg = lseg; /* reference transferred */ 1560 + /* only set lwb for pnfs commit */ 1561 + if (lseg) 1562 + data->lwb = nfs_get_lwb(&data->pages); 1532 1563 data->mds_ops = &nfs_commit_ops; 1533 1564 data->completion_ops = cinfo->completion_ops; 1534 1565 data->dreq = cinfo->dreq; ··· 1611 1636 struct nfs_page *req; 1612 1637 int status = data->task.tk_status; 1613 1638 struct nfs_commit_info cinfo; 1639 + struct nfs_server *nfss; 1614 1640 1615 1641 while (!list_empty(&data->pages)) { 1616 1642 req = nfs_list_entry(data->pages.next); ··· 1645 1669 next: 1646 1670 nfs_unlock_and_release_request(req); 1647 1671 } 1672 + nfss = NFS_SERVER(data->inode); 1673 + if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 1674 + clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 1675 + 1648 1676 nfs_init_cinfo(&cinfo, data->inode, data->dreq); 1649 1677 if (atomic_dec_and_test(&cinfo.mds->rpcs_out)) 1650 1678 nfs_commit_clear_lock(NFS_I(data->inode)); ··· 1758 1778 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1759 1779 return ret; 1760 1780 } 1761 - #else 1762 - static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1763 - { 1764 - return 0; 1765 - } 1766 - #endif 1767 1781 1768 1782 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1769 1783 {
-41
include/linux/nfs_fs.h
··· 443 443 } 444 444 445 445 /* 446 - * linux/fs/nfs/xattr.c 447 - */ 448 - #ifdef CONFIG_NFS_V3_ACL 449 - extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); 450 - extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t); 451 - extern int nfs3_setxattr(struct dentry *, const char *, 452 - const void *, size_t, int); 453 - extern int nfs3_removexattr (struct dentry *, const char *name); 454 - #else 455 - # define nfs3_listxattr NULL 456 - # define nfs3_getxattr NULL 457 - # define nfs3_setxattr NULL 458 - # define nfs3_removexattr NULL 459 - #endif 460 - 461 - /* 462 446 * linux/fs/nfs/direct.c 463 447 */ 464 448 extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); ··· 513 529 extern int nfs_wb_all(struct inode *inode); 514 530 extern int nfs_wb_page(struct inode *inode, struct page* page); 515 531 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 516 - #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 517 532 extern int nfs_commit_inode(struct inode *, int); 518 533 extern struct nfs_commit_data *nfs_commitdata_alloc(void); 519 534 extern void nfs_commit_free(struct nfs_commit_data *data); 520 - #else 521 - static inline int 522 - nfs_commit_inode(struct inode *inode, int how) 523 - { 524 - return 0; 525 - } 526 - #endif 527 535 528 536 static inline int 529 537 nfs_have_writebacks(struct inode *inode) ··· 531 555 struct list_head *, unsigned); 532 556 extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, 533 557 struct page *); 534 - 535 - /* 536 - * linux/fs/nfs3proc.c 537 - */ 538 - #ifdef CONFIG_NFS_V3_ACL 539 - extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type); 540 - extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type); 541 - extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 542 - struct posix_acl *dfacl); 543 - extern const struct xattr_handler *nfs3_xattr_handlers[]; 544 - #else 545 - 
static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, 546 - struct posix_acl *dfacl) 547 - { 548 - return 0; 549 - } 550 - #endif /* CONFIG_NFS_V3_ACL */ 551 558 552 559 /* 553 560 * inline functions
+6 -11
include/linux/nfs_xdr.h
··· 252 252 gfp_t gfp_flags; 253 253 }; 254 254 255 - struct nfs4_getdevicelist_args { 256 - struct nfs4_sequence_args seq_args; 257 - const struct nfs_fh *fh; 258 - u32 layoutclass; 259 - }; 260 - 261 - struct nfs4_getdevicelist_res { 262 - struct nfs4_sequence_res seq_res; 263 - struct pnfs_devicelist *devlist; 264 - }; 265 - 266 255 struct nfs4_getdeviceinfo_args { 267 256 struct nfs4_sequence_args seq_args; 268 257 struct pnfs_device *pdev; ··· 268 279 __u64 lastbytewritten; 269 280 struct inode *inode; 270 281 const u32 *bitmask; 282 + size_t layoutupdate_len; 283 + struct page *layoutupdate_page; 284 + struct page **layoutupdate_pages; 271 285 }; 272 286 273 287 struct nfs4_layoutcommit_res { ··· 1320 1328 struct pnfs_layout_segment *lseg; 1321 1329 struct nfs_client *ds_clp; /* pNFS data server */ 1322 1330 int ds_commit_index; 1331 + loff_t lwb; 1323 1332 const struct rpc_call_ops *mds_ops; 1324 1333 const struct nfs_commit_completion_ops *completion_ops; 1325 1334 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); ··· 1339 1346 struct inode *dir; 1340 1347 struct rpc_cred *cred; 1341 1348 struct nfs_fattr dir_attr; 1349 + long timeout; 1342 1350 }; 1343 1351 1344 1352 struct nfs_renamedata { ··· 1353 1359 struct dentry *new_dentry; 1354 1360 struct nfs_fattr new_fattr; 1355 1361 void (*complete)(struct rpc_task *, struct nfs_renamedata *); 1362 + long timeout; 1356 1363 }; 1357 1364 1358 1365 struct nfs_access_entry;
+10 -2
include/linux/pagemap.h
··· 496 496 } 497 497 498 498 /* 499 - * This is exported only for wait_on_page_locked/wait_on_page_writeback. 500 - * Never use this directly! 499 + * This is exported only for wait_on_page_locked/wait_on_page_writeback, 500 + * and for filesystems which need to wait on PG_private. 501 501 */ 502 502 extern void wait_on_page_bit(struct page *page, int bit_nr); 503 503 504 504 extern int wait_on_page_bit_killable(struct page *page, int bit_nr); 505 + extern int wait_on_page_bit_killable_timeout(struct page *page, 506 + int bit_nr, unsigned long timeout); 505 507 506 508 static inline int wait_on_page_locked_killable(struct page *page) 507 509 { 508 510 if (PageLocked(page)) 509 511 return wait_on_page_bit_killable(page, PG_locked); 510 512 return 0; 513 + } 514 + 515 + extern wait_queue_head_t *page_waitqueue(struct page *page); 516 + static inline void wake_up_page(struct page *page, int bit) 517 + { 518 + __wake_up_bit(page_waitqueue(page), &page->flags, bit); 511 519 } 512 520 513 521 /*
+1
include/linux/sunrpc/xprt.h
··· 357 357 #define XPRT_CONNECTION_ABORT (7) 358 358 #define XPRT_CONNECTION_CLOSE (8) 359 359 #define XPRT_CONGESTED (9) 360 + #define XPRT_CONNECTION_REUSE (10) 360 361 361 362 static inline void xprt_set_connected(struct rpc_xprt *xprt) 362 363 {
+4 -1
include/linux/wait.h
··· 25 25 void *flags; 26 26 int bit_nr; 27 27 #define WAIT_ATOMIC_T_BIT_NR -1 28 - unsigned long private; 28 + unsigned long timeout; 29 29 }; 30 30 31 31 struct wait_bit_queue { ··· 154 154 void wake_up_bit(void *, int); 155 155 void wake_up_atomic_t(atomic_t *); 156 156 int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); 157 + int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long); 157 158 int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); 158 159 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); 159 160 wait_queue_head_t *bit_waitqueue(void *, int); ··· 860 859 861 860 extern int bit_wait(struct wait_bit_key *); 862 861 extern int bit_wait_io(struct wait_bit_key *); 862 + extern int bit_wait_timeout(struct wait_bit_key *); 863 + extern int bit_wait_io_timeout(struct wait_bit_key *); 863 864 864 865 /** 865 866 * wait_on_bit - wait for a bit to be cleared
+36
kernel/sched/wait.c
··· 343 343 } 344 344 EXPORT_SYMBOL(out_of_line_wait_on_bit); 345 345 346 + int __sched out_of_line_wait_on_bit_timeout( 347 + void *word, int bit, wait_bit_action_f *action, 348 + unsigned mode, unsigned long timeout) 349 + { 350 + wait_queue_head_t *wq = bit_waitqueue(word, bit); 351 + DEFINE_WAIT_BIT(wait, word, bit); 352 + 353 + wait.key.timeout = jiffies + timeout; 354 + return __wait_on_bit(wq, &wait, action, mode); 355 + } 356 + EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout); 357 + 346 358 int __sched 347 359 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, 348 360 wait_bit_action_f *action, unsigned mode) ··· 532 520 return 0; 533 521 } 534 522 EXPORT_SYMBOL(bit_wait_io); 523 + 524 + __sched int bit_wait_timeout(struct wait_bit_key *word) 525 + { 526 + unsigned long now = ACCESS_ONCE(jiffies); 527 + if (signal_pending_state(current->state, current)) 528 + return 1; 529 + if (time_after_eq(now, word->timeout)) 530 + return -EAGAIN; 531 + schedule_timeout(word->timeout - now); 532 + return 0; 533 + } 534 + EXPORT_SYMBOL_GPL(bit_wait_timeout); 535 + 536 + __sched int bit_wait_io_timeout(struct wait_bit_key *word) 537 + { 538 + unsigned long now = ACCESS_ONCE(jiffies); 539 + if (signal_pending_state(current->state, current)) 540 + return 1; 541 + if (time_after_eq(now, word->timeout)) 542 + return -EAGAIN; 543 + io_schedule_timeout(word->timeout - now); 544 + return 0; 545 + } 546 + EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
+15 -6
mm/filemap.c
··· 670 670 * at a cost of "thundering herd" phenomena during rare hash 671 671 * collisions. 672 672 */ 673 - static wait_queue_head_t *page_waitqueue(struct page *page) 673 + wait_queue_head_t *page_waitqueue(struct page *page) 674 674 { 675 675 const struct zone *zone = page_zone(page); 676 676 677 677 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; 678 678 } 679 - 680 - static inline void wake_up_page(struct page *page, int bit) 681 - { 682 - __wake_up_bit(page_waitqueue(page), &page->flags, bit); 683 - } 679 + EXPORT_SYMBOL(page_waitqueue); 684 680 685 681 void wait_on_page_bit(struct page *page, int bit_nr) 686 682 { ··· 698 702 return __wait_on_bit(page_waitqueue(page), &wait, 699 703 bit_wait_io, TASK_KILLABLE); 700 704 } 705 + 706 + int wait_on_page_bit_killable_timeout(struct page *page, 707 + int bit_nr, unsigned long timeout) 708 + { 709 + DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); 710 + 711 + wait.key.timeout = jiffies + timeout; 712 + if (!test_bit(bit_nr, &page->flags)) 713 + return 0; 714 + return __wait_on_bit(page_waitqueue(page), &wait, 715 + bit_wait_io_timeout, TASK_KILLABLE); 716 + } 717 + EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout); 701 718 702 719 /** 703 720 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+5
net/sunrpc/clnt.c
··· 461 461 462 462 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 463 463 clnt->cl_autobind = 1; 464 + if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT) 465 + clnt->cl_noretranstimeo = 1; 464 466 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 465 467 clnt->cl_discrtry = 1; 466 468 if (!(args->flags & RPC_CLNT_CREATE_QUIET)) ··· 581 579 /* Turn off autobind on clones */ 582 580 new->cl_autobind = 0; 583 581 new->cl_softrtry = clnt->cl_softrtry; 582 + new->cl_noretranstimeo = clnt->cl_noretranstimeo; 584 583 new->cl_discrtry = clnt->cl_discrtry; 585 584 new->cl_chatty = clnt->cl_chatty; 586 585 return new; ··· 1916 1913 case -EHOSTDOWN: 1917 1914 case -EHOSTUNREACH: 1918 1915 case -ENETUNREACH: 1916 + case -EPERM: 1919 1917 if (RPC_IS_SOFTCONN(task)) { 1920 1918 xprt_end_transmit(task); 1921 1919 rpc_exit(task, task->tk_status); ··· 2022 2018 case -EHOSTDOWN: 2023 2019 case -EHOSTUNREACH: 2024 2020 case -ENETUNREACH: 2021 + case -EPERM: 2025 2022 if (RPC_IS_SOFTCONN(task)) { 2026 2023 rpc_exit(task, status); 2027 2024 break;
-2
net/sunrpc/sched.c
··· 821 821 822 822 static void rpc_async_schedule(struct work_struct *work) 823 823 { 824 - current->flags |= PF_FSTRANS; 825 824 __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 826 - current->flags &= ~PF_FSTRANS; 827 825 } 828 826 829 827 /**
-2
net/sunrpc/xprtrdma/transport.c
··· 205 205 struct rpc_xprt *xprt = &r_xprt->xprt; 206 206 int rc = 0; 207 207 208 - current->flags |= PF_FSTRANS; 209 208 xprt_clear_connected(xprt); 210 209 211 210 dprintk("RPC: %s: %sconnect\n", __func__, ··· 215 216 216 217 dprintk("RPC: %s: exit\n", __func__); 217 218 xprt_clear_connecting(xprt); 218 - current->flags &= ~PF_FSTRANS; 219 219 } 220 220 221 221 /*
+70 -51
net/sunrpc/xprtsock.c
··· 399 399 return kernel_sendmsg(sock, &msg, NULL, 0, 0); 400 400 } 401 401 402 - static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy) 402 + static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p) 403 403 { 404 404 ssize_t (*do_sendpage)(struct socket *sock, struct page *page, 405 405 int offset, size_t size, int flags); 406 406 struct page **ppage; 407 407 unsigned int remainder; 408 - int err, sent = 0; 408 + int err; 409 409 410 410 remainder = xdr->page_len - base; 411 411 base += xdr->page_base; ··· 424 424 err = do_sendpage(sock, *ppage, base, len, flags); 425 425 if (remainder == 0 || err != len) 426 426 break; 427 - sent += err; 427 + *sent_p += err; 428 428 ppage++; 429 429 base = 0; 430 430 } 431 - if (sent == 0) 432 - return err; 433 - if (err > 0) 434 - sent += err; 435 - return sent; 431 + if (err > 0) { 432 + *sent_p += err; 433 + err = 0; 434 + } 435 + return err; 436 436 } 437 437 438 438 /** ··· 443 443 * @xdr: buffer containing this request 444 444 * @base: starting position in the buffer 445 445 * @zerocopy: true if it is safe to use sendpage() 446 + * @sent_p: return the total number of bytes successfully queued for sending 446 447 * 447 448 */ 448 - static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy) 449 + static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p) 449 450 { 450 451 unsigned int remainder = xdr->len - base; 451 - int err, sent = 0; 452 + int err = 0; 453 + int sent = 0; 452 454 453 455 if (unlikely(!sock)) 454 456 return -ENOTSOCK; ··· 467 465 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0); 468 466 if (remainder == 0 || err != len) 469 467 goto out; 470 - sent += err; 468 + *sent_p += err; 471 469 
base = 0; 472 470 } else 473 471 base -= xdr->head[0].iov_len; ··· 475 473 if (base < xdr->page_len) { 476 474 unsigned int len = xdr->page_len - base; 477 475 remainder -= len; 478 - err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy); 479 - if (remainder == 0 || err != len) 476 + err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent); 477 + *sent_p += sent; 478 + if (remainder == 0 || sent != len) 480 479 goto out; 481 - sent += err; 482 480 base = 0; 483 481 } else 484 482 base -= xdr->page_len; 485 483 486 484 if (base >= xdr->tail[0].iov_len) 487 - return sent; 485 + return 0; 488 486 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); 489 487 out: 490 - if (sent == 0) 491 - return err; 492 - if (err > 0) 493 - sent += err; 494 - return sent; 488 + if (err > 0) { 489 + *sent_p += err; 490 + err = 0; 491 + } 492 + return err; 495 493 } 496 494 497 495 static void xs_nospace_callback(struct rpc_task *task) ··· 575 573 container_of(xprt, struct sock_xprt, xprt); 576 574 struct xdr_buf *xdr = &req->rq_snd_buf; 577 575 int status; 576 + int sent = 0; 578 577 579 578 xs_encode_stream_record_marker(&req->rq_snd_buf); 580 579 581 580 xs_pktdump("packet data:", 582 581 req->rq_svec->iov_base, req->rq_svec->iov_len); 583 582 584 - status = xs_sendpages(transport->sock, NULL, 0, 585 - xdr, req->rq_bytes_sent, true); 583 + status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent, 584 + true, &sent); 586 585 dprintk("RPC: %s(%u) = %d\n", 587 586 __func__, xdr->len - req->rq_bytes_sent, status); 588 - if (likely(status >= 0)) { 589 - req->rq_bytes_sent += status; 590 - req->rq_xmit_bytes_sent += status; 587 + if (likely(sent > 0) || status == 0) { 588 + req->rq_bytes_sent += sent; 589 + req->rq_xmit_bytes_sent += sent; 591 590 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 592 591 req->rq_bytes_sent = 0; 593 592 return 0; ··· 629 626 struct rpc_xprt *xprt = req->rq_xprt; 630 627 struct sock_xprt *transport = 
container_of(xprt, struct sock_xprt, xprt); 631 628 struct xdr_buf *xdr = &req->rq_snd_buf; 629 + int sent = 0; 632 630 int status; 633 631 634 632 xs_pktdump("packet data:", ··· 638 634 639 635 if (!xprt_bound(xprt)) 640 636 return -ENOTCONN; 641 - status = xs_sendpages(transport->sock, 642 - xs_addr(xprt), 643 - xprt->addrlen, xdr, 644 - req->rq_bytes_sent, true); 637 + status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, 638 + xdr, req->rq_bytes_sent, true, &sent); 645 639 646 640 dprintk("RPC: xs_udp_send_request(%u) = %d\n", 647 641 xdr->len - req->rq_bytes_sent, status); 648 642 649 - if (status >= 0) { 650 - req->rq_xmit_bytes_sent += status; 651 - if (status >= req->rq_slen) 643 + /* firewall is blocking us, don't return -EAGAIN or we end up looping */ 644 + if (status == -EPERM) 645 + goto process_status; 646 + 647 + if (sent > 0 || status == 0) { 648 + req->rq_xmit_bytes_sent += sent; 649 + if (sent >= req->rq_slen) 652 650 return 0; 653 651 /* Still some bytes left; set up for a retry later. */ 654 652 status = -EAGAIN; 655 653 } 656 654 655 + process_status: 657 656 switch (status) { 658 657 case -ENOTSOCK: 659 658 status = -ENOTCONN; ··· 672 665 case -ENOBUFS: 673 666 case -EPIPE: 674 667 case -ECONNREFUSED: 668 + case -EPERM: 675 669 /* When the server has died, an ICMP port unreachable message 676 670 * prompts ECONNREFUSED. */ 677 671 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); ··· 721 713 struct xdr_buf *xdr = &req->rq_snd_buf; 722 714 bool zerocopy = true; 723 715 int status; 716 + int sent; 724 717 725 718 xs_encode_stream_record_marker(&req->rq_snd_buf); 726 719 ··· 739 730 * to cope with writespace callbacks arriving _after_ we have 740 731 * called sendmsg(). 
*/ 741 732 while (1) { 742 - status = xs_sendpages(transport->sock, 743 - NULL, 0, xdr, req->rq_bytes_sent, 744 - zerocopy); 733 + sent = 0; 734 + status = xs_sendpages(transport->sock, NULL, 0, xdr, 735 + req->rq_bytes_sent, zerocopy, &sent); 745 736 746 737 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 747 738 xdr->len - req->rq_bytes_sent, status); 748 739 749 - if (unlikely(status < 0)) 740 + if (unlikely(sent == 0 && status < 0)) 750 741 break; 751 742 752 743 /* If we've sent the entire packet, immediately 753 744 * reset the count of bytes sent. */ 754 - req->rq_bytes_sent += status; 755 - req->rq_xmit_bytes_sent += status; 745 + req->rq_bytes_sent += sent; 746 + req->rq_xmit_bytes_sent += sent; 756 747 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 757 748 req->rq_bytes_sent = 0; 758 749 return 0; 759 750 } 760 751 761 - if (status != 0) 752 + if (sent != 0) 762 753 continue; 763 754 status = -EAGAIN; 764 755 break; ··· 854 845 dprintk("RPC: xs_error_report client %p, error=%d...\n", 855 846 xprt, -err); 856 847 trace_rpc_socket_error(xprt, sk->sk_socket, err); 848 + if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state)) 849 + goto out; 857 850 xprt_wake_pending_tasks(xprt, err); 858 851 out: 859 852 read_unlock_bh(&sk->sk_callback_lock); ··· 1757 1746 unsigned short port = xs_get_srcport(transport); 1758 1747 unsigned short last; 1759 1748 1749 + /* 1750 + * If we are asking for any ephemeral port (i.e. port == 0 && 1751 + * transport->xprt.resvport == 0), don't bind. Let the local 1752 + * port selection happen implicitly when the socket is used 1753 + * (for example at connect time). 1754 + * 1755 + * This ensures that we can continue to establish TCP 1756 + * connections even when all local ephemeral ports are already 1757 + * a part of some TCP connection. This makes no difference 1758 + * for UDP sockets, but also doens't harm them. 1759 + * 1760 + * If we're asking for any reserved port (i.e. 
port == 0 && 1761 + * transport->xprt.resvport == 1) xs_get_srcport above will 1762 + * ensure that port is non-zero and we will bind as needed. 1763 + */ 1764 + if (port == 0) 1765 + return 0; 1766 + 1760 1767 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); 1761 1768 do { 1762 1769 rpc_set_port((struct sockaddr *)&myaddr, port); 1763 1770 err = kernel_bind(sock, (struct sockaddr *)&myaddr, 1764 1771 transport->xprt.addrlen); 1765 - if (port == 0) 1766 - break; 1767 1772 if (err == 0) { 1768 1773 transport->srcport = port; 1769 1774 break; ··· 1954 1927 struct socket *sock; 1955 1928 int status = -EIO; 1956 1929 1957 - current->flags |= PF_FSTRANS; 1958 - 1959 1930 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1960 1931 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1961 1932 SOCK_STREAM, 0, &sock, 1); ··· 1993 1968 out: 1994 1969 xprt_clear_connecting(xprt); 1995 1970 xprt_wake_pending_tasks(xprt, status); 1996 - current->flags &= ~PF_FSTRANS; 1997 1971 return status; 1998 1972 } 1999 1973 ··· 2095 2071 struct socket *sock = transport->sock; 2096 2072 int status = -EIO; 2097 2073 2098 - current->flags |= PF_FSTRANS; 2099 - 2100 2074 /* Start by resetting any existing state */ 2101 2075 xs_reset_transport(transport); 2102 2076 sock = xs_create_sock(xprt, transport, ··· 2114 2092 out: 2115 2093 xprt_clear_connecting(xprt); 2116 2094 xprt_wake_pending_tasks(xprt, status); 2117 - current->flags &= ~PF_FSTRANS; 2118 2095 } 2119 2096 2120 2097 /* ··· 2250 2229 struct rpc_xprt *xprt = &transport->xprt; 2251 2230 int status = -EIO; 2252 2231 2253 - current->flags |= PF_FSTRANS; 2254 - 2255 2232 if (!sock) { 2256 2233 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 2257 2234 sock = xs_create_sock(xprt, transport, ··· 2264 2245 abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT, 2265 2246 &xprt->state); 2266 2247 /* "close" the socket, preserving the local port */ 2248 + set_bit(XPRT_CONNECTION_REUSE, &xprt->state); 2267 2249 
xs_tcp_reuse_connection(transport); 2250 + clear_bit(XPRT_CONNECTION_REUSE, &xprt->state); 2268 2251 2269 2252 if (abort_and_exit) 2270 2253 goto out_eagain; ··· 2297 2276 case -EINPROGRESS: 2298 2277 case -EALREADY: 2299 2278 xprt_clear_connecting(xprt); 2300 - current->flags &= ~PF_FSTRANS; 2301 2279 return; 2302 2280 case -EINVAL: 2303 2281 /* Happens, for instance, if the user specified a link ··· 2314 2294 out: 2315 2295 xprt_clear_connecting(xprt); 2316 2296 xprt_wake_pending_tasks(xprt, status); 2317 - current->flags &= ~PF_FSTRANS; 2318 2297 } 2319 2298 2320 2299 /**