Merge git://oss.sgi.com:8090/oss/git/xfs-2.6

+2702 -2555
+597 -505
fs/xfs/linux-2.6/xfs_aops.c
··· 40 40 #include "xfs_rw.h" 41 41 #include "xfs_iomap.h" 42 42 #include <linux/mpage.h> 43 + #include <linux/pagevec.h> 43 44 #include <linux/writeback.h> 44 45 45 46 STATIC void xfs_count_page_state(struct page *, int *, int *, int *); 46 - STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *, 47 - struct writeback_control *wbc, void *, int, int); 48 47 49 48 #if defined(XFS_RW_TRACE) 50 49 void ··· 54 55 int mask) 55 56 { 56 57 xfs_inode_t *ip; 57 - bhv_desc_t *bdp; 58 58 vnode_t *vp = LINVFS_GET_VP(inode); 59 59 loff_t isize = i_size_read(inode); 60 - loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 60 + loff_t offset = page_offset(page); 61 61 int delalloc = -1, unmapped = -1, unwritten = -1; 62 62 63 63 if (page_has_buffers(page)) 64 64 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); 65 65 66 - bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops); 67 - ip = XFS_BHVTOI(bdp); 66 + ip = xfs_vtoi(vp); 68 67 if (!ip->i_rwtrace) 69 68 return; 70 69 ··· 100 103 queue_work(xfsdatad_workqueue, &ioend->io_work); 101 104 } 102 105 106 + /* 107 + * We're now finished for good with this ioend structure. 108 + * Update the page state via the associated buffer_heads, 109 + * release holds on the inode and bio, and finally free 110 + * up memory. Do not use the ioend after this. 111 + */ 103 112 STATIC void 104 113 xfs_destroy_ioend( 105 114 xfs_ioend_t *ioend) 106 115 { 116 + struct buffer_head *bh, *next; 117 + 118 + for (bh = ioend->io_buffer_head; bh; bh = next) { 119 + next = bh->b_private; 120 + bh->b_end_io(bh, ioend->io_uptodate); 121 + } 122 + 107 123 vn_iowake(ioend->io_vnode); 108 124 mempool_free(ioend, xfs_ioend_pool); 109 125 } 110 126 111 127 /* 128 + * Buffered IO write completion for delayed allocate extents. 129 + * TODO: Update ondisk isize now that we know the file data 130 + * has been flushed (i.e. the notorious "NULL file" problem). 131 + */ 132 + STATIC void 133 + xfs_end_bio_delalloc( 134 + void *data) 135 + { 136 + xfs_ioend_t *ioend = data; 137 + 138 + xfs_destroy_ioend(ioend); 139 + } 140 + 141 + /* 142 + * Buffered IO write completion for regular, written extents. 143 + */ 144 + STATIC void 145 + xfs_end_bio_written( 146 + void *data) 147 + { 148 + xfs_ioend_t *ioend = data; 149 + 150 + xfs_destroy_ioend(ioend); 151 + } 152 + 153 + /* 154 + * IO write completion for unwritten extents. 155 + * 112 156 * Issue transactions to convert a buffer range from unwritten 113 157 * to written extents. 
114 158 */ ··· 161 123 vnode_t *vp = ioend->io_vnode; 162 124 xfs_off_t offset = ioend->io_offset; 163 125 size_t size = ioend->io_size; 164 - struct buffer_head *bh, *next; 165 126 int error; 166 127 167 128 if (ioend->io_uptodate) 168 129 VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error); 169 - 170 - /* ioend->io_buffer_head is only non-NULL for buffered I/O */ 171 - for (bh = ioend->io_buffer_head; bh; bh = next) { 172 - next = bh->b_private; 173 - 174 - bh->b_end_io = NULL; 175 - clear_buffer_unwritten(bh); 176 - end_buffer_async_write(bh, ioend->io_uptodate); 177 - } 178 - 179 130 xfs_destroy_ioend(ioend); 180 131 } 181 132 ··· 176 149 */ 177 150 STATIC xfs_ioend_t * 178 151 xfs_alloc_ioend( 179 - struct inode *inode) 152 + struct inode *inode, 153 + unsigned int type) 180 154 { 181 155 xfs_ioend_t *ioend; 182 156 ··· 190 162 */ 191 163 atomic_set(&ioend->io_remaining, 1); 192 164 ioend->io_uptodate = 1; /* cleared if any I/O fails */ 165 + ioend->io_list = NULL; 166 + ioend->io_type = type; 193 167 ioend->io_vnode = LINVFS_GET_VP(inode); 194 168 ioend->io_buffer_head = NULL; 169 + ioend->io_buffer_tail = NULL; 195 170 atomic_inc(&ioend->io_vnode->v_iocount); 196 171 ioend->io_offset = 0; 197 172 ioend->io_size = 0; 198 173 199 - INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); 174 + if (type == IOMAP_UNWRITTEN) 175 + INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); 176 + else if (type == IOMAP_DELAY) 177 + INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); 178 + else 179 + INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); 200 180 201 181 return ioend; 202 - } 203 - 204 - void 205 - linvfs_unwritten_done( 206 - struct buffer_head *bh, 207 - int uptodate) 208 - { 209 - xfs_ioend_t *ioend = bh->b_private; 210 - static spinlock_t unwritten_done_lock = SPIN_LOCK_UNLOCKED; 211 - unsigned long flags; 212 - 213 - ASSERT(buffer_unwritten(bh)); 214 - bh->b_end_io = NULL; 215 - 216 - if (!uptodate) 217 - ioend->io_uptodate = 0; 218 - 219 - /* 220 - * Deep magic here. We reuse b_private in the buffer_heads to build 221 - * a chain for completing the I/O from user context after we've issued 222 - * a transaction to convert the unwritten extent. 223 - */ 224 - spin_lock_irqsave(&unwritten_done_lock, flags); 225 - bh->b_private = ioend->io_buffer_head; 226 - ioend->io_buffer_head = bh; 227 - spin_unlock_irqrestore(&unwritten_done_lock, flags); 228 - 229 - xfs_finish_ioend(ioend); 230 182 } 231 183 232 184 STATIC int ··· 226 218 return -error; 227 219 } 228 220 229 - /* 230 - * Finds the corresponding mapping in block @map array of the 231 - * given @offset within a @page. 232 - */ 233 - STATIC xfs_iomap_t * 234 - xfs_offset_to_map( 235 - struct page *page, 221 + STATIC inline int 222 + xfs_iomap_valid( 236 223 xfs_iomap_t *iomapp, 237 - unsigned long offset) 224 + loff_t offset) 238 225 { 239 - loff_t full_offset; /* offset from start of file */ 226 + return offset >= iomapp->iomap_offset && 227 + offset < iomapp->iomap_offset + iomapp->iomap_bsize; 228 + } 240 229 241 - ASSERT(offset < PAGE_CACHE_SIZE); 230 + /* 231 + * BIO completion handler for buffered IO. 
232 + */ 233 + STATIC int 234 + xfs_end_bio( 235 + struct bio *bio, 236 + unsigned int bytes_done, 237 + int error) 238 + { 239 + xfs_ioend_t *ioend = bio->bi_private; 242 240 243 - full_offset = page->index; /* NB: using 64bit number */ 244 - full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */ 245 - full_offset += offset; /* offset from page start */ 241 + if (bio->bi_size) 242 + return 1; 246 243 247 - if (full_offset < iomapp->iomap_offset) 248 - return NULL; 249 - if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset) 250 - return iomapp; 251 - return NULL; 244 + ASSERT(ioend); 245 + ASSERT(atomic_read(&bio->bi_cnt) >= 1); 246 + 247 + /* Toss bio and pass work off to an xfsdatad thread */ 248 + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 249 + ioend->io_uptodate = 0; 250 + bio->bi_private = NULL; 251 + bio->bi_end_io = NULL; 252 + 253 + bio_put(bio); 254 + xfs_finish_ioend(ioend); 255 + return 0; 256 + } 257 + 258 + STATIC void 259 + xfs_submit_ioend_bio( 260 + xfs_ioend_t *ioend, 261 + struct bio *bio) 262 + { 263 + atomic_inc(&ioend->io_remaining); 264 + 265 + bio->bi_private = ioend; 266 + bio->bi_end_io = xfs_end_bio; 267 + 268 + submit_bio(WRITE, bio); 269 + ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP)); 270 + bio_put(bio); 271 + } 272 + 273 + STATIC struct bio * 274 + xfs_alloc_ioend_bio( 275 + struct buffer_head *bh) 276 + { 277 + struct bio *bio; 278 + int nvecs = bio_get_nr_vecs(bh->b_bdev); 279 + 280 + do { 281 + bio = bio_alloc(GFP_NOIO, nvecs); 282 + nvecs >>= 1; 283 + } while (!bio); 284 + 285 + ASSERT(bio->bi_private == NULL); 286 + bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 287 + bio->bi_bdev = bh->b_bdev; 288 + bio_get(bio); 289 + return bio; 290 + } 291 + 292 + STATIC void 293 + xfs_start_buffer_writeback( 294 + struct buffer_head *bh) 295 + { 296 + ASSERT(buffer_mapped(bh)); 297 + ASSERT(buffer_locked(bh)); 298 + ASSERT(!buffer_delay(bh)); 299 + ASSERT(!buffer_unwritten(bh)); 300 + 301 + mark_buffer_async_write(bh); 302 + set_buffer_uptodate(bh); 303 + clear_buffer_dirty(bh); 304 + } 305 + 306 + STATIC void 307 + xfs_start_page_writeback( 308 + struct page *page, 309 + struct writeback_control *wbc, 310 + int clear_dirty, 311 + int buffers) 312 + { 313 + ASSERT(PageLocked(page)); 314 + ASSERT(!PageWriteback(page)); 315 + set_page_writeback(page); 316 + if (clear_dirty) 317 + clear_page_dirty(page); 318 + unlock_page(page); 319 + if (!buffers) { 320 + end_page_writeback(page); 321 + wbc->pages_skipped++; /* We didn't write this page */ 322 + } 323 + } 324 + 325 + static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) 326 + { 327 + return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 328 + } 329 + 330 + /* 331 + * Submit all of the bios for all of the ioends we have saved up, 332 + * covering the initial writepage page and also any probed pages. 
333 + */ 334 + STATIC void 335 + xfs_submit_ioend( 336 + xfs_ioend_t *ioend) 337 + { 338 + xfs_ioend_t *next; 339 + struct buffer_head *bh; 340 + struct bio *bio; 341 + sector_t lastblock = 0; 342 + 343 + do { 344 + next = ioend->io_list; 345 + bio = NULL; 346 + 347 + for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { 348 + xfs_start_buffer_writeback(bh); 349 + 350 + if (!bio) { 351 + retry: 352 + bio = xfs_alloc_ioend_bio(bh); 353 + } else if (bh->b_blocknr != lastblock + 1) { 354 + xfs_submit_ioend_bio(ioend, bio); 355 + goto retry; 356 + } 357 + 358 + if (bio_add_buffer(bio, bh) != bh->b_size) { 359 + xfs_submit_ioend_bio(ioend, bio); 360 + goto retry; 361 + } 362 + 363 + lastblock = bh->b_blocknr; 364 + } 365 + if (bio) 366 + xfs_submit_ioend_bio(ioend, bio); 367 + xfs_finish_ioend(ioend); 368 + } while ((ioend = next) != NULL); 369 + } 370 + 371 + /* 372 + * Cancel submission of all buffer_heads so far in this endio. 373 + * Toss the endio too. Only ever called for the initial page 374 + * in a writepage request, so only ever one page. 375 + */ 376 + STATIC void 377 + xfs_cancel_ioend( 378 + xfs_ioend_t *ioend) 379 + { 380 + xfs_ioend_t *next; 381 + struct buffer_head *bh, *next_bh; 382 + 383 + do { 384 + next = ioend->io_list; 385 + bh = ioend->io_buffer_head; 386 + do { 387 + next_bh = bh->b_private; 388 + clear_buffer_async_write(bh); 389 + unlock_buffer(bh); 390 + } while ((bh = next_bh) != NULL); 391 + 392 + vn_iowake(ioend->io_vnode); 393 + mempool_free(ioend, xfs_ioend_pool); 394 + } while ((ioend = next) != NULL); 395 + } 396 + 397 + /* 398 + * Test to see if we've been building up a completion structure for 399 + * earlier buffers -- if so, we try to append to this ioend if we 400 + * can, otherwise we finish off any current ioend and start another. 401 + * Return true if we've finished the given ioend. 
402 + */ 403 + STATIC void 404 + xfs_add_to_ioend( 405 + struct inode *inode, 406 + struct buffer_head *bh, 407 + xfs_off_t offset, 408 + unsigned int type, 409 + xfs_ioend_t **result, 410 + int need_ioend) 411 + { 412 + xfs_ioend_t *ioend = *result; 413 + 414 + if (!ioend || need_ioend || type != ioend->io_type) { 415 + xfs_ioend_t *previous = *result; 416 + 417 + ioend = xfs_alloc_ioend(inode, type); 418 + ioend->io_offset = offset; 419 + ioend->io_buffer_head = bh; 420 + ioend->io_buffer_tail = bh; 421 + if (previous) 422 + previous->io_list = ioend; 423 + *result = ioend; 424 + } else { 425 + ioend->io_buffer_tail->b_private = bh; 426 + ioend->io_buffer_tail = bh; 427 + } 428 + 429 + bh->b_private = NULL; 430 + ioend->io_size += bh->b_size; 252 431 } 253 432 254 433 STATIC void 255 434 xfs_map_at_offset( 256 - struct page *page, 257 435 struct buffer_head *bh, 258 - unsigned long offset, 436 + loff_t offset, 259 437 int block_bits, 260 438 xfs_iomap_t *iomapp) 261 439 { 262 440 xfs_daddr_t bn; 263 - loff_t delta; 264 441 int sector_shift; 265 442 266 443 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); 267 444 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); 268 445 ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); 269 446 270 - delta = page->index; 271 - delta <<= PAGE_CACHE_SHIFT; 272 - delta += offset; 273 - delta -= iomapp->iomap_offset; 274 - delta >>= block_bits; 275 - 276 447 sector_shift = block_bits - BBSHIFT; 277 - bn = iomapp->iomap_bn >> sector_shift; 278 - bn += delta; 279 - BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME)); 448 + bn = (iomapp->iomap_bn >> sector_shift) + 449 + ((offset - iomapp->iomap_offset) >> block_bits); 450 + 451 + ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME)); 280 452 ASSERT((bn << sector_shift) >= iomapp->iomap_bn); 281 453 282 454 lock_buffer(bh); 283 455 bh->b_blocknr = bn; 284 - bh->b_bdev = iomapp->iomap_target->pbr_bdev; 456 + bh->b_bdev = iomapp->iomap_target->bt_bdev; 285 457 set_buffer_mapped(bh); 286 458 clear_buffer_delay(bh); 459 + clear_buffer_unwritten(bh); 287 460 } 288 461 289 462 /* 290 - * Look for a page at index which is unlocked and contains our 291 - * unwritten extent flagged buffers at its head. Returns page 292 - * locked and with an extra reference count, and length of the 293 - * unwritten extent component on this page that we can write, 294 - * in units of filesystem blocks. 
295 - */ 296 - STATIC struct page * 297 - xfs_probe_unwritten_page( 298 - struct address_space *mapping, 299 - pgoff_t index, 300 - xfs_iomap_t *iomapp, 301 - xfs_ioend_t *ioend, 302 - unsigned long max_offset, 303 - unsigned long *fsbs, 304 - unsigned int bbits) 305 - { 306 - struct page *page; 307 - 308 - page = find_trylock_page(mapping, index); 309 - if (!page) 310 - return NULL; 311 - if (PageWriteback(page)) 312 - goto out; 313 - 314 - if (page->mapping && page_has_buffers(page)) { 315 - struct buffer_head *bh, *head; 316 - unsigned long p_offset = 0; 317 - 318 - *fsbs = 0; 319 - bh = head = page_buffers(page); 320 - do { 321 - if (!buffer_unwritten(bh) || !buffer_uptodate(bh)) 322 - break; 323 - if (!xfs_offset_to_map(page, iomapp, p_offset)) 324 - break; 325 - if (p_offset >= max_offset) 326 - break; 327 - xfs_map_at_offset(page, bh, p_offset, bbits, iomapp); 328 - set_buffer_unwritten_io(bh); 329 - bh->b_private = ioend; 330 - p_offset += bh->b_size; 331 - (*fsbs)++; 332 - } while ((bh = bh->b_this_page) != head); 333 - 334 - if (p_offset) 335 - return page; 336 - } 337 - 338 - out: 339 - unlock_page(page); 340 - return NULL; 341 - } 342 - 343 - /* 344 - * Look for a page at index which is unlocked and not mapped 345 - * yet - clustering for mmap write case. 463 + * Look for a page at index that is suitable for clustering. 346 464 */ 347 465 STATIC unsigned int 348 - xfs_probe_unmapped_page( 349 - struct address_space *mapping, 350 - pgoff_t index, 351 - unsigned int pg_offset) 466 + xfs_probe_page( 467 + struct page *page, 468 + unsigned int pg_offset, 469 + int mapped) 352 470 { 353 - struct page *page; 354 471 int ret = 0; 355 472 356 - page = find_trylock_page(mapping, index); 357 - if (!page) 358 - return 0; 359 473 if (PageWriteback(page)) 360 - goto out; 474 + return 0; 361 475 362 476 if (page->mapping && PageDirty(page)) { 363 477 if (page_has_buffers(page)) { ··· 487 357 488 358 bh = head = page_buffers(page); 489 359 do { 490 - if (buffer_mapped(bh) || !buffer_uptodate(bh)) 360 + if (!buffer_uptodate(bh)) 361 + break; 362 + if (mapped != buffer_mapped(bh)) 491 363 break; 492 364 ret += bh->b_size; 493 365 if (ret >= pg_offset) 494 366 break; 495 367 } while ((bh = bh->b_this_page) != head); 496 368 } else 497 - ret = PAGE_CACHE_SIZE; 369 + ret = mapped ? 0 : PAGE_CACHE_SIZE; 498 370 } 499 371 500 - out: 501 - unlock_page(page); 502 372 return ret; 503 373 } 504 374 505 - STATIC unsigned int 506 - xfs_probe_unmapped_cluster( 375 + STATIC size_t 376 + xfs_probe_cluster( 507 377 struct inode *inode, 508 378 struct page *startpage, 509 379 struct buffer_head *bh, 510 - struct buffer_head *head) 380 + struct buffer_head *head, 381 + int mapped) 511 382 { 383 + struct pagevec pvec; 512 384 pgoff_t tindex, tlast, tloff; 513 - unsigned int pg_offset, len, total = 0; 514 - struct address_space *mapping = inode->i_mapping; 385 + size_t total = 0; 386 + int done = 0, i; 515 387 516 388 /* First sum forwards in this page */ 517 389 do { 518 - if (buffer_mapped(bh)) 519 - break; 390 + if (mapped != buffer_mapped(bh)) 391 + return total; 520 392 total += bh->b_size; 521 393 } while ((bh = bh->b_this_page) != head); 522 394 523 - /* If we reached the end of the page, sum forwards in 524 - * following pages. 
525 - */ 526 - if (bh == head) { 527 - tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 528 - /* Prune this back to avoid pathological behavior */ 529 - tloff = min(tlast, startpage->index + 64); 530 - for (tindex = startpage->index + 1; tindex < tloff; tindex++) { 531 - len = xfs_probe_unmapped_page(mapping, tindex, 532 - PAGE_CACHE_SIZE); 533 - if (!len) 534 - return total; 395 + /* if we reached the end of the page, sum forwards in following pages */ 396 + tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 397 + tindex = startpage->index + 1; 398 + 399 + /* Prune this back to avoid pathological behavior */ 400 + tloff = min(tlast, startpage->index + 64); 401 + 402 + pagevec_init(&pvec, 0); 403 + while (!done && tindex <= tloff) { 404 + unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 405 + 406 + if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 407 + break; 408 + 409 + for (i = 0; i < pagevec_count(&pvec); i++) { 410 + struct page *page = pvec.pages[i]; 411 + size_t pg_offset, len = 0; 412 + 413 + if (tindex == tlast) { 414 + pg_offset = 415 + i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 416 + if (!pg_offset) { 417 + done = 1; 418 + break; 419 + } 420 + } else 421 + pg_offset = PAGE_CACHE_SIZE; 422 + 423 + if (page->index == tindex && !TestSetPageLocked(page)) { 424 + len = xfs_probe_page(page, pg_offset, mapped); 425 + unlock_page(page); 426 + } 427 + 428 + if (!len) { 429 + done = 1; 430 + break; 431 + } 432 + 535 433 total += len; 434 + tindex++; 536 435 } 537 - if (tindex == tlast && 538 - (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { 539 - total += xfs_probe_unmapped_page(mapping, 540 - tindex, pg_offset); 541 - } 436 + 437 + pagevec_release(&pvec); 438 + cond_resched(); 542 439 } 440 + 543 441 return total; 544 442 } 545 443 546 444 /* 547 - * Probe for a given page (index) in the inode and test if it is delayed 548 - * and without unwritten buffers. Returns page locked and with an extra 549 - * reference count. 445 + * Test if a given page is suitable for writing as part of an unwritten 446 + * or delayed allocate extent. 
550 447 */ 551 - STATIC struct page * 552 - xfs_probe_delalloc_page( 553 - struct inode *inode, 554 - pgoff_t index) 448 + STATIC int 449 + xfs_is_delayed_page( 450 + struct page *page, 451 + unsigned int type) 555 452 { 556 - struct page *page; 557 - 558 - page = find_trylock_page(inode->i_mapping, index); 559 - if (!page) 560 - return NULL; 561 453 if (PageWriteback(page)) 562 - goto out; 454 + return 0; 563 455 564 456 if (page->mapping && page_has_buffers(page)) { 565 457 struct buffer_head *bh, *head; ··· 589 437 590 438 bh = head = page_buffers(page); 591 439 do { 592 - if (buffer_unwritten(bh)) { 593 - acceptable = 0; 440 + if (buffer_unwritten(bh)) 441 + acceptable = (type == IOMAP_UNWRITTEN); 442 + else if (buffer_delay(bh)) 443 + acceptable = (type == IOMAP_DELAY); 444 + else if (buffer_mapped(bh)) 445 + acceptable = (type == 0); 446 + else 594 447 break; 595 - } else if (buffer_delay(bh)) { 596 - acceptable = 1; 597 - } 598 448 } while ((bh = bh->b_this_page) != head); 599 449 600 450 if (acceptable) 601 - return page; 451 + return 1; 602 452 } 603 453 604 - out: 605 - unlock_page(page); 606 - return NULL; 607 - } 608 - 609 - STATIC int 610 - xfs_map_unwritten( 611 - struct inode *inode, 612 - struct page *start_page, 613 - struct buffer_head *head, 614 - struct buffer_head *curr, 615 - unsigned long p_offset, 616 - int block_bits, 617 - xfs_iomap_t *iomapp, 618 - struct writeback_control *wbc, 619 - int startio, 620 - int all_bh) 621 - { 622 - struct buffer_head *bh = curr; 623 - xfs_iomap_t *tmp; 624 - xfs_ioend_t *ioend; 625 - loff_t offset; 626 - unsigned long nblocks = 0; 627 - 628 - offset = start_page->index; 629 - offset <<= PAGE_CACHE_SHIFT; 630 - offset += p_offset; 631 - 632 - ioend = xfs_alloc_ioend(inode); 633 - 634 - /* First map forwards in the page consecutive buffers 635 - * covering this unwritten extent 636 - */ 637 - do { 638 - if (!buffer_unwritten(bh)) 639 - break; 640 - tmp = xfs_offset_to_map(start_page, iomapp, p_offset); 641 - if (!tmp) 642 - break; 643 - xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp); 644 - set_buffer_unwritten_io(bh); 645 - bh->b_private = ioend; 646 - p_offset += bh->b_size; 647 - nblocks++; 648 - } while ((bh = bh->b_this_page) != head); 649 - 650 - atomic_add(nblocks, &ioend->io_remaining); 651 - 652 - /* If we reached the end of the page, map forwards in any 653 - * following pages which are also covered by this extent. 
654 - */ 655 - if (bh == head) { 656 - struct address_space *mapping = inode->i_mapping; 657 - pgoff_t tindex, tloff, tlast; 658 - unsigned long bs; 659 - unsigned int pg_offset, bbits = inode->i_blkbits; 660 - struct page *page; 661 - 662 - tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 663 - tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT; 664 - tloff = min(tlast, tloff); 665 - for (tindex = start_page->index + 1; tindex < tloff; tindex++) { 666 - page = xfs_probe_unwritten_page(mapping, 667 - tindex, iomapp, ioend, 668 - PAGE_CACHE_SIZE, &bs, bbits); 669 - if (!page) 670 - break; 671 - nblocks += bs; 672 - atomic_add(bs, &ioend->io_remaining); 673 - xfs_convert_page(inode, page, iomapp, wbc, ioend, 674 - startio, all_bh); 675 - /* stop if converting the next page might add 676 - * enough blocks that the corresponding byte 677 - * count won't fit in our ulong page buf length */ 678 - if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits)) 679 - goto enough; 680 - } 681 - 682 - if (tindex == tlast && 683 - (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) { 684 - page = xfs_probe_unwritten_page(mapping, 685 - tindex, iomapp, ioend, 686 - pg_offset, &bs, bbits); 687 - if (page) { 688 - nblocks += bs; 689 - atomic_add(bs, &ioend->io_remaining); 690 - xfs_convert_page(inode, page, iomapp, wbc, ioend, 691 - startio, all_bh); 692 - if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits)) 693 - goto enough; 694 - } 695 - } 696 - } 697 - 698 - enough: 699 - ioend->io_size = (xfs_off_t)nblocks << block_bits; 700 - ioend->io_offset = offset; 701 - xfs_finish_ioend(ioend); 702 454 return 0; 703 - } 704 - 705 - STATIC void 706 - xfs_submit_page( 707 - struct page *page, 708 - struct writeback_control *wbc, 709 - struct buffer_head *bh_arr[], 710 - int bh_count, 711 - int probed_page, 712 - int clear_dirty) 713 - { 714 - struct buffer_head *bh; 715 - int i; 716 - 717 - BUG_ON(PageWriteback(page)); 718 - if (bh_count) 719 - set_page_writeback(page); 720 - if (clear_dirty) 721 - clear_page_dirty(page); 722 - unlock_page(page); 723 - 724 - if (bh_count) { 725 - for (i = 0; i < bh_count; i++) { 726 - bh = bh_arr[i]; 727 - mark_buffer_async_write(bh); 728 - if (buffer_unwritten(bh)) 729 - set_buffer_unwritten_io(bh); 730 - set_buffer_uptodate(bh); 731 - clear_buffer_dirty(bh); 732 - } 733 - 734 - for (i = 0; i < bh_count; i++) 735 - submit_bh(WRITE, bh_arr[i]); 736 - 737 - if (probed_page && clear_dirty) 738 - wbc->nr_to_write--; /* Wrote an "extra" page */ 739 - } 740 455 } 741 456 742 457 /* ··· 612 593 * delalloc/unwritten pages only, for the original page it is possible 613 594 * that the page has no mapping at all. 
614 595 */ 615 - STATIC void 596 + STATIC int 616 597 xfs_convert_page( 617 598 struct inode *inode, 618 599 struct page *page, 619 - xfs_iomap_t *iomapp, 600 + loff_t tindex, 601 + xfs_iomap_t *mp, 602 + xfs_ioend_t **ioendp, 620 603 struct writeback_control *wbc, 621 - void *private, 622 604 int startio, 623 605 int all_bh) 624 606 { 625 - struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; 626 - xfs_iomap_t *mp = iomapp, *tmp; 627 - unsigned long offset, end_offset; 628 - int index = 0; 607 + struct buffer_head *bh, *head; 608 + xfs_off_t end_offset; 609 + unsigned long p_offset; 610 + unsigned int type; 629 611 int bbits = inode->i_blkbits; 630 612 int len, page_dirty; 613 + int count = 0, done = 0, uptodate = 1; 614 + xfs_off_t offset = page_offset(page); 631 615 632 - end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)); 616 + if (page->index != tindex) 617 + goto fail; 618 + if (TestSetPageLocked(page)) 619 + goto fail; 620 + if (PageWriteback(page)) 621 + goto fail_unlock_page; 622 + if (page->mapping != inode->i_mapping) 623 + goto fail_unlock_page; 624 + if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) 625 + goto fail_unlock_page; 633 626 634 627 /* 635 628 * page_dirty is initially a count of buffers on the page before 636 629 * EOF and is decrememted as we move each into a cleanable state. 630 + * 631 + * Derivation: 632 + * 633 + * End offset is the highest offset that this page should represent. 634 + * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) 635 + * will evaluate non-zero and be less than PAGE_CACHE_SIZE and 636 + * hence give us the correct page_dirty count. On any other page, 637 + * it will be zero and in that case we need page_dirty to be the 638 + * count of buffers on the page. 637 639 */ 638 - len = 1 << inode->i_blkbits; 639 - end_offset = max(end_offset, PAGE_CACHE_SIZE); 640 - end_offset = roundup(end_offset, len); 641 - page_dirty = end_offset / len; 640 + end_offset = min_t(unsigned long long, 641 + (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, 642 + i_size_read(inode)); 642 643 643 - offset = 0; 644 + len = 1 << inode->i_blkbits; 645 + p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), 646 + PAGE_CACHE_SIZE); 647 + p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; 648 + page_dirty = p_offset / len; 649 + 644 650 bh = head = page_buffers(page); 645 651 do { 646 652 if (offset >= end_offset) 647 653 break; 648 - if (!(PageUptodate(page) || buffer_uptodate(bh))) 649 - continue; 650 - if (buffer_mapped(bh) && all_bh && 651 - !(buffer_unwritten(bh) || buffer_delay(bh))) { 652 - if (startio) { 653 - lock_buffer(bh); 654 - bh_arr[index++] = bh; 655 - page_dirty--; 656 - } 654 + if (!buffer_uptodate(bh)) 655 + uptodate = 0; 656 + if (!(PageUptodate(page) || buffer_uptodate(bh))) { 657 + done = 1; 657 658 continue; 658 659 } 659 - tmp = xfs_offset_to_map(page, mp, offset); 660 - if (!tmp) 661 - continue; 662 - ASSERT(!(tmp->iomap_flags & IOMAP_HOLE)); 663 - ASSERT(!(tmp->iomap_flags & IOMAP_DELAY)); 664 660 665 - /* If this is a new unwritten extent buffer (i.e. one 666 - * that we haven't passed in private data for, we must 667 - * now map this buffer too. 668 - */ 669 - if (buffer_unwritten(bh) && !bh->b_end_io) { 670 - ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN); 671 - xfs_map_unwritten(inode, page, head, bh, offset, 672 - bbits, tmp, wbc, startio, all_bh); 673 - } else if (! 
(buffer_unwritten(bh) && buffer_locked(bh))) { 674 - xfs_map_at_offset(page, bh, offset, bbits, tmp); 675 - if (buffer_unwritten(bh)) { 676 - set_buffer_unwritten_io(bh); 677 - bh->b_private = private; 678 - ASSERT(private); 661 + if (buffer_unwritten(bh) || buffer_delay(bh)) { 662 + if (buffer_unwritten(bh)) 663 + type = IOMAP_UNWRITTEN; 664 + else 665 + type = IOMAP_DELAY; 666 + 667 + if (!xfs_iomap_valid(mp, offset)) { 668 + done = 1; 669 + continue; 670 + } 671 + 672 + ASSERT(!(mp->iomap_flags & IOMAP_HOLE)); 673 + ASSERT(!(mp->iomap_flags & IOMAP_DELAY)); 674 + 675 + xfs_map_at_offset(bh, offset, bbits, mp); 676 + if (startio) { 677 + xfs_add_to_ioend(inode, bh, offset, 678 + type, ioendp, done); 679 + } else { 680 + set_buffer_dirty(bh); 681 + unlock_buffer(bh); 682 + mark_buffer_dirty(bh); 683 + } 684 + page_dirty--; 685 + count++; 686 + } else { 687 + type = 0; 688 + if (buffer_mapped(bh) && all_bh && startio) { 689 + lock_buffer(bh); 690 + xfs_add_to_ioend(inode, bh, offset, 691 + type, ioendp, done); 692 + count++; 693 + page_dirty--; 694 + } else { 695 + done = 1; 679 696 } 680 697 } 681 - if (startio) { 682 - bh_arr[index++] = bh; 683 - } else { 684 - set_buffer_dirty(bh); 685 - unlock_buffer(bh); 686 - mark_buffer_dirty(bh); 687 - } 688 - page_dirty--; 689 698 } while (offset += len, (bh = bh->b_this_page) != head); 690 699 691 - if (startio && index) { 692 - xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty); 693 - } else { 694 - unlock_page(page); 700 + if (uptodate && bh == head) 701 + SetPageUptodate(page); 702 + 703 + if (startio) { 704 + if (count) { 705 + struct backing_dev_info *bdi; 706 + 707 + bdi = inode->i_mapping->backing_dev_info; 708 + if (bdi_write_congested(bdi)) { 709 + wbc->encountered_congestion = 1; 710 + done = 1; 711 + } else if (--wbc->nr_to_write <= 0) { 712 + done = 1; 713 + } 714 + } 715 + xfs_start_page_writeback(page, wbc, !page_dirty, count); 695 716 } 717 + 718 + return done; 719 + fail_unlock_page: 720 + unlock_page(page); 721 + fail: 722 + return 1; 696 723 } 697 724 698 725 /* ··· 750 685 struct inode *inode, 751 686 pgoff_t tindex, 752 687 xfs_iomap_t *iomapp, 688 + xfs_ioend_t **ioendp, 753 689 struct writeback_control *wbc, 754 690 int startio, 755 691 int all_bh, 756 692 pgoff_t tlast) 757 693 { 758 - struct page *page; 694 + struct pagevec pvec; 695 + int done = 0, i; 759 696 760 - for (; tindex <= tlast; tindex++) { 761 - page = xfs_probe_delalloc_page(inode, tindex); 762 - if (!page) 697 + pagevec_init(&pvec, 0); 698 + while (!done && tindex <= tlast) { 699 + unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 700 + 701 + if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 763 702 break; 764 - xfs_convert_page(inode, page, iomapp, wbc, NULL, 765 - startio, all_bh); 703 + 704 + for (i = 0; i < pagevec_count(&pvec); i++) { 705 + done = xfs_convert_page(inode, pvec.pages[i], tindex++, 706 + iomapp, ioendp, wbc, startio, all_bh); 707 + if (done) 708 + break; 709 + } 710 + 711 + pagevec_release(&pvec); 712 + cond_resched(); 766 713 } 767 714 } 768 715 ··· 805 728 int startio, 806 729 int unmapped) /* also implies page uptodate */ 807 730 { 808 - struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; 809 - xfs_iomap_t *iomp, iomap; 731 + struct buffer_head *bh, *head; 732 + xfs_iomap_t iomap; 733 + xfs_ioend_t *ioend = NULL, *iohead = NULL; 810 734 loff_t offset; 811 735 unsigned long p_offset = 0; 736 + unsigned int type; 812 737 __uint64_t end_offset; 813 738 pgoff_t end_index, last_index, tlast; 814 - int len, 
err, i, cnt = 0, uptodate = 1; 815 - int flags; 816 - int page_dirty; 739 + ssize_t size, len; 740 + int flags, err, iomap_valid = 0, uptodate = 1; 741 + int page_dirty, count = 0, trylock_flag = 0; 742 + int all_bh = unmapped; 817 743 818 744 /* wait for other IO threads? */ 819 - flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK; 745 + if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)) 746 + trylock_flag |= BMAPI_TRYLOCK; 820 747 821 748 /* Is this page beyond the end of the file? */ 822 749 offset = i_size_read(inode); ··· 835 754 } 836 755 } 837 756 838 - end_offset = min_t(unsigned long long, 839 - (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); 840 - offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 841 - 842 757 /* 843 758 * page_dirty is initially a count of buffers on the page before 844 759 * EOF and is decrememted as we move each into a cleanable state. 845 - */ 760 + * 761 + * Derivation: 762 + * 763 + * End offset is the highest offset that this page should represent. 764 + * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) 765 + * will evaluate non-zero and be less than PAGE_CACHE_SIZE and 766 + * hence give us the correct page_dirty count. On any other page, 767 + * it will be zero and in that case we need page_dirty to be the 768 + * count of buffers on the page. 769 + */ 770 + end_offset = min_t(unsigned long long, 771 + (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); 846 772 len = 1 << inode->i_blkbits; 847 - p_offset = max(p_offset, PAGE_CACHE_SIZE); 848 - p_offset = roundup(p_offset, len); 773 + p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), 774 + PAGE_CACHE_SIZE); 775 + p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; 849 776 page_dirty = p_offset / len; 850 777 851 - iomp = NULL; 852 - p_offset = 0; 853 778 bh = head = page_buffers(page); 779 + offset = page_offset(page); 780 + flags = -1; 781 + type = 0; 782 + 783 + /* TODO: cleanup count and page_dirty */ 854 784 855 785 do { 856 786 if (offset >= end_offset) 857 787 break; 858 788 if (!buffer_uptodate(bh)) 859 789 uptodate = 0; 860 - if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) 790 + if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { 791 + /* 792 + * the iomap is actually still valid, but the ioend 793 + * isn't. shouldn't happen too often. 794 + */ 795 + iomap_valid = 0; 861 796 continue; 862 - 863 - if (iomp) { 864 - iomp = xfs_offset_to_map(page, &iomap, p_offset); 865 797 } 798 + 799 + if (iomap_valid) 800 + iomap_valid = xfs_iomap_valid(&iomap, offset); 866 801 867 802 /* 868 803 * First case, map an unwritten extent and prepare for 869 804 * extent state conversion transaction on completion. 870 - */ 871 - if (buffer_unwritten(bh)) { 872 - if (!startio) 873 - continue; 874 - if (!iomp) { 875 - err = xfs_map_blocks(inode, offset, len, &iomap, 876 - BMAPI_WRITE|BMAPI_IGNSTATE); 877 - if (err) { 878 - goto error; 879 - } 880 - iomp = xfs_offset_to_map(page, &iomap, 881 - p_offset); 882 - } 883 - if (iomp) { 884 - if (!bh->b_end_io) { 885 - err = xfs_map_unwritten(inode, page, 886 - head, bh, p_offset, 887 - inode->i_blkbits, iomp, 888 - wbc, startio, unmapped); 889 - if (err) { 890 - goto error; 891 - } 892 - } else { 893 - set_bit(BH_Lock, &bh->b_state); 894 - } 895 - BUG_ON(!buffer_locked(bh)); 896 - bh_arr[cnt++] = bh; 897 - page_dirty--; 898 - } 899 - /* 805 + * 900 806 * Second case, allocate space for a delalloc buffer. 
901 807 * We can return EAGAIN here in the release page case. 902 - */ 903 - } else if (buffer_delay(bh)) { 904 - if (!iomp) { 905 - err = xfs_map_blocks(inode, offset, len, &iomap, 906 - BMAPI_ALLOCATE | flags); 907 - if (err) { 908 - goto error; 909 - } 910 - iomp = xfs_offset_to_map(page, &iomap, 911 - p_offset); 808 + * 809 + * Third case, an unmapped buffer was found, and we are 810 + * in a path where we need to write the whole page out. 811 + */ 812 + if (buffer_unwritten(bh) || buffer_delay(bh) || 813 + ((buffer_uptodate(bh) || PageUptodate(page)) && 814 + !buffer_mapped(bh) && (unmapped || startio))) { 815 + /* 816 + * Make sure we don't use a read-only iomap 817 + */ 818 + if (flags == BMAPI_READ) 819 + iomap_valid = 0; 820 + 821 + if (buffer_unwritten(bh)) { 822 + type = IOMAP_UNWRITTEN; 823 + flags = BMAPI_WRITE|BMAPI_IGNSTATE; 824 + } else if (buffer_delay(bh)) { 825 + type = IOMAP_DELAY; 826 + flags = BMAPI_ALLOCATE; 827 + if (!startio) 828 + flags |= trylock_flag; 829 + } else { 830 + type = IOMAP_NEW; 831 + flags = BMAPI_WRITE|BMAPI_MMAP; 912 832 } 913 - if (iomp) { 914 - xfs_map_at_offset(page, bh, p_offset, 915 - inode->i_blkbits, iomp); 833 + 834 + if (!iomap_valid) { 835 + if (type == IOMAP_NEW) { 836 + size = xfs_probe_cluster(inode, 837 + page, bh, head, 0); 838 + } else { 839 + size = len; 840 + } 841 + 842 + err = xfs_map_blocks(inode, offset, size, 843 + &iomap, flags); 844 + if (err) 845 + goto error; 846 + iomap_valid = xfs_iomap_valid(&iomap, offset); 847 + } 848 + if (iomap_valid) { 849 + xfs_map_at_offset(bh, offset, 850 + inode->i_blkbits, &iomap); 916 851 if (startio) { 917 - bh_arr[cnt++] = bh; 852 + xfs_add_to_ioend(inode, bh, offset, 853 + type, &ioend, 854 + !iomap_valid); 918 855 } else { 919 856 set_buffer_dirty(bh); 920 857 unlock_buffer(bh); 921 858 mark_buffer_dirty(bh); 922 859 } 923 860 page_dirty--; 861 + count++; 862 + } 863 + } else if (buffer_uptodate(bh) && startio) { 864 + /* 865 + * we got here because the buffer is already mapped. 866 + * That means it must already have extents allocated 867 + * underneath it. Map the extent by reading it. 868 + */ 869 + if (!iomap_valid || type != 0) { 870 + flags = BMAPI_READ; 871 + size = xfs_probe_cluster(inode, page, bh, 872 + head, 1); 873 + err = xfs_map_blocks(inode, offset, size, 874 + &iomap, flags); 875 + if (err) 876 + goto error; 877 + iomap_valid = xfs_iomap_valid(&iomap, offset); 878 + } 879 + 880 + type = 0; 881 + if (!test_and_set_bit(BH_Lock, &bh->b_state)) { 882 + ASSERT(buffer_mapped(bh)); 883 + if (iomap_valid) 884 + all_bh = 1; 885 + xfs_add_to_ioend(inode, bh, offset, type, 886 + &ioend, !iomap_valid); 887 + page_dirty--; 888 + count++; 889 + } else { 890 + iomap_valid = 0; 924 891 } 925 892 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && 926 893 (unmapped || startio)) { 927 - 928 - if (!buffer_mapped(bh)) { 929 - int size; 930 - 931 - /* 932 - * Getting here implies an unmapped buffer 933 - * was found, and we are in a path where we 934 - * need to write the whole page out. 
935 - */ 936 - if (!iomp) { 937 - size = xfs_probe_unmapped_cluster( 938 - inode, page, bh, head); 939 - err = xfs_map_blocks(inode, offset, 940 - size, &iomap, 941 - BMAPI_WRITE|BMAPI_MMAP); 942 - if (err) { 943 - goto error; 944 - } 945 - iomp = xfs_offset_to_map(page, &iomap, 946 - p_offset); 947 - } 948 - if (iomp) { 949 - xfs_map_at_offset(page, 950 - bh, p_offset, 951 - inode->i_blkbits, iomp); 952 - if (startio) { 953 - bh_arr[cnt++] = bh; 954 - } else { 955 - set_buffer_dirty(bh); 956 - unlock_buffer(bh); 957 - mark_buffer_dirty(bh); 958 - } 959 - page_dirty--; 960 - } 961 - } else if (startio) { 962 - if (buffer_uptodate(bh) && 963 - !test_and_set_bit(BH_Lock, &bh->b_state)) { 964 - bh_arr[cnt++] = bh; 965 - page_dirty--; 966 - } 967 - } 894 + iomap_valid = 0; 968 895 } 969 - } while (offset += len, p_offset += len, 970 - ((bh = bh->b_this_page) != head)); 896 + 897 + if (!iohead) 898 + iohead = ioend; 899 + 900 + } while (offset += len, ((bh = bh->b_this_page) != head)); 971 901 972 902 if (uptodate && bh == head) 973 903 SetPageUptodate(page); 974 904 975 - if (startio) { 976 - xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty); 977 - } 905 + if (startio) 906 + xfs_start_page_writeback(page, wbc, 1, count); 978 907 979 - if (iomp) { 980 - offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >> 908 + if (ioend && iomap_valid) { 909 + offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >> 981 910 PAGE_CACHE_SHIFT; 982 911 tlast = min_t(pgoff_t, offset, last_index); 983 - xfs_cluster_write(inode, page->index + 1, iomp, wbc, 984 - startio, unmapped, tlast); 912 + xfs_cluster_write(inode, page->index + 1, &iomap, &ioend, 913 + wbc, startio, all_bh, tlast); 985 914 } 915 + 916 + if (iohead) 917 + xfs_submit_ioend(iohead); 986 918 987 919 return page_dirty; 988 920 989 921 error: 990 - for (i = 0; i < cnt; i++) { 991 - unlock_buffer(bh_arr[i]); 992 - } 922 + if (iohead) 923 + xfs_cancel_ioend(iohead); 993 924 994 925 /* 995 926 * If it's delalloc and we have nowhere to put it, ··· 1009 916 * us to try again. 1010 917 */ 1011 918 if (err != -EAGAIN) { 1012 - if (!unmapped) { 919 + if (!unmapped) 1013 920 block_invalidatepage(page, 0); 1014 - } 1015 921 ClearPageUptodate(page); 1016 922 } 1017 923 return err; ··· 1074 982 } 1075 983 1076 984 /* If this is a realtime file, data might be on a new device */ 1077 - bh_result->b_bdev = iomap.iomap_target->pbr_bdev; 985 + bh_result->b_bdev = iomap.iomap_target->bt_bdev; 1078 986 1079 987 /* If we previously allocated a block out beyond eof and 1080 988 * we are now coming back to use it then we will need to ··· 1186 1094 if (error) 1187 1095 return -error; 1188 1096 1189 - iocb->private = xfs_alloc_ioend(inode); 1097 + iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); 1190 1098 1191 1099 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1192 - iomap.iomap_target->pbr_bdev, 1100 + iomap.iomap_target->bt_bdev, 1193 1101 iov, offset, nr_segs, 1194 1102 linvfs_get_blocks_direct, 1195 1103 linvfs_end_io_direct);
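Note on the reworked write path above: buffers needing I/O are no longer collected into a per-page bh_arr[] and pushed out one at a time with submit_bh(); instead xfs_add_to_ioend() chains buffer_heads through b_private onto a typed xfs_ioend, further ioends for the same writepage call are linked through io_list, and xfs_submit_ioend() later walks the whole chain building as few bios as possible out of contiguous blocks. The standalone sketch below models that chaining in plain userspace C; the structs, helper names and the block numbers in main() are simplified stand-ins for illustration only, not the kernel types.

/*
 * Simplified userspace model of the ioend chaining used above.
 * Field names mirror the patch (io_list, io_buffer_head/tail,
 * b_private), but these structs are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {				/* stand-in for struct buffer_head */
	unsigned long	blocknr;	/* disk block number */
	struct buf	*private;	/* next buffer in the ioend chain */
};

struct ioend {				/* stand-in for xfs_ioend_t */
	int		type;		/* extent type of the chained buffers */
	struct ioend	*list;		/* next ioend built for this page */
	struct buf	*head;		/* first chained buffer */
	struct buf	*tail;		/* last chained buffer */
};

/*
 * Mirrors xfs_add_to_ioend(): append the buffer to the current ioend,
 * or start a new one when the extent type changes.
 */
static void add_to_ioend(struct buf *bh, int type, struct ioend **result)
{
	struct ioend *ioend = *result;

	if (!ioend || type != ioend->type) {
		struct ioend *previous = *result;

		ioend = calloc(1, sizeof(*ioend));
		ioend->type = type;
		ioend->head = ioend->tail = bh;
		if (previous)
			previous->list = ioend;
		*result = ioend;
	} else {
		ioend->tail->private = bh;
		ioend->tail = bh;
	}
	bh->private = NULL;
}

/*
 * Mirrors xfs_submit_ioend(): walk each ioend and start a new "bio"
 * whenever the chained blocks stop being contiguous.
 */
static void submit_ioends(struct ioend *ioend)
{
	struct ioend *next;

	for (; ioend; ioend = next) {
		struct buf *bh = ioend->head;
		unsigned long last = 0;
		int open = 0;

		next = ioend->list;
		for (; bh; bh = bh->private) {
			if (!open || bh->blocknr != last + 1) {
				if (open)
					printf("  -> submit_bio\n");
				printf("type %d bio:", ioend->type);
				open = 1;
			}
			printf(" %lu", bh->blocknr);
			last = bh->blocknr;
		}
		if (open)
			printf("  -> submit_bio\n");
		free(ioend);
	}
}

int main(void)
{
	struct buf bufs[] = { {10}, {11}, {12}, {20}, {21} };
	struct ioend *ioend = NULL, *iohead = NULL;
	int i;

	/* first three blocks are one extent type, the last two another */
	for (i = 0; i < 5; i++) {
		add_to_ioend(&bufs[i], i < 3 ? 0 : 1, &ioend);
		if (!iohead)		/* remember the first ioend, as */
			iohead = ioend;	/* the writepage path does      */
	}

	submit_ioends(iohead);
	return 0;
}

Two ioends are built here (one per extent type); the first is submitted as a single bio because its blocks are contiguous, which is exactly the clustering the patch is after.
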
+10
fs/xfs/linux-2.6/xfs_aops.h
···
23 23
24 24  typedef void (*xfs_ioend_func_t)(void *);
25 25
   26 + /*
   27 +  * xfs_ioend struct manages large extent writes for XFS.
   28 +  * It can manage several multi-page bio's at once.
   29 +  */
26 30  typedef struct xfs_ioend {
   31 +        struct xfs_ioend     *io_list;        /* next ioend in chain */
   32 +        unsigned int         io_type;         /* delalloc / unwritten */
27 33         unsigned int         io_uptodate;     /* I/O status register */
28 34         atomic_t             io_remaining;    /* hold count */
29 35         struct vnode         *io_vnode;       /* file being written to */
30 36         struct buffer_head   *io_buffer_head; /* buffer linked list head */
   37 +        struct buffer_head   *io_buffer_tail; /* buffer linked list tail */
31 38         size_t               io_size;         /* size of the extent */
32 39         xfs_off_t            io_offset;       /* offset in the file */
33 40         struct work_struct   io_work;         /* xfsdatad work queue */
34 41  } xfs_ioend_t;
   42 +
   43 + extern struct address_space_operations linvfs_aops;
   44 + extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
35 45
36 46  #endif /* __XFS_IOPS_H__ */
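The io_remaining "hold count" above is what keeps an ioend alive while its bios are in flight: xfs_alloc_ioend() starts it at 1 for the submission path, xfs_submit_ioend_bio() takes an extra reference per bio, and xfs_end_bio()/xfs_finish_ioend() drop references, queueing the completion work only once the count reaches zero. Below is a minimal model of that lifetime; plain ints stand in for atomic_t and the helper names are invented for the example, they only loosely follow the kernel functions they illustrate.

/*
 * Toy model of the xfs_ioend reference counting described above.
 * This is an illustration of the scheme, not the kernel code.
 */
#include <stdio.h>

struct ioend_model {
	int remaining;			/* models io_remaining */
	int uptodate;			/* cleared if any bio fails */
};

static void finish_ioend(struct ioend_model *io)
{
	if (--io->remaining == 0)	/* last reference gone */
		printf("queue completion work, uptodate=%d\n", io->uptodate);
}

static void submit_one_bio(struct ioend_model *io)
{
	io->remaining++;		/* like xfs_submit_ioend_bio() */
}

static void bio_done(struct ioend_model *io, int ok)
{
	if (!ok)
		io->uptodate = 0;
	finish_ioend(io);		/* like xfs_end_bio() */
}

int main(void)
{
	struct ioend_model io = { .remaining = 1, .uptodate = 1 };

	submit_one_bio(&io);		/* two bios issued for this ioend */
	submit_one_bio(&io);

	finish_ioend(&io);		/* submission path drops its hold;  */
					/* nothing runs yet, bios in flight */
	bio_done(&io, 1);		/* first bio completes */
	bio_done(&io, 1);		/* last bio completes -> work queued */
	return 0;
}
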
+661 -702
fs/xfs/linux-2.6/xfs_buf.c
··· 31 31 #include <linux/kthread.h> 32 32 #include "xfs_linux.h" 33 33 34 - STATIC kmem_cache_t *pagebuf_zone; 35 - STATIC kmem_shaker_t pagebuf_shake; 34 + STATIC kmem_zone_t *xfs_buf_zone; 35 + STATIC kmem_shaker_t xfs_buf_shake; 36 + STATIC int xfsbufd(void *); 36 37 STATIC int xfsbufd_wakeup(int, gfp_t); 37 - STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); 38 + STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); 38 39 39 40 STATIC struct workqueue_struct *xfslogd_workqueue; 40 41 struct workqueue_struct *xfsdatad_workqueue; 41 42 42 - #ifdef PAGEBUF_TRACE 43 + #ifdef XFS_BUF_TRACE 43 44 void 44 - pagebuf_trace( 45 - xfs_buf_t *pb, 45 + xfs_buf_trace( 46 + xfs_buf_t *bp, 46 47 char *id, 47 48 void *data, 48 49 void *ra) 49 50 { 50 - ktrace_enter(pagebuf_trace_buf, 51 - pb, id, 52 - (void *)(unsigned long)pb->pb_flags, 53 - (void *)(unsigned long)pb->pb_hold.counter, 54 - (void *)(unsigned long)pb->pb_sema.count.counter, 51 + ktrace_enter(xfs_buf_trace_buf, 52 + bp, id, 53 + (void *)(unsigned long)bp->b_flags, 54 + (void *)(unsigned long)bp->b_hold.counter, 55 + (void *)(unsigned long)bp->b_sema.count.counter, 55 56 (void *)current, 56 57 data, ra, 57 - (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff), 58 - (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff), 59 - (void *)(unsigned long)pb->pb_buffer_length, 58 + (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), 59 + (void *)(unsigned long)(bp->b_file_offset & 0xffffffff), 60 + (void *)(unsigned long)bp->b_buffer_length, 60 61 NULL, NULL, NULL, NULL, NULL); 61 62 } 62 - ktrace_t *pagebuf_trace_buf; 63 - #define PAGEBUF_TRACE_SIZE 4096 64 - #define PB_TRACE(pb, id, data) \ 65 - pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0)) 63 + ktrace_t *xfs_buf_trace_buf; 64 + #define XFS_BUF_TRACE_SIZE 4096 65 + #define XB_TRACE(bp, id, data) \ 66 + xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0)) 66 67 #else 67 - #define PB_TRACE(pb, id, data) do { } while (0) 68 + #define XB_TRACE(bp, id, data) do { } while (0) 68 69 #endif 69 70 70 - #ifdef PAGEBUF_LOCK_TRACKING 71 - # define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid) 72 - # define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1) 73 - # define PB_GET_OWNER(pb) ((pb)->pb_last_holder) 71 + #ifdef XFS_BUF_LOCK_TRACKING 72 + # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) 73 + # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) 74 + # define XB_GET_OWNER(bp) ((bp)->b_last_holder) 74 75 #else 75 - # define PB_SET_OWNER(pb) do { } while (0) 76 - # define PB_CLEAR_OWNER(pb) do { } while (0) 77 - # define PB_GET_OWNER(pb) do { } while (0) 76 + # define XB_SET_OWNER(bp) do { } while (0) 77 + # define XB_CLEAR_OWNER(bp) do { } while (0) 78 + # define XB_GET_OWNER(bp) do { } while (0) 78 79 #endif 79 80 80 - #define pb_to_gfp(flags) \ 81 - ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \ 82 - ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) 81 + #define xb_to_gfp(flags) \ 82 + ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \ 83 + ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) 83 84 84 - #define pb_to_km(flags) \ 85 - (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) 85 + #define xb_to_km(flags) \ 86 + (((flags) & XBF_DONT_BLOCK) ? 
KM_NOFS : KM_SLEEP) 86 87 87 - #define pagebuf_allocate(flags) \ 88 - kmem_zone_alloc(pagebuf_zone, pb_to_km(flags)) 89 - #define pagebuf_deallocate(pb) \ 90 - kmem_zone_free(pagebuf_zone, (pb)); 88 + #define xfs_buf_allocate(flags) \ 89 + kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags)) 90 + #define xfs_buf_deallocate(bp) \ 91 + kmem_zone_free(xfs_buf_zone, (bp)); 91 92 92 93 /* 93 - * Page Region interfaces. 94 + * Page Region interfaces. 94 95 * 95 - * For pages in filesystems where the blocksize is smaller than the 96 - * pagesize, we use the page->private field (long) to hold a bitmap 97 - * of uptodate regions within the page. 96 + * For pages in filesystems where the blocksize is smaller than the 97 + * pagesize, we use the page->private field (long) to hold a bitmap 98 + * of uptodate regions within the page. 98 99 * 99 - * Each such region is "bytes per page / bits per long" bytes long. 100 + * Each such region is "bytes per page / bits per long" bytes long. 100 101 * 101 - * NBPPR == number-of-bytes-per-page-region 102 - * BTOPR == bytes-to-page-region (rounded up) 103 - * BTOPRT == bytes-to-page-region-truncated (rounded down) 102 + * NBPPR == number-of-bytes-per-page-region 103 + * BTOPR == bytes-to-page-region (rounded up) 104 + * BTOPRT == bytes-to-page-region-truncated (rounded down) 104 105 */ 105 106 #if (BITS_PER_LONG == 32) 106 107 #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ ··· 160 159 } 161 160 162 161 /* 163 - * Mapping of multi-page buffers into contiguous virtual space 162 + * Mapping of multi-page buffers into contiguous virtual space 164 163 */ 165 164 166 165 typedef struct a_list { ··· 173 172 STATIC DEFINE_SPINLOCK(as_lock); 174 173 175 174 /* 176 - * Try to batch vunmaps because they are costly. 175 + * Try to batch vunmaps because they are costly. 177 176 */ 178 177 STATIC void 179 178 free_address( ··· 216 215 } 217 216 218 217 /* 219 - * Internal pagebuf object manipulation 218 + * Internal xfs_buf_t object manipulation 220 219 */ 221 220 222 221 STATIC void 223 - _pagebuf_initialize( 224 - xfs_buf_t *pb, 222 + _xfs_buf_initialize( 223 + xfs_buf_t *bp, 225 224 xfs_buftarg_t *target, 226 - loff_t range_base, 225 + xfs_off_t range_base, 227 226 size_t range_length, 228 - page_buf_flags_t flags) 227 + xfs_buf_flags_t flags) 229 228 { 230 229 /* 231 - * We don't want certain flags to appear in pb->pb_flags. 230 + * We don't want certain flags to appear in b_flags. 232 231 */ 233 - flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD); 232 + flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD); 234 233 235 - memset(pb, 0, sizeof(xfs_buf_t)); 236 - atomic_set(&pb->pb_hold, 1); 237 - init_MUTEX_LOCKED(&pb->pb_iodonesema); 238 - INIT_LIST_HEAD(&pb->pb_list); 239 - INIT_LIST_HEAD(&pb->pb_hash_list); 240 - init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */ 241 - PB_SET_OWNER(pb); 242 - pb->pb_target = target; 243 - pb->pb_file_offset = range_base; 234 + memset(bp, 0, sizeof(xfs_buf_t)); 235 + atomic_set(&bp->b_hold, 1); 236 + init_MUTEX_LOCKED(&bp->b_iodonesema); 237 + INIT_LIST_HEAD(&bp->b_list); 238 + INIT_LIST_HEAD(&bp->b_hash_list); 239 + init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ 240 + XB_SET_OWNER(bp); 241 + bp->b_target = target; 242 + bp->b_file_offset = range_base; 244 243 /* 245 244 * Set buffer_length and count_desired to the same value initially. 246 245 * I/O routines should use count_desired, which will be the same in 247 246 * most cases but may be reset (e.g. XFS recovery). 
248 247 */ 249 - pb->pb_buffer_length = pb->pb_count_desired = range_length; 250 - pb->pb_flags = flags; 251 - pb->pb_bn = XFS_BUF_DADDR_NULL; 252 - atomic_set(&pb->pb_pin_count, 0); 253 - init_waitqueue_head(&pb->pb_waiters); 248 + bp->b_buffer_length = bp->b_count_desired = range_length; 249 + bp->b_flags = flags; 250 + bp->b_bn = XFS_BUF_DADDR_NULL; 251 + atomic_set(&bp->b_pin_count, 0); 252 + init_waitqueue_head(&bp->b_waiters); 254 253 255 - XFS_STATS_INC(pb_create); 256 - PB_TRACE(pb, "initialize", target); 254 + XFS_STATS_INC(xb_create); 255 + XB_TRACE(bp, "initialize", target); 257 256 } 258 257 259 258 /* 260 - * Allocate a page array capable of holding a specified number 261 - * of pages, and point the page buf at it. 259 + * Allocate a page array capable of holding a specified number 260 + * of pages, and point the page buf at it. 262 261 */ 263 262 STATIC int 264 - _pagebuf_get_pages( 265 - xfs_buf_t *pb, 263 + _xfs_buf_get_pages( 264 + xfs_buf_t *bp, 266 265 int page_count, 267 - page_buf_flags_t flags) 266 + xfs_buf_flags_t flags) 268 267 { 269 268 /* Make sure that we have a page list */ 270 - if (pb->pb_pages == NULL) { 271 - pb->pb_offset = page_buf_poff(pb->pb_file_offset); 272 - pb->pb_page_count = page_count; 273 - if (page_count <= PB_PAGES) { 274 - pb->pb_pages = pb->pb_page_array; 269 + if (bp->b_pages == NULL) { 270 + bp->b_offset = xfs_buf_poff(bp->b_file_offset); 271 + bp->b_page_count = page_count; 272 + if (page_count <= XB_PAGES) { 273 + bp->b_pages = bp->b_page_array; 275 274 } else { 276 - pb->pb_pages = kmem_alloc(sizeof(struct page *) * 277 - page_count, pb_to_km(flags)); 278 - if (pb->pb_pages == NULL) 275 + bp->b_pages = kmem_alloc(sizeof(struct page *) * 276 + page_count, xb_to_km(flags)); 277 + if (bp->b_pages == NULL) 279 278 return -ENOMEM; 280 279 } 281 - memset(pb->pb_pages, 0, sizeof(struct page *) * page_count); 280 + memset(bp->b_pages, 0, sizeof(struct page *) * page_count); 282 281 } 283 282 return 0; 284 283 } 285 284 286 285 /* 287 - * Frees pb_pages if it was malloced. 286 + * Frees b_pages if it was allocated. 288 287 */ 289 288 STATIC void 290 - _pagebuf_free_pages( 289 + _xfs_buf_free_pages( 291 290 xfs_buf_t *bp) 292 291 { 293 - if (bp->pb_pages != bp->pb_page_array) { 294 - kmem_free(bp->pb_pages, 295 - bp->pb_page_count * sizeof(struct page *)); 292 + if (bp->b_pages != bp->b_page_array) { 293 + kmem_free(bp->b_pages, 294 + bp->b_page_count * sizeof(struct page *)); 296 295 } 297 296 } 298 297 ··· 300 299 * Releases the specified buffer. 301 300 * 302 301 * The modification state of any associated pages is left unchanged. 
303 - * The buffer most not be on any hash - use pagebuf_rele instead for 302 + * The buffer most not be on any hash - use xfs_buf_rele instead for 304 303 * hashed and refcounted buffers 305 304 */ 306 305 void 307 - pagebuf_free( 306 + xfs_buf_free( 308 307 xfs_buf_t *bp) 309 308 { 310 - PB_TRACE(bp, "free", 0); 309 + XB_TRACE(bp, "free", 0); 311 310 312 - ASSERT(list_empty(&bp->pb_hash_list)); 311 + ASSERT(list_empty(&bp->b_hash_list)); 313 312 314 - if (bp->pb_flags & _PBF_PAGE_CACHE) { 313 + if (bp->b_flags & _XBF_PAGE_CACHE) { 315 314 uint i; 316 315 317 - if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1)) 318 - free_address(bp->pb_addr - bp->pb_offset); 316 + if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1)) 317 + free_address(bp->b_addr - bp->b_offset); 319 318 320 - for (i = 0; i < bp->pb_page_count; i++) 321 - page_cache_release(bp->pb_pages[i]); 322 - _pagebuf_free_pages(bp); 323 - } else if (bp->pb_flags & _PBF_KMEM_ALLOC) { 319 + for (i = 0; i < bp->b_page_count; i++) 320 + page_cache_release(bp->b_pages[i]); 321 + _xfs_buf_free_pages(bp); 322 + } else if (bp->b_flags & _XBF_KMEM_ALLOC) { 324 323 /* 325 - * XXX(hch): bp->pb_count_desired might be incorrect (see 326 - * pagebuf_associate_memory for details), but fortunately 324 + * XXX(hch): bp->b_count_desired might be incorrect (see 325 + * xfs_buf_associate_memory for details), but fortunately 327 326 * the Linux version of kmem_free ignores the len argument.. 328 327 */ 329 - kmem_free(bp->pb_addr, bp->pb_count_desired); 330 - _pagebuf_free_pages(bp); 328 + kmem_free(bp->b_addr, bp->b_count_desired); 329 + _xfs_buf_free_pages(bp); 331 330 } 332 331 333 - pagebuf_deallocate(bp); 332 + xfs_buf_deallocate(bp); 334 333 } 335 334 336 335 /* 337 336 * Finds all pages for buffer in question and builds it's page list. 
338 337 */ 339 338 STATIC int 340 - _pagebuf_lookup_pages( 339 + _xfs_buf_lookup_pages( 341 340 xfs_buf_t *bp, 342 341 uint flags) 343 342 { 344 - struct address_space *mapping = bp->pb_target->pbr_mapping; 345 - size_t blocksize = bp->pb_target->pbr_bsize; 346 - size_t size = bp->pb_count_desired; 343 + struct address_space *mapping = bp->b_target->bt_mapping; 344 + size_t blocksize = bp->b_target->bt_bsize; 345 + size_t size = bp->b_count_desired; 347 346 size_t nbytes, offset; 348 - gfp_t gfp_mask = pb_to_gfp(flags); 347 + gfp_t gfp_mask = xb_to_gfp(flags); 349 348 unsigned short page_count, i; 350 349 pgoff_t first; 351 - loff_t end; 350 + xfs_off_t end; 352 351 int error; 353 352 354 - end = bp->pb_file_offset + bp->pb_buffer_length; 355 - page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset); 353 + end = bp->b_file_offset + bp->b_buffer_length; 354 + page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); 356 355 357 - error = _pagebuf_get_pages(bp, page_count, flags); 356 + error = _xfs_buf_get_pages(bp, page_count, flags); 358 357 if (unlikely(error)) 359 358 return error; 360 - bp->pb_flags |= _PBF_PAGE_CACHE; 359 + bp->b_flags |= _XBF_PAGE_CACHE; 361 360 362 - offset = bp->pb_offset; 363 - first = bp->pb_file_offset >> PAGE_CACHE_SHIFT; 361 + offset = bp->b_offset; 362 + first = bp->b_file_offset >> PAGE_CACHE_SHIFT; 364 363 365 - for (i = 0; i < bp->pb_page_count; i++) { 364 + for (i = 0; i < bp->b_page_count; i++) { 366 365 struct page *page; 367 366 uint retries = 0; 368 367 369 368 retry: 370 369 page = find_or_create_page(mapping, first + i, gfp_mask); 371 370 if (unlikely(page == NULL)) { 372 - if (flags & PBF_READ_AHEAD) { 373 - bp->pb_page_count = i; 374 - for (i = 0; i < bp->pb_page_count; i++) 375 - unlock_page(bp->pb_pages[i]); 371 + if (flags & XBF_READ_AHEAD) { 372 + bp->b_page_count = i; 373 + for (i = 0; i < bp->b_page_count; i++) 374 + unlock_page(bp->b_pages[i]); 376 375 return -ENOMEM; 377 376 } 378 377 ··· 388 387 "deadlock in %s (mode:0x%x)\n", 389 388 __FUNCTION__, gfp_mask); 390 389 391 - XFS_STATS_INC(pb_page_retries); 390 + XFS_STATS_INC(xb_page_retries); 392 391 xfsbufd_wakeup(0, gfp_mask); 393 392 blk_congestion_wait(WRITE, HZ/50); 394 393 goto retry; 395 394 } 396 395 397 - XFS_STATS_INC(pb_page_found); 396 + XFS_STATS_INC(xb_page_found); 398 397 399 398 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); 400 399 size -= nbytes; ··· 402 401 if (!PageUptodate(page)) { 403 402 page_count--; 404 403 if (blocksize >= PAGE_CACHE_SIZE) { 405 - if (flags & PBF_READ) 406 - bp->pb_locked = 1; 404 + if (flags & XBF_READ) 405 + bp->b_locked = 1; 407 406 } else if (!PagePrivate(page)) { 408 407 if (test_page_region(page, offset, nbytes)) 409 408 page_count++; 410 409 } 411 410 } 412 411 413 - bp->pb_pages[i] = page; 412 + bp->b_pages[i] = page; 414 413 offset = 0; 415 414 } 416 415 417 - if (!bp->pb_locked) { 418 - for (i = 0; i < bp->pb_page_count; i++) 419 - unlock_page(bp->pb_pages[i]); 416 + if (!bp->b_locked) { 417 + for (i = 0; i < bp->b_page_count; i++) 418 + unlock_page(bp->b_pages[i]); 420 419 } 421 420 422 - if (page_count == bp->pb_page_count) 423 - bp->pb_flags |= PBF_DONE; 421 + if (page_count == bp->b_page_count) 422 + bp->b_flags |= XBF_DONE; 424 423 425 - PB_TRACE(bp, "lookup_pages", (long)page_count); 424 + XB_TRACE(bp, "lookup_pages", (long)page_count); 426 425 return error; 427 426 } 428 427 ··· 430 429 * Map buffer into kernel address-space if nessecary. 
431 430 */ 432 431 STATIC int 433 - _pagebuf_map_pages( 432 + _xfs_buf_map_pages( 434 433 xfs_buf_t *bp, 435 434 uint flags) 436 435 { 437 436 /* A single page buffer is always mappable */ 438 - if (bp->pb_page_count == 1) { 439 - bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset; 440 - bp->pb_flags |= PBF_MAPPED; 441 - } else if (flags & PBF_MAPPED) { 437 + if (bp->b_page_count == 1) { 438 + bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 439 + bp->b_flags |= XBF_MAPPED; 440 + } else if (flags & XBF_MAPPED) { 442 441 if (as_list_len > 64) 443 442 purge_addresses(); 444 - bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count, 445 - VM_MAP, PAGE_KERNEL); 446 - if (unlikely(bp->pb_addr == NULL)) 443 + bp->b_addr = vmap(bp->b_pages, bp->b_page_count, 444 + VM_MAP, PAGE_KERNEL); 445 + if (unlikely(bp->b_addr == NULL)) 447 446 return -ENOMEM; 448 - bp->pb_addr += bp->pb_offset; 449 - bp->pb_flags |= PBF_MAPPED; 447 + bp->b_addr += bp->b_offset; 448 + bp->b_flags |= XBF_MAPPED; 450 449 } 451 450 452 451 return 0; ··· 457 456 */ 458 457 459 458 /* 460 - * _pagebuf_find 461 - * 462 - * Looks up, and creates if absent, a lockable buffer for 459 + * Look up, and creates if absent, a lockable buffer for 463 460 * a given range of an inode. The buffer is returned 464 461 * locked. If other overlapping buffers exist, they are 465 462 * released before the new buffer is created and locked, ··· 465 466 * are unlocked. No I/O is implied by this call. 466 467 */ 467 468 xfs_buf_t * 468 - _pagebuf_find( 469 + _xfs_buf_find( 469 470 xfs_buftarg_t *btp, /* block device target */ 470 - loff_t ioff, /* starting offset of range */ 471 + xfs_off_t ioff, /* starting offset of range */ 471 472 size_t isize, /* length of range */ 472 - page_buf_flags_t flags, /* PBF_TRYLOCK */ 473 - xfs_buf_t *new_pb)/* newly allocated buffer */ 473 + xfs_buf_flags_t flags, 474 + xfs_buf_t *new_bp) 474 475 { 475 - loff_t range_base; 476 + xfs_off_t range_base; 476 477 size_t range_length; 477 478 xfs_bufhash_t *hash; 478 - xfs_buf_t *pb, *n; 479 + xfs_buf_t *bp, *n; 479 480 480 481 range_base = (ioff << BBSHIFT); 481 482 range_length = (isize << BBSHIFT); 482 483 483 484 /* Check for IOs smaller than the sector size / not sector aligned */ 484 - ASSERT(!(range_length < (1 << btp->pbr_sshift))); 485 - ASSERT(!(range_base & (loff_t)btp->pbr_smask)); 485 + ASSERT(!(range_length < (1 << btp->bt_sshift))); 486 + ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); 486 487 487 488 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; 488 489 489 490 spin_lock(&hash->bh_lock); 490 491 491 - list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) { 492 - ASSERT(btp == pb->pb_target); 493 - if (pb->pb_file_offset == range_base && 494 - pb->pb_buffer_length == range_length) { 492 + list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { 493 + ASSERT(btp == bp->b_target); 494 + if (bp->b_file_offset == range_base && 495 + bp->b_buffer_length == range_length) { 495 496 /* 496 - * If we look at something bring it to the 497 + * If we look at something, bring it to the 497 498 * front of the list for next time. 
498 499 */ 499 - atomic_inc(&pb->pb_hold); 500 - list_move(&pb->pb_hash_list, &hash->bh_list); 500 + atomic_inc(&bp->b_hold); 501 + list_move(&bp->b_hash_list, &hash->bh_list); 501 502 goto found; 502 503 } 503 504 } 504 505 505 506 /* No match found */ 506 - if (new_pb) { 507 - _pagebuf_initialize(new_pb, btp, range_base, 507 + if (new_bp) { 508 + _xfs_buf_initialize(new_bp, btp, range_base, 508 509 range_length, flags); 509 - new_pb->pb_hash = hash; 510 - list_add(&new_pb->pb_hash_list, &hash->bh_list); 510 + new_bp->b_hash = hash; 511 + list_add(&new_bp->b_hash_list, &hash->bh_list); 511 512 } else { 512 - XFS_STATS_INC(pb_miss_locked); 513 + XFS_STATS_INC(xb_miss_locked); 513 514 } 514 515 515 516 spin_unlock(&hash->bh_lock); 516 - return new_pb; 517 + return new_bp; 517 518 518 519 found: 519 520 spin_unlock(&hash->bh_lock); ··· 522 523 * if this does not work then we need to drop the 523 524 * spinlock and do a hard attempt on the semaphore. 524 525 */ 525 - if (down_trylock(&pb->pb_sema)) { 526 - if (!(flags & PBF_TRYLOCK)) { 526 + if (down_trylock(&bp->b_sema)) { 527 + if (!(flags & XBF_TRYLOCK)) { 527 528 /* wait for buffer ownership */ 528 - PB_TRACE(pb, "get_lock", 0); 529 - pagebuf_lock(pb); 530 - XFS_STATS_INC(pb_get_locked_waited); 529 + XB_TRACE(bp, "get_lock", 0); 530 + xfs_buf_lock(bp); 531 + XFS_STATS_INC(xb_get_locked_waited); 531 532 } else { 532 533 /* We asked for a trylock and failed, no need 533 534 * to look at file offset and length here, we 534 - * know that this pagebuf at least overlaps our 535 - * pagebuf and is locked, therefore our buffer 536 - * either does not exist, or is this buffer 535 + * know that this buffer at least overlaps our 536 + * buffer and is locked, therefore our buffer 537 + * either does not exist, or is this buffer. 537 538 */ 538 - 539 - pagebuf_rele(pb); 540 - XFS_STATS_INC(pb_busy_locked); 541 - return (NULL); 539 + xfs_buf_rele(bp); 540 + XFS_STATS_INC(xb_busy_locked); 541 + return NULL; 542 542 } 543 543 } else { 544 544 /* trylock worked */ 545 - PB_SET_OWNER(pb); 545 + XB_SET_OWNER(bp); 546 546 } 547 547 548 - if (pb->pb_flags & PBF_STALE) { 549 - ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0); 550 - pb->pb_flags &= PBF_MAPPED; 548 + if (bp->b_flags & XBF_STALE) { 549 + ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 550 + bp->b_flags &= XBF_MAPPED; 551 551 } 552 - PB_TRACE(pb, "got_lock", 0); 553 - XFS_STATS_INC(pb_get_locked); 554 - return (pb); 552 + XB_TRACE(bp, "got_lock", 0); 553 + XFS_STATS_INC(xb_get_locked); 554 + return bp; 555 555 } 556 556 557 557 /* 558 - * xfs_buf_get_flags assembles a buffer covering the specified range. 559 - * 558 + * Assembles a buffer covering the specified range. 560 559 * Storage in memory for all portions of the buffer will be allocated, 561 560 * although backing storage may not be. 
562 561 */ 563 562 xfs_buf_t * 564 - xfs_buf_get_flags( /* allocate a buffer */ 563 + xfs_buf_get_flags( 565 564 xfs_buftarg_t *target,/* target for buffer */ 566 - loff_t ioff, /* starting offset of range */ 565 + xfs_off_t ioff, /* starting offset of range */ 567 566 size_t isize, /* length of range */ 568 - page_buf_flags_t flags) /* PBF_TRYLOCK */ 567 + xfs_buf_flags_t flags) 569 568 { 570 - xfs_buf_t *pb, *new_pb; 569 + xfs_buf_t *bp, *new_bp; 571 570 int error = 0, i; 572 571 573 - new_pb = pagebuf_allocate(flags); 574 - if (unlikely(!new_pb)) 572 + new_bp = xfs_buf_allocate(flags); 573 + if (unlikely(!new_bp)) 575 574 return NULL; 576 575 577 - pb = _pagebuf_find(target, ioff, isize, flags, new_pb); 578 - if (pb == new_pb) { 579 - error = _pagebuf_lookup_pages(pb, flags); 576 + bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); 577 + if (bp == new_bp) { 578 + error = _xfs_buf_lookup_pages(bp, flags); 580 579 if (error) 581 580 goto no_buffer; 582 581 } else { 583 - pagebuf_deallocate(new_pb); 584 - if (unlikely(pb == NULL)) 582 + xfs_buf_deallocate(new_bp); 583 + if (unlikely(bp == NULL)) 585 584 return NULL; 586 585 } 587 586 588 - for (i = 0; i < pb->pb_page_count; i++) 589 - mark_page_accessed(pb->pb_pages[i]); 587 + for (i = 0; i < bp->b_page_count; i++) 588 + mark_page_accessed(bp->b_pages[i]); 590 589 591 - if (!(pb->pb_flags & PBF_MAPPED)) { 592 - error = _pagebuf_map_pages(pb, flags); 590 + if (!(bp->b_flags & XBF_MAPPED)) { 591 + error = _xfs_buf_map_pages(bp, flags); 593 592 if (unlikely(error)) { 594 593 printk(KERN_WARNING "%s: failed to map pages\n", 595 594 __FUNCTION__); ··· 595 598 } 596 599 } 597 600 598 - XFS_STATS_INC(pb_get); 601 + XFS_STATS_INC(xb_get); 599 602 600 603 /* 601 604 * Always fill in the block number now, the mapped cases can do 602 605 * their own overlay of this later. 
603 606 */ 604 - pb->pb_bn = ioff; 605 - pb->pb_count_desired = pb->pb_buffer_length; 607 + bp->b_bn = ioff; 608 + bp->b_count_desired = bp->b_buffer_length; 606 609 607 - PB_TRACE(pb, "get", (unsigned long)flags); 608 - return pb; 610 + XB_TRACE(bp, "get", (unsigned long)flags); 611 + return bp; 609 612 610 613 no_buffer: 611 - if (flags & (PBF_LOCK | PBF_TRYLOCK)) 612 - pagebuf_unlock(pb); 613 - pagebuf_rele(pb); 614 + if (flags & (XBF_LOCK | XBF_TRYLOCK)) 615 + xfs_buf_unlock(bp); 616 + xfs_buf_rele(bp); 614 617 return NULL; 615 618 } 616 619 617 620 xfs_buf_t * 618 621 xfs_buf_read_flags( 619 622 xfs_buftarg_t *target, 620 - loff_t ioff, 623 + xfs_off_t ioff, 621 624 size_t isize, 622 - page_buf_flags_t flags) 625 + xfs_buf_flags_t flags) 623 626 { 624 - xfs_buf_t *pb; 627 + xfs_buf_t *bp; 625 628 626 - flags |= PBF_READ; 629 + flags |= XBF_READ; 627 630 628 - pb = xfs_buf_get_flags(target, ioff, isize, flags); 629 - if (pb) { 630 - if (!XFS_BUF_ISDONE(pb)) { 631 - PB_TRACE(pb, "read", (unsigned long)flags); 632 - XFS_STATS_INC(pb_get_read); 633 - pagebuf_iostart(pb, flags); 634 - } else if (flags & PBF_ASYNC) { 635 - PB_TRACE(pb, "read_async", (unsigned long)flags); 631 + bp = xfs_buf_get_flags(target, ioff, isize, flags); 632 + if (bp) { 633 + if (!XFS_BUF_ISDONE(bp)) { 634 + XB_TRACE(bp, "read", (unsigned long)flags); 635 + XFS_STATS_INC(xb_get_read); 636 + xfs_buf_iostart(bp, flags); 637 + } else if (flags & XBF_ASYNC) { 638 + XB_TRACE(bp, "read_async", (unsigned long)flags); 636 639 /* 637 640 * Read ahead call which is already satisfied, 638 641 * drop the buffer 639 642 */ 640 643 goto no_buffer; 641 644 } else { 642 - PB_TRACE(pb, "read_done", (unsigned long)flags); 645 + XB_TRACE(bp, "read_done", (unsigned long)flags); 643 646 /* We do not want read in the flags */ 644 - pb->pb_flags &= ~PBF_READ; 647 + bp->b_flags &= ~XBF_READ; 645 648 } 646 649 } 647 650 648 - return pb; 651 + return bp; 649 652 650 653 no_buffer: 651 - if (flags & (PBF_LOCK | PBF_TRYLOCK)) 652 - pagebuf_unlock(pb); 653 - pagebuf_rele(pb); 654 + if (flags & (XBF_LOCK | XBF_TRYLOCK)) 655 + xfs_buf_unlock(bp); 656 + xfs_buf_rele(bp); 654 657 return NULL; 655 658 } 656 659 657 660 /* 658 - * If we are not low on memory then do the readahead in a deadlock 659 - * safe manner. 661 + * If we are not low on memory then do the readahead in a deadlock 662 + * safe manner. 
660 663 */ 661 664 void 662 - pagebuf_readahead( 665 + xfs_buf_readahead( 663 666 xfs_buftarg_t *target, 664 - loff_t ioff, 667 + xfs_off_t ioff, 665 668 size_t isize, 666 - page_buf_flags_t flags) 669 + xfs_buf_flags_t flags) 667 670 { 668 671 struct backing_dev_info *bdi; 669 672 670 - bdi = target->pbr_mapping->backing_dev_info; 673 + bdi = target->bt_mapping->backing_dev_info; 671 674 if (bdi_read_congested(bdi)) 672 675 return; 673 676 674 - flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD); 677 + flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); 675 678 xfs_buf_read_flags(target, ioff, isize, flags); 676 679 } 677 680 678 681 xfs_buf_t * 679 - pagebuf_get_empty( 682 + xfs_buf_get_empty( 680 683 size_t len, 681 684 xfs_buftarg_t *target) 682 685 { 683 - xfs_buf_t *pb; 686 + xfs_buf_t *bp; 684 687 685 - pb = pagebuf_allocate(0); 686 - if (pb) 687 - _pagebuf_initialize(pb, target, 0, len, 0); 688 - return pb; 688 + bp = xfs_buf_allocate(0); 689 + if (bp) 690 + _xfs_buf_initialize(bp, target, 0, len, 0); 691 + return bp; 689 692 } 690 693 691 694 static inline struct page * ··· 701 704 } 702 705 703 706 int 704 - pagebuf_associate_memory( 705 - xfs_buf_t *pb, 707 + xfs_buf_associate_memory( 708 + xfs_buf_t *bp, 706 709 void *mem, 707 710 size_t len) 708 711 { ··· 719 722 page_count++; 720 723 721 724 /* Free any previous set of page pointers */ 722 - if (pb->pb_pages) 723 - _pagebuf_free_pages(pb); 725 + if (bp->b_pages) 726 + _xfs_buf_free_pages(bp); 724 727 725 - pb->pb_pages = NULL; 726 - pb->pb_addr = mem; 728 + bp->b_pages = NULL; 729 + bp->b_addr = mem; 727 730 728 - rval = _pagebuf_get_pages(pb, page_count, 0); 731 + rval = _xfs_buf_get_pages(bp, page_count, 0); 729 732 if (rval) 730 733 return rval; 731 734 732 - pb->pb_offset = offset; 735 + bp->b_offset = offset; 733 736 ptr = (size_t) mem & PAGE_CACHE_MASK; 734 737 end = PAGE_CACHE_ALIGN((size_t) mem + len); 735 738 end_cur = end; 736 739 /* set up first page */ 737 - pb->pb_pages[0] = mem_to_page(mem); 740 + bp->b_pages[0] = mem_to_page(mem); 738 741 739 742 ptr += PAGE_CACHE_SIZE; 740 - pb->pb_page_count = ++i; 743 + bp->b_page_count = ++i; 741 744 while (ptr < end) { 742 - pb->pb_pages[i] = mem_to_page((void *)ptr); 743 - pb->pb_page_count = ++i; 745 + bp->b_pages[i] = mem_to_page((void *)ptr); 746 + bp->b_page_count = ++i; 744 747 ptr += PAGE_CACHE_SIZE; 745 748 } 746 - pb->pb_locked = 0; 749 + bp->b_locked = 0; 747 750 748 - pb->pb_count_desired = pb->pb_buffer_length = len; 749 - pb->pb_flags |= PBF_MAPPED; 751 + bp->b_count_desired = bp->b_buffer_length = len; 752 + bp->b_flags |= XBF_MAPPED; 750 753 751 754 return 0; 752 755 } 753 756 754 757 xfs_buf_t * 755 - pagebuf_get_no_daddr( 758 + xfs_buf_get_noaddr( 756 759 size_t len, 757 760 xfs_buftarg_t *target) 758 761 { ··· 761 764 void *data; 762 765 int error; 763 766 764 - bp = pagebuf_allocate(0); 767 + bp = xfs_buf_allocate(0); 765 768 if (unlikely(bp == NULL)) 766 769 goto fail; 767 - _pagebuf_initialize(bp, target, 0, len, 0); 770 + _xfs_buf_initialize(bp, target, 0, len, 0); 768 771 769 772 try_again: 770 773 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); ··· 773 776 774 777 /* check whether alignment matches.. */ 775 778 if ((__psunsigned_t)data != 776 - ((__psunsigned_t)data & ~target->pbr_smask)) { 779 + ((__psunsigned_t)data & ~target->bt_smask)) { 777 780 /* .. 
else double the size and try again */ 778 781 kmem_free(data, malloc_len); 779 782 malloc_len <<= 1; 780 783 goto try_again; 781 784 } 782 785 783 - error = pagebuf_associate_memory(bp, data, len); 786 + error = xfs_buf_associate_memory(bp, data, len); 784 787 if (error) 785 788 goto fail_free_mem; 786 - bp->pb_flags |= _PBF_KMEM_ALLOC; 789 + bp->b_flags |= _XBF_KMEM_ALLOC; 787 790 788 - pagebuf_unlock(bp); 791 + xfs_buf_unlock(bp); 789 792 790 - PB_TRACE(bp, "no_daddr", data); 793 + XB_TRACE(bp, "no_daddr", data); 791 794 return bp; 792 795 fail_free_mem: 793 796 kmem_free(data, malloc_len); 794 797 fail_free_buf: 795 - pagebuf_free(bp); 798 + xfs_buf_free(bp); 796 799 fail: 797 800 return NULL; 798 801 } 799 802 800 803 /* 801 - * pagebuf_hold 802 - * 803 804 * Increment reference count on buffer, to hold the buffer concurrently 804 805 * with another thread which may release (free) the buffer asynchronously. 805 - * 806 806 * Must hold the buffer already to call this function. 807 807 */ 808 808 void 809 - pagebuf_hold( 810 - xfs_buf_t *pb) 809 + xfs_buf_hold( 810 + xfs_buf_t *bp) 811 811 { 812 - atomic_inc(&pb->pb_hold); 813 - PB_TRACE(pb, "hold", 0); 812 + atomic_inc(&bp->b_hold); 813 + XB_TRACE(bp, "hold", 0); 814 814 } 815 815 816 816 /* 817 - * pagebuf_rele 818 - * 819 - * pagebuf_rele releases a hold on the specified buffer. If the 820 - * the hold count is 1, pagebuf_rele calls pagebuf_free. 817 + * Releases a hold on the specified buffer. If the 818 + * the hold count is 1, calls xfs_buf_free. 821 819 */ 822 820 void 823 - pagebuf_rele( 824 - xfs_buf_t *pb) 821 + xfs_buf_rele( 822 + xfs_buf_t *bp) 825 823 { 826 - xfs_bufhash_t *hash = pb->pb_hash; 824 + xfs_bufhash_t *hash = bp->b_hash; 827 825 828 - PB_TRACE(pb, "rele", pb->pb_relse); 826 + XB_TRACE(bp, "rele", bp->b_relse); 829 827 830 - if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) { 831 - if (pb->pb_relse) { 832 - atomic_inc(&pb->pb_hold); 828 + if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { 829 + if (bp->b_relse) { 830 + atomic_inc(&bp->b_hold); 833 831 spin_unlock(&hash->bh_lock); 834 - (*(pb->pb_relse)) (pb); 835 - } else if (pb->pb_flags & PBF_FS_MANAGED) { 832 + (*(bp->b_relse)) (bp); 833 + } else if (bp->b_flags & XBF_FS_MANAGED) { 836 834 spin_unlock(&hash->bh_lock); 837 835 } else { 838 - ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q))); 839 - list_del_init(&pb->pb_hash_list); 836 + ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); 837 + list_del_init(&bp->b_hash_list); 840 838 spin_unlock(&hash->bh_lock); 841 - pagebuf_free(pb); 839 + xfs_buf_free(bp); 842 840 } 843 841 } else { 844 842 /* 845 843 * Catch reference count leaks 846 844 */ 847 - ASSERT(atomic_read(&pb->pb_hold) >= 0); 845 + ASSERT(atomic_read(&bp->b_hold) >= 0); 848 846 } 849 847 } 850 848 ··· 855 863 */ 856 864 857 865 /* 858 - * pagebuf_cond_lock 859 - * 860 - * pagebuf_cond_lock locks a buffer object, if it is not already locked. 861 - * Note that this in no way 862 - * locks the underlying pages, so it is only useful for synchronizing 863 - * concurrent use of page buffer objects, not for synchronizing independent 864 - * access to the underlying pages. 866 + * Locks a buffer object, if it is not already locked. 867 + * Note that this in no way locks the underlying pages, so it is only 868 + * useful for synchronizing concurrent use of buffer objects, not for 869 + * synchronizing independent access to the underlying pages. 
865 870 */ 866 871 int 867 - pagebuf_cond_lock( /* lock buffer, if not locked */ 868 - /* returns -EBUSY if locked) */ 869 - xfs_buf_t *pb) 872 + xfs_buf_cond_lock( 873 + xfs_buf_t *bp) 870 874 { 871 875 int locked; 872 876 873 - locked = down_trylock(&pb->pb_sema) == 0; 877 + locked = down_trylock(&bp->b_sema) == 0; 874 878 if (locked) { 875 - PB_SET_OWNER(pb); 879 + XB_SET_OWNER(bp); 876 880 } 877 - PB_TRACE(pb, "cond_lock", (long)locked); 878 - return(locked ? 0 : -EBUSY); 881 + XB_TRACE(bp, "cond_lock", (long)locked); 882 + return locked ? 0 : -EBUSY; 879 883 } 880 884 881 885 #if defined(DEBUG) || defined(XFS_BLI_TRACE) 882 - /* 883 - * pagebuf_lock_value 884 - * 885 - * Return lock value for a pagebuf 886 - */ 887 886 int 888 - pagebuf_lock_value( 889 - xfs_buf_t *pb) 887 + xfs_buf_lock_value( 888 + xfs_buf_t *bp) 890 889 { 891 - return(atomic_read(&pb->pb_sema.count)); 890 + return atomic_read(&bp->b_sema.count); 892 891 } 893 892 #endif 894 893 895 894 /* 896 - * pagebuf_lock 897 - * 898 - * pagebuf_lock locks a buffer object. Note that this in no way 899 - * locks the underlying pages, so it is only useful for synchronizing 900 - * concurrent use of page buffer objects, not for synchronizing independent 901 - * access to the underlying pages. 895 + * Locks a buffer object. 896 + * Note that this in no way locks the underlying pages, so it is only 897 + * useful for synchronizing concurrent use of buffer objects, not for 898 + * synchronizing independent access to the underlying pages. 902 899 */ 903 - int 904 - pagebuf_lock( 905 - xfs_buf_t *pb) 900 + void 901 + xfs_buf_lock( 902 + xfs_buf_t *bp) 906 903 { 907 - PB_TRACE(pb, "lock", 0); 908 - if (atomic_read(&pb->pb_io_remaining)) 909 - blk_run_address_space(pb->pb_target->pbr_mapping); 910 - down(&pb->pb_sema); 911 - PB_SET_OWNER(pb); 912 - PB_TRACE(pb, "locked", 0); 913 - return 0; 904 + XB_TRACE(bp, "lock", 0); 905 + if (atomic_read(&bp->b_io_remaining)) 906 + blk_run_address_space(bp->b_target->bt_mapping); 907 + down(&bp->b_sema); 908 + XB_SET_OWNER(bp); 909 + XB_TRACE(bp, "locked", 0); 914 910 } 915 911 916 912 /* 917 - * pagebuf_unlock 918 - * 919 - * pagebuf_unlock releases the lock on the buffer object created by 920 - * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages 921 - * created by pagebuf_pin). 922 - * 913 + * Releases the lock on the buffer object. 923 914 * If the buffer is marked delwri but is not queued, do so before we 924 - * unlock the buffer as we need to set flags correctly. We also need to 915 + * unlock the buffer as we need to set flags correctly. We also need to 925 916 * take a reference for the delwri queue because the unlocker is going to 926 917 * drop their's and they don't know we just queued it. 
927 918 */ 928 919 void 929 - pagebuf_unlock( /* unlock buffer */ 930 - xfs_buf_t *pb) /* buffer to unlock */ 920 + xfs_buf_unlock( 921 + xfs_buf_t *bp) 931 922 { 932 - if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) { 933 - atomic_inc(&pb->pb_hold); 934 - pb->pb_flags |= PBF_ASYNC; 935 - pagebuf_delwri_queue(pb, 0); 923 + if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) { 924 + atomic_inc(&bp->b_hold); 925 + bp->b_flags |= XBF_ASYNC; 926 + xfs_buf_delwri_queue(bp, 0); 936 927 } 937 928 938 - PB_CLEAR_OWNER(pb); 939 - up(&pb->pb_sema); 940 - PB_TRACE(pb, "unlock", 0); 929 + XB_CLEAR_OWNER(bp); 930 + up(&bp->b_sema); 931 + XB_TRACE(bp, "unlock", 0); 941 932 } 942 933 943 934 944 935 /* 945 936 * Pinning Buffer Storage in Memory 946 - */ 947 - 948 - /* 949 - * pagebuf_pin 950 - * 951 - * pagebuf_pin locks all of the memory represented by a buffer in 952 - * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for 953 - * the same or different buffers affecting a given page, will 954 - * properly count the number of outstanding "pin" requests. The 955 - * buffer may be released after the pagebuf_pin and a different 956 - * buffer used when calling pagebuf_unpin, if desired. 957 - * pagebuf_pin should be used by the file system when it wants be 958 - * assured that no attempt will be made to force the affected 959 - * memory to disk. It does not assure that a given logical page 960 - * will not be moved to a different physical page. 937 + * Ensure that no attempt to force a buffer to disk will succeed. 961 938 */ 962 939 void 963 - pagebuf_pin( 964 - xfs_buf_t *pb) 940 + xfs_buf_pin( 941 + xfs_buf_t *bp) 965 942 { 966 - atomic_inc(&pb->pb_pin_count); 967 - PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter); 943 + atomic_inc(&bp->b_pin_count); 944 + XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter); 968 945 } 969 946 970 - /* 971 - * pagebuf_unpin 972 - * 973 - * pagebuf_unpin reverses the locking of memory performed by 974 - * pagebuf_pin. Note that both functions affected the logical 975 - * pages associated with the buffer, not the buffer itself. 976 - */ 977 947 void 978 - pagebuf_unpin( 979 - xfs_buf_t *pb) 948 + xfs_buf_unpin( 949 + xfs_buf_t *bp) 980 950 { 981 - if (atomic_dec_and_test(&pb->pb_pin_count)) { 982 - wake_up_all(&pb->pb_waiters); 983 - } 984 - PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter); 951 + if (atomic_dec_and_test(&bp->b_pin_count)) 952 + wake_up_all(&bp->b_waiters); 953 + XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter); 985 954 } 986 955 987 956 int 988 - pagebuf_ispin( 989 - xfs_buf_t *pb) 957 + xfs_buf_ispin( 958 + xfs_buf_t *bp) 990 959 { 991 - return atomic_read(&pb->pb_pin_count); 960 + return atomic_read(&bp->b_pin_count); 992 961 } 993 962 994 - /* 995 - * pagebuf_wait_unpin 996 - * 997 - * pagebuf_wait_unpin waits until all of the memory associated 998 - * with the buffer is not longer locked in memory. It returns 999 - * immediately if none of the affected pages are locked. 
1000 - */ 1001 - static inline void 1002 - _pagebuf_wait_unpin( 1003 - xfs_buf_t *pb) 963 + STATIC void 964 + xfs_buf_wait_unpin( 965 + xfs_buf_t *bp) 1004 966 { 1005 967 DECLARE_WAITQUEUE (wait, current); 1006 968 1007 - if (atomic_read(&pb->pb_pin_count) == 0) 969 + if (atomic_read(&bp->b_pin_count) == 0) 1008 970 return; 1009 971 1010 - add_wait_queue(&pb->pb_waiters, &wait); 972 + add_wait_queue(&bp->b_waiters, &wait); 1011 973 for (;;) { 1012 974 set_current_state(TASK_UNINTERRUPTIBLE); 1013 - if (atomic_read(&pb->pb_pin_count) == 0) 975 + if (atomic_read(&bp->b_pin_count) == 0) 1014 976 break; 1015 - if (atomic_read(&pb->pb_io_remaining)) 1016 - blk_run_address_space(pb->pb_target->pbr_mapping); 977 + if (atomic_read(&bp->b_io_remaining)) 978 + blk_run_address_space(bp->b_target->bt_mapping); 1017 979 schedule(); 1018 980 } 1019 - remove_wait_queue(&pb->pb_waiters, &wait); 981 + remove_wait_queue(&bp->b_waiters, &wait); 1020 982 set_current_state(TASK_RUNNING); 1021 983 } 1022 984 ··· 978 1032 * Buffer Utility Routines 979 1033 */ 980 1034 981 - /* 982 - * pagebuf_iodone 983 - * 984 - * pagebuf_iodone marks a buffer for which I/O is in progress 985 - * done with respect to that I/O. The pb_iodone routine, if 986 - * present, will be called as a side-effect. 987 - */ 988 1035 STATIC void 989 - pagebuf_iodone_work( 1036 + xfs_buf_iodone_work( 990 1037 void *v) 991 1038 { 992 1039 xfs_buf_t *bp = (xfs_buf_t *)v; 993 1040 994 - if (bp->pb_iodone) 995 - (*(bp->pb_iodone))(bp); 996 - else if (bp->pb_flags & PBF_ASYNC) 1041 + if (bp->b_iodone) 1042 + (*(bp->b_iodone))(bp); 1043 + else if (bp->b_flags & XBF_ASYNC) 997 1044 xfs_buf_relse(bp); 998 1045 } 999 1046 1000 1047 void 1001 - pagebuf_iodone( 1002 - xfs_buf_t *pb, 1048 + xfs_buf_ioend( 1049 + xfs_buf_t *bp, 1003 1050 int schedule) 1004 1051 { 1005 - pb->pb_flags &= ~(PBF_READ | PBF_WRITE); 1006 - if (pb->pb_error == 0) 1007 - pb->pb_flags |= PBF_DONE; 1052 + bp->b_flags &= ~(XBF_READ | XBF_WRITE); 1053 + if (bp->b_error == 0) 1054 + bp->b_flags |= XBF_DONE; 1008 1055 1009 - PB_TRACE(pb, "iodone", pb->pb_iodone); 1056 + XB_TRACE(bp, "iodone", bp->b_iodone); 1010 1057 1011 - if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) { 1058 + if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1012 1059 if (schedule) { 1013 - INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb); 1014 - queue_work(xfslogd_workqueue, &pb->pb_iodone_work); 1060 + INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); 1061 + queue_work(xfslogd_workqueue, &bp->b_iodone_work); 1015 1062 } else { 1016 - pagebuf_iodone_work(pb); 1063 + xfs_buf_iodone_work(bp); 1017 1064 } 1018 1065 } else { 1019 - up(&pb->pb_iodonesema); 1066 + up(&bp->b_iodonesema); 1020 1067 } 1021 1068 } 1022 1069 1023 - /* 1024 - * pagebuf_ioerror 1025 - * 1026 - * pagebuf_ioerror sets the error code for a buffer. 1027 - */ 1028 1070 void 1029 - pagebuf_ioerror( /* mark/clear buffer error flag */ 1030 - xfs_buf_t *pb, /* buffer to mark */ 1031 - int error) /* error to store (0 if none) */ 1071 + xfs_buf_ioerror( 1072 + xfs_buf_t *bp, 1073 + int error) 1032 1074 { 1033 1075 ASSERT(error >= 0 && error <= 0xffff); 1034 - pb->pb_error = (unsigned short)error; 1035 - PB_TRACE(pb, "ioerror", (unsigned long)error); 1076 + bp->b_error = (unsigned short)error; 1077 + XB_TRACE(bp, "ioerror", (unsigned long)error); 1036 1078 } 1037 1079 1038 1080 /* 1039 - * pagebuf_iostart 1040 - * 1041 - * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied. 
1042 - * If necessary, it will arrange for any disk space allocation required, 1043 - * and it will break up the request if the block mappings require it. 1044 - * The pb_iodone routine in the buffer supplied will only be called 1081 + * Initiate I/O on a buffer, based on the flags supplied. 1082 + * The b_iodone routine in the buffer supplied will only be called 1045 1083 * when all of the subsidiary I/O requests, if any, have been completed. 1046 - * pagebuf_iostart calls the pagebuf_ioinitiate routine or 1047 - * pagebuf_iorequest, if the former routine is not defined, to start 1048 - * the I/O on a given low-level request. 1049 1084 */ 1050 1085 int 1051 - pagebuf_iostart( /* start I/O on a buffer */ 1052 - xfs_buf_t *pb, /* buffer to start */ 1053 - page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */ 1054 - /* PBF_WRITE, PBF_DELWRI, */ 1055 - /* PBF_DONT_BLOCK */ 1086 + xfs_buf_iostart( 1087 + xfs_buf_t *bp, 1088 + xfs_buf_flags_t flags) 1056 1089 { 1057 1090 int status = 0; 1058 1091 1059 - PB_TRACE(pb, "iostart", (unsigned long)flags); 1092 + XB_TRACE(bp, "iostart", (unsigned long)flags); 1060 1093 1061 - if (flags & PBF_DELWRI) { 1062 - pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC); 1063 - pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC); 1064 - pagebuf_delwri_queue(pb, 1); 1094 + if (flags & XBF_DELWRI) { 1095 + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC); 1096 + bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC); 1097 + xfs_buf_delwri_queue(bp, 1); 1065 1098 return status; 1066 1099 } 1067 1100 1068 - pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \ 1069 - PBF_READ_AHEAD | _PBF_RUN_QUEUES); 1070 - pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \ 1071 - PBF_READ_AHEAD | _PBF_RUN_QUEUES); 1101 + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \ 1102 + XBF_READ_AHEAD | _XBF_RUN_QUEUES); 1103 + bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \ 1104 + XBF_READ_AHEAD | _XBF_RUN_QUEUES); 1072 1105 1073 - BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL); 1106 + BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL); 1074 1107 1075 1108 /* For writes allow an alternate strategy routine to precede 1076 1109 * the actual I/O request (which may not be issued at all in 1077 1110 * a shutdown situation, for example). 1078 1111 */ 1079 - status = (flags & PBF_WRITE) ? 1080 - pagebuf_iostrategy(pb) : pagebuf_iorequest(pb); 1112 + status = (flags & XBF_WRITE) ? 1113 + xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp); 1081 1114 1082 1115 /* Wait for I/O if we are not an async request. 1083 1116 * Note: async I/O request completion will release the buffer, 1084 1117 * and that can already be done by this point. So using the 1085 1118 * buffer pointer from here on, after async I/O, is invalid. 
1086 1119 */ 1087 - if (!status && !(flags & PBF_ASYNC)) 1088 - status = pagebuf_iowait(pb); 1120 + if (!status && !(flags & XBF_ASYNC)) 1121 + status = xfs_buf_iowait(bp); 1089 1122 1090 1123 return status; 1091 1124 } 1092 1125 1093 - /* 1094 - * Helper routine for pagebuf_iorequest 1095 - */ 1096 - 1097 1126 STATIC __inline__ int 1098 - _pagebuf_iolocked( 1099 - xfs_buf_t *pb) 1127 + _xfs_buf_iolocked( 1128 + xfs_buf_t *bp) 1100 1129 { 1101 - ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); 1102 - if (pb->pb_flags & PBF_READ) 1103 - return pb->pb_locked; 1130 + ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE)); 1131 + if (bp->b_flags & XBF_READ) 1132 + return bp->b_locked; 1104 1133 return 0; 1105 1134 } 1106 1135 1107 1136 STATIC __inline__ void 1108 - _pagebuf_iodone( 1109 - xfs_buf_t *pb, 1137 + _xfs_buf_ioend( 1138 + xfs_buf_t *bp, 1110 1139 int schedule) 1111 1140 { 1112 - if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { 1113 - pb->pb_locked = 0; 1114 - pagebuf_iodone(pb, schedule); 1141 + if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1142 + bp->b_locked = 0; 1143 + xfs_buf_ioend(bp, schedule); 1115 1144 } 1116 1145 } 1117 1146 1118 1147 STATIC int 1119 - bio_end_io_pagebuf( 1148 + xfs_buf_bio_end_io( 1120 1149 struct bio *bio, 1121 1150 unsigned int bytes_done, 1122 1151 int error) 1123 1152 { 1124 - xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; 1125 - unsigned int blocksize = pb->pb_target->pbr_bsize; 1153 + xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; 1154 + unsigned int blocksize = bp->b_target->bt_bsize; 1126 1155 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1127 1156 1128 1157 if (bio->bi_size) 1129 1158 return 1; 1130 1159 1131 1160 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1132 - pb->pb_error = EIO; 1161 + bp->b_error = EIO; 1133 1162 1134 1163 do { 1135 1164 struct page *page = bvec->bv_page; 1136 1165 1137 - if (unlikely(pb->pb_error)) { 1138 - if (pb->pb_flags & PBF_READ) 1166 + if (unlikely(bp->b_error)) { 1167 + if (bp->b_flags & XBF_READ) 1139 1168 ClearPageUptodate(page); 1140 1169 SetPageError(page); 1141 - } else if (blocksize == PAGE_CACHE_SIZE) { 1170 + } else if (blocksize >= PAGE_CACHE_SIZE) { 1142 1171 SetPageUptodate(page); 1143 1172 } else if (!PagePrivate(page) && 1144 - (pb->pb_flags & _PBF_PAGE_CACHE)) { 1173 + (bp->b_flags & _XBF_PAGE_CACHE)) { 1145 1174 set_page_region(page, bvec->bv_offset, bvec->bv_len); 1146 1175 } 1147 1176 1148 1177 if (--bvec >= bio->bi_io_vec) 1149 1178 prefetchw(&bvec->bv_page->flags); 1150 1179 1151 - if (_pagebuf_iolocked(pb)) { 1180 + if (_xfs_buf_iolocked(bp)) { 1152 1181 unlock_page(page); 1153 1182 } 1154 1183 } while (bvec >= bio->bi_io_vec); 1155 1184 1156 - _pagebuf_iodone(pb, 1); 1185 + _xfs_buf_ioend(bp, 1); 1157 1186 bio_put(bio); 1158 1187 return 0; 1159 1188 } 1160 1189 1161 1190 STATIC void 1162 - _pagebuf_ioapply( 1163 - xfs_buf_t *pb) 1191 + _xfs_buf_ioapply( 1192 + xfs_buf_t *bp) 1164 1193 { 1165 1194 int i, rw, map_i, total_nr_pages, nr_pages; 1166 1195 struct bio *bio; 1167 - int offset = pb->pb_offset; 1168 - int size = pb->pb_count_desired; 1169 - sector_t sector = pb->pb_bn; 1170 - unsigned int blocksize = pb->pb_target->pbr_bsize; 1171 - int locking = _pagebuf_iolocked(pb); 1196 + int offset = bp->b_offset; 1197 + int size = bp->b_count_desired; 1198 + sector_t sector = bp->b_bn; 1199 + unsigned int blocksize = bp->b_target->bt_bsize; 1200 + int locking = _xfs_buf_iolocked(bp); 1172 1201 1173 - total_nr_pages = pb->pb_page_count; 1202 + total_nr_pages = bp->b_page_count; 1174 1203 
map_i = 0; 1175 1204 1176 - if (pb->pb_flags & _PBF_RUN_QUEUES) { 1177 - pb->pb_flags &= ~_PBF_RUN_QUEUES; 1178 - rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC; 1205 + if (bp->b_flags & _XBF_RUN_QUEUES) { 1206 + bp->b_flags &= ~_XBF_RUN_QUEUES; 1207 + rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC; 1179 1208 } else { 1180 - rw = (pb->pb_flags & PBF_READ) ? READ : WRITE; 1209 + rw = (bp->b_flags & XBF_READ) ? READ : WRITE; 1181 1210 } 1182 1211 1183 - if (pb->pb_flags & PBF_ORDERED) { 1184 - ASSERT(!(pb->pb_flags & PBF_READ)); 1212 + if (bp->b_flags & XBF_ORDERED) { 1213 + ASSERT(!(bp->b_flags & XBF_READ)); 1185 1214 rw = WRITE_BARRIER; 1186 1215 } 1187 1216 1188 - /* Special code path for reading a sub page size pagebuf in -- 1217 + /* Special code path for reading a sub page size buffer in -- 1189 1218 * we populate up the whole page, and hence the other metadata 1190 1219 * in the same page. This optimization is only valid when the 1191 - * filesystem block size and the page size are equal. 1220 + * filesystem block size is not smaller than the page size. 1192 1221 */ 1193 - if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) && 1194 - (pb->pb_flags & PBF_READ) && locking && 1195 - (blocksize == PAGE_CACHE_SIZE)) { 1222 + if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && 1223 + (bp->b_flags & XBF_READ) && locking && 1224 + (blocksize >= PAGE_CACHE_SIZE)) { 1196 1225 bio = bio_alloc(GFP_NOIO, 1); 1197 1226 1198 - bio->bi_bdev = pb->pb_target->pbr_bdev; 1227 + bio->bi_bdev = bp->b_target->bt_bdev; 1199 1228 bio->bi_sector = sector - (offset >> BBSHIFT); 1200 - bio->bi_end_io = bio_end_io_pagebuf; 1201 - bio->bi_private = pb; 1229 + bio->bi_end_io = xfs_buf_bio_end_io; 1230 + bio->bi_private = bp; 1202 1231 1203 - bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0); 1232 + bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0); 1204 1233 size = 0; 1205 1234 1206 - atomic_inc(&pb->pb_io_remaining); 1235 + atomic_inc(&bp->b_io_remaining); 1207 1236 1208 1237 goto submit_io; 1209 1238 } 1210 1239 1211 1240 /* Lock down the pages which we need to for the request */ 1212 - if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) { 1241 + if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) { 1213 1242 for (i = 0; size; i++) { 1214 1243 int nbytes = PAGE_CACHE_SIZE - offset; 1215 - struct page *page = pb->pb_pages[i]; 1244 + struct page *page = bp->b_pages[i]; 1216 1245 1217 1246 if (nbytes > size) 1218 1247 nbytes = size; ··· 1197 1276 size -= nbytes; 1198 1277 offset = 0; 1199 1278 } 1200 - offset = pb->pb_offset; 1201 - size = pb->pb_count_desired; 1279 + offset = bp->b_offset; 1280 + size = bp->b_count_desired; 1202 1281 } 1203 1282 1204 1283 next_chunk: 1205 - atomic_inc(&pb->pb_io_remaining); 1284 + atomic_inc(&bp->b_io_remaining); 1206 1285 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); 1207 1286 if (nr_pages > total_nr_pages) 1208 1287 nr_pages = total_nr_pages; 1209 1288 1210 1289 bio = bio_alloc(GFP_NOIO, nr_pages); 1211 - bio->bi_bdev = pb->pb_target->pbr_bdev; 1290 + bio->bi_bdev = bp->b_target->bt_bdev; 1212 1291 bio->bi_sector = sector; 1213 - bio->bi_end_io = bio_end_io_pagebuf; 1214 - bio->bi_private = pb; 1292 + bio->bi_end_io = xfs_buf_bio_end_io; 1293 + bio->bi_private = bp; 1215 1294 1216 1295 for (; size && nr_pages; nr_pages--, map_i++) { 1217 - int nbytes = PAGE_CACHE_SIZE - offset; 1296 + int rbytes, nbytes = PAGE_CACHE_SIZE - offset; 1218 1297 1219 1298 if (nbytes > size) 1220 1299 nbytes = size; 1221 1300 1222 - if 
(bio_add_page(bio, pb->pb_pages[map_i], 1223 - nbytes, offset) < nbytes) 1301 + rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset); 1302 + if (rbytes < nbytes) 1224 1303 break; 1225 1304 1226 1305 offset = 0; ··· 1236 1315 goto next_chunk; 1237 1316 } else { 1238 1317 bio_put(bio); 1239 - pagebuf_ioerror(pb, EIO); 1318 + xfs_buf_ioerror(bp, EIO); 1240 1319 } 1241 1320 } 1242 1321 1243 - /* 1244 - * pagebuf_iorequest -- the core I/O request routine. 1245 - */ 1246 1322 int 1247 - pagebuf_iorequest( /* start real I/O */ 1248 - xfs_buf_t *pb) /* buffer to convey to device */ 1323 + xfs_buf_iorequest( 1324 + xfs_buf_t *bp) 1249 1325 { 1250 - PB_TRACE(pb, "iorequest", 0); 1326 + XB_TRACE(bp, "iorequest", 0); 1251 1327 1252 - if (pb->pb_flags & PBF_DELWRI) { 1253 - pagebuf_delwri_queue(pb, 1); 1328 + if (bp->b_flags & XBF_DELWRI) { 1329 + xfs_buf_delwri_queue(bp, 1); 1254 1330 return 0; 1255 1331 } 1256 1332 1257 - if (pb->pb_flags & PBF_WRITE) { 1258 - _pagebuf_wait_unpin(pb); 1333 + if (bp->b_flags & XBF_WRITE) { 1334 + xfs_buf_wait_unpin(bp); 1259 1335 } 1260 1336 1261 - pagebuf_hold(pb); 1337 + xfs_buf_hold(bp); 1262 1338 1263 1339 /* Set the count to 1 initially, this will stop an I/O 1264 1340 * completion callout which happens before we have started 1265 - * all the I/O from calling pagebuf_iodone too early. 1341 + * all the I/O from calling xfs_buf_ioend too early. 1266 1342 */ 1267 - atomic_set(&pb->pb_io_remaining, 1); 1268 - _pagebuf_ioapply(pb); 1269 - _pagebuf_iodone(pb, 0); 1343 + atomic_set(&bp->b_io_remaining, 1); 1344 + _xfs_buf_ioapply(bp); 1345 + _xfs_buf_ioend(bp, 0); 1270 1346 1271 - pagebuf_rele(pb); 1347 + xfs_buf_rele(bp); 1272 1348 return 0; 1273 1349 } 1274 1350 1275 1351 /* 1276 - * pagebuf_iowait 1277 - * 1278 - * pagebuf_iowait waits for I/O to complete on the buffer supplied. 1279 - * It returns immediately if no I/O is pending. In any case, it returns 1280 - * the error code, if any, or 0 if there is no error. 1352 + * Waits for I/O to complete on the buffer supplied. 1353 + * It returns immediately if no I/O is pending. 1354 + * It returns the I/O error code, if any, or 0 if there was no error. 1281 1355 */ 1282 1356 int 1283 - pagebuf_iowait( 1284 - xfs_buf_t *pb) 1357 + xfs_buf_iowait( 1358 + xfs_buf_t *bp) 1285 1359 { 1286 - PB_TRACE(pb, "iowait", 0); 1287 - if (atomic_read(&pb->pb_io_remaining)) 1288 - blk_run_address_space(pb->pb_target->pbr_mapping); 1289 - down(&pb->pb_iodonesema); 1290 - PB_TRACE(pb, "iowaited", (long)pb->pb_error); 1291 - return pb->pb_error; 1360 + XB_TRACE(bp, "iowait", 0); 1361 + if (atomic_read(&bp->b_io_remaining)) 1362 + blk_run_address_space(bp->b_target->bt_mapping); 1363 + down(&bp->b_iodonesema); 1364 + XB_TRACE(bp, "iowaited", (long)bp->b_error); 1365 + return bp->b_error; 1292 1366 } 1293 1367 1294 - caddr_t 1295 - pagebuf_offset( 1296 - xfs_buf_t *pb, 1368 + xfs_caddr_t 1369 + xfs_buf_offset( 1370 + xfs_buf_t *bp, 1297 1371 size_t offset) 1298 1372 { 1299 1373 struct page *page; 1300 1374 1301 - offset += pb->pb_offset; 1375 + if (bp->b_flags & XBF_MAPPED) 1376 + return XFS_BUF_PTR(bp) + offset; 1302 1377 1303 - page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; 1304 - return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); 1378 + offset += bp->b_offset; 1379 + page = bp->b_pages[offset >> PAGE_CACHE_SHIFT]; 1380 + return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1)); 1305 1381 } 1306 1382 1307 1383 /* 1308 - * pagebuf_iomove 1309 - * 1310 1384 * Move data into or out of a buffer. 
1311 1385 */ 1312 1386 void 1313 - pagebuf_iomove( 1314 - xfs_buf_t *pb, /* buffer to process */ 1387 + xfs_buf_iomove( 1388 + xfs_buf_t *bp, /* buffer to process */ 1315 1389 size_t boff, /* starting buffer offset */ 1316 1390 size_t bsize, /* length to copy */ 1317 1391 caddr_t data, /* data address */ 1318 - page_buf_rw_t mode) /* read/write flag */ 1392 + xfs_buf_rw_t mode) /* read/write/zero flag */ 1319 1393 { 1320 1394 size_t bend, cpoff, csize; 1321 1395 struct page *page; 1322 1396 1323 1397 bend = boff + bsize; 1324 1398 while (boff < bend) { 1325 - page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; 1326 - cpoff = page_buf_poff(boff + pb->pb_offset); 1399 + page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; 1400 + cpoff = xfs_buf_poff(boff + bp->b_offset); 1327 1401 csize = min_t(size_t, 1328 - PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); 1402 + PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff); 1329 1403 1330 1404 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); 1331 1405 1332 1406 switch (mode) { 1333 - case PBRW_ZERO: 1407 + case XBRW_ZERO: 1334 1408 memset(page_address(page) + cpoff, 0, csize); 1335 1409 break; 1336 - case PBRW_READ: 1410 + case XBRW_READ: 1337 1411 memcpy(data, page_address(page) + cpoff, csize); 1338 1412 break; 1339 - case PBRW_WRITE: 1413 + case XBRW_WRITE: 1340 1414 memcpy(page_address(page) + cpoff, data, csize); 1341 1415 } 1342 1416 ··· 1341 1425 } 1342 1426 1343 1427 /* 1344 - * Handling of buftargs. 1428 + * Handling of buffer targets (buftargs). 1345 1429 */ 1346 1430 1347 1431 /* 1348 - * Wait for any bufs with callbacks that have been submitted but 1349 - * have not yet returned... walk the hash list for the target. 1432 + * Wait for any bufs with callbacks that have been submitted but 1433 + * have not yet returned... walk the hash list for the target. 1350 1434 */ 1351 1435 void 1352 1436 xfs_wait_buftarg( ··· 1360 1444 hash = &btp->bt_hash[i]; 1361 1445 again: 1362 1446 spin_lock(&hash->bh_lock); 1363 - list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) { 1364 - ASSERT(btp == bp->pb_target); 1365 - if (!(bp->pb_flags & PBF_FS_MANAGED)) { 1447 + list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { 1448 + ASSERT(btp == bp->b_target); 1449 + if (!(bp->b_flags & XBF_FS_MANAGED)) { 1366 1450 spin_unlock(&hash->bh_lock); 1367 1451 /* 1368 1452 * Catch superblock reference count leaks 1369 1453 * immediately 1370 1454 */ 1371 - BUG_ON(bp->pb_bn == 0); 1455 + BUG_ON(bp->b_bn == 0); 1372 1456 delay(100); 1373 1457 goto again; 1374 1458 } ··· 1378 1462 } 1379 1463 1380 1464 /* 1381 - * Allocate buffer hash table for a given target. 1382 - * For devices containing metadata (i.e. not the log/realtime devices) 1383 - * we need to allocate a much larger hash table. 1465 + * Allocate buffer hash table for a given target. 1466 + * For devices containing metadata (i.e. not the log/realtime devices) 1467 + * we need to allocate a much larger hash table. 
1384 1468 */ 1385 1469 STATIC void 1386 1470 xfs_alloc_bufhash( ··· 1403 1487 xfs_free_bufhash( 1404 1488 xfs_buftarg_t *btp) 1405 1489 { 1406 - kmem_free(btp->bt_hash, 1407 - (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t)); 1490 + kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t)); 1408 1491 btp->bt_hash = NULL; 1492 + } 1493 + 1494 + /* 1495 + * buftarg list for delwrite queue processing 1496 + */ 1497 + STATIC LIST_HEAD(xfs_buftarg_list); 1498 + STATIC DEFINE_SPINLOCK(xfs_buftarg_lock); 1499 + 1500 + STATIC void 1501 + xfs_register_buftarg( 1502 + xfs_buftarg_t *btp) 1503 + { 1504 + spin_lock(&xfs_buftarg_lock); 1505 + list_add(&btp->bt_list, &xfs_buftarg_list); 1506 + spin_unlock(&xfs_buftarg_lock); 1507 + } 1508 + 1509 + STATIC void 1510 + xfs_unregister_buftarg( 1511 + xfs_buftarg_t *btp) 1512 + { 1513 + spin_lock(&xfs_buftarg_lock); 1514 + list_del(&btp->bt_list); 1515 + spin_unlock(&xfs_buftarg_lock); 1409 1516 } 1410 1517 1411 1518 void ··· 1438 1499 { 1439 1500 xfs_flush_buftarg(btp, 1); 1440 1501 if (external) 1441 - xfs_blkdev_put(btp->pbr_bdev); 1502 + xfs_blkdev_put(btp->bt_bdev); 1442 1503 xfs_free_bufhash(btp); 1443 - iput(btp->pbr_mapping->host); 1504 + iput(btp->bt_mapping->host); 1505 + 1506 + /* Unregister the buftarg first so that we don't get a 1507 + * wakeup finding a non-existent task 1508 + */ 1509 + xfs_unregister_buftarg(btp); 1510 + kthread_stop(btp->bt_task); 1511 + 1444 1512 kmem_free(btp, sizeof(*btp)); 1445 1513 } 1446 1514 ··· 1458 1512 unsigned int sectorsize, 1459 1513 int verbose) 1460 1514 { 1461 - btp->pbr_bsize = blocksize; 1462 - btp->pbr_sshift = ffs(sectorsize) - 1; 1463 - btp->pbr_smask = sectorsize - 1; 1515 + btp->bt_bsize = blocksize; 1516 + btp->bt_sshift = ffs(sectorsize) - 1; 1517 + btp->bt_smask = sectorsize - 1; 1464 1518 1465 - if (set_blocksize(btp->pbr_bdev, sectorsize)) { 1519 + if (set_blocksize(btp->bt_bdev, sectorsize)) { 1466 1520 printk(KERN_WARNING 1467 1521 "XFS: Cannot set_blocksize to %u on device %s\n", 1468 1522 sectorsize, XFS_BUFTARG_NAME(btp)); ··· 1482 1536 } 1483 1537 1484 1538 /* 1485 - * When allocating the initial buffer target we have not yet 1486 - * read in the superblock, so don't know what sized sectors 1487 - * are being used is at this early stage. Play safe. 1488 - */ 1539 + * When allocating the initial buffer target we have not yet 1540 + * read in the superblock, so don't know what sized sectors 1541 + * are being used is at this early stage. Play safe. 
1542 + */ 1489 1543 STATIC int 1490 1544 xfs_setsize_buftarg_early( 1491 1545 xfs_buftarg_t *btp, ··· 1533 1587 mapping->a_ops = &mapping_aops; 1534 1588 mapping->backing_dev_info = bdi; 1535 1589 mapping_set_gfp_mask(mapping, GFP_NOFS); 1536 - btp->pbr_mapping = mapping; 1590 + btp->bt_mapping = mapping; 1537 1591 return 0; 1592 + } 1593 + 1594 + STATIC int 1595 + xfs_alloc_delwrite_queue( 1596 + xfs_buftarg_t *btp) 1597 + { 1598 + int error = 0; 1599 + 1600 + INIT_LIST_HEAD(&btp->bt_list); 1601 + INIT_LIST_HEAD(&btp->bt_delwrite_queue); 1602 + spinlock_init(&btp->bt_delwrite_lock, "delwri_lock"); 1603 + btp->bt_flags = 0; 1604 + btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd"); 1605 + if (IS_ERR(btp->bt_task)) { 1606 + error = PTR_ERR(btp->bt_task); 1607 + goto out_error; 1608 + } 1609 + xfs_register_buftarg(btp); 1610 + out_error: 1611 + return error; 1538 1612 } 1539 1613 1540 1614 xfs_buftarg_t * ··· 1566 1600 1567 1601 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); 1568 1602 1569 - btp->pbr_dev = bdev->bd_dev; 1570 - btp->pbr_bdev = bdev; 1603 + btp->bt_dev = bdev->bd_dev; 1604 + btp->bt_bdev = bdev; 1571 1605 if (xfs_setsize_buftarg_early(btp, bdev)) 1572 1606 goto error; 1573 1607 if (xfs_mapping_buftarg(btp, bdev)) 1608 + goto error; 1609 + if (xfs_alloc_delwrite_queue(btp)) 1574 1610 goto error; 1575 1611 xfs_alloc_bufhash(btp, external); 1576 1612 return btp; ··· 1584 1616 1585 1617 1586 1618 /* 1587 - * Pagebuf delayed write buffer handling 1619 + * Delayed write buffer handling 1588 1620 */ 1589 - 1590 - STATIC LIST_HEAD(pbd_delwrite_queue); 1591 - STATIC DEFINE_SPINLOCK(pbd_delwrite_lock); 1592 - 1593 1621 STATIC void 1594 - pagebuf_delwri_queue( 1595 - xfs_buf_t *pb, 1622 + xfs_buf_delwri_queue( 1623 + xfs_buf_t *bp, 1596 1624 int unlock) 1597 1625 { 1598 - PB_TRACE(pb, "delwri_q", (long)unlock); 1599 - ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) == 1600 - (PBF_DELWRI|PBF_ASYNC)); 1626 + struct list_head *dwq = &bp->b_target->bt_delwrite_queue; 1627 + spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; 1601 1628 1602 - spin_lock(&pbd_delwrite_lock); 1629 + XB_TRACE(bp, "delwri_q", (long)unlock); 1630 + ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); 1631 + 1632 + spin_lock(dwlk); 1603 1633 /* If already in the queue, dequeue and place at tail */ 1604 - if (!list_empty(&pb->pb_list)) { 1605 - ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1606 - if (unlock) { 1607 - atomic_dec(&pb->pb_hold); 1608 - } 1609 - list_del(&pb->pb_list); 1634 + if (!list_empty(&bp->b_list)) { 1635 + ASSERT(bp->b_flags & _XBF_DELWRI_Q); 1636 + if (unlock) 1637 + atomic_dec(&bp->b_hold); 1638 + list_del(&bp->b_list); 1610 1639 } 1611 1640 1612 - pb->pb_flags |= _PBF_DELWRI_Q; 1613 - list_add_tail(&pb->pb_list, &pbd_delwrite_queue); 1614 - pb->pb_queuetime = jiffies; 1615 - spin_unlock(&pbd_delwrite_lock); 1641 + bp->b_flags |= _XBF_DELWRI_Q; 1642 + list_add_tail(&bp->b_list, dwq); 1643 + bp->b_queuetime = jiffies; 1644 + spin_unlock(dwlk); 1616 1645 1617 1646 if (unlock) 1618 - pagebuf_unlock(pb); 1647 + xfs_buf_unlock(bp); 1619 1648 } 1620 1649 1621 1650 void 1622 - pagebuf_delwri_dequeue( 1623 - xfs_buf_t *pb) 1651 + xfs_buf_delwri_dequeue( 1652 + xfs_buf_t *bp) 1624 1653 { 1654 + spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; 1625 1655 int dequeued = 0; 1626 1656 1627 - spin_lock(&pbd_delwrite_lock); 1628 - if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { 1629 - ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1630 - list_del_init(&pb->pb_list); 1657 + spin_lock(dwlk); 
1658 + if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) { 1659 + ASSERT(bp->b_flags & _XBF_DELWRI_Q); 1660 + list_del_init(&bp->b_list); 1631 1661 dequeued = 1; 1632 1662 } 1633 - pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1634 - spin_unlock(&pbd_delwrite_lock); 1663 + bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); 1664 + spin_unlock(dwlk); 1635 1665 1636 1666 if (dequeued) 1637 - pagebuf_rele(pb); 1667 + xfs_buf_rele(bp); 1638 1668 1639 - PB_TRACE(pb, "delwri_dq", (long)dequeued); 1669 + XB_TRACE(bp, "delwri_dq", (long)dequeued); 1640 1670 } 1641 1671 1642 1672 STATIC void 1643 - pagebuf_runall_queues( 1673 + xfs_buf_runall_queues( 1644 1674 struct workqueue_struct *queue) 1645 1675 { 1646 1676 flush_workqueue(queue); 1647 1677 } 1648 - 1649 - /* Defines for pagebuf daemon */ 1650 - STATIC struct task_struct *xfsbufd_task; 1651 - STATIC int xfsbufd_force_flush; 1652 - STATIC int xfsbufd_force_sleep; 1653 1678 1654 1679 STATIC int 1655 1680 xfsbufd_wakeup( 1656 1681 int priority, 1657 1682 gfp_t mask) 1658 1683 { 1659 - if (xfsbufd_force_sleep) 1660 - return 0; 1661 - xfsbufd_force_flush = 1; 1662 - barrier(); 1663 - wake_up_process(xfsbufd_task); 1684 + xfs_buftarg_t *btp; 1685 + 1686 + spin_lock(&xfs_buftarg_lock); 1687 + list_for_each_entry(btp, &xfs_buftarg_list, bt_list) { 1688 + if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) 1689 + continue; 1690 + set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); 1691 + wake_up_process(btp->bt_task); 1692 + } 1693 + spin_unlock(&xfs_buftarg_lock); 1664 1694 return 0; 1665 1695 } 1666 1696 ··· 1668 1702 { 1669 1703 struct list_head tmp; 1670 1704 unsigned long age; 1671 - xfs_buftarg_t *target; 1672 - xfs_buf_t *pb, *n; 1705 + xfs_buftarg_t *target = (xfs_buftarg_t *)data; 1706 + xfs_buf_t *bp, *n; 1707 + struct list_head *dwq = &target->bt_delwrite_queue; 1708 + spinlock_t *dwlk = &target->bt_delwrite_lock; 1673 1709 1674 1710 current->flags |= PF_MEMALLOC; 1675 1711 1676 1712 INIT_LIST_HEAD(&tmp); 1677 1713 do { 1678 1714 if (unlikely(freezing(current))) { 1679 - xfsbufd_force_sleep = 1; 1715 + set_bit(XBT_FORCE_SLEEP, &target->bt_flags); 1680 1716 refrigerator(); 1681 1717 } else { 1682 - xfsbufd_force_sleep = 0; 1718 + clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); 1683 1719 } 1684 1720 1685 1721 schedule_timeout_interruptible( 1686 1722 xfs_buf_timer_centisecs * msecs_to_jiffies(10)); 1687 1723 1688 1724 age = xfs_buf_age_centisecs * msecs_to_jiffies(10); 1689 - spin_lock(&pbd_delwrite_lock); 1690 - list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { 1691 - PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); 1692 - ASSERT(pb->pb_flags & PBF_DELWRI); 1725 + spin_lock(dwlk); 1726 + list_for_each_entry_safe(bp, n, dwq, b_list) { 1727 + XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp)); 1728 + ASSERT(bp->b_flags & XBF_DELWRI); 1693 1729 1694 - if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) { 1695 - if (!xfsbufd_force_flush && 1730 + if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { 1731 + if (!test_bit(XBT_FORCE_FLUSH, 1732 + &target->bt_flags) && 1696 1733 time_before(jiffies, 1697 - pb->pb_queuetime + age)) { 1698 - pagebuf_unlock(pb); 1734 + bp->b_queuetime + age)) { 1735 + xfs_buf_unlock(bp); 1699 1736 break; 1700 1737 } 1701 1738 1702 - pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1703 - pb->pb_flags |= PBF_WRITE; 1704 - list_move(&pb->pb_list, &tmp); 1739 + bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); 1740 + bp->b_flags |= XBF_WRITE; 1741 + list_move(&bp->b_list, &tmp); 1705 1742 } 1706 1743 } 1707 - 
spin_unlock(&pbd_delwrite_lock); 1744 + spin_unlock(dwlk); 1708 1745 1709 1746 while (!list_empty(&tmp)) { 1710 - pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1711 - target = pb->pb_target; 1747 + bp = list_entry(tmp.next, xfs_buf_t, b_list); 1748 + ASSERT(target == bp->b_target); 1712 1749 1713 - list_del_init(&pb->pb_list); 1714 - pagebuf_iostrategy(pb); 1750 + list_del_init(&bp->b_list); 1751 + xfs_buf_iostrategy(bp); 1715 1752 1716 - blk_run_address_space(target->pbr_mapping); 1753 + blk_run_address_space(target->bt_mapping); 1717 1754 } 1718 1755 1719 1756 if (as_list_len > 0) 1720 1757 purge_addresses(); 1721 1758 1722 - xfsbufd_force_flush = 0; 1759 + clear_bit(XBT_FORCE_FLUSH, &target->bt_flags); 1723 1760 } while (!kthread_should_stop()); 1724 1761 1725 1762 return 0; 1726 1763 } 1727 1764 1728 1765 /* 1729 - * Go through all incore buffers, and release buffers if they belong to 1730 - * the given device. This is used in filesystem error handling to 1731 - * preserve the consistency of its metadata. 1766 + * Go through all incore buffers, and release buffers if they belong to 1767 + * the given device. This is used in filesystem error handling to 1768 + * preserve the consistency of its metadata. 1732 1769 */ 1733 1770 int 1734 1771 xfs_flush_buftarg( ··· 1739 1770 int wait) 1740 1771 { 1741 1772 struct list_head tmp; 1742 - xfs_buf_t *pb, *n; 1773 + xfs_buf_t *bp, *n; 1743 1774 int pincount = 0; 1775 + struct list_head *dwq = &target->bt_delwrite_queue; 1776 + spinlock_t *dwlk = &target->bt_delwrite_lock; 1744 1777 1745 - pagebuf_runall_queues(xfsdatad_workqueue); 1746 - pagebuf_runall_queues(xfslogd_workqueue); 1778 + xfs_buf_runall_queues(xfsdatad_workqueue); 1779 + xfs_buf_runall_queues(xfslogd_workqueue); 1747 1780 1748 1781 INIT_LIST_HEAD(&tmp); 1749 - spin_lock(&pbd_delwrite_lock); 1750 - list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { 1751 - 1752 - if (pb->pb_target != target) 1753 - continue; 1754 - 1755 - ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)); 1756 - PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); 1757 - if (pagebuf_ispin(pb)) { 1782 + spin_lock(dwlk); 1783 + list_for_each_entry_safe(bp, n, dwq, b_list) { 1784 + ASSERT(bp->b_target == target); 1785 + ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q)); 1786 + XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp)); 1787 + if (xfs_buf_ispin(bp)) { 1758 1788 pincount++; 1759 1789 continue; 1760 1790 } 1761 1791 1762 - list_move(&pb->pb_list, &tmp); 1792 + list_move(&bp->b_list, &tmp); 1763 1793 } 1764 - spin_unlock(&pbd_delwrite_lock); 1794 + spin_unlock(dwlk); 1765 1795 1766 1796 /* 1767 1797 * Dropped the delayed write list lock, now walk the temporary list 1768 1798 */ 1769 - list_for_each_entry_safe(pb, n, &tmp, pb_list) { 1770 - pagebuf_lock(pb); 1771 - pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1772 - pb->pb_flags |= PBF_WRITE; 1799 + list_for_each_entry_safe(bp, n, &tmp, b_list) { 1800 + xfs_buf_lock(bp); 1801 + bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); 1802 + bp->b_flags |= XBF_WRITE; 1773 1803 if (wait) 1774 - pb->pb_flags &= ~PBF_ASYNC; 1804 + bp->b_flags &= ~XBF_ASYNC; 1775 1805 else 1776 - list_del_init(&pb->pb_list); 1806 + list_del_init(&bp->b_list); 1777 1807 1778 - pagebuf_iostrategy(pb); 1808 + xfs_buf_iostrategy(bp); 1779 1809 } 1780 1810 1781 1811 /* 1782 1812 * Remaining list items must be flushed before returning 1783 1813 */ 1784 1814 while (!list_empty(&tmp)) { 1785 - pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1815 + bp = list_entry(tmp.next, xfs_buf_t, b_list); 1786 
1816 1787 - list_del_init(&pb->pb_list); 1788 - xfs_iowait(pb); 1789 - xfs_buf_relse(pb); 1817 + list_del_init(&bp->b_list); 1818 + xfs_iowait(bp); 1819 + xfs_buf_relse(bp); 1790 1820 } 1791 1821 1792 1822 if (wait) 1793 - blk_run_address_space(target->pbr_mapping); 1823 + blk_run_address_space(target->bt_mapping); 1794 1824 1795 1825 return pincount; 1796 1826 } 1797 1827 1798 1828 int __init 1799 - pagebuf_init(void) 1829 + xfs_buf_init(void) 1800 1830 { 1801 1831 int error = -ENOMEM; 1802 1832 1803 - #ifdef PAGEBUF_TRACE 1804 - pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP); 1833 + #ifdef XFS_BUF_TRACE 1834 + xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); 1805 1835 #endif 1806 1836 1807 - pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); 1808 - if (!pagebuf_zone) 1837 + xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); 1838 + if (!xfs_buf_zone) 1809 1839 goto out_free_trace_buf; 1810 1840 1811 1841 xfslogd_workqueue = create_workqueue("xfslogd"); ··· 1815 1847 if (!xfsdatad_workqueue) 1816 1848 goto out_destroy_xfslogd_workqueue; 1817 1849 1818 - xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd"); 1819 - if (IS_ERR(xfsbufd_task)) { 1820 - error = PTR_ERR(xfsbufd_task); 1850 + xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup); 1851 + if (!xfs_buf_shake) 1821 1852 goto out_destroy_xfsdatad_workqueue; 1822 - } 1823 - 1824 - pagebuf_shake = kmem_shake_register(xfsbufd_wakeup); 1825 - if (!pagebuf_shake) 1826 - goto out_stop_xfsbufd; 1827 1853 1828 1854 return 0; 1829 1855 1830 - out_stop_xfsbufd: 1831 - kthread_stop(xfsbufd_task); 1832 1856 out_destroy_xfsdatad_workqueue: 1833 1857 destroy_workqueue(xfsdatad_workqueue); 1834 1858 out_destroy_xfslogd_workqueue: 1835 1859 destroy_workqueue(xfslogd_workqueue); 1836 1860 out_free_buf_zone: 1837 - kmem_zone_destroy(pagebuf_zone); 1861 + kmem_zone_destroy(xfs_buf_zone); 1838 1862 out_free_trace_buf: 1839 - #ifdef PAGEBUF_TRACE 1840 - ktrace_free(pagebuf_trace_buf); 1863 + #ifdef XFS_BUF_TRACE 1864 + ktrace_free(xfs_buf_trace_buf); 1841 1865 #endif 1842 1866 return error; 1843 1867 } 1844 1868 1845 1869 void 1846 - pagebuf_terminate(void) 1870 + xfs_buf_terminate(void) 1847 1871 { 1848 - kmem_shake_deregister(pagebuf_shake); 1849 - kthread_stop(xfsbufd_task); 1872 + kmem_shake_deregister(xfs_buf_shake); 1850 1873 destroy_workqueue(xfsdatad_workqueue); 1851 1874 destroy_workqueue(xfslogd_workqueue); 1852 - kmem_zone_destroy(pagebuf_zone); 1853 - #ifdef PAGEBUF_TRACE 1854 - ktrace_free(pagebuf_trace_buf); 1875 + kmem_zone_destroy(xfs_buf_zone); 1876 + #ifdef XFS_BUF_TRACE 1877 + ktrace_free(xfs_buf_trace_buf); 1855 1878 #endif 1856 1879 }
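The xfs_buf.c changes above are dominated by a mechanical rename — the old pagebuf_* entry points, pb_* fields and PBF_* flags become xfs_buf_*, b_* and XBF_* — plus one structural change: the single global pbd_delwrite_queue is replaced by a per-target delwri queue (bt_delwrite_queue, bt_delwrite_lock) serviced by a per-target xfsbufd thread held in bt_task. As a rough illustration of what a caller looks like under the renamed interface, here is a minimal sketch; it is not part of the patch. example_read_block() and its arguments are invented for illustration, while xfs_buf_read_flags(), XBF_LOCK, XBF_MAPPED, b_error, b_addr and xfs_buf_relse() are taken from the diff above.

/*
 * Hypothetical caller of the renamed buffer API (sketch only, not in
 * the patch): read one locked, mapped buffer from a buffer target and
 * hand it back to the caller.
 */
static int
example_read_block(
	xfs_buftarg_t	*target,	/* device target to read from */
	xfs_off_t	blkno,		/* start of range, in basic blocks */
	size_t		len,		/* length of range, in basic blocks */
	xfs_buf_t	**bpp)		/* out: locked, mapped buffer */
{
	xfs_buf_t	*bp;

	/* Synchronous read; NULL covers both lookup and setup failure. */
	bp = xfs_buf_read_flags(target, blkno, len, XBF_LOCK | XBF_MAPPED);
	if (!bp)
		return ENOMEM;

	if (bp->b_error) {
		int	error = bp->b_error;

		xfs_buf_relse(bp);	/* unlock and drop our reference */
		return error;
	}

	/* b_addr is usable here because XBF_MAPPED was requested. */
	ASSERT(bp->b_flags & XBF_MAPPED);
	*bpp = bp;
	return 0;
}

The caller remains responsible for releasing *bpp with xfs_buf_relse() when done, mirroring the no_buffer unwind paths of xfs_buf_get_flags() and xfs_buf_read_flags() in the diff above.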
+262 -386
fs/xfs/linux-2.6/xfs_buf.h
··· 32 32 * Base types 33 33 */ 34 34 35 - #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) 35 + #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) 36 36 37 - #define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) 38 - #define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) 39 - #define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) 40 - #define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) 37 + #define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) 38 + #define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) 39 + #define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) 40 + #define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) 41 41 42 - typedef enum page_buf_rw_e { 43 - PBRW_READ = 1, /* transfer into target memory */ 44 - PBRW_WRITE = 2, /* transfer from target memory */ 45 - PBRW_ZERO = 3 /* Zero target memory */ 46 - } page_buf_rw_t; 42 + typedef enum { 43 + XBRW_READ = 1, /* transfer into target memory */ 44 + XBRW_WRITE = 2, /* transfer from target memory */ 45 + XBRW_ZERO = 3, /* Zero target memory */ 46 + } xfs_buf_rw_t; 47 47 48 - 49 - typedef enum page_buf_flags_e { /* pb_flags values */ 50 - PBF_READ = (1 << 0), /* buffer intended for reading from device */ 51 - PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ 52 - PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ 53 - PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ 54 - PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ 55 - PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ 56 - PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ 57 - PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ 58 - PBF_ORDERED = (1 << 11), /* use ordered writes */ 59 - PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */ 48 + typedef enum { 49 + XBF_READ = (1 << 0), /* buffer intended for reading from device */ 50 + XBF_WRITE = (1 << 1), /* buffer intended for writing to device */ 51 + XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */ 52 + XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ 53 + XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ 54 + XBF_DELWRI = (1 << 6), /* buffer has dirty pages */ 55 + XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ 56 + XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ 57 + XBF_ORDERED = (1 << 11), /* use ordered writes */ 58 + XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */ 60 59 61 60 /* flags used only as arguments to access routines */ 62 - PBF_LOCK = (1 << 14), /* lock requested */ 63 - PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ 64 - PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ 61 + XBF_LOCK = (1 << 14), /* lock requested */ 62 + XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ 63 + XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ 65 64 66 65 /* flags used only internally */ 67 - _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 68 - _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 69 - _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 70 - _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ 71 - } page_buf_flags_t; 66 + _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 67 + _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 68 + _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 69 + _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ 70 + } xfs_buf_flags_t; 72 71 72 + typedef enum { 73 + 
XBT_FORCE_SLEEP = (0 << 1), 74 + XBT_FORCE_FLUSH = (1 << 1), 75 + } xfs_buftarg_flags_t; 73 76 74 77 typedef struct xfs_bufhash { 75 78 struct list_head bh_list; ··· 80 77 } xfs_bufhash_t; 81 78 82 79 typedef struct xfs_buftarg { 83 - dev_t pbr_dev; 84 - struct block_device *pbr_bdev; 85 - struct address_space *pbr_mapping; 86 - unsigned int pbr_bsize; 87 - unsigned int pbr_sshift; 88 - size_t pbr_smask; 80 + dev_t bt_dev; 81 + struct block_device *bt_bdev; 82 + struct address_space *bt_mapping; 83 + unsigned int bt_bsize; 84 + unsigned int bt_sshift; 85 + size_t bt_smask; 89 86 90 - /* per-device buffer hash table */ 87 + /* per device buffer hash table */ 91 88 uint bt_hashmask; 92 89 uint bt_hashshift; 93 90 xfs_bufhash_t *bt_hash; 91 + 92 + /* per device delwri queue */ 93 + struct task_struct *bt_task; 94 + struct list_head bt_list; 95 + struct list_head bt_delwrite_queue; 96 + spinlock_t bt_delwrite_lock; 97 + unsigned long bt_flags; 94 98 } xfs_buftarg_t; 95 99 96 100 /* 97 - * xfs_buf_t: Buffer structure for page cache-based buffers 101 + * xfs_buf_t: Buffer structure for pagecache-based buffers 98 102 * 99 - * This buffer structure is used by the page cache buffer management routines 100 - * to refer to an assembly of pages forming a logical buffer. The actual I/O 101 - * is performed with buffer_head structures, as required by drivers. 102 - * 103 - * The buffer structure is used on temporary basis only, and discarded when 104 - * released. The real data storage is recorded in the page cache. Metadata is 103 + * This buffer structure is used by the pagecache buffer management routines 104 + * to refer to an assembly of pages forming a logical buffer. 105 + * 106 + * The buffer structure is used on a temporary basis only, and discarded when 107 + * released. The real data storage is recorded in the pagecache. Buffers are 105 108 * hashed to the block device on which the file system resides. 
106 109 */ 107 110 108 111 struct xfs_buf; 112 + typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); 113 + typedef void (*xfs_buf_relse_t)(struct xfs_buf *); 114 + typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *); 109 115 110 - /* call-back function on I/O completion */ 111 - typedef void (*page_buf_iodone_t)(struct xfs_buf *); 112 - /* call-back function on I/O completion */ 113 - typedef void (*page_buf_relse_t)(struct xfs_buf *); 114 - /* pre-write function */ 115 - typedef int (*page_buf_bdstrat_t)(struct xfs_buf *); 116 - 117 - #define PB_PAGES 2 116 + #define XB_PAGES 2 118 117 119 118 typedef struct xfs_buf { 120 - struct semaphore pb_sema; /* semaphore for lockables */ 121 - unsigned long pb_queuetime; /* time buffer was queued */ 122 - atomic_t pb_pin_count; /* pin count */ 123 - wait_queue_head_t pb_waiters; /* unpin waiters */ 124 - struct list_head pb_list; 125 - page_buf_flags_t pb_flags; /* status flags */ 126 - struct list_head pb_hash_list; /* hash table list */ 127 - xfs_bufhash_t *pb_hash; /* hash table list start */ 128 - xfs_buftarg_t *pb_target; /* buffer target (device) */ 129 - atomic_t pb_hold; /* reference count */ 130 - xfs_daddr_t pb_bn; /* block number for I/O */ 131 - loff_t pb_file_offset; /* offset in file */ 132 - size_t pb_buffer_length; /* size of buffer in bytes */ 133 - size_t pb_count_desired; /* desired transfer size */ 134 - void *pb_addr; /* virtual address of buffer */ 135 - struct work_struct pb_iodone_work; 136 - atomic_t pb_io_remaining;/* #outstanding I/O requests */ 137 - page_buf_iodone_t pb_iodone; /* I/O completion function */ 138 - page_buf_relse_t pb_relse; /* releasing function */ 139 - page_buf_bdstrat_t pb_strat; /* pre-write function */ 140 - struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */ 141 - void *pb_fspriv; 142 - void *pb_fspriv2; 143 - void *pb_fspriv3; 144 - unsigned short pb_error; /* error code on I/O */ 145 - unsigned short pb_locked; /* page array is locked */ 146 - unsigned int pb_page_count; /* size of page array */ 147 - unsigned int pb_offset; /* page offset in first page */ 148 - struct page **pb_pages; /* array of page pointers */ 149 - struct page *pb_page_array[PB_PAGES]; /* inline pages */ 150 - #ifdef PAGEBUF_LOCK_TRACKING 151 - int pb_last_holder; 119 + struct semaphore b_sema; /* semaphore for lockables */ 120 + unsigned long b_queuetime; /* time buffer was queued */ 121 + atomic_t b_pin_count; /* pin count */ 122 + wait_queue_head_t b_waiters; /* unpin waiters */ 123 + struct list_head b_list; 124 + xfs_buf_flags_t b_flags; /* status flags */ 125 + struct list_head b_hash_list; /* hash table list */ 126 + xfs_bufhash_t *b_hash; /* hash table list start */ 127 + xfs_buftarg_t *b_target; /* buffer target (device) */ 128 + atomic_t b_hold; /* reference count */ 129 + xfs_daddr_t b_bn; /* block number for I/O */ 130 + xfs_off_t b_file_offset; /* offset in file */ 131 + size_t b_buffer_length;/* size of buffer in bytes */ 132 + size_t b_count_desired;/* desired transfer size */ 133 + void *b_addr; /* virtual address of buffer */ 134 + struct work_struct b_iodone_work; 135 + atomic_t b_io_remaining; /* #outstanding I/O requests */ 136 + xfs_buf_iodone_t b_iodone; /* I/O completion function */ 137 + xfs_buf_relse_t b_relse; /* releasing function */ 138 + xfs_buf_bdstrat_t b_strat; /* pre-write function */ 139 + struct semaphore b_iodonesema; /* Semaphore for I/O waiters */ 140 + void *b_fspriv; 141 + void *b_fspriv2; 142 + void *b_fspriv3; 143 + unsigned short b_error; /* error code on I/O */ 144 + 
unsigned short b_locked; /* page array is locked */ 145 + unsigned int b_page_count; /* size of page array */ 146 + unsigned int b_offset; /* page offset in first page */ 147 + struct page **b_pages; /* array of page pointers */ 148 + struct page *b_page_array[XB_PAGES]; /* inline pages */ 149 + #ifdef XFS_BUF_LOCK_TRACKING 150 + int b_last_holder; 152 151 #endif 153 152 } xfs_buf_t; 154 153 155 154 156 155 /* Finding and Reading Buffers */ 157 - 158 - extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */ 159 - /* the block is in memory */ 160 - xfs_buftarg_t *, /* inode for block */ 161 - loff_t, /* starting offset of range */ 162 - size_t, /* length of range */ 163 - page_buf_flags_t, /* PBF_LOCK */ 164 - xfs_buf_t *); /* newly allocated buffer */ 165 - 156 + extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t, 157 + xfs_buf_flags_t, xfs_buf_t *); 166 158 #define xfs_incore(buftarg,blkno,len,lockit) \ 167 - _pagebuf_find(buftarg, blkno ,len, lockit, NULL) 159 + _xfs_buf_find(buftarg, blkno ,len, lockit, NULL) 168 160 169 - extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */ 170 - xfs_buftarg_t *, /* inode for buffer */ 171 - loff_t, /* starting offset of range */ 172 - size_t, /* length of range */ 173 - page_buf_flags_t); /* PBF_LOCK, PBF_READ, */ 174 - /* PBF_ASYNC */ 175 - 161 + extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t, 162 + xfs_buf_flags_t); 176 163 #define xfs_buf_get(target, blkno, len, flags) \ 177 - xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 164 + xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED) 178 165 179 - extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */ 180 - xfs_buftarg_t *, /* inode for buffer */ 181 - loff_t, /* starting offset of range */ 182 - size_t, /* length of range */ 183 - page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */ 184 - 166 + extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t, 167 + xfs_buf_flags_t); 185 168 #define xfs_buf_read(target, blkno, len, flags) \ 186 - xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 169 + xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED) 187 170 188 - extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ 189 - /* no memory or disk address */ 190 - size_t len, 191 - xfs_buftarg_t *); /* mount point "fake" inode */ 192 - 193 - extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */ 194 - /* without disk address */ 195 - size_t len, 196 - xfs_buftarg_t *); /* mount point "fake" inode */ 197 - 198 - extern int pagebuf_associate_memory( 199 - xfs_buf_t *, 200 - void *, 201 - size_t); 202 - 203 - extern void pagebuf_hold( /* increment reference count */ 204 - xfs_buf_t *); /* buffer to hold */ 205 - 206 - extern void pagebuf_readahead( /* read ahead into cache */ 207 - xfs_buftarg_t *, /* target for buffer (or NULL) */ 208 - loff_t, /* starting offset of range */ 209 - size_t, /* length of range */ 210 - page_buf_flags_t); /* additional read flags */ 171 + extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); 172 + extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *); 173 + extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); 174 + extern void xfs_buf_hold(xfs_buf_t *); 175 + extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t, 176 + xfs_buf_flags_t); 211 177 212 178 /* Releasing Buffers */ 213 - 214 - extern void pagebuf_free( /* deallocate a buffer */ 215 - xfs_buf_t *); /* buffer 
to deallocate */ 216 - 217 - extern void pagebuf_rele( /* release hold on a buffer */ 218 - xfs_buf_t *); /* buffer to release */ 179 + extern void xfs_buf_free(xfs_buf_t *); 180 + extern void xfs_buf_rele(xfs_buf_t *); 219 181 220 182 /* Locking and Unlocking Buffers */ 221 - 222 - extern int pagebuf_cond_lock( /* lock buffer, if not locked */ 223 - /* (returns -EBUSY if locked) */ 224 - xfs_buf_t *); /* buffer to lock */ 225 - 226 - extern int pagebuf_lock_value( /* return count on lock */ 227 - xfs_buf_t *); /* buffer to check */ 228 - 229 - extern int pagebuf_lock( /* lock buffer */ 230 - xfs_buf_t *); /* buffer to lock */ 231 - 232 - extern void pagebuf_unlock( /* unlock buffer */ 233 - xfs_buf_t *); /* buffer to unlock */ 183 + extern int xfs_buf_cond_lock(xfs_buf_t *); 184 + extern int xfs_buf_lock_value(xfs_buf_t *); 185 + extern void xfs_buf_lock(xfs_buf_t *); 186 + extern void xfs_buf_unlock(xfs_buf_t *); 234 187 235 188 /* Buffer Read and Write Routines */ 189 + extern void xfs_buf_ioend(xfs_buf_t *, int); 190 + extern void xfs_buf_ioerror(xfs_buf_t *, int); 191 + extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t); 192 + extern int xfs_buf_iorequest(xfs_buf_t *); 193 + extern int xfs_buf_iowait(xfs_buf_t *); 194 + extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t, 195 + xfs_buf_rw_t); 236 196 237 - extern void pagebuf_iodone( /* mark buffer I/O complete */ 238 - xfs_buf_t *, /* buffer to mark */ 239 - int); /* run completion locally, or in 240 - * a helper thread. */ 241 - 242 - extern void pagebuf_ioerror( /* mark buffer in error (or not) */ 243 - xfs_buf_t *, /* buffer to mark */ 244 - int); /* error to store (0 if none) */ 245 - 246 - extern int pagebuf_iostart( /* start I/O on a buffer */ 247 - xfs_buf_t *, /* buffer to start */ 248 - page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */ 249 - /* PBF_READ, PBF_WRITE, */ 250 - /* PBF_DELWRI */ 251 - 252 - extern int pagebuf_iorequest( /* start real I/O */ 253 - xfs_buf_t *); /* buffer to convey to device */ 254 - 255 - extern int pagebuf_iowait( /* wait for buffer I/O done */ 256 - xfs_buf_t *); /* buffer to wait on */ 257 - 258 - extern void pagebuf_iomove( /* move data in/out of pagebuf */ 259 - xfs_buf_t *, /* buffer to manipulate */ 260 - size_t, /* starting buffer offset */ 261 - size_t, /* length in buffer */ 262 - caddr_t, /* data pointer */ 263 - page_buf_rw_t); /* direction */ 264 - 265 - static inline int pagebuf_iostrategy(xfs_buf_t *pb) 197 + static inline int xfs_buf_iostrategy(xfs_buf_t *bp) 266 198 { 267 - return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb); 199 + return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp); 268 200 } 269 201 270 - static inline int pagebuf_geterror(xfs_buf_t *pb) 202 + static inline int xfs_buf_geterror(xfs_buf_t *bp) 271 203 { 272 - return pb ? pb->pb_error : ENOMEM; 204 + return bp ? 
bp->b_error : ENOMEM; 273 205 } 274 206 275 207 /* Buffer Utility Routines */ 276 - 277 - extern caddr_t pagebuf_offset( /* pointer at offset in buffer */ 278 - xfs_buf_t *, /* buffer to offset into */ 279 - size_t); /* offset */ 208 + extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); 280 209 281 210 /* Pinning Buffer Storage in Memory */ 282 - 283 - extern void pagebuf_pin( /* pin buffer in memory */ 284 - xfs_buf_t *); /* buffer to pin */ 285 - 286 - extern void pagebuf_unpin( /* unpin buffered data */ 287 - xfs_buf_t *); /* buffer to unpin */ 288 - 289 - extern int pagebuf_ispin( /* check if buffer is pinned */ 290 - xfs_buf_t *); /* buffer to check */ 211 + extern void xfs_buf_pin(xfs_buf_t *); 212 + extern void xfs_buf_unpin(xfs_buf_t *); 213 + extern int xfs_buf_ispin(xfs_buf_t *); 291 214 292 215 /* Delayed Write Buffer Routines */ 293 - 294 - extern void pagebuf_delwri_dequeue(xfs_buf_t *); 216 + extern void xfs_buf_delwri_dequeue(xfs_buf_t *); 295 217 296 218 /* Buffer Daemon Setup Routines */ 219 + extern int xfs_buf_init(void); 220 + extern void xfs_buf_terminate(void); 297 221 298 - extern int pagebuf_init(void); 299 - extern void pagebuf_terminate(void); 300 - 301 - 302 - #ifdef PAGEBUF_TRACE 303 - extern ktrace_t *pagebuf_trace_buf; 304 - extern void pagebuf_trace( 305 - xfs_buf_t *, /* buffer being traced */ 306 - char *, /* description of operation */ 307 - void *, /* arbitrary diagnostic value */ 308 - void *); /* return address */ 222 + #ifdef XFS_BUF_TRACE 223 + extern ktrace_t *xfs_buf_trace_buf; 224 + extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); 309 225 #else 310 - # define pagebuf_trace(pb, id, ptr, ra) do { } while (0) 226 + #define xfs_buf_trace(bp,id,ptr,ra) do { } while (0) 311 227 #endif 312 228 313 - #define pagebuf_target_name(target) \ 314 - ({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; }) 229 + #define xfs_buf_target_name(target) \ 230 + ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) 315 231 316 232 233 + #define XFS_B_ASYNC XBF_ASYNC 234 + #define XFS_B_DELWRI XBF_DELWRI 235 + #define XFS_B_READ XBF_READ 236 + #define XFS_B_WRITE XBF_WRITE 237 + #define XFS_B_STALE XBF_STALE 317 238 318 - /* These are just for xfs_syncsub... 
it sets an internal variable 319 - * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t 320 - */ 321 - #define XFS_B_ASYNC PBF_ASYNC 322 - #define XFS_B_DELWRI PBF_DELWRI 323 - #define XFS_B_READ PBF_READ 324 - #define XFS_B_WRITE PBF_WRITE 325 - #define XFS_B_STALE PBF_STALE 239 + #define XFS_BUF_TRYLOCK XBF_TRYLOCK 240 + #define XFS_INCORE_TRYLOCK XBF_TRYLOCK 241 + #define XFS_BUF_LOCK XBF_LOCK 242 + #define XFS_BUF_MAPPED XBF_MAPPED 326 243 327 - #define XFS_BUF_TRYLOCK PBF_TRYLOCK 328 - #define XFS_INCORE_TRYLOCK PBF_TRYLOCK 329 - #define XFS_BUF_LOCK PBF_LOCK 330 - #define XFS_BUF_MAPPED PBF_MAPPED 244 + #define BUF_BUSY XBF_DONT_BLOCK 331 245 332 - #define BUF_BUSY PBF_DONT_BLOCK 246 + #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) 247 + #define XFS_BUF_ZEROFLAGS(bp) \ 248 + ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) 333 249 334 - #define XFS_BUF_BFLAGS(x) ((x)->pb_flags) 335 - #define XFS_BUF_ZEROFLAGS(x) \ 336 - ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI)) 337 - 338 - #define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE) 339 - #define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE) 340 - #define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE) 341 - #define XFS_BUF_SUPER_STALE(x) do { \ 342 - XFS_BUF_STALE(x); \ 343 - pagebuf_delwri_dequeue(x); \ 344 - XFS_BUF_DONE(x); \ 250 + #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) 251 + #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) 252 + #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE) 253 + #define XFS_BUF_SUPER_STALE(bp) do { \ 254 + XFS_BUF_STALE(bp); \ 255 + xfs_buf_delwri_dequeue(bp); \ 256 + XFS_BUF_DONE(bp); \ 345 257 } while (0) 346 258 347 - #define XFS_BUF_MANAGE PBF_FS_MANAGED 348 - #define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED) 259 + #define XFS_BUF_MANAGE XBF_FS_MANAGED 260 + #define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED) 349 261 350 - #define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI) 351 - #define XFS_BUF_UNDELAYWRITE(x) pagebuf_delwri_dequeue(x) 352 - #define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI) 262 + #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) 263 + #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) 264 + #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) 353 265 354 - #define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no) 355 - #define XFS_BUF_GETERROR(x) pagebuf_geterror(x) 356 - #define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0) 266 + #define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no) 267 + #define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp) 268 + #define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 
1 : 0) 357 269 358 - #define XFS_BUF_DONE(x) ((x)->pb_flags |= PBF_DONE) 359 - #define XFS_BUF_UNDONE(x) ((x)->pb_flags &= ~PBF_DONE) 360 - #define XFS_BUF_ISDONE(x) ((x)->pb_flags & PBF_DONE) 270 + #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) 271 + #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) 272 + #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) 361 273 362 - #define XFS_BUF_BUSY(x) do { } while (0) 363 - #define XFS_BUF_UNBUSY(x) do { } while (0) 364 - #define XFS_BUF_ISBUSY(x) (1) 274 + #define XFS_BUF_BUSY(bp) do { } while (0) 275 + #define XFS_BUF_UNBUSY(bp) do { } while (0) 276 + #define XFS_BUF_ISBUSY(bp) (1) 365 277 366 - #define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC) 367 - #define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC) 368 - #define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC) 278 + #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) 279 + #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) 280 + #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) 369 281 370 - #define XFS_BUF_ORDERED(x) ((x)->pb_flags |= PBF_ORDERED) 371 - #define XFS_BUF_UNORDERED(x) ((x)->pb_flags &= ~PBF_ORDERED) 372 - #define XFS_BUF_ISORDERED(x) ((x)->pb_flags & PBF_ORDERED) 282 + #define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED) 283 + #define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED) 284 + #define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED) 373 285 374 - #define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n") 375 - #define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n") 376 - #define XFS_BUF_ISSHUT(x) (0) 286 + #define XFS_BUF_SHUT(bp) do { } while (0) 287 + #define XFS_BUF_UNSHUT(bp) do { } while (0) 288 + #define XFS_BUF_ISSHUT(bp) (0) 377 289 378 - #define XFS_BUF_HOLD(x) pagebuf_hold(x) 379 - #define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ) 380 - #define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ) 381 - #define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ) 290 + #define XFS_BUF_HOLD(bp) xfs_buf_hold(bp) 291 + #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) 292 + #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) 293 + #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) 382 294 383 - #define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE) 384 - #define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE) 385 - #define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE) 295 + #define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE) 296 + #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) 297 + #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) 386 298 387 - #define XFS_BUF_ISUNINITIAL(x) (0) 388 - #define XFS_BUF_UNUNINITIAL(x) (0) 299 + #define XFS_BUF_ISUNINITIAL(bp) (0) 300 + #define XFS_BUF_UNUNINITIAL(bp) (0) 389 301 390 - #define XFS_BUF_BP_ISMAPPED(bp) 1 302 + #define XFS_BUF_BP_ISMAPPED(bp) (1) 391 303 392 - #define XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone 393 - #define XFS_BUF_SET_IODONE_FUNC(buf, func) \ 394 - (buf)->pb_iodone = (func) 395 - #define XFS_BUF_CLR_IODONE_FUNC(buf) \ 396 - (buf)->pb_iodone = NULL 397 - #define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \ 398 - (buf)->pb_strat = (func) 399 - #define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \ 400 - (buf)->pb_strat = NULL 304 + #define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) 305 + #define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) 306 + #define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) 307 + #define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func)) 308 + #define 
XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL) 401 309 402 - #define XFS_BUF_FSPRIVATE(buf, type) \ 403 - ((type)(buf)->pb_fspriv) 404 - #define XFS_BUF_SET_FSPRIVATE(buf, value) \ 405 - (buf)->pb_fspriv = (void *)(value) 406 - #define XFS_BUF_FSPRIVATE2(buf, type) \ 407 - ((type)(buf)->pb_fspriv2) 408 - #define XFS_BUF_SET_FSPRIVATE2(buf, value) \ 409 - (buf)->pb_fspriv2 = (void *)(value) 410 - #define XFS_BUF_FSPRIVATE3(buf, type) \ 411 - ((type)(buf)->pb_fspriv3) 412 - #define XFS_BUF_SET_FSPRIVATE3(buf, value) \ 413 - (buf)->pb_fspriv3 = (void *)(value) 414 - #define XFS_BUF_SET_START(buf) 310 + #define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) 311 + #define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) 312 + #define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2) 313 + #define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val)) 314 + #define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3) 315 + #define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val)) 316 + #define XFS_BUF_SET_START(bp) do { } while (0) 317 + #define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func)) 415 318 416 - #define XFS_BUF_SET_BRELSE_FUNC(buf, value) \ 417 - (buf)->pb_relse = (value) 319 + #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) 320 + #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) 321 + #define XFS_BUF_ADDR(bp) ((bp)->b_bn) 322 + #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) 323 + #define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) 324 + #define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off)) 325 + #define XFS_BUF_COUNT(bp) ((bp)->b_count_desired) 326 + #define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt)) 327 + #define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length) 328 + #define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt)) 418 329 419 - #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr) 330 + #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0) 331 + #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) 332 + #define XFS_BUF_SET_REF(bp, ref) do { } while (0) 420 333 421 - static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset) 334 + #define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp) 335 + 336 + #define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp) 337 + #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) 338 + #define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) 339 + #define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) 340 + #define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema); 341 + 342 + #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) 343 + #define XFS_BUF_TARGET(bp) ((bp)->b_target) 344 + #define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) 345 + 346 + static inline int xfs_bawrite(void *mp, xfs_buf_t *bp) 422 347 { 423 - if (bp->pb_flags & PBF_MAPPED) 424 - return XFS_BUF_PTR(bp) + offset; 425 - return (xfs_caddr_t) pagebuf_offset(bp, offset); 348 + bp->b_fspriv3 = mp; 349 + bp->b_strat = xfs_bdstrat_cb; 350 + xfs_buf_delwri_dequeue(bp); 351 + return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); 426 352 } 427 353 428 - #define XFS_BUF_SET_PTR(bp, val, count) \ 429 - pagebuf_associate_memory(bp, val, count) 430 - #define XFS_BUF_ADDR(bp) ((bp)->pb_bn) 431 - #define XFS_BUF_SET_ADDR(bp, blk) \ 432 - ((bp)->pb_bn = (xfs_daddr_t)(blk)) 433 - #define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset) 434 - #define XFS_BUF_SET_OFFSET(bp, off) \ 435 - ((bp)->pb_file_offset = (off)) 436 - #define 
XFS_BUF_COUNT(bp) ((bp)->pb_count_desired) 437 - #define XFS_BUF_SET_COUNT(bp, cnt) \ 438 - ((bp)->pb_count_desired = (cnt)) 439 - #define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length) 440 - #define XFS_BUF_SET_SIZE(bp, cnt) \ 441 - ((bp)->pb_buffer_length = (cnt)) 442 - #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) 443 - #define XFS_BUF_SET_VTYPE(bp, type) 444 - #define XFS_BUF_SET_REF(bp, ref) 445 - 446 - #define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp) 447 - 448 - #define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp) 449 - #define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0) 450 - #define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp) 451 - #define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp) 452 - #define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema); 453 - 454 - /* setup the buffer target from a buftarg structure */ 455 - #define XFS_BUF_SET_TARGET(bp, target) \ 456 - (bp)->pb_target = (target) 457 - #define XFS_BUF_TARGET(bp) ((bp)->pb_target) 458 - #define XFS_BUFTARG_NAME(target) \ 459 - pagebuf_target_name(target) 460 - 461 - #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) 462 - #define XFS_BUF_SET_VTYPE(bp, type) 463 - #define XFS_BUF_SET_REF(bp, ref) 464 - 465 - static inline int xfs_bawrite(void *mp, xfs_buf_t *bp) 354 + static inline void xfs_buf_relse(xfs_buf_t *bp) 466 355 { 467 - bp->pb_fspriv3 = mp; 468 - bp->pb_strat = xfs_bdstrat_cb; 469 - pagebuf_delwri_dequeue(bp); 470 - return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES); 356 + if (!bp->b_relse) 357 + xfs_buf_unlock(bp); 358 + xfs_buf_rele(bp); 471 359 } 472 360 473 - static inline void xfs_buf_relse(xfs_buf_t *bp) 474 - { 475 - if (!bp->pb_relse) 476 - pagebuf_unlock(bp); 477 - pagebuf_rele(bp); 478 - } 479 - 480 - #define xfs_bpin(bp) pagebuf_pin(bp) 481 - #define xfs_bunpin(bp) pagebuf_unpin(bp) 361 + #define xfs_bpin(bp) xfs_buf_pin(bp) 362 + #define xfs_bunpin(bp) xfs_buf_unpin(bp) 482 363 483 364 #define xfs_buftrace(id, bp) \ 484 - pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0)) 365 + xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0)) 485 366 486 - #define xfs_biodone(pb) \ 487 - pagebuf_iodone(pb, 0) 367 + #define xfs_biodone(bp) xfs_buf_ioend(bp, 0) 488 368 489 - #define xfs_biomove(pb, off, len, data, rw) \ 490 - pagebuf_iomove((pb), (off), (len), (data), \ 491 - ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ) 369 + #define xfs_biomove(bp, off, len, data, rw) \ 370 + xfs_buf_iomove((bp), (off), (len), (data), \ 371 + ((rw) == XFS_B_WRITE) ? 
XBRW_WRITE : XBRW_READ) 492 372 493 - #define xfs_biozero(pb, off, len) \ 494 - pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO) 373 + #define xfs_biozero(bp, off, len) \ 374 + xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) 495 375 496 376 497 - static inline int XFS_bwrite(xfs_buf_t *pb) 377 + static inline int XFS_bwrite(xfs_buf_t *bp) 498 378 { 499 - int iowait = (pb->pb_flags & PBF_ASYNC) == 0; 379 + int iowait = (bp->b_flags & XBF_ASYNC) == 0; 500 380 int error = 0; 501 381 502 382 if (!iowait) 503 - pb->pb_flags |= _PBF_RUN_QUEUES; 383 + bp->b_flags |= _XBF_RUN_QUEUES; 504 384 505 - pagebuf_delwri_dequeue(pb); 506 - pagebuf_iostrategy(pb); 385 + xfs_buf_delwri_dequeue(bp); 386 + xfs_buf_iostrategy(bp); 507 387 if (iowait) { 508 - error = pagebuf_iowait(pb); 509 - xfs_buf_relse(pb); 388 + error = xfs_buf_iowait(bp); 389 + xfs_buf_relse(bp); 510 390 } 511 391 return error; 512 392 } 513 393 514 - #define XFS_bdwrite(pb) \ 515 - pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC) 394 + #define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC) 516 395 517 396 static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp) 518 397 { 519 - bp->pb_strat = xfs_bdstrat_cb; 520 - bp->pb_fspriv3 = mp; 521 - 522 - return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC); 398 + bp->b_strat = xfs_bdstrat_cb; 399 + bp->b_fspriv3 = mp; 400 + return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC); 523 401 } 524 402 525 - #define XFS_bdstrat(bp) pagebuf_iorequest(bp) 403 + #define XFS_bdstrat(bp) xfs_buf_iorequest(bp) 526 404 527 - #define xfs_iowait(pb) pagebuf_iowait(pb) 405 + #define xfs_iowait(bp) xfs_buf_iowait(bp) 528 406 529 407 #define xfs_baread(target, rablkno, ralen) \ 530 - pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK) 531 - 532 - #define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target)) 533 - #define xfs_buf_get_noaddr(len, target) pagebuf_get_no_daddr((len), (target)) 534 - #define xfs_buf_free(bp) pagebuf_free(bp) 408 + xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK) 535 409 536 410 537 411 /* 538 412 * Handling of buftargs. 539 413 */ 540 - 541 414 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int); 542 415 extern void xfs_free_buftarg(xfs_buftarg_t *, int); 543 416 extern void xfs_wait_buftarg(xfs_buftarg_t *); 544 417 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); 545 418 extern int xfs_flush_buftarg(xfs_buftarg_t *, int); 546 419 547 - #define xfs_getsize_buftarg(buftarg) \ 548 - block_size((buftarg)->pbr_bdev) 549 - #define xfs_readonly_buftarg(buftarg) \ 550 - bdev_read_only((buftarg)->pbr_bdev) 551 - #define xfs_binval(buftarg) \ 552 - xfs_flush_buftarg(buftarg, 1) 553 - #define XFS_bflush(buftarg) \ 554 - xfs_flush_buftarg(buftarg, 1) 420 + #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) 421 + #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) 422 + 423 + #define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1) 424 + #define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1) 555 425 556 426 #endif /* __XFS_BUF_H__ */
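The xfs_buf.h changes above are largely a mechanical rename of the old pagebuf_*/PBF_* buffer-cache interface to xfs_buf_*/XBF_*, keeping the same single-bit enum flag scheme for buffer state. Below is a minimal user-space sketch of that flag pattern only; the demo_* names are invented for illustration and are not kernel identifiers.

#include <stdio.h>

typedef enum {
	DEMO_READ  = (1 << 0),	/* buffer intended for reading */
	DEMO_WRITE = (1 << 1),	/* buffer intended for writing */
	DEMO_ASYNC = (1 << 4),	/* initiator will not wait for completion */
} demo_buf_flags_t;

struct demo_buf {
	int	flags;		/* combination of demo_buf_flags_t bits */
};

int main(void)
{
	struct demo_buf b = { .flags = 0 };

	b.flags |= DEMO_READ | DEMO_ASYNC;		/* set two flags at once */
	printf("read?  %d\n", (b.flags & DEMO_READ) != 0);
	printf("write? %d\n", (b.flags & DEMO_WRITE) != 0);
	b.flags &= ~DEMO_ASYNC;				/* clear a single flag */
	printf("async? %d\n", (b.flags & DEMO_ASYNC) != 0);
	return 0;
}

Because each flag occupies its own bit, state tests such as XFS_BUF_ISREAD() reduce to a single AND, and the compatibility wrappers can stay pure macro renames.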
+2 -4
fs/xfs/linux-2.6/xfs_file.c
··· 509 509 vnode_t *vp = LINVFS_GET_VP(inode); 510 510 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); 511 511 int error = 0; 512 - bhv_desc_t *bdp; 513 512 xfs_inode_t *ip; 514 513 515 514 if (vp->v_vfsp->vfs_flag & VFS_DMI) { 516 - bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops); 517 - if (!bdp) { 515 + ip = xfs_vtoi(vp); 516 + if (!ip) { 518 517 error = -EINVAL; 519 518 goto open_exec_out; 520 519 } 521 - ip = XFS_BHVTOI(bdp); 522 520 if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) { 523 521 error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 524 522 0, 0, 0, NULL);
+3 -7
fs/xfs/linux-2.6/xfs_ioctl.c
··· 146 146 147 147 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { 148 148 xfs_inode_t *ip; 149 - bhv_desc_t *bhv; 150 149 int lock_mode; 151 150 152 151 /* need to get access to the xfs_inode to read the generation */ 153 - bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops); 154 - ASSERT(bhv); 155 - ip = XFS_BHVTOI(bhv); 152 + ip = xfs_vtoi(vp); 156 153 ASSERT(ip); 157 154 lock_mode = xfs_ilock_map_shared(ip); 158 155 ··· 748 751 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 749 752 mp->m_rtdev_targp : mp->m_ddev_targp; 750 753 751 - da.d_mem = da.d_miniosz = 1 << target->pbr_sshift; 752 - /* The size dio will do in one go */ 753 - da.d_maxiosz = 64 * PAGE_CACHE_SIZE; 754 + da.d_mem = da.d_miniosz = 1 << target->bt_sshift; 755 + da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); 754 756 755 757 if (copy_to_user(arg, &da, sizeof(da))) 756 758 return -XFS_ERROR(EFAULT);
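One functional change in the xfs_ioctl.c hunk is the direct-I/O geometry reported to userspace: d_maxiosz is no longer a fixed 64-page value but INT_MAX rounded down to a multiple of the minimum I/O size. A stand-alone sketch of that rounding follows, with an assumed 512-byte sector size.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int d_miniosz = 512;	/* assumed sector size; must be a power of two */
	int d_maxiosz = INT_MAX & ~(d_miniosz - 1);	/* largest multiple of d_miniosz <= INT_MAX */

	printf("miniosz = %d, maxiosz = %d, remainder = %d\n",
	       d_miniosz, d_maxiosz, d_maxiosz % d_miniosz);
	return 0;
}

Clearing the low bits works because d_miniosz is a power of two, so d_miniosz - 1 is a mask of exactly the bits that must be zero in an aligned size.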
+81 -40
fs/xfs/linux-2.6/xfs_iops.c
··· 54 54 #include <linux/capability.h> 55 55 #include <linux/xattr.h> 56 56 #include <linux/namei.h> 57 + #include <linux/security.h> 57 58 58 59 #define IS_NOATIME(inode) ((inode->i_sb->s_flags & MS_NOATIME) || \ 59 60 (S_ISDIR(inode->i_mode) && inode->i_sb->s_flags & MS_NODIRATIME)) 61 + 62 + /* 63 + * Get a XFS inode from a given vnode. 64 + */ 65 + xfs_inode_t * 66 + xfs_vtoi( 67 + struct vnode *vp) 68 + { 69 + bhv_desc_t *bdp; 70 + 71 + bdp = bhv_lookup_range(VN_BHV_HEAD(vp), 72 + VNODE_POSITION_XFS, VNODE_POSITION_XFS); 73 + if (unlikely(bdp == NULL)) 74 + return NULL; 75 + return XFS_BHVTOI(bdp); 76 + } 77 + 78 + /* 79 + * Bring the atime in the XFS inode uptodate. 80 + * Used before logging the inode to disk or when the Linux inode goes away. 81 + */ 82 + void 83 + xfs_synchronize_atime( 84 + xfs_inode_t *ip) 85 + { 86 + vnode_t *vp; 87 + 88 + vp = XFS_ITOV_NULL(ip); 89 + if (vp) { 90 + struct inode *inode = &vp->v_inode; 91 + ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; 92 + ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; 93 + } 94 + } 60 95 61 96 /* 62 97 * Change the requested timestamp in the given inode. ··· 111 76 { 112 77 struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); 113 78 timespec_t tv; 114 - 115 - /* 116 - * We're not supposed to change timestamps in readonly-mounted 117 - * filesystems. Throw it away if anyone asks us. 118 - */ 119 - if (unlikely(IS_RDONLY(inode))) 120 - return; 121 - 122 - /* 123 - * Don't update access timestamps on reads if mounted "noatime". 124 - * Throw it away if anyone asks us. 125 - */ 126 - if (unlikely( 127 - (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) && 128 - (flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) == 129 - XFS_ICHGTIME_ACC)) 130 - return; 131 79 132 80 nanotime(&tv); 133 81 if (flags & XFS_ICHGTIME_MOD) { ··· 148 130 * Variant on the above which avoids querying the system clock 149 131 * in situations where we know the Linux inode timestamps have 150 132 * just been updated (and so we can update our inode cheaply). 151 - * We also skip the readonly and noatime checks here, they are 152 - * also catered for already. 153 133 */ 154 134 void 155 135 xfs_ichgtime_fast( ··· 158 142 timespec_t *tvp; 159 143 160 144 /* 145 + * Atime updates for read() & friends are handled lazily now, and 146 + * explicit updates must go through xfs_ichgtime() 147 + */ 148 + ASSERT((flags & XFS_ICHGTIME_ACC) == 0); 149 + 150 + /* 161 151 * We're not supposed to change timestamps in readonly-mounted 162 152 * filesystems. Throw it away if anyone asks us. 163 153 */ 164 154 if (unlikely(IS_RDONLY(inode))) 165 155 return; 166 156 167 - /* 168 - * Don't update access timestamps on reads if mounted "noatime". 169 - * Throw it away if anyone asks us. 
170 - */ 171 - if (unlikely( 172 - (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) && 173 - ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) == 174 - XFS_ICHGTIME_ACC))) 175 - return; 176 - 177 157 if (flags & XFS_ICHGTIME_MOD) { 178 158 tvp = &inode->i_mtime; 179 159 ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec; 180 160 ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec; 181 - } 182 - if (flags & XFS_ICHGTIME_ACC) { 183 - tvp = &inode->i_atime; 184 - ip->i_d.di_atime.t_sec = (__int32_t)tvp->tv_sec; 185 - ip->i_d.di_atime.t_nsec = (__int32_t)tvp->tv_nsec; 186 161 } 187 162 if (flags & XFS_ICHGTIME_CHG) { 188 163 tvp = &inode->i_ctime; ··· 218 211 if (i_size_read(ip) != va.va_size) 219 212 i_size_write(ip, va.va_size); 220 213 } 214 + } 215 + 216 + /* 217 + * Hook in SELinux. This is not quite correct yet, what we really need 218 + * here (as we do for default ACLs) is a mechanism by which creation of 219 + * these attrs can be journalled at inode creation time (along with the 220 + * inode, of course, such that log replay can't cause these to be lost). 221 + */ 222 + STATIC int 223 + linvfs_init_security( 224 + struct vnode *vp, 225 + struct inode *dir) 226 + { 227 + struct inode *ip = LINVFS_GET_IP(vp); 228 + size_t length; 229 + void *value; 230 + char *name; 231 + int error; 232 + 233 + error = security_inode_init_security(ip, dir, &name, &value, &length); 234 + if (error) { 235 + if (error == -EOPNOTSUPP) 236 + return 0; 237 + return -error; 238 + } 239 + 240 + VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error); 241 + if (!error) 242 + VMODIFY(vp); 243 + 244 + kfree(name); 245 + kfree(value); 246 + return error; 221 247 } 222 248 223 249 /* ··· 318 278 break; 319 279 } 320 280 281 + if (!error) 282 + error = linvfs_init_security(vp, dir); 283 + 321 284 if (default_acl) { 322 285 if (!error) { 323 286 error = _ACL_INHERIT(vp, &va, default_acl); ··· 337 294 teardown.d_inode = ip = LINVFS_GET_IP(vp); 338 295 teardown.d_name = dentry->d_name; 339 296 340 - vn_mark_bad(vp); 341 - 342 297 if (S_ISDIR(mode)) 343 298 VOP_RMDIR(dvp, &teardown, NULL, err2); 344 299 else ··· 547 506 ASSERT(dentry); 548 507 ASSERT(nd); 549 508 550 - link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL); 509 + link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL); 551 510 if (!link) { 552 511 nd_set_link(nd, ERR_PTR(-ENOMEM)); 553 512 return NULL; ··· 563 522 vp = LINVFS_GET_VP(dentry->d_inode); 564 523 565 524 iov.iov_base = link; 566 - iov.iov_len = MAXNAMELEN; 525 + iov.iov_len = MAXPATHLEN; 567 526 568 527 uio->uio_iov = &iov; 569 528 uio->uio_offset = 0; 570 529 uio->uio_segflg = UIO_SYSSPACE; 571 - uio->uio_resid = MAXNAMELEN; 530 + uio->uio_resid = MAXPATHLEN; 572 531 uio->uio_iovcnt = 1; 573 532 574 533 VOP_READLINK(vp, uio, 0, NULL, error); ··· 576 535 kfree(link); 577 536 link = ERR_PTR(-error); 578 537 } else { 579 - link[MAXNAMELEN - uio->uio_resid] = '\0'; 538 + link[MAXPATHLEN - uio->uio_resid] = '\0'; 580 539 } 581 540 kfree(uio); 582 541
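The xfs_iops.c changes make atime handling lazy: the Linux inode carries the current atime, and the new xfs_synchronize_atime() copies it into the 32-bit on-disk timestamp fields only when the inode is logged or torn down. Below is a small stand-alone sketch of that narrowing copy; demo_timestamp is an invented stand-in for the on-disk timestamp pair.

#include <stdio.h>
#include <time.h>

typedef struct demo_timestamp {
	int	t_sec;			/* seconds, 32 bits on disk */
	int	t_nsec;			/* nanoseconds, 32 bits on disk */
} demo_timestamp_t;

int main(void)
{
	struct timespec atime = { .tv_sec = 1136073600, .tv_nsec = 500 };
	demo_timestamp_t di_atime;

	di_atime.t_sec  = (int)atime.tv_sec;	/* narrow to the on-disk width */
	di_atime.t_nsec = (int)atime.tv_nsec;
	printf("di_atime = %d.%09d\n", di_atime.t_sec, di_atime.t_nsec);
	return 0;
}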
-5
fs/xfs/linux-2.6/xfs_iops.h
··· 26 26 extern struct file_operations linvfs_invis_file_operations; 27 27 extern struct file_operations linvfs_dir_operations; 28 28 29 - extern struct address_space_operations linvfs_aops; 30 - 31 - extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 32 - extern void linvfs_unwritten_done(struct buffer_head *, int); 33 - 34 29 extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, 35 30 int, unsigned int, void __user *); 36 31
+1 -5
fs/xfs/linux-2.6/xfs_linux.h
··· 110 110 * delalloc and these ondisk-uninitialised buffers. 111 111 */ 112 112 BUFFER_FNS(PrivateStart, unwritten); 113 - static inline void set_buffer_unwritten_io(struct buffer_head *bh) 114 - { 115 - bh->b_end_io = linvfs_unwritten_done; 116 - } 117 113 118 114 #define restricted_chown xfs_params.restrict_chown.val 119 115 #define irix_sgid_inherit xfs_params.sgid_inherit.val ··· 228 232 #define xfs_itruncate_data(ip, off) \ 229 233 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) 230 234 #define xfs_statvfs_fsid(statp, mp) \ 231 - ({ u64 id = huge_encode_dev((mp)->m_dev); \ 235 + ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ 232 236 __kernel_fsid_t *fsid = &(statp)->f_fsid; \ 233 237 (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); }) 234 238
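The xfs_statvfs_fsid() macro above now derives the fsid from the data device's bt_dev and splits the 64-bit encoded device number into the two 32-bit words of f_fsid. A stand-alone sketch of that split, using an arbitrary example value in place of huge_encode_dev():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t id = 0x0000000800000021ULL;	/* example encoded device number */
	uint32_t fsid[2];

	fsid[0] = (uint32_t)id;			/* low 32 bits */
	fsid[1] = (uint32_t)(id >> 32);		/* high 32 bits */
	printf("fsid = { 0x%08x, 0x%08x }\n", fsid[0], fsid[1]);
	return 0;
}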
+19 -37
fs/xfs/linux-2.6/xfs_lrw.c
··· 233 233 xfs_buftarg_t *target = 234 234 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 235 235 mp->m_rtdev_targp : mp->m_ddev_targp; 236 - if ((*offset & target->pbr_smask) || 237 - (size & target->pbr_smask)) { 236 + if ((*offset & target->bt_smask) || 237 + (size & target->bt_smask)) { 238 238 if (*offset == ip->i_d.di_size) { 239 239 return (0); 240 240 } ··· 280 280 XFS_STATS_ADD(xs_read_bytes, ret); 281 281 282 282 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 283 - 284 - if (likely(!(ioflags & IO_INVIS))) 285 - xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC); 286 283 287 284 unlock_isem: 288 285 if (unlikely(ioflags & IO_ISDIRECT)) ··· 343 346 if (ret > 0) 344 347 XFS_STATS_ADD(xs_read_bytes, ret); 345 348 346 - if (likely(!(ioflags & IO_INVIS))) 347 - xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC); 348 - 349 349 return ret; 350 350 } 351 351 ··· 356 362 xfs_zero_last_block( 357 363 struct inode *ip, 358 364 xfs_iocore_t *io, 359 - xfs_off_t offset, 360 365 xfs_fsize_t isize, 361 366 xfs_fsize_t end_size) 362 367 { ··· 364 371 int nimaps; 365 372 int zero_offset; 366 373 int zero_len; 367 - int isize_fsb_offset; 368 374 int error = 0; 369 375 xfs_bmbt_irec_t imap; 370 376 loff_t loff; 371 - size_t lsize; 372 377 373 378 ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); 374 - ASSERT(offset > isize); 375 379 376 380 mp = io->io_mount; 377 381 378 - isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize); 379 - if (isize_fsb_offset == 0) { 382 + zero_offset = XFS_B_FSB_OFFSET(mp, isize); 383 + if (zero_offset == 0) { 380 384 /* 381 385 * There are no extra bytes in the last block on disk to 382 386 * zero, so return. ··· 403 413 */ 404 414 XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); 405 415 loff = XFS_FSB_TO_B(mp, last_fsb); 406 - lsize = XFS_FSB_TO_B(mp, 1); 407 416 408 - zero_offset = isize_fsb_offset; 409 - zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset; 417 + zero_len = mp->m_sb.sb_blocksize - zero_offset; 410 418 411 419 error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size); 412 420 ··· 435 447 struct inode *ip = LINVFS_GET_IP(vp); 436 448 xfs_fileoff_t start_zero_fsb; 437 449 xfs_fileoff_t end_zero_fsb; 438 - xfs_fileoff_t prev_zero_fsb; 439 450 xfs_fileoff_t zero_count_fsb; 440 451 xfs_fileoff_t last_fsb; 441 452 xfs_extlen_t buf_len_fsb; 442 - xfs_extlen_t prev_zero_count; 443 453 xfs_mount_t *mp; 444 454 int nimaps; 445 455 int error = 0; 446 456 xfs_bmbt_irec_t imap; 447 - loff_t loff; 448 - size_t lsize; 449 457 450 458 ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); 451 459 ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); 460 + ASSERT(offset > isize); 452 461 453 462 mp = io->io_mount; 454 463 ··· 453 468 * First handle zeroing the block on which isize resides. 454 469 * We only zero a part of that block so it is handled specially. 455 470 */ 456 - error = xfs_zero_last_block(ip, io, offset, isize, end_size); 471 + error = xfs_zero_last_block(ip, io, isize, end_size); 457 472 if (error) { 458 473 ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); 459 474 ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); ··· 481 496 } 482 497 483 498 ASSERT(start_zero_fsb <= end_zero_fsb); 484 - prev_zero_fsb = NULLFILEOFF; 485 - prev_zero_count = 0; 486 499 while (start_zero_fsb <= end_zero_fsb) { 487 500 nimaps = 1; 488 501 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; ··· 502 519 * that sits on a hole and sets the page as P_HOLE 503 520 * and calls remapf if it is a mapped file. 
504 521 */ 505 - prev_zero_fsb = NULLFILEOFF; 506 - prev_zero_count = 0; 507 - start_zero_fsb = imap.br_startoff + 508 - imap.br_blockcount; 522 + start_zero_fsb = imap.br_startoff + imap.br_blockcount; 509 523 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 510 524 continue; 511 525 } ··· 523 543 */ 524 544 XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 525 545 526 - loff = XFS_FSB_TO_B(mp, start_zero_fsb); 527 - lsize = XFS_FSB_TO_B(mp, buf_len_fsb); 528 - 529 - error = xfs_iozero(ip, loff, lsize, end_size); 546 + error = xfs_iozero(ip, 547 + XFS_FSB_TO_B(mp, start_zero_fsb), 548 + XFS_FSB_TO_B(mp, buf_len_fsb), 549 + end_size); 530 550 531 551 if (error) { 532 552 goto out_lock; 533 553 } 534 554 535 - prev_zero_fsb = start_zero_fsb; 536 - prev_zero_count = buf_len_fsb; 537 555 start_zero_fsb = imap.br_startoff + buf_len_fsb; 538 556 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 539 557 ··· 618 640 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 619 641 mp->m_rtdev_targp : mp->m_ddev_targp; 620 642 621 - if ((pos & target->pbr_smask) || (count & target->pbr_smask)) 643 + if ((pos & target->bt_smask) || (count & target->bt_smask)) 622 644 return XFS_ERROR(-EINVAL); 623 645 624 646 if (!VN_CACHED(vp) && pos < i_size_read(inode)) ··· 809 831 goto retry; 810 832 } 811 833 834 + isize = i_size_read(inode); 835 + if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize)) 836 + *offset = isize; 837 + 812 838 if (*offset > xip->i_d.di_size) { 813 839 xfs_ilock(xip, XFS_ILOCK_EXCL); 814 840 if (*offset > xip->i_d.di_size) { ··· 938 956 939 957 mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); 940 958 if (!XFS_FORCED_SHUTDOWN(mp)) { 941 - pagebuf_iorequest(bp); 959 + xfs_buf_iorequest(bp); 942 960 return 0; 943 961 } else { 944 962 xfs_buftrace("XFS__BDSTRAT IOERROR", bp); ··· 991 1009 * if (XFS_BUF_IS_GRIO(bp)) { 992 1010 */ 993 1011 994 - pagebuf_iorequest(bp); 1012 + xfs_buf_iorequest(bp); 995 1013 return 0; 996 1014 } 997 1015
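In the xfs_lrw.c hunks, xfs_zero_last_block() now computes the zeroing range directly from the EOF offset within its block (zero_offset and zero_len) instead of the old isize_fsb_offset temporary. A stand-alone sketch of that arithmetic with made-up block size and file size; a real filesystem would use XFS_B_FSB_OFFSET() and the superblock block size.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blocksize = 4096;	/* assumed filesystem block size */
	uint64_t isize = 10000;		/* assumed on-disk EOF */
	uint64_t zero_offset = isize & (blocksize - 1);	/* EOF offset within its block */

	if (zero_offset == 0) {
		printf("EOF is block aligned, nothing to zero\n");
	} else {
		uint64_t zero_len = blocksize - zero_offset;	/* trailing bytes to zero */
		printf("zero %llu bytes starting at file offset %llu\n",
		       (unsigned long long)zero_len, (unsigned long long)isize);
	}
	return 0;
}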
+1 -1
fs/xfs/linux-2.6/xfs_stats.c
··· 34 34 __uint64_t xs_write_bytes = 0; 35 35 __uint64_t xs_read_bytes = 0; 36 36 37 - static struct xstats_entry { 37 + static const struct xstats_entry { 38 38 char *desc; 39 39 int endpoint; 40 40 } xstats[] = {
+9 -9
fs/xfs/linux-2.6/xfs_stats.h
··· 109 109 __uint32_t vn_remove; /* # times vn_remove called */ 110 110 __uint32_t vn_free; /* # times vn_free called */ 111 111 #define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9) 112 - __uint32_t pb_get; 113 - __uint32_t pb_create; 114 - __uint32_t pb_get_locked; 115 - __uint32_t pb_get_locked_waited; 116 - __uint32_t pb_busy_locked; 117 - __uint32_t pb_miss_locked; 118 - __uint32_t pb_page_retries; 119 - __uint32_t pb_page_found; 120 - __uint32_t pb_get_read; 112 + __uint32_t xb_get; 113 + __uint32_t xb_create; 114 + __uint32_t xb_get_locked; 115 + __uint32_t xb_get_locked_waited; 116 + __uint32_t xb_busy_locked; 117 + __uint32_t xb_miss_locked; 118 + __uint32_t xb_page_retries; 119 + __uint32_t xb_page_found; 120 + __uint32_t xb_get_read; 121 121 /* Extra precision counters */ 122 122 __uint64_t xs_xstrat_bytes; 123 123 __uint64_t xs_write_bytes;
+11 -8
fs/xfs/linux-2.6/xfs_super.c
··· 306 306 xfs_fs_cmn_err(CE_NOTE, mp, 307 307 "Disabling barriers, not supported with external log device"); 308 308 mp->m_flags &= ~XFS_MOUNT_BARRIER; 309 + return; 309 310 } 310 311 311 - if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered == 312 + if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered == 312 313 QUEUE_ORDERED_NONE) { 313 314 xfs_fs_cmn_err(CE_NOTE, mp, 314 315 "Disabling barriers, not supported by the underlying device"); 315 316 mp->m_flags &= ~XFS_MOUNT_BARRIER; 317 + return; 316 318 } 317 319 318 320 error = xfs_barrier_test(mp); ··· 322 320 xfs_fs_cmn_err(CE_NOTE, mp, 323 321 "Disabling barriers, trial barrier write failed"); 324 322 mp->m_flags &= ~XFS_MOUNT_BARRIER; 323 + return; 325 324 } 326 325 } 327 326 ··· 330 327 xfs_blkdev_issue_flush( 331 328 xfs_buftarg_t *buftarg) 332 329 { 333 - blkdev_issue_flush(buftarg->pbr_bdev, NULL); 330 + blkdev_issue_flush(buftarg->bt_bdev, NULL); 334 331 } 335 332 336 333 STATIC struct inode * ··· 579 576 timeleft = schedule_timeout_interruptible(timeleft); 580 577 /* swsusp */ 581 578 try_to_freeze(); 582 - if (kthread_should_stop()) 579 + if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list)) 583 580 break; 584 581 585 582 spin_lock(&vfsp->vfs_sync_lock); ··· 969 966 if (error < 0) 970 967 goto undo_zones; 971 968 972 - error = pagebuf_init(); 969 + error = xfs_buf_init(); 973 970 if (error < 0) 974 - goto undo_pagebuf; 971 + goto undo_buffers; 975 972 976 973 vn_init(); 977 974 xfs_init(); ··· 985 982 return 0; 986 983 987 984 undo_register: 988 - pagebuf_terminate(); 985 + xfs_buf_terminate(); 989 986 990 - undo_pagebuf: 987 + undo_buffers: 991 988 linvfs_destroy_zones(); 992 989 993 990 undo_zones: ··· 1001 998 XFS_DM_EXIT(&xfs_fs_type); 1002 999 unregister_filesystem(&xfs_fs_type); 1003 1000 xfs_cleanup(); 1004 - pagebuf_terminate(); 1001 + xfs_buf_terminate(); 1005 1002 linvfs_destroy_zones(); 1006 1003 ktrace_uninit(); 1007 1004 }
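The xfssyncd loop above now refuses to exit while sync work is still queued: the break happens only when a stop has been requested and vfs_sync_list is empty. A stand-alone sketch of that shutdown condition, with a plain flag and counter standing in for kthread_should_stop() and the work list:

#include <stdio.h>

int main(void)
{
	int stop_requested = 0;
	int pending_items = 3;		/* pretend work already queued */
	int iterations = 0;

	for (;;) {
		if (iterations == 1)
			stop_requested = 1;	/* stop arrives while work remains */
		if (stop_requested && pending_items == 0)
			break;			/* exit only once the queue is drained */
		if (pending_items > 0)
			pending_items--;	/* process one queued item */
		iterations++;
	}
	printf("exited after %d iterations, %d items left\n",
	       iterations, pending_items);
	return 0;
}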
-1
fs/xfs/linux-2.6/xfs_vnode.c
··· 106 106 inode->i_blocks = vap->va_nblocks; 107 107 inode->i_mtime = vap->va_mtime; 108 108 inode->i_ctime = vap->va_ctime; 109 - inode->i_atime = vap->va_atime; 110 109 inode->i_blksize = vap->va_blocksize; 111 110 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE) 112 111 inode->i_flags |= S_IMMUTABLE;
+19
fs/xfs/linux-2.6/xfs_vnode.h
··· 566 566 } 567 567 568 568 /* 569 + * Extracting atime values in various formats 570 + */ 571 + static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime) 572 + { 573 + bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec; 574 + bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec; 575 + } 576 + 577 + static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts) 578 + { 579 + *ts = vp->v_inode.i_atime; 580 + } 581 + 582 + static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt) 583 + { 584 + *tt = vp->v_inode.i_atime.tv_sec; 585 + } 586 + 587 + /* 569 588 * Some useful predicates. 570 589 */ 571 590 #define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
+2 -2
fs/xfs/quota/xfs_dquot_item.c
··· 239 239 * trying to duplicate our effort. 240 240 */ 241 241 ASSERT(qip->qli_pushbuf_flag != 0); 242 - ASSERT(qip->qli_push_owner == get_thread_id()); 242 + ASSERT(qip->qli_push_owner == current_pid()); 243 243 244 244 /* 245 245 * If flushlock isn't locked anymore, chances are that the ··· 333 333 qip->qli_pushbuf_flag = 1; 334 334 ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno); 335 335 #ifdef DEBUG 336 - qip->qli_push_owner = get_thread_id(); 336 + qip->qli_push_owner = current_pid(); 337 337 #endif 338 338 /* 339 339 * The dquot is left locked.
+11 -7
fs/xfs/quota/xfs_qm.c
··· 1392 1392 { 1393 1393 xfs_trans_t *tp; 1394 1394 int error; 1395 - unsigned long s; 1395 + unsigned long s; 1396 1396 cred_t zerocr; 1397 + xfs_inode_t zeroino; 1397 1398 int committed; 1398 1399 1399 - tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE); 1400 + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); 1400 1401 if ((error = xfs_trans_reserve(tp, 1401 1402 XFS_QM_QINOCREATE_SPACE_RES(mp), 1402 1403 XFS_CREATE_LOG_RES(mp), 0, ··· 1407 1406 return (error); 1408 1407 } 1409 1408 memset(&zerocr, 0, sizeof(zerocr)); 1409 + memset(&zeroino, 0, sizeof(zeroino)); 1410 1410 1411 - if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0, 1411 + if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0, 1412 1412 &zerocr, 0, 1, ip, &committed))) { 1413 1413 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | 1414 1414 XFS_TRANS_ABORT); ··· 1920 1918 * at this point (because we intentionally didn't in dqget_noattach). 1921 1919 */ 1922 1920 if (error) { 1923 - xfs_qm_dqpurge_all(mp, 1924 - XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA| 1925 - XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF); 1921 + xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF); 1926 1922 goto error_return; 1927 1923 } 1928 1924 /* ··· 2743 2743 xfs_dqunlock(udqp); 2744 2744 ASSERT(ip->i_udquot == NULL); 2745 2745 ip->i_udquot = udqp; 2746 + ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp)); 2746 2747 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); 2747 2748 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); 2748 2749 } ··· 2753 2752 xfs_dqunlock(gdqp); 2754 2753 ASSERT(ip->i_gdquot == NULL); 2755 2754 ip->i_gdquot = gdqp; 2756 - ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); 2755 + ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp)); 2756 + ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ? 2757 + ip->i_d.di_gid : ip->i_d.di_projid) == 2758 + be32_to_cpu(gdqp->q_core.d_id)); 2757 2759 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); 2758 2760 } 2759 2761 }
+25 -35
fs/xfs/support/debug.c
··· 27 27 /* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */ 28 28 #define XFS_MAX_ERR_LEVEL 7 29 29 #define XFS_ERR_MASK ((1 << 3) - 1) 30 - static char *err_level[XFS_MAX_ERR_LEVEL+1] = 30 + static const char * const err_level[XFS_MAX_ERR_LEVEL+1] = 31 31 {KERN_EMERG, KERN_ALERT, KERN_CRIT, 32 32 KERN_ERR, KERN_WARNING, KERN_NOTICE, 33 33 KERN_INFO, KERN_DEBUG}; 34 - 35 - void 36 - assfail(char *a, char *f, int l) 37 - { 38 - printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l); 39 - BUG(); 40 - } 41 - 42 - #if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM)) 43 - 44 - unsigned long 45 - random(void) 46 - { 47 - static unsigned long RandomValue = 1; 48 - /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ 49 - register long rv = RandomValue; 50 - register long lo; 51 - register long hi; 52 - 53 - hi = rv / 127773; 54 - lo = rv % 127773; 55 - rv = 16807 * lo - 2836 * hi; 56 - if( rv <= 0 ) rv += 2147483647; 57 - return( RandomValue = rv ); 58 - } 59 - 60 - int 61 - get_thread_id(void) 62 - { 63 - return current->pid; 64 - } 65 - 66 - #endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */ 67 34 68 35 void 69 36 cmn_err(register int level, char *fmt, ...) ··· 57 90 BUG(); 58 91 } 59 92 60 - 61 93 void 62 94 icmn_err(register int level, char *fmt, va_list ap) 63 95 { ··· 75 109 if (level == CE_PANIC) 76 110 BUG(); 77 111 } 112 + 113 + void 114 + assfail(char *expr, char *file, int line) 115 + { 116 + printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); 117 + BUG(); 118 + } 119 + 120 + #if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM)) 121 + unsigned long random(void) 122 + { 123 + static unsigned long RandomValue = 1; 124 + /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ 125 + register long rv = RandomValue; 126 + register long lo; 127 + register long hi; 128 + 129 + hi = rv / 127773; 130 + lo = rv % 127773; 131 + rv = 16807 * lo - 2836 * hi; 132 + if (rv <= 0) rv += 2147483647; 133 + return RandomValue = rv; 134 + } 135 + #endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
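The debug-only random() helper kept above is a Park-Miller style multiplicative generator (multiplier 16807, modulus 2^31 - 1) written with Schrage's trick so the intermediate products fit in 32-bit arithmetic. A user-space copy for illustration, assuming long is at least 32 bits:

#include <stdio.h>

static long demo_random(void)
{
	static long value = 1;
	long lo, hi;

	hi = value / 127773;		/* Schrage decomposition: 2^31 - 1 = 16807 * 127773 + 2836 */
	lo = value % 127773;
	value = 16807 * lo - 2836 * hi;
	if (value <= 0)
		value += 2147483647;
	return value;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("%ld\n", demo_random());
	return 0;
}

Successive values cycle through 1 .. 2^31 - 2 and never reach 0, which is adequate for error injection and debugging but not for anything cryptographic.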
+14 -15
fs/xfs/support/debug.h
··· 31 31 __attribute__ ((format (printf, 2, 0))); 32 32 extern void cmn_err(int, char *, ...) 33 33 __attribute__ ((format (printf, 2, 3))); 34 + extern void assfail(char *expr, char *f, int l); 35 + 36 + #define prdev(fmt,targ,args...) \ 37 + printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args) 38 + 39 + #define ASSERT_ALWAYS(expr) \ 40 + (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 41 + 42 + #ifndef DEBUG 43 + # define ASSERT(expr) ((void)0) 44 + #else 45 + # define ASSERT(expr) ASSERT_ALWAYS(expr) 46 + extern unsigned long random(void); 47 + #endif 34 48 35 49 #ifndef STATIC 36 50 # define STATIC static 37 51 #endif 38 - 39 - #ifdef DEBUG 40 - # define ASSERT(EX) ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__)) 41 - #else 42 - # define ASSERT(x) ((void)0) 43 - #endif 44 - 45 - extern void assfail(char *, char *, int); 46 - #ifdef DEBUG 47 - extern unsigned long random(void); 48 - extern int get_thread_id(void); 49 - #endif 50 - 51 - #define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__)) 52 - #define debug_stop_all_cpus(param) /* param is "cpumask_t *" */ 53 52 54 53 #endif /* __XFS_SUPPORT_DEBUG_H__ */
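ASSERT() is now defined in terms of ASSERT_ALWAYS(), which forwards the failed expression, file and line to assfail(). A stand-alone sketch of that macro shape; demo_assfail() is an invented user-space stand-in that prints and exits instead of calling BUG().

#include <stdio.h>
#include <stdlib.h>

static void demo_assfail(const char *expr, const char *file, int line)
{
	printf("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
	exit(1);
}

#define DEMO_ASSERT_ALWAYS(expr) \
	((expr) != 0 ? (void)0 : demo_assfail(#expr, __FILE__, __LINE__))

int main(void)
{
	int blocks = 8;

	DEMO_ASSERT_ALWAYS(blocks > 0);		/* passes silently */
	DEMO_ASSERT_ALWAYS(blocks % 3 == 0);	/* fails and reports file/line */
	printf("not reached\n");
	return 0;
}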
+14 -9
fs/xfs/support/uuid.c
··· 27 27 mutex_init(&uuid_monitor); 28 28 } 29 29 30 + 31 + /* IRIX interpretation of an uuid_t */ 32 + typedef struct { 33 + __be32 uu_timelow; 34 + __be16 uu_timemid; 35 + __be16 uu_timehi; 36 + __be16 uu_clockseq; 37 + __be16 uu_node[3]; 38 + } xfs_uu_t; 39 + 30 40 /* 31 41 * uuid_getnodeuniq - obtain the node unique fields of a UUID. 32 42 * ··· 46 36 void 47 37 uuid_getnodeuniq(uuid_t *uuid, int fsid [2]) 48 38 { 49 - char *uu = (char *)uuid; 39 + xfs_uu_t *uup = (xfs_uu_t *)uuid; 50 40 51 - /* on IRIX, this function assumes big-endian fields within 52 - * the uuid, so we use INT_GET to get the same result on 53 - * little-endian systems 54 - */ 55 - 56 - fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) + 57 - INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT); 58 - fsid[1] = INT_GET(*(u_int32_t*)(uu ), ARCH_CONVERT); 41 + fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) | 42 + be16_to_cpu(uup->uu_timemid); 43 + fsid[1] = be16_to_cpu(uup->uu_timelow); 59 44 } 60 45 61 46 void
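uuid_getnodeuniq() above replaces raw pointer casts and INT_GET() with an explicit xfs_uu_t layout read through the byte-order helpers: uu_clockseq and uu_timemid form one fsid word, and uu_timelow (declared __be32) the other. A stand-alone sketch of that decoding with hand-rolled big-endian accessors and an arbitrary example UUID; the kernel code uses be16_to_cpu()/be32_to_cpu() instead.

#include <stdio.h>
#include <stdint.h>

static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* layout: uu_timelow(4) uu_timemid(2) uu_timehi(2) uu_clockseq(2) uu_node(6) */
	unsigned char uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x13, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	};
	int fsid[2];

	fsid[0] = (int)(((uint32_t)get_be16(&uuid[8]) << 16) | get_be16(&uuid[4]));
	fsid[1] = (int)get_be32(&uuid[0]);
	printf("fsid = { 0x%08x, 0x%08x }\n", fsid[0], fsid[1]);
	return 0;
}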
+19 -3
fs/xfs/xfs_arch.h
··· 40 40 #undef XFS_NATIVE_HOST 41 41 #endif 42 42 43 + #ifdef XFS_NATIVE_HOST 44 + #define cpu_to_be16(val) ((__be16)(val)) 45 + #define cpu_to_be32(val) ((__be32)(val)) 46 + #define cpu_to_be64(val) ((__be64)(val)) 47 + #define be16_to_cpu(val) ((__uint16_t)(val)) 48 + #define be32_to_cpu(val) ((__uint32_t)(val)) 49 + #define be64_to_cpu(val) ((__uint64_t)(val)) 50 + #else 51 + #define cpu_to_be16(val) (__swab16((__uint16_t)(val))) 52 + #define cpu_to_be32(val) (__swab32((__uint32_t)(val))) 53 + #define cpu_to_be64(val) (__swab64((__uint64_t)(val))) 54 + #define be16_to_cpu(val) (__swab16((__be16)(val))) 55 + #define be32_to_cpu(val) (__swab32((__be32)(val))) 56 + #define be64_to_cpu(val) (__swab64((__be64)(val))) 57 + #endif 58 + 43 59 #endif /* __KERNEL__ */ 44 60 45 61 /* do we need conversion? */ ··· 202 186 */ 203 187 204 188 #define XFS_GET_DIR_INO4(di) \ 205 - (((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) 189 + (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) 206 190 207 191 #define XFS_PUT_DIR_INO4(from, di) \ 208 192 do { \ ··· 213 197 } while (0) 214 198 215 199 #define XFS_DI_HI(di) \ 216 - (((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) 200 + (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) 217 201 #define XFS_DI_LO(di) \ 218 - (((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7])) 202 + (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7])) 219 203 220 204 #define XFS_GET_DIR_INO8(di) \ 221 205 (((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
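xfs_arch.h above gains kernel-independent cpu_to_be*/be*_to_cpu fallbacks and switches the XFS_GET_DIR_INO4()/XFS_DI_HI()/XFS_DI_LO() helpers to __u32 casts; all of them assemble multi-byte on-disk integers from explicit big-endian bytes. A stand-alone sketch of that packing for a 4-byte directory inode number; the demo_ino4 structure is illustrative only.

#include <stdio.h>
#include <stdint.h>

struct demo_ino4 {
	unsigned char i[4];		/* big-endian inode number bytes */
};

int main(void)
{
	uint32_t from = 0x00a1b2c3;
	struct demo_ino4 di;
	uint32_t back;

	di.i[0] = (from >> 24) & 0xff;	/* most significant byte first */
	di.i[1] = (from >> 16) & 0xff;
	di.i[2] = (from >> 8) & 0xff;
	di.i[3] = from & 0xff;

	back = ((uint32_t)di.i[0] << 24) | (di.i[1] << 16) |
	       (di.i[2] << 8) | di.i[3];
	printf("stored 0x%08x, read back 0x%08x\n", from, back);
	return 0;
}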
+6 -6
fs/xfs/xfs_attr_leaf.c
··· 128 128 return (offset >= minforkoff) ? minforkoff : 0; 129 129 } 130 130 131 - if (unlikely(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { 131 + if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { 132 132 if (bytes <= XFS_IFORK_ASIZE(dp)) 133 133 return mp->m_attroffset >> 3; 134 134 return 0; ··· 157 157 { 158 158 unsigned long s; 159 159 160 - if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR) && 160 + if ((mp->m_flags & XFS_MOUNT_ATTR2) && 161 161 !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) { 162 162 s = XFS_SB_LOCK(mp); 163 163 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { ··· 311 311 */ 312 312 totsize -= size; 313 313 if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname && 314 - !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { 314 + (mp->m_flags & XFS_MOUNT_ATTR2)) { 315 315 /* 316 316 * Last attribute now removed, revert to original 317 317 * inode format making all literal area available ··· 330 330 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); 331 331 ASSERT(dp->i_d.di_forkoff); 332 332 ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname || 333 - (mp->m_flags & XFS_MOUNT_COMPAT_ATTR)); 333 + !(mp->m_flags & XFS_MOUNT_ATTR2)); 334 334 dp->i_afp->if_ext_max = 335 335 XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); 336 336 dp->i_df.if_ext_max = ··· 739 739 + name_loc->namelen 740 740 + INT_GET(name_loc->valuelen, ARCH_CONVERT); 741 741 } 742 - if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) && 742 + if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && 743 743 (bytes == sizeof(struct xfs_attr_sf_hdr))) 744 744 return(-1); 745 745 return(xfs_attr_shortform_bytesfit(dp, bytes)); ··· 778 778 goto out; 779 779 780 780 if (forkoff == -1) { 781 - ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR)); 781 + ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); 782 782 783 783 /* 784 784 * Last attribute was removed, revert to original
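The xfs_attr_leaf.c hunk flips the sense of the attribute-format mount flag: tests of !(m_flags & XFS_MOUNT_COMPAT_ATTR) become positive tests of XFS_MOUNT_ATTR2, so every condition and ASSERT inverts. A stand-alone sketch of the two equivalent tests; the DEMO_* bit values are invented for the demonstration.

#include <stdio.h>

#define DEMO_MOUNT_COMPAT_ATTR	(1 << 0)	/* old sense: attr2 format disabled */
#define DEMO_MOUNT_ATTR2	(1 << 1)	/* new sense: attr2 format enabled */

static int attr2_enabled_old(int flags)
{
	return !(flags & DEMO_MOUNT_COMPAT_ATTR);	/* negative test */
}

static int attr2_enabled_new(int flags)
{
	return (flags & DEMO_MOUNT_ATTR2) != 0;		/* positive test */
}

int main(void)
{
	printf("old-style flags=0:           attr2? %d\n", attr2_enabled_old(0));
	printf("old-style flags=COMPAT_ATTR: attr2? %d\n",
	       attr2_enabled_old(DEMO_MOUNT_COMPAT_ATTR));
	printf("new-style flags=ATTR2:       attr2? %d\n",
	       attr2_enabled_new(DEMO_MOUNT_ATTR2));
	printf("new-style flags=0:           attr2? %d\n", attr2_enabled_new(0));
	return 0;
}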
+43 -36
fs/xfs/xfs_attr_leaf.h
··· 63 63 * the leaf_entry. The namespaces are independent only because we also look 64 64 * at the namespace bit when we are looking for a matching attribute name. 65 65 * 66 - * We also store a "incomplete" bit in the leaf_entry. It shows that an 66 + * We also store an "incomplete" bit in the leaf_entry. It shows that an 67 67 * attribute is in the middle of being created and should not be shown to 68 68 * the user if we crash during the time that the bit is set. We clear the 69 69 * bit when we have finished setting up the attribute. We do this because ··· 72 72 */ 73 73 #define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ 74 74 75 + typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */ 76 + __uint16_t base; /* base of free region */ 77 + __uint16_t size; /* length of free region */ 78 + } xfs_attr_leaf_map_t; 79 + 80 + typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */ 81 + xfs_da_blkinfo_t info; /* block type, links, etc. */ 82 + __uint16_t count; /* count of active leaf_entry's */ 83 + __uint16_t usedbytes; /* num bytes of names/values stored */ 84 + __uint16_t firstused; /* first used byte in name area */ 85 + __uint8_t holes; /* != 0 if blk needs compaction */ 86 + __uint8_t pad1; 87 + xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE]; 88 + /* N largest free regions */ 89 + } xfs_attr_leaf_hdr_t; 90 + 91 + typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */ 92 + xfs_dahash_t hashval; /* hash value of name */ 93 + __uint16_t nameidx; /* index into buffer of name/value */ 94 + __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ 95 + __uint8_t pad2; /* unused pad byte */ 96 + } xfs_attr_leaf_entry_t; 97 + 98 + typedef struct xfs_attr_leaf_name_local { 99 + __uint16_t valuelen; /* number of bytes in value */ 100 + __uint8_t namelen; /* length of name bytes */ 101 + __uint8_t nameval[1]; /* name/value bytes */ 102 + } xfs_attr_leaf_name_local_t; 103 + 104 + typedef struct xfs_attr_leaf_name_remote { 105 + xfs_dablk_t valueblk; /* block number of value bytes */ 106 + __uint32_t valuelen; /* number of bytes in value */ 107 + __uint8_t namelen; /* length of name bytes */ 108 + __uint8_t name[1]; /* name bytes */ 109 + } xfs_attr_leaf_name_remote_t; 110 + 75 111 typedef struct xfs_attr_leafblock { 76 - struct xfs_attr_leaf_hdr { /* constant-structure header block */ 77 - xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ 78 - __uint16_t count; /* count of active leaf_entry's */ 79 - __uint16_t usedbytes; /* num bytes of names/values stored */ 80 - __uint16_t firstused; /* first used byte in name area */ 81 - __uint8_t holes; /* != 0 if blk needs compaction */ 82 - __uint8_t pad1; 83 - struct xfs_attr_leaf_map { /* RLE map of free bytes */ 84 - __uint16_t base; /* base of free region */ 85 - __uint16_t size; /* length of free region */ 86 - } freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */ 87 - } hdr; 88 - struct xfs_attr_leaf_entry { /* sorted on key, not name */ 89 - xfs_dahash_t hashval; /* hash value of name */ 90 - __uint16_t nameidx; /* index into buffer of name/value */ 91 - __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ 92 - __uint8_t pad2; /* unused pad byte */ 93 - } entries[1]; /* variable sized array */ 94 - struct xfs_attr_leaf_name_local { 95 - __uint16_t valuelen; /* number of bytes in value */ 96 - __uint8_t namelen; /* length of name bytes */ 97 - __uint8_t nameval[1]; /* name/value bytes */ 98 - } namelist; /* grows from bottom of buf */ 99 - struct xfs_attr_leaf_name_remote { 100 - xfs_dablk_t valueblk; /* block number of value bytes */ 101 - __uint32_t valuelen; /* number of bytes in value */ 102 - __uint8_t namelen; /* length of name bytes */ 103 - __uint8_t name[1]; /* name bytes */ 104 - } valuelist; /* grows from bottom of buf */ 112 + xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */ 113 + xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */ 114 + xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */ 115 + xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */ 105 116 } xfs_attr_leafblock_t; 106 - typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t; 107 - typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t; 108 - typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t; 109 - typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t; 110 - typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t; 111 117 112 118 /* 113 119 * Flags used in the leaf_entry[i].flags field. ··· 156 150 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; 157 151 } 158 152 159 - #define XFS_ATTR_LEAF_NAME(leafp,idx) xfs_attr_leaf_name(leafp,idx) 153 + #define XFS_ATTR_LEAF_NAME(leafp,idx) \ 154 + xfs_attr_leaf_name(leafp,idx) 160 155 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) 161 156 { 162 157 return (&((char *)
+250 -160
fs/xfs/xfs_bmap.c
··· 2146 2146 return 0; /* keep gcc quite */ 2147 2147 } 2148 2148 2149 + /* 2150 + * Adjust the size of the new extent based on di_extsize and rt extsize. 2151 + */ 2152 + STATIC int 2153 + xfs_bmap_extsize_align( 2154 + xfs_mount_t *mp, 2155 + xfs_bmbt_irec_t *gotp, /* next extent pointer */ 2156 + xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 2157 + xfs_extlen_t extsz, /* align to this extent size */ 2158 + int rt, /* is this a realtime inode? */ 2159 + int eof, /* is extent at end-of-file? */ 2160 + int delay, /* creating delalloc extent? */ 2161 + int convert, /* overwriting unwritten extent? */ 2162 + xfs_fileoff_t *offp, /* in/out: aligned offset */ 2163 + xfs_extlen_t *lenp) /* in/out: aligned length */ 2164 + { 2165 + xfs_fileoff_t orig_off; /* original offset */ 2166 + xfs_extlen_t orig_alen; /* original length */ 2167 + xfs_fileoff_t orig_end; /* original off+len */ 2168 + xfs_fileoff_t nexto; /* next file offset */ 2169 + xfs_fileoff_t prevo; /* previous file offset */ 2170 + xfs_fileoff_t align_off; /* temp for offset */ 2171 + xfs_extlen_t align_alen; /* temp for length */ 2172 + xfs_extlen_t temp; /* temp for calculations */ 2173 + 2174 + if (convert) 2175 + return 0; 2176 + 2177 + orig_off = align_off = *offp; 2178 + orig_alen = align_alen = *lenp; 2179 + orig_end = orig_off + orig_alen; 2180 + 2181 + /* 2182 + * If this request overlaps an existing extent, then don't 2183 + * attempt to perform any additional alignment. 2184 + */ 2185 + if (!delay && !eof && 2186 + (orig_off >= gotp->br_startoff) && 2187 + (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 2188 + return 0; 2189 + } 2190 + 2191 + /* 2192 + * If the file offset is unaligned vs. the extent size 2193 + * we need to align it. This will be possible unless 2194 + * the file was previously written with a kernel that didn't 2195 + * perform this alignment, or if a truncate shot us in the 2196 + * foot. 2197 + */ 2198 + temp = do_mod(orig_off, extsz); 2199 + if (temp) { 2200 + align_alen += temp; 2201 + align_off -= temp; 2202 + } 2203 + /* 2204 + * Same adjustment for the end of the requested area. 2205 + */ 2206 + if ((temp = (align_alen % extsz))) { 2207 + align_alen += extsz - temp; 2208 + } 2209 + /* 2210 + * If the previous block overlaps with this proposed allocation 2211 + * then move the start forward without adjusting the length. 2212 + */ 2213 + if (prevp->br_startoff != NULLFILEOFF) { 2214 + if (prevp->br_startblock == HOLESTARTBLOCK) 2215 + prevo = prevp->br_startoff; 2216 + else 2217 + prevo = prevp->br_startoff + prevp->br_blockcount; 2218 + } else 2219 + prevo = 0; 2220 + if (align_off != orig_off && align_off < prevo) 2221 + align_off = prevo; 2222 + /* 2223 + * If the next block overlaps with this proposed allocation 2224 + * then move the start back without adjusting the length, 2225 + * but not before offset 0. 2226 + * This may of course make the start overlap previous block, 2227 + * and if we hit the offset 0 limit then the next block 2228 + * can still overlap too. 2229 + */ 2230 + if (!eof && gotp->br_startoff != NULLFILEOFF) { 2231 + if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 2232 + (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 2233 + nexto = gotp->br_startoff + gotp->br_blockcount; 2234 + else 2235 + nexto = gotp->br_startoff; 2236 + } else 2237 + nexto = NULLFILEOFF; 2238 + if (!eof && 2239 + align_off + align_alen != orig_end && 2240 + align_off + align_alen > nexto) 2241 + align_off = nexto > align_alen ? 
nexto - align_alen : 0; 2242 + /* 2243 + * If we're now overlapping the next or previous extent that 2244 + * means we can't fit an extsz piece in this hole. Just move 2245 + * the start forward to the first valid spot and set 2246 + * the length so we hit the end. 2247 + */ 2248 + if (align_off != orig_off && align_off < prevo) 2249 + align_off = prevo; 2250 + if (align_off + align_alen != orig_end && 2251 + align_off + align_alen > nexto && 2252 + nexto != NULLFILEOFF) { 2253 + ASSERT(nexto > prevo); 2254 + align_alen = nexto - align_off; 2255 + } 2256 + 2257 + /* 2258 + * If realtime, and the result isn't a multiple of the realtime 2259 + * extent size we need to remove blocks until it is. 2260 + */ 2261 + if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 2262 + /* 2263 + * We're not covering the original request, or 2264 + * we won't be able to once we fix the length. 2265 + */ 2266 + if (orig_off < align_off || 2267 + orig_end > align_off + align_alen || 2268 + align_alen - temp < orig_alen) 2269 + return XFS_ERROR(EINVAL); 2270 + /* 2271 + * Try to fix it by moving the start up. 2272 + */ 2273 + if (align_off + temp <= orig_off) { 2274 + align_alen -= temp; 2275 + align_off += temp; 2276 + } 2277 + /* 2278 + * Try to fix it by moving the end in. 2279 + */ 2280 + else if (align_off + align_alen - temp >= orig_end) 2281 + align_alen -= temp; 2282 + /* 2283 + * Set the start to the minimum then trim the length. 2284 + */ 2285 + else { 2286 + align_alen -= orig_off - align_off; 2287 + align_off = orig_off; 2288 + align_alen -= align_alen % mp->m_sb.sb_rextsize; 2289 + } 2290 + /* 2291 + * Result doesn't cover the request, fail it. 2292 + */ 2293 + if (orig_off < align_off || orig_end > align_off + align_alen) 2294 + return XFS_ERROR(EINVAL); 2295 + } else { 2296 + ASSERT(orig_off >= align_off); 2297 + ASSERT(orig_end <= align_off + align_alen); 2298 + } 2299 + 2300 + #ifdef DEBUG 2301 + if (!eof && gotp->br_startoff != NULLFILEOFF) 2302 + ASSERT(align_off + align_alen <= gotp->br_startoff); 2303 + if (prevp->br_startoff != NULLFILEOFF) 2304 + ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 2305 + #endif 2306 + 2307 + *lenp = align_alen; 2308 + *offp = align_off; 2309 + return 0; 2310 + } 2311 + 2149 2312 #define XFS_ALLOC_GAP_UNITS 4 2150 2313 2151 2314 /* 2152 2315 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 2153 2316 * It figures out where to ask the underlying allocator to put the new extent. 2154 2317 */ 2155 - STATIC int /* error */ 2318 + STATIC int 2156 2319 xfs_bmap_alloc( 2157 2320 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2158 2321 { ··· 2326 2163 xfs_mount_t *mp; /* mount point structure */ 2327 2164 int nullfb; /* true if ap->firstblock isn't set */ 2328 2165 int rt; /* true if inode is realtime */ 2329 - #ifdef __KERNEL__ 2330 - xfs_extlen_t prod=0; /* product factor for allocators */ 2331 - xfs_extlen_t ralen=0; /* realtime allocation length */ 2332 - #endif 2166 + xfs_extlen_t prod = 0; /* product factor for allocators */ 2167 + xfs_extlen_t ralen = 0; /* realtime allocation length */ 2168 + xfs_extlen_t align; /* minimum allocation alignment */ 2169 + xfs_rtblock_t rtx; 2333 2170 2334 2171 #define ISVALID(x,y) \ 2335 2172 (rt ? \ ··· 2345 2182 nullfb = ap->firstblock == NULLFSBLOCK; 2346 2183 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; 2347 2184 fb_agno = nullfb ? 
NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2348 - #ifdef __KERNEL__ 2349 2185 if (rt) { 2350 - xfs_extlen_t extsz; /* file extent size for rt */ 2351 - xfs_fileoff_t nexto; /* next file offset */ 2352 - xfs_extlen_t orig_alen; /* original ap->alen */ 2353 - xfs_fileoff_t orig_end; /* original off+len */ 2354 - xfs_fileoff_t orig_off; /* original ap->off */ 2355 - xfs_extlen_t mod_off; /* modulus calculations */ 2356 - xfs_fileoff_t prevo; /* previous file offset */ 2357 - xfs_rtblock_t rtx; /* realtime extent number */ 2358 - xfs_extlen_t temp; /* temp for rt calculations */ 2186 + align = ap->ip->i_d.di_extsize ? 2187 + ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; 2188 + /* Set prod to match the extent size */ 2189 + prod = align / mp->m_sb.sb_rextsize; 2359 2190 2360 - /* 2361 - * Set prod to match the realtime extent size. 2362 - */ 2363 - if (!(extsz = ap->ip->i_d.di_extsize)) 2364 - extsz = mp->m_sb.sb_rextsize; 2365 - prod = extsz / mp->m_sb.sb_rextsize; 2366 - orig_off = ap->off; 2367 - orig_alen = ap->alen; 2368 - orig_end = orig_off + orig_alen; 2369 - /* 2370 - * If the file offset is unaligned vs. the extent size 2371 - * we need to align it. This will be possible unless 2372 - * the file was previously written with a kernel that didn't 2373 - * perform this alignment. 2374 - */ 2375 - mod_off = do_mod(orig_off, extsz); 2376 - if (mod_off) { 2377 - ap->alen += mod_off; 2378 - ap->off -= mod_off; 2379 - } 2380 - /* 2381 - * Same adjustment for the end of the requested area. 2382 - */ 2383 - if ((temp = (ap->alen % extsz))) 2384 - ap->alen += extsz - temp; 2385 - /* 2386 - * If the previous block overlaps with this proposed allocation 2387 - * then move the start forward without adjusting the length. 2388 - */ 2389 - prevo = 2390 - ap->prevp->br_startoff == NULLFILEOFF ? 2391 - 0 : 2392 - (ap->prevp->br_startoff + 2393 - ap->prevp->br_blockcount); 2394 - if (ap->off != orig_off && ap->off < prevo) 2395 - ap->off = prevo; 2396 - /* 2397 - * If the next block overlaps with this proposed allocation 2398 - * then move the start back without adjusting the length, 2399 - * but not before offset 0. 2400 - * This may of course make the start overlap previous block, 2401 - * and if we hit the offset 0 limit then the next block 2402 - * can still overlap too. 2403 - */ 2404 - nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ? 2405 - NULLFILEOFF : ap->gotp->br_startoff; 2406 - if (!ap->eof && 2407 - ap->off + ap->alen != orig_end && 2408 - ap->off + ap->alen > nexto) 2409 - ap->off = nexto > ap->alen ? nexto - ap->alen : 0; 2410 - /* 2411 - * If we're now overlapping the next or previous extent that 2412 - * means we can't fit an extsz piece in this hole. Just move 2413 - * the start forward to the first valid spot and set 2414 - * the length so we hit the end. 2415 - */ 2416 - if ((ap->off != orig_off && ap->off < prevo) || 2417 - (ap->off + ap->alen != orig_end && 2418 - ap->off + ap->alen > nexto)) { 2419 - ap->off = prevo; 2420 - ap->alen = nexto - prevo; 2421 - } 2422 - /* 2423 - * If the result isn't a multiple of rtextents we need to 2424 - * remove blocks until it is. 2425 - */ 2426 - if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) { 2427 - /* 2428 - * We're not covering the original request, or 2429 - * we won't be able to once we fix the length. 2430 - */ 2431 - if (orig_off < ap->off || 2432 - orig_end > ap->off + ap->alen || 2433 - ap->alen - temp < orig_alen) 2434 - return XFS_ERROR(EINVAL); 2435 - /* 2436 - * Try to fix it by moving the start up. 
2437 - */ 2438 - if (ap->off + temp <= orig_off) { 2439 - ap->alen -= temp; 2440 - ap->off += temp; 2441 - } 2442 - /* 2443 - * Try to fix it by moving the end in. 2444 - */ 2445 - else if (ap->off + ap->alen - temp >= orig_end) 2446 - ap->alen -= temp; 2447 - /* 2448 - * Set the start to the minimum then trim the length. 2449 - */ 2450 - else { 2451 - ap->alen -= orig_off - ap->off; 2452 - ap->off = orig_off; 2453 - ap->alen -= ap->alen % mp->m_sb.sb_rextsize; 2454 - } 2455 - /* 2456 - * Result doesn't cover the request, fail it. 2457 - */ 2458 - if (orig_off < ap->off || orig_end > ap->off + ap->alen) 2459 - return XFS_ERROR(EINVAL); 2460 - } 2191 + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, 2192 + align, rt, ap->eof, 0, 2193 + ap->conv, &ap->off, &ap->alen); 2194 + if (error) 2195 + return error; 2196 + ASSERT(ap->alen); 2461 2197 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); 2198 + 2462 2199 /* 2463 2200 * If the offset & length are not perfectly aligned 2464 2201 * then kill prod, it will just get us in trouble. 2465 2202 */ 2466 - if (do_mod(ap->off, extsz) || ap->alen % extsz) 2203 + if (do_mod(ap->off, align) || ap->alen % align) 2467 2204 prod = 1; 2468 2205 /* 2469 2206 * Set ralen to be the actual requested length in rtextents. ··· 2389 2326 ap->rval = rtx * mp->m_sb.sb_rextsize; 2390 2327 } else 2391 2328 ap->rval = 0; 2329 + } else { 2330 + align = (ap->userdata && ap->ip->i_d.di_extsize && 2331 + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? 2332 + ap->ip->i_d.di_extsize : 0; 2333 + if (unlikely(align)) { 2334 + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, 2335 + align, rt, 2336 + ap->eof, 0, ap->conv, 2337 + &ap->off, &ap->alen); 2338 + ASSERT(!error); 2339 + ASSERT(ap->alen); 2340 + } 2341 + if (nullfb) 2342 + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2343 + else 2344 + ap->rval = ap->firstblock; 2392 2345 } 2393 - #else 2394 - if (rt) 2395 - ap->rval = 0; 2396 - #endif /* __KERNEL__ */ 2397 - else if (nullfb) 2398 - ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2399 - else 2400 - ap->rval = ap->firstblock; 2346 + 2401 2347 /* 2402 2348 * If allocating at eof, and there's a previous real block, 2403 2349 * try to use it's last block as our starting point. 
··· 2670 2598 args.total = ap->total; 2671 2599 args.minlen = ap->minlen; 2672 2600 } 2673 - if (ap->ip->i_d.di_extsize) { 2601 + if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && 2602 + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { 2674 2603 args.prod = ap->ip->i_d.di_extsize; 2675 2604 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2676 2605 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2677 - } else if (mp->m_sb.sb_blocksize >= NBPP) { 2606 + } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { 2678 2607 args.prod = 1; 2679 2608 args.mod = 0; 2680 2609 } else { ··· 3653 3580 3654 3581 ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, 3655 3582 lastxp, gotp, prevp); 3656 - rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME; 3657 - if(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) { 3583 + rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 3584 + if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { 3658 3585 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " 3659 3586 "start_block : %llx start_off : %llx blkcnt : %llx " 3660 3587 "extent-state : %x \n", 3661 - (ip->i_mount)->m_fsname,(long long)ip->i_ino, 3662 - gotp->br_startblock, gotp->br_startoff, 3663 - gotp->br_blockcount,gotp->br_state); 3588 + (ip->i_mount)->m_fsname, (long long)ip->i_ino, 3589 + (unsigned long long)gotp->br_startblock, 3590 + (unsigned long long)gotp->br_startoff, 3591 + (unsigned long long)gotp->br_blockcount, 3592 + gotp->br_state); 3664 3593 } 3665 3594 return ep; 3666 3595 } ··· 3950 3875 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 3951 3876 if (!ip->i_d.di_forkoff) 3952 3877 ip->i_d.di_forkoff = mp->m_attroffset >> 3; 3953 - else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) 3878 + else if (mp->m_flags & XFS_MOUNT_ATTR2) 3954 3879 version = 2; 3955 3880 break; 3956 3881 default: ··· 4098 4023 */ 4099 4024 if (whichfork == XFS_DATA_FORK) { 4100 4025 maxleafents = MAXEXTNUM; 4101 - sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ? 4102 - mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS); 4026 + sz = (mp->m_flags & XFS_MOUNT_ATTR2) ? 4027 + XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset; 4103 4028 } else { 4104 4029 maxleafents = MAXAEXTNUM; 4105 - sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ? 4106 - mp->m_sb.sb_inodesize - mp->m_attroffset : 4107 - XFS_BMDR_SPACE_CALC(MINABTPTRS); 4030 + sz = (mp->m_flags & XFS_MOUNT_ATTR2) ? 4031 + XFS_BMDR_SPACE_CALC(MINABTPTRS) : 4032 + mp->m_sb.sb_inodesize - mp->m_attroffset; 4108 4033 } 4109 4034 maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); 4110 4035 minleafrecs = mp->m_bmap_dmnr[0]; ··· 4493 4418 num_recs = be16_to_cpu(block->bb_numrecs); 4494 4419 if (unlikely(i + num_recs > room)) { 4495 4420 ASSERT(i + num_recs <= room); 4496 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 4497 - "corrupt dinode %Lu, (btree extents). 
Unmount and run xfs_repair.", 4421 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 4422 + "corrupt dinode %Lu, (btree extents).", 4498 4423 (unsigned long long) ip->i_ino); 4499 4424 XFS_ERROR_REPORT("xfs_bmap_read_extents(1)", 4500 4425 XFS_ERRLEVEL_LOW, ··· 4665 4590 char contig; /* allocation must be one extent */ 4666 4591 char delay; /* this request is for delayed alloc */ 4667 4592 char exact; /* don't do all of wasdelayed extent */ 4593 + char convert; /* unwritten extent I/O completion */ 4668 4594 xfs_bmbt_rec_t *ep; /* extent list entry pointer */ 4669 4595 int error; /* error return */ 4670 4596 xfs_bmbt_irec_t got; /* current extent list record */ ··· 4719 4643 } 4720 4644 if (XFS_FORCED_SHUTDOWN(mp)) 4721 4645 return XFS_ERROR(EIO); 4722 - rt = XFS_IS_REALTIME_INODE(ip); 4646 + rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4723 4647 ifp = XFS_IFORK_PTR(ip, whichfork); 4724 4648 ASSERT(ifp->if_ext_max == 4725 4649 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); ··· 4730 4654 delay = (flags & XFS_BMAPI_DELAY) != 0; 4731 4655 trim = (flags & XFS_BMAPI_ENTIRE) == 0; 4732 4656 userdata = (flags & XFS_BMAPI_METADATA) == 0; 4657 + convert = (flags & XFS_BMAPI_CONVERT) != 0; 4733 4658 exact = (flags & XFS_BMAPI_EXACT) != 0; 4734 4659 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; 4735 4660 contig = (flags & XFS_BMAPI_CONTIG) != 0; ··· 4825 4748 } 4826 4749 minlen = contig ? alen : 1; 4827 4750 if (delay) { 4828 - xfs_extlen_t extsz = 0; 4751 + xfs_extlen_t extsz; 4829 4752 4830 4753 /* Figure out the extent size, adjust alen */ 4831 4754 if (rt) { 4832 4755 if (!(extsz = ip->i_d.di_extsize)) 4833 4756 extsz = mp->m_sb.sb_rextsize; 4834 - alen = roundup(alen, extsz); 4835 - extsz = alen / mp->m_sb.sb_rextsize; 4757 + } else { 4758 + extsz = ip->i_d.di_extsize; 4836 4759 } 4760 + if (extsz) { 4761 + error = xfs_bmap_extsize_align(mp, 4762 + &got, &prev, extsz, 4763 + rt, eof, delay, convert, 4764 + &aoff, &alen); 4765 + ASSERT(!error); 4766 + } 4767 + 4768 + if (rt) 4769 + extsz = alen / mp->m_sb.sb_rextsize; 4837 4770 4838 4771 /* 4839 4772 * Make a transaction-less quota reservation for ··· 4872 4785 xfs_bmap_worst_indlen(ip, alen); 4873 4786 ASSERT(indlen > 0); 4874 4787 4875 - if (rt) 4788 + if (rt) { 4876 4789 error = xfs_mod_incore_sb(mp, 4877 4790 XFS_SBS_FREXTENTS, 4878 4791 -(extsz), rsvd); 4879 - else 4792 + } else { 4880 4793 error = xfs_mod_incore_sb(mp, 4881 4794 XFS_SBS_FDBLOCKS, 4882 4795 -(alen), rsvd); 4796 + } 4883 4797 if (!error) { 4884 4798 error = xfs_mod_incore_sb(mp, 4885 4799 XFS_SBS_FDBLOCKS, 4886 4800 -(indlen), rsvd); 4887 - if (error && rt) { 4888 - xfs_mod_incore_sb(ip->i_mount, 4801 + if (error && rt) 4802 + xfs_mod_incore_sb(mp, 4889 4803 XFS_SBS_FREXTENTS, 4890 4804 extsz, rsvd); 4891 - } else if (error) { 4892 - xfs_mod_incore_sb(ip->i_mount, 4805 + else if (error) 4806 + xfs_mod_incore_sb(mp, 4893 4807 XFS_SBS_FDBLOCKS, 4894 4808 alen, rsvd); 4895 - } 4896 4809 } 4897 4810 4898 4811 if (error) { 4899 - if (XFS_IS_QUOTA_ON(ip->i_mount)) 4812 + if (XFS_IS_QUOTA_ON(mp)) 4900 4813 /* unreserve the blocks now */ 4814 + (void) 4901 4815 XFS_TRANS_UNRESERVE_QUOTA_NBLKS( 4902 4816 mp, NULL, ip, 4903 4817 (long)alen, 0, rt ? 
··· 4937 4849 bma.firstblock = *firstblock; 4938 4850 bma.alen = alen; 4939 4851 bma.off = aoff; 4852 + bma.conv = convert; 4940 4853 bma.wasdel = wasdelay; 4941 4854 bma.minlen = minlen; 4942 4855 bma.low = flist->xbf_low; ··· 5359 5270 return 0; 5360 5271 } 5361 5272 XFS_STATS_INC(xs_blk_unmap); 5362 - isrt = (whichfork == XFS_DATA_FORK) && 5363 - (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); 5273 + isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 5364 5274 start = bno; 5365 5275 bno = start + len - 1; 5366 5276 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, ··· 5531 5443 } 5532 5444 if (wasdel) { 5533 5445 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); 5534 - /* Update realtim/data freespace, unreserve quota */ 5446 + /* Update realtime/data freespace, unreserve quota */ 5535 5447 if (isrt) { 5536 5448 xfs_filblks_t rtexts; 5537 5449 ··· 5539 5451 do_div(rtexts, mp->m_sb.sb_rextsize); 5540 5452 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5541 5453 (int)rtexts, rsvd); 5542 - XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5543 - -((long)del.br_blockcount), 0, 5454 + (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, 5455 + NULL, ip, -((long)del.br_blockcount), 0, 5544 5456 XFS_QMOPT_RES_RTBLKS); 5545 5457 } else { 5546 5458 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, 5547 5459 (int)del.br_blockcount, rsvd); 5548 - XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5549 - -((long)del.br_blockcount), 0, 5460 + (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, 5461 + NULL, ip, -((long)del.br_blockcount), 0, 5550 5462 XFS_QMOPT_RES_REGBLKS); 5551 5463 } 5552 5464 ip->i_delayed_blks -= del.br_blockcount; ··· 5740 5652 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5741 5653 return XFS_ERROR(EINVAL); 5742 5654 if (whichfork == XFS_DATA_FORK) { 5743 - if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) { 5655 + if ((ip->i_d.di_extsize && (ip->i_d.di_flags & 5656 + (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) || 5657 + ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5744 5658 prealloced = 1; 5745 5659 fixlen = XFS_MAXIOFFSET(mp); 5746 5660 } else {
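Most of the bmap.c churn is the new xfs_bmap_extsize_align() helper, which the real-time path and the new di_extsize hint path now share. Its core is plain rounding: pull the start back and push the length out so the request covers whole extent-size units, then clip against the neighbouring extents. The rounding step in isolation, as a standalone sketch (neighbour clipping and the real-time fixups omitted):

#include <stdint.h>
#include <stdio.h>

/* Round a block range out to multiples of an extent-size hint; this is the
 * arithmetic at the heart of xfs_bmap_extsize_align(). */
static void extsize_align(uint64_t *off, uint64_t *len, uint64_t extsz)
{
	uint64_t temp;

	temp = *off % extsz;		/* misalignment of the start */
	if (temp) {
		*len += temp;		/* grow the request backwards... */
		*off -= temp;		/* ...so the start is aligned */
	}
	temp = *len % extsz;		/* misalignment of the length */
	if (temp)
		*len += extsz - temp;	/* and round the end up */
}

int main(void)
{
	uint64_t off = 13, len = 10, extsz = 8;

	extsize_align(&off, &len, extsz);	/* [13,23) -> [8,24) */
	printf("aligned off=%llu len=%llu\n",
	       (unsigned long long)off, (unsigned long long)len);
	return 0;
}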
+6 -1
fs/xfs/xfs_bmap.h
··· 62 62 #define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ 63 63 /* combine contig. space */ 64 64 #define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ 65 + /* XFS_BMAPI_DIRECT_IO 0x800 */ 66 + #define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */ 67 + /* need write cache flushing and no */ 68 + /* additional allocation alignments */ 65 69 66 70 #define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) 67 71 static inline int xfs_bmapi_aflag(int w) ··· 105 101 char wasdel; /* replacing a delayed allocation */ 106 102 char userdata;/* set if is user data */ 107 103 char low; /* low on space, using seq'l ags */ 108 - char aeof; /* allocated space at eof */ 104 + char aeof; /* allocated space at eof */ 105 + char conv; /* overwriting unwritten extents */ 109 106 } xfs_bmalloca_t; 110 107 111 108 #ifdef __KERNEL__
+1 -1
fs/xfs/xfs_clnt.h
··· 57 57 /* 58 58 * XFS mount option flags -- args->flags1 59 59 */ 60 - #define XFSMNT_COMPAT_ATTR 0x00000001 /* do not use ATTR2 format */ 60 + #define XFSMNT_ATTR2 0x00000001 /* allow ATTR2 EA format */ 61 61 #define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount 62 62 * compatible */ 63 63 #define XFSMNT_INO64 0x00000004 /* move inode numbers up
+4 -12
fs/xfs/xfs_dfrag.c
··· 60 60 xfs_bstat_t *sbp; 61 61 struct file *fp = NULL, *tfp = NULL; 62 62 vnode_t *vp, *tvp; 63 - bhv_desc_t *bdp, *tbdp; 64 - vn_bhv_head_t *bhp, *tbhp; 65 63 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; 66 64 int ilf_fields, tilf_fields; 67 65 int error = 0; ··· 88 90 goto error0; 89 91 } 90 92 91 - bhp = VN_BHV_HEAD(vp); 92 - bdp = vn_bhv_lookup(bhp, &xfs_vnodeops); 93 - if (bdp == NULL) { 93 + ip = xfs_vtoi(vp); 94 + if (ip == NULL) { 94 95 error = XFS_ERROR(EBADF); 95 96 goto error0; 96 - } else { 97 - ip = XFS_BHVTOI(bdp); 98 97 } 99 98 100 99 if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || ··· 100 105 goto error0; 101 106 } 102 107 103 - tbhp = VN_BHV_HEAD(tvp); 104 - tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops); 105 - if (tbdp == NULL) { 108 + tip = xfs_vtoi(tvp); 109 + if (tip == NULL) { 106 110 error = XFS_ERROR(EBADF); 107 111 goto error0; 108 - } else { 109 - tip = XFS_BHVTOI(tbdp); 110 112 } 111 113 112 114 if (ip->i_mount != tip->i_mount) {
+17 -5
fs/xfs/xfs_dinode.h
··· 199 199 200 200 #define XFS_DFORK_DSIZE(dip,mp) \ 201 201 XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp) 202 + #define XFS_DFORK_DSIZE_HOST(dip,mp) \ 203 + XFS_CFORK_DSIZE(&(dip)->di_core, mp) 202 204 #define XFS_DFORK_ASIZE(dip,mp) \ 203 205 XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp) 206 + #define XFS_DFORK_ASIZE_HOST(dip,mp) \ 207 + XFS_CFORK_ASIZE(&(dip)->di_core, mp) 204 208 #define XFS_DFORK_SIZE(dip,mp,w) \ 205 209 XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w) 210 + #define XFS_DFORK_SIZE_HOST(dip,mp,w) \ 211 + XFS_CFORK_SIZE(&(dip)->di_core, mp, w) 206 212 207 213 #define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core) 208 214 #define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core) ··· 222 216 #define XFS_CFORK_FMT_SET(dcp,w,n) \ 223 217 ((w) == XFS_DATA_FORK ? \ 224 218 ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n))) 219 + #define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w) 225 220 226 221 #define XFS_CFORK_NEXTENTS_DISK(dcp,w) \ 227 222 ((w) == XFS_DATA_FORK ? \ ··· 230 223 INT_GET((dcp)->di_anextents, ARCH_CONVERT)) 231 224 #define XFS_CFORK_NEXTENTS(dcp,w) \ 232 225 ((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents) 226 + #define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w) 227 + #define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w) 233 228 234 229 #define XFS_CFORK_NEXT_SET(dcp,w,n) \ 235 230 ((w) == XFS_DATA_FORK ? \ 236 231 ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n))) 237 - 238 - #define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w) 239 232 240 233 #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) 241 234 ··· 253 246 #define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */ 254 247 #define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */ 255 248 #define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */ 256 - #define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */ 257 - #define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */ 249 + #define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */ 250 + #define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */ 251 + #define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */ 252 + #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ 258 253 #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 259 254 #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 260 255 #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) ··· 268 259 #define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT) 269 260 #define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT) 270 261 #define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT) 262 + #define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT) 263 + #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) 271 264 272 265 #define XFS_DIFLAG_ANY \ 273 266 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ 274 267 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ 275 268 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ 276 - XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS) 269 + XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ 270 + XFS_DIFLAG_EXTSZINHERIT) 277 271 278 272 #endif /* __XFS_DINODE_H__ */
+1 -1
fs/xfs/xfs_dir.c
··· 176 176 uint shortcount, leafcount, count; 177 177 178 178 mp->m_dirversion = 1; 179 - if (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) { 179 + if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { 180 180 shortcount = (mp->m_attroffset - 181 181 (uint)sizeof(xfs_dir_sf_hdr_t)) / 182 182 (uint)sizeof(xfs_dir_sf_entry_t);
+2
fs/xfs/xfs_dir.h
··· 135 135 ((mp)->m_dirops.xd_shortform_to_single(args)) 136 136 137 137 #define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1) 138 + #define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2) 138 139 extern xfs_dirops_t xfsv1_dirops; 140 + extern xfs_dirops_t xfsv2_dirops; 139 141 140 142 #endif /* __XFS_DIR_H__ */
-3
fs/xfs/xfs_dir2.h
··· 72 72 struct uio *uio; /* uio control structure */ 73 73 } xfs_dir2_put_args_t; 74 74 75 - #define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2) 76 - extern xfs_dirops_t xfsv2_dirops; 77 - 78 75 /* 79 76 * Other interfaces used by the rest of the dir v2 code. 80 77 */
+34 -30
fs/xfs/xfs_dir_leaf.h
··· 67 67 */ 68 68 #define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */ 69 69 70 + typedef struct xfs_dir_leaf_map { /* RLE map of free bytes */ 71 + __uint16_t base; /* base of free region */ 72 + __uint16_t size; /* run length of free region */ 73 + } xfs_dir_leaf_map_t; 74 + 75 + typedef struct xfs_dir_leaf_hdr { /* constant-structure header block */ 76 + xfs_da_blkinfo_t info; /* block type, links, etc. */ 77 + __uint16_t count; /* count of active leaf_entry's */ 78 + __uint16_t namebytes; /* num bytes of name strings stored */ 79 + __uint16_t firstused; /* first used byte in name area */ 80 + __uint8_t holes; /* != 0 if blk needs compaction */ 81 + __uint8_t pad1; 82 + xfs_dir_leaf_map_t freemap[XFS_DIR_LEAF_MAPSIZE]; 83 + } xfs_dir_leaf_hdr_t; 84 + 85 + typedef struct xfs_dir_leaf_entry { /* sorted on key, not name */ 86 + xfs_dahash_t hashval; /* hash value of name */ 87 + __uint16_t nameidx; /* index into buffer of name */ 88 + __uint8_t namelen; /* length of name string */ 89 + __uint8_t pad2; 90 + } xfs_dir_leaf_entry_t; 91 + 92 + typedef struct xfs_dir_leaf_name { 93 + xfs_dir_ino_t inumber; /* inode number for this key */ 94 + __uint8_t name[1]; /* name string itself */ 95 + } xfs_dir_leaf_name_t; 96 + 70 97 typedef struct xfs_dir_leafblock { 71 - struct xfs_dir_leaf_hdr { /* constant-structure header block */ 72 - xfs_da_blkinfo_t info; /* block type, links, etc. */ 73 - __uint16_t count; /* count of active leaf_entry's */ 74 - __uint16_t namebytes; /* num bytes of name strings stored */ 75 - __uint16_t firstused; /* first used byte in name area */ 76 - __uint8_t holes; /* != 0 if blk needs compaction */ 77 - __uint8_t pad1; 78 - struct xfs_dir_leaf_map {/* RLE map of free bytes */ 79 - __uint16_t base; /* base of free region */ 80 - __uint16_t size; /* run length of free region */ 81 - } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */ 82 - } hdr; 83 - struct xfs_dir_leaf_entry { /* sorted on key, not name */ 84 - xfs_dahash_t hashval; /* hash value of name */ 85 - __uint16_t nameidx; /* index into buffer of name */ 86 - __uint8_t namelen; /* length of name string */ 87 - __uint8_t pad2; 88 - } entries[1]; /* var sized array */ 89 - struct xfs_dir_leaf_name { 90 - xfs_dir_ino_t inumber; /* inode number for this key */ 91 - __uint8_t name[1]; /* name string itself */ 92 - } namelist[1]; /* grows from bottom of buf */ 98 + xfs_dir_leaf_hdr_t hdr; /* constant-structure header block */ 99 + xfs_dir_leaf_entry_t entries[1]; /* var sized array */ 100 + xfs_dir_leaf_name_t namelist[1]; /* grows from bottom of buf */ 93 101 } xfs_dir_leafblock_t; 94 - typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t; 95 - typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t; 96 - typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t; 97 - typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t; 98 102 99 103 /* 100 104 * Length of name for which a 512-byte block filesystem ··· 130 126 #define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \ 131 127 ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash)) 132 128 133 - typedef struct xfs_dir_put_args 134 - { 129 + typedef struct xfs_dir_put_args { 135 130 xfs_dircook_t cook; /* cookie of (next) entry */ 136 131 xfs_intino_t ino; /* inode number */ 137 - struct xfs_dirent *dbp; /* buffer pointer */ 132 + struct xfs_dirent *dbp; /* buffer pointer */ 138 133 char *name; /* directory entry name */ 139 134 int namelen; /* length of name */ 140 135 int done; /* output: set if value was stored */ ··· 141 138 struct uio *uio; /* uio control 
structure */ 142 139 } xfs_dir_put_args_t; 143 140 144 - #define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len) 141 + #define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) \ 142 + xfs_dir_leaf_entsize_byname(len) 145 143 static inline int xfs_dir_leaf_entsize_byname(int len) 146 144 { 147 145 return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len;
-1
fs/xfs/xfs_error.c
··· 54 54 if (e != xfs_etrap[i]) 55 55 continue; 56 56 cmn_err(CE_NOTE, "xfs_error_trap: error %d", e); 57 - debug_stop_all_cpus((void *)-1LL); 58 57 BUG(); 59 58 break; 60 59 }
+4 -4
fs/xfs/xfs_error.h
··· 18 18 #ifndef __XFS_ERROR_H__ 19 19 #define __XFS_ERROR_H__ 20 20 21 - #define prdev(fmt,targ,args...) \ 22 - printk("XFS: device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args) 23 - 24 21 #define XFS_ERECOVER 1 /* Failure to recover log */ 25 22 #define XFS_ELOGSTAT 2 /* Failure to stat log in user space */ 26 23 #define XFS_ENOLOGSPACE 3 /* Reservation too large */ ··· 179 182 struct xfs_mount; 180 183 /* PRINTFLIKE4 */ 181 184 extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, 182 - char *fmt, ...); 185 + char *fmt, ...); 183 186 /* PRINTFLIKE3 */ 184 187 extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); 188 + 189 + #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ 190 + xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) 185 191 186 192 #endif /* __XFS_ERROR_H__ */
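The new xfs_fs_repair_cmn_err() wrapper relies on compile-time string-literal concatenation to tack the xfs_repair advice onto whatever format string the caller passes, which is what lets the corrupt-inode call sites elsewhere in this merge drop their duplicated "Unmount and run xfs_repair." text. The same trick in standalone form (printf standing in for xfs_fs_cmn_err):

#include <stdio.h>

#define log_err(fmt, args...) \
	printf("XFS: " fmt "\n", ## args)

/* Same idea as xfs_fs_repair_cmn_err(): the advice is pasted onto the
 * caller's format string by the preprocessor, at no runtime cost. */
#define log_repair_err(fmt, args...) \
	log_err(fmt " Unmount and run xfs_repair.", ## args)

int main(void)
{
	log_repair_err("corrupt dinode %llu, forkoff = 0x%x.", 128ULL, 0x2c);
	return 0;
}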
+6 -4
fs/xfs/xfs_fs.h
··· 3 3 * All Rights Reserved. 4 4 * 5 5 * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 6 + * modify it under the terms of the GNU Lesser General Public License 7 + * as published by the Free Software Foundation. 8 8 * 9 9 * This program is distributed in the hope that it would be useful, 10 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 12 + * GNU Lesser General Public License for more details. 13 13 * 14 - * You should have received a copy of the GNU General Public License 14 + * You should have received a copy of the GNU Lesser General Public License 15 15 * along with this program; if not, write the Free Software Foundation, 16 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 17 */ ··· 65 65 #define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ 66 66 #define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ 67 67 #define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ 68 + #define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ 69 + #define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ 68 70 #define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 69 71 70 72 /*
+26
fs/xfs/xfs_fsops.c
··· 540 540 return(0); 541 541 } 542 542 543 + void 544 + xfs_fs_log_dummy(xfs_mount_t *mp) 545 + { 546 + xfs_trans_t *tp; 547 + xfs_inode_t *ip; 548 + 549 + 550 + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); 551 + atomic_inc(&mp->m_active_trans); 552 + if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) { 553 + xfs_trans_cancel(tp, 0); 554 + return; 555 + } 556 + 557 + ip = mp->m_rootip; 558 + xfs_ilock(ip, XFS_ILOCK_EXCL); 559 + 560 + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 561 + xfs_trans_ihold(tp, ip); 562 + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 563 + xfs_trans_set_sync(tp); 564 + xfs_trans_commit(tp, 0, NULL); 565 + 566 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 567 + } 568 + 543 569 int 544 570 xfs_fs_goingdown( 545 571 xfs_mount_t *mp,
+1
fs/xfs/xfs_fsops.h
··· 25 25 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, 26 26 xfs_fsop_resblks_t *outval); 27 27 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); 28 + extern void xfs_fs_log_dummy(xfs_mount_t *mp); 28 29 29 30 #endif /* __XFS_FSOPS_H__ */
+1 -4
fs/xfs/xfs_iget.c
··· 493 493 494 494 retry: 495 495 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { 496 - bhv_desc_t *bdp; 497 496 xfs_inode_t *ip; 498 497 499 498 vp = LINVFS_GET_VP(inode); ··· 516 517 * to wait for the inode to go away. 517 518 */ 518 519 if (is_bad_inode(inode) || 519 - ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), 520 - &xfs_vnodeops)) == NULL)) { 520 + ((ip = xfs_vtoi(vp)) == NULL)) { 521 521 iput(inode); 522 522 delay(1); 523 523 goto retry; 524 524 } 525 525 526 - ip = XFS_BHVTOI(bdp); 527 526 if (lock_flags != 0) 528 527 xfs_ilock(ip, lock_flags); 529 528 XFS_STATS_INC(xs_ig_found);
+38 -23
fs/xfs/xfs_inode.c
··· 404 404 INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + 405 405 INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) > 406 406 INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) { 407 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 408 - "corrupt dinode %Lu, extent total = %d, nblocks = %Lu." 409 - " Unmount and run xfs_repair.", 407 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 408 + "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", 410 409 (unsigned long long)ip->i_ino, 411 410 (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) 412 411 + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)), ··· 417 418 } 418 419 419 420 if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) { 420 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 421 - "corrupt dinode %Lu, forkoff = 0x%x." 422 - " Unmount and run xfs_repair.", 421 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 422 + "corrupt dinode %Lu, forkoff = 0x%x.", 423 423 (unsigned long long)ip->i_ino, 424 424 (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT))); 425 425 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, ··· 449 451 * no local regular files yet 450 452 */ 451 453 if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) { 452 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 453 - "corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair.", 454 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 455 + "corrupt inode %Lu " 456 + "(local format for regular file).", 454 457 (unsigned long long) ip->i_ino); 455 458 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 456 459 XFS_ERRLEVEL_LOW, ··· 461 462 462 463 di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT); 463 464 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 464 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 465 - "corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair.", 465 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 466 + "corrupt inode %Lu " 467 + "(bad size %Ld for local inode).", 466 468 (unsigned long long) ip->i_ino, 467 469 (long long) di_size); 468 470 XFS_CORRUPTION_ERROR("xfs_iformat(5)", ··· 551 551 * kmem_alloc() or memcpy() below. 552 552 */ 553 553 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 554 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 555 - "corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair.", 554 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 555 + "corrupt inode %Lu " 556 + "(bad size %d for local fork, size = %d).", 556 557 (unsigned long long) ip->i_ino, size, 557 558 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 558 559 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, ··· 611 610 * kmem_alloc() or memcpy() below. 612 611 */ 613 612 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 614 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 615 - "corrupt inode %Lu ((a)extents = %d). Unmount and run xfs_repair.", 613 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 614 + "corrupt inode %Lu ((a)extents = %d).", 616 615 (unsigned long long) ip->i_ino, nex); 617 616 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 618 617 ip->i_mount, dip); ··· 693 692 || XFS_BMDR_SPACE_CALC(nrecs) > 694 693 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 695 694 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 696 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 697 - "corrupt inode %Lu (btree). 
Unmount and run xfs_repair.", 695 + xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 696 + "corrupt inode %Lu (btree).", 698 697 (unsigned long long) ip->i_ino); 699 698 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 700 699 ip->i_mount); ··· 810 809 flags |= XFS_XFLAG_PROJINHERIT; 811 810 if (di_flags & XFS_DIFLAG_NOSYMLINKS) 812 811 flags |= XFS_XFLAG_NOSYMLINKS; 812 + if (di_flags & XFS_DIFLAG_EXTSIZE) 813 + flags |= XFS_XFLAG_EXTSIZE; 814 + if (di_flags & XFS_DIFLAG_EXTSZINHERIT) 815 + flags |= XFS_XFLAG_EXTSZINHERIT; 813 816 } 814 817 815 818 return flags; ··· 1197 1192 if ((mode & S_IFMT) == S_IFDIR) { 1198 1193 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1199 1194 di_flags |= XFS_DIFLAG_RTINHERIT; 1200 - } else { 1195 + if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1196 + di_flags |= XFS_DIFLAG_EXTSZINHERIT; 1197 + ip->i_d.di_extsize = pip->i_d.di_extsize; 1198 + } 1199 + } else if ((mode & S_IFMT) == S_IFREG) { 1201 1200 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) { 1202 1201 di_flags |= XFS_DIFLAG_REALTIME; 1203 1202 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 1203 + } 1204 + if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1205 + di_flags |= XFS_DIFLAG_EXTSIZE; 1206 + ip->i_d.di_extsize = pip->i_d.di_extsize; 1204 1207 } 1205 1208 } 1206 1209 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && ··· 1275 1262 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1276 1263 return; 1277 1264 1278 - if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME ) 1265 + if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE)) 1279 1266 return; 1280 1267 1281 1268 nimaps = 2; ··· 1778 1765 xfs_fsize_t new_size, 1779 1766 cred_t *credp) 1780 1767 { 1781 - xfs_fsize_t isize; 1782 1768 int error; 1783 1769 1784 1770 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1785 1771 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1786 1772 ASSERT(new_size > ip->i_d.di_size); 1787 1773 1788 - error = 0; 1789 - isize = ip->i_d.di_size; 1790 1774 /* 1791 1775 * Zero any pages that may have been created by 1792 1776 * xfs_write_file() beyond the end of the file 1793 1777 * and any blocks between the old and new file sizes. 1794 1778 */ 1795 - error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize, 1796 - new_size); 1779 + error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, 1780 + ip->i_d.di_size, new_size); 1797 1781 return error; 1798 1782 } 1799 1783 ··· 3364 3354 */ 3365 3355 ip->i_update_core = 0; 3366 3356 SYNCHRONIZE(); 3357 + 3358 + /* 3359 + * Make sure to get the latest atime from the Linux inode. 3360 + */ 3361 + xfs_synchronize_atime(ip); 3367 3362 3368 3363 if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC, 3369 3364 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
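Beyond the error-message cleanups, the xfs_inode.c hunks wire the new extent-size hint into flag translation and into inode creation: a directory with XFS_DIFLAG_EXTSZINHERIT passes both the flag and its di_extsize on to child directories, while regular files created under it get XFS_DIFLAG_EXTSIZE plus the inherited hint. A compact sketch of that inheritance rule (illustrative types, not the kernel structures):

#include <stdio.h>

#define DIFLAG_EXTSIZE		(1u << 11)	/* per-file extent size hint */
#define DIFLAG_EXTSZINHERIT	(1u << 12)	/* dirs: children inherit hint */

struct inode_attrs {
	unsigned	flags;
	unsigned	extsize;
};

/* Sketch of the rule added to inode creation: directories keep propagating
 * EXTSZINHERIT, regular files get a concrete EXTSIZE hint. */
static void inherit_extsize(const struct inode_attrs *parent,
			    struct inode_attrs *child, int child_is_dir)
{
	if (!(parent->flags & DIFLAG_EXTSZINHERIT))
		return;

	child->extsize = parent->extsize;
	child->flags |= child_is_dir ? DIFLAG_EXTSZINHERIT : DIFLAG_EXTSIZE;
}

int main(void)
{
	struct inode_attrs dir = { DIFLAG_EXTSZINHERIT, 16 };
	struct inode_attrs file = { 0, 0 };

	inherit_extsize(&dir, &file, 0);
	printf("file flags=%#x extsize=%u\n", file.flags, file.extsize);
	return 0;
}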
+4
fs/xfs/xfs_inode.h
··· 436 436 xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); 437 437 void xfs_lock_inodes(xfs_inode_t **, int, int, uint); 438 438 439 + xfs_inode_t *xfs_vtoi(struct vnode *vp); 440 + 441 + void xfs_synchronize_atime(xfs_inode_t *); 442 + 439 443 #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) 440 444 441 445 #ifdef DEBUG
+7 -2
fs/xfs/xfs_inode_item.c
··· 271 271 if (ip->i_update_size) 272 272 ip->i_update_size = 0; 273 273 274 + /* 275 + * Make sure to get the latest atime from the Linux inode. 276 + */ 277 + xfs_synchronize_atime(ip); 278 + 274 279 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 275 280 vecp->i_len = sizeof(xfs_dinode_core_t); 276 281 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); ··· 608 603 if (iip->ili_pushbuf_flag == 0) { 609 604 iip->ili_pushbuf_flag = 1; 610 605 #ifdef DEBUG 611 - iip->ili_push_owner = get_thread_id(); 606 + iip->ili_push_owner = current_pid(); 612 607 #endif 613 608 /* 614 609 * Inode is left locked in shared mode. ··· 787 782 * trying to duplicate our effort. 788 783 */ 789 784 ASSERT(iip->ili_pushbuf_flag != 0); 790 - ASSERT(iip->ili_push_owner == get_thread_id()); 785 + ASSERT(iip->ili_push_owner == current_pid()); 791 786 792 787 /* 793 788 * If flushlock isn't locked anymore, chances are that the
+236 -193
fs/xfs/xfs_iomap.c
··· 262 262 case BMAPI_WRITE: 263 263 /* If we found an extent, return it */ 264 264 if (nimaps && 265 - (imap.br_startblock != HOLESTARTBLOCK) && 265 + (imap.br_startblock != HOLESTARTBLOCK) && 266 266 (imap.br_startblock != DELAYSTARTBLOCK)) { 267 267 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, 268 268 offset, count, iomapp, &imap, flags); ··· 317 317 } 318 318 319 319 STATIC int 320 + xfs_iomap_eof_align_last_fsb( 321 + xfs_mount_t *mp, 322 + xfs_iocore_t *io, 323 + xfs_fsize_t isize, 324 + xfs_extlen_t extsize, 325 + xfs_fileoff_t *last_fsb) 326 + { 327 + xfs_fileoff_t new_last_fsb = 0; 328 + xfs_extlen_t align; 329 + int eof, error; 330 + 331 + if (io->io_flags & XFS_IOCORE_RT) 332 + ; 333 + /* 334 + * If mounted with the "-o swalloc" option, roundup the allocation 335 + * request to a stripe width boundary if the file size is >= 336 + * stripe width and we are allocating past the allocation eof. 337 + */ 338 + else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && 339 + (isize >= XFS_FSB_TO_B(mp, mp->m_swidth))) 340 + new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); 341 + /* 342 + * Roundup the allocation request to a stripe unit (m_dalign) boundary 343 + * if the file size is >= stripe unit size, and we are allocating past 344 + * the allocation eof. 345 + */ 346 + else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign))) 347 + new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); 348 + 349 + /* 350 + * Always round up the allocation request to an extent boundary 351 + * (when file on a real-time subvolume or has di_extsize hint). 352 + */ 353 + if (extsize) { 354 + if (new_last_fsb) 355 + align = roundup_64(new_last_fsb, extsize); 356 + else 357 + align = extsize; 358 + new_last_fsb = roundup_64(*last_fsb, align); 359 + } 360 + 361 + if (new_last_fsb) { 362 + error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof); 363 + if (error) 364 + return error; 365 + if (eof) 366 + *last_fsb = new_last_fsb; 367 + } 368 + return 0; 369 + } 370 + 371 + STATIC int 320 372 xfs_flush_space( 321 373 xfs_inode_t *ip, 322 374 int *fsynced, ··· 414 362 xfs_iocore_t *io = &ip->i_iocore; 415 363 xfs_fileoff_t offset_fsb; 416 364 xfs_fileoff_t last_fsb; 417 - xfs_filblks_t count_fsb; 365 + xfs_filblks_t count_fsb, resaligned; 418 366 xfs_fsblock_t firstfsb; 367 + xfs_extlen_t extsz, temp; 368 + xfs_fsize_t isize; 419 369 int nimaps; 420 - int error; 421 370 int bmapi_flag; 422 371 int quota_flag; 423 372 int rt; 424 373 xfs_trans_t *tp; 425 374 xfs_bmbt_irec_t imap; 426 375 xfs_bmap_free_t free_list; 427 - xfs_filblks_t qblocks, resblks; 376 + uint qblocks, resblks, resrtextents; 428 377 int committed; 429 - int resrtextents; 378 + int error; 430 379 431 380 /* 432 381 * Make sure that the dquots are there. This doesn't hold ··· 437 384 if (error) 438 385 return XFS_ERROR(error); 439 386 440 - offset_fsb = XFS_B_TO_FSBT(mp, offset); 441 - last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 442 - count_fsb = last_fsb - offset_fsb; 443 - if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) { 444 - xfs_fileoff_t map_last_fsb; 445 - 446 - map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff; 447 - if (map_last_fsb < last_fsb) { 448 - last_fsb = map_last_fsb; 449 - count_fsb = last_fsb - offset_fsb; 450 - } 451 - ASSERT(count_fsb > 0); 452 - } 453 - 454 - /* 455 - * Determine if reserving space on the data or realtime partition. 
456 - */ 457 - if ((rt = XFS_IS_REALTIME_INODE(ip))) { 458 - xfs_extlen_t extsz; 459 - 387 + rt = XFS_IS_REALTIME_INODE(ip); 388 + if (unlikely(rt)) { 460 389 if (!(extsz = ip->i_d.di_extsize)) 461 390 extsz = mp->m_sb.sb_rextsize; 462 - resrtextents = qblocks = (count_fsb + extsz - 1); 463 - do_div(resrtextents, mp->m_sb.sb_rextsize); 464 - resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 465 - quota_flag = XFS_QMOPT_RES_RTBLKS; 466 391 } else { 467 - resrtextents = 0; 468 - resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb); 469 - quota_flag = XFS_QMOPT_RES_REGBLKS; 392 + extsz = ip->i_d.di_extsize; 470 393 } 394 + 395 + isize = ip->i_d.di_size; 396 + if (io->io_new_size > isize) 397 + isize = io->io_new_size; 398 + 399 + offset_fsb = XFS_B_TO_FSBT(mp, offset); 400 + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 401 + if ((offset + count) > isize) { 402 + error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 403 + &last_fsb); 404 + if (error) 405 + goto error_out; 406 + } else { 407 + if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) 408 + last_fsb = MIN(last_fsb, (xfs_fileoff_t) 409 + ret_imap->br_blockcount + 410 + ret_imap->br_startoff); 411 + } 412 + count_fsb = last_fsb - offset_fsb; 413 + ASSERT(count_fsb > 0); 414 + 415 + resaligned = count_fsb; 416 + if (unlikely(extsz)) { 417 + if ((temp = do_mod(offset_fsb, extsz))) 418 + resaligned += temp; 419 + if ((temp = do_mod(resaligned, extsz))) 420 + resaligned += extsz - temp; 421 + } 422 + 423 + if (unlikely(rt)) { 424 + resrtextents = qblocks = resaligned; 425 + resrtextents /= mp->m_sb.sb_rextsize; 426 + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 427 + quota_flag = XFS_QMOPT_RES_RTBLKS; 428 + } else { 429 + resrtextents = 0; 430 + resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 431 + quota_flag = XFS_QMOPT_RES_REGBLKS; 432 + } 471 433 472 434 /* 473 435 * Allocate and setup the transaction ··· 493 425 XFS_WRITE_LOG_RES(mp), resrtextents, 494 426 XFS_TRANS_PERM_LOG_RES, 495 427 XFS_WRITE_LOG_COUNT); 496 - 497 428 /* 498 429 * Check for running out of space, note: need lock to return 499 430 */ ··· 502 435 if (error) 503 436 goto error_out; 504 437 505 - if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) { 506 - error = (EDQUOT); 438 + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, 439 + qblocks, 0, quota_flag); 440 + if (error) 507 441 goto error1; 508 - } 509 442 510 - bmapi_flag = XFS_BMAPI_WRITE; 511 443 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 512 444 xfs_trans_ihold(tp, ip); 513 445 514 - if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt)) 446 + bmapi_flag = XFS_BMAPI_WRITE; 447 + if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz)) 515 448 bmapi_flag |= XFS_BMAPI_PREALLOC; 516 449 517 450 /* 518 - * Issue the bmapi() call to allocate the blocks 451 + * Issue the xfs_bmapi() call to allocate the blocks 519 452 */ 520 453 XFS_BMAP_INIT(&free_list, &firstfsb); 521 454 nimaps = 1; ··· 550 483 "extent-state : %x \n", 551 484 (ip->i_mount)->m_fsname, 552 485 (long long)ip->i_ino, 553 - ret_imap->br_startblock, ret_imap->br_startoff, 554 - ret_imap->br_blockcount,ret_imap->br_state); 486 + (unsigned long long)ret_imap->br_startblock, 487 + (unsigned long long)ret_imap->br_startoff, 488 + (unsigned long long)ret_imap->br_blockcount, 489 + ret_imap->br_state); 555 490 } 556 491 return 0; 557 492 ··· 569 500 return XFS_ERROR(error); 570 501 } 571 502 503 + /* 504 + * If the caller is doing a write at the end of the file, 505 + * then extend the 
allocation out to the file system's write 506 + * iosize. We clean up any extra space left over when the 507 + * file is closed in xfs_inactive(). 508 + * 509 + * For sync writes, we are flushing delayed allocate space to 510 + * try to make additional space available for allocation near 511 + * the filesystem full boundary - preallocation hurts in that 512 + * situation, of course. 513 + */ 514 + STATIC int 515 + xfs_iomap_eof_want_preallocate( 516 + xfs_mount_t *mp, 517 + xfs_iocore_t *io, 518 + xfs_fsize_t isize, 519 + xfs_off_t offset, 520 + size_t count, 521 + int ioflag, 522 + xfs_bmbt_irec_t *imap, 523 + int nimaps, 524 + int *prealloc) 525 + { 526 + xfs_fileoff_t start_fsb; 527 + xfs_filblks_t count_fsb; 528 + xfs_fsblock_t firstblock; 529 + int n, error, imaps; 530 + 531 + *prealloc = 0; 532 + if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize) 533 + return 0; 534 + 535 + /* 536 + * If there are any real blocks past eof, then don't 537 + * do any speculative allocation. 538 + */ 539 + start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); 540 + count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 541 + while (count_fsb > 0) { 542 + imaps = nimaps; 543 + firstblock = NULLFSBLOCK; 544 + error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 545 + 0, &firstblock, 0, imap, &imaps, NULL); 546 + if (error) 547 + return error; 548 + for (n = 0; n < imaps; n++) { 549 + if ((imap[n].br_startblock != HOLESTARTBLOCK) && 550 + (imap[n].br_startblock != DELAYSTARTBLOCK)) 551 + return 0; 552 + start_fsb += imap[n].br_blockcount; 553 + count_fsb -= imap[n].br_blockcount; 554 + } 555 + } 556 + *prealloc = 1; 557 + return 0; 558 + } 559 + 572 560 int 573 561 xfs_iomap_write_delay( 574 562 xfs_inode_t *ip, ··· 639 513 xfs_iocore_t *io = &ip->i_iocore; 640 514 xfs_fileoff_t offset_fsb; 641 515 xfs_fileoff_t last_fsb; 642 - xfs_fsize_t isize; 516 + xfs_off_t aligned_offset; 517 + xfs_fileoff_t ioalign; 643 518 xfs_fsblock_t firstblock; 519 + xfs_extlen_t extsz; 520 + xfs_fsize_t isize; 644 521 int nimaps; 645 - int error; 646 522 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 647 - int aeof; 648 - int fsynced = 0; 523 + int prealloc, fsynced = 0; 524 + int error; 649 525 650 526 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); 651 527 ··· 655 527 * Make sure that the dquots are there. This doesn't hold 656 528 * the ilock across a disk read. 657 529 */ 658 - 659 530 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); 660 531 if (error) 661 532 return XFS_ERROR(error); 662 533 534 + if (XFS_IS_REALTIME_INODE(ip)) { 535 + if (!(extsz = ip->i_d.di_extsize)) 536 + extsz = mp->m_sb.sb_rextsize; 537 + } else { 538 + extsz = ip->i_d.di_extsize; 539 + } 540 + 541 + offset_fsb = XFS_B_TO_FSBT(mp, offset); 542 + 663 543 retry: 664 544 isize = ip->i_d.di_size; 665 - if (io->io_new_size > isize) { 545 + if (io->io_new_size > isize) 666 546 isize = io->io_new_size; 667 - } 668 547 669 - aeof = 0; 670 - offset_fsb = XFS_B_TO_FSBT(mp, offset); 671 - last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 672 - /* 673 - * If the caller is doing a write at the end of the file, 674 - * then extend the allocation (and the buffer used for the write) 675 - * out to the file system's write iosize. We clean up any extra 676 - * space left over when the file is closed in xfs_inactive(). 
677 - * 678 - * For sync writes, we are flushing delayed allocate space to 679 - * try to make additional space available for allocation near 680 - * the filesystem full boundary - preallocation hurts in that 681 - * situation, of course. 682 - */ 683 - if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) { 684 - xfs_off_t aligned_offset; 685 - xfs_filblks_t count_fsb; 686 - unsigned int iosize; 687 - xfs_fileoff_t ioalign; 688 - int n; 689 - xfs_fileoff_t start_fsb; 548 + error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count, 549 + ioflag, imap, XFS_WRITE_IMAPS, &prealloc); 550 + if (error) 551 + return error; 690 552 691 - /* 692 - * If there are any real blocks past eof, then don't 693 - * do any speculative allocation. 694 - */ 695 - start_fsb = XFS_B_TO_FSBT(mp, 696 - ((xfs_ufsize_t)(offset + count - 1))); 697 - count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 698 - while (count_fsb > 0) { 699 - nimaps = XFS_WRITE_IMAPS; 700 - error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 701 - 0, &firstblock, 0, imap, &nimaps, NULL); 702 - if (error) { 703 - return error; 704 - } 705 - for (n = 0; n < nimaps; n++) { 706 - if ( !(io->io_flags & XFS_IOCORE_RT) && 707 - !imap[n].br_startblock) { 708 - cmn_err(CE_PANIC,"Access to block " 709 - "zero: fs <%s> inode: %lld " 710 - "start_block : %llx start_off " 711 - ": %llx blkcnt : %llx " 712 - "extent-state : %x \n", 713 - (ip->i_mount)->m_fsname, 714 - (long long)ip->i_ino, 715 - imap[n].br_startblock, 716 - imap[n].br_startoff, 717 - imap[n].br_blockcount, 718 - imap[n].br_state); 719 - } 720 - if ((imap[n].br_startblock != HOLESTARTBLOCK) && 721 - (imap[n].br_startblock != DELAYSTARTBLOCK)) { 722 - goto write_map; 723 - } 724 - start_fsb += imap[n].br_blockcount; 725 - count_fsb -= imap[n].br_blockcount; 726 - } 727 - } 728 - iosize = mp->m_writeio_blocks; 553 + if (prealloc) { 729 554 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 730 555 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); 731 - last_fsb = ioalign + iosize; 732 - aeof = 1; 556 + last_fsb = ioalign + mp->m_writeio_blocks; 557 + } else { 558 + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 733 559 } 734 - write_map: 560 + 561 + if (prealloc || extsz) { 562 + error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 563 + &last_fsb); 564 + if (error) 565 + return error; 566 + } 567 + 735 568 nimaps = XFS_WRITE_IMAPS; 736 569 firstblock = NULLFSBLOCK; 737 - 738 - /* 739 - * If mounted with the "-o swalloc" option, roundup the allocation 740 - * request to a stripe width boundary if the file size is >= 741 - * stripe width and we are allocating past the allocation eof. 742 - */ 743 - if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth 744 - && (mp->m_flags & XFS_MOUNT_SWALLOC) 745 - && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) { 746 - int eof; 747 - xfs_fileoff_t new_last_fsb; 748 - 749 - new_last_fsb = roundup_64(last_fsb, mp->m_swidth); 750 - error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 751 - if (error) { 752 - return error; 753 - } 754 - if (eof) { 755 - last_fsb = new_last_fsb; 756 - } 757 - /* 758 - * Roundup the allocation request to a stripe unit (m_dalign) boundary 759 - * if the file size is >= stripe unit size, and we are allocating past 760 - * the allocation eof. 
761 - */ 762 - } else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign && 763 - (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) { 764 - int eof; 765 - xfs_fileoff_t new_last_fsb; 766 - new_last_fsb = roundup_64(last_fsb, mp->m_dalign); 767 - error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 768 - if (error) { 769 - return error; 770 - } 771 - if (eof) { 772 - last_fsb = new_last_fsb; 773 - } 774 - /* 775 - * Round up the allocation request to a real-time extent boundary 776 - * if the file is on the real-time subvolume. 777 - */ 778 - } else if (io->io_flags & XFS_IOCORE_RT && aeof) { 779 - int eof; 780 - xfs_fileoff_t new_last_fsb; 781 - 782 - new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize); 783 - error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof); 784 - if (error) { 785 - return error; 786 - } 787 - if (eof) 788 - last_fsb = new_last_fsb; 789 - } 790 570 error = xfs_bmapi(NULL, ip, offset_fsb, 791 571 (xfs_filblks_t)(last_fsb - offset_fsb), 792 572 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 793 573 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 794 574 &nimaps, NULL); 795 - /* 796 - * This can be EDQUOT, if nimaps == 0 797 - */ 798 - if (error && (error != ENOSPC)) { 575 + if (error && (error != ENOSPC)) 799 576 return XFS_ERROR(error); 800 - } 577 + 801 578 /* 802 579 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 803 - * then we must have run out of space. 580 + * then we must have run out of space - flush delalloc, and retry.. 804 581 */ 805 582 if (nimaps == 0) { 806 583 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, ··· 717 684 goto retry; 718 685 } 719 686 720 - *ret_imap = imap[0]; 721 - *nmaps = 1; 722 - if ( !(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) { 687 + if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) { 723 688 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld " 724 689 "start_block : %llx start_off : %llx blkcnt : %llx " 725 690 "extent-state : %x \n", 726 691 (ip->i_mount)->m_fsname, 727 692 (long long)ip->i_ino, 728 - ret_imap->br_startblock, ret_imap->br_startoff, 729 - ret_imap->br_blockcount,ret_imap->br_state); 693 + (unsigned long long)ret_imap->br_startblock, 694 + (unsigned long long)ret_imap->br_startoff, 695 + (unsigned long long)ret_imap->br_blockcount, 696 + ret_imap->br_state); 730 697 } 698 + 699 + *ret_imap = imap[0]; 700 + *nmaps = 1; 701 + 731 702 return 0; 732 703 } 733 704 ··· 857 820 */ 858 821 859 822 for (i = 0; i < nimaps; i++) { 860 - if ( !(io->io_flags & XFS_IOCORE_RT) && 861 - !imap[i].br_startblock) { 823 + if (!(io->io_flags & XFS_IOCORE_RT) && 824 + !imap[i].br_startblock) { 862 825 cmn_err(CE_PANIC,"Access to block zero: " 863 826 "fs <%s> inode: %lld " 864 - "start_block : %llx start_off : %llx " 827 + "start_block : %llx start_off : %llx " 865 828 "blkcnt : %llx extent-state : %x \n", 866 829 (ip->i_mount)->m_fsname, 867 830 (long long)ip->i_ino, 868 - imap[i].br_startblock, 869 - imap[i].br_startoff, 870 - imap[i].br_blockcount,imap[i].br_state); 831 + (unsigned long long) 832 + imap[i].br_startblock, 833 + (unsigned long long) 834 + imap[i].br_startoff, 835 + (unsigned long long) 836 + imap[i].br_blockcount, 837 + imap[i].br_state); 871 838 } 872 839 if ((offset_fsb >= imap[i].br_startoff) && 873 840 (offset_fsb < (imap[i].br_startoff + ··· 908 867 { 909 868 xfs_mount_t *mp = ip->i_mount; 910 869 xfs_iocore_t *io = &ip->i_iocore; 911 - xfs_trans_t *tp; 912 870 xfs_fileoff_t offset_fsb; 913 871 xfs_filblks_t count_fsb; 914 872 xfs_filblks_t 
numblks_fsb; 915 - xfs_bmbt_irec_t imap; 873 + xfs_fsblock_t firstfsb; 874 + int nimaps; 875 + xfs_trans_t *tp; 876 + xfs_bmbt_irec_t imap; 877 + xfs_bmap_free_t free_list; 878 + uint resblks; 916 879 int committed; 917 880 int error; 918 - int nres; 919 - int nimaps; 920 - xfs_fsblock_t firstfsb; 921 - xfs_bmap_free_t free_list; 922 881 923 882 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, 924 883 &ip->i_iocore, offset, count); ··· 927 886 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 928 887 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 929 888 930 - do { 931 - nres = XFS_DIOSTRAT_SPACE_RES(mp, 0); 889 + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; 932 890 891 + do { 933 892 /* 934 893 * set up a transaction to convert the range of extents 935 894 * from unwritten to real. Do allocations in a loop until ··· 937 896 */ 938 897 939 898 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 940 - error = xfs_trans_reserve(tp, nres, 899 + error = xfs_trans_reserve(tp, resblks, 941 900 XFS_WRITE_LOG_RES(mp), 0, 942 901 XFS_TRANS_PERM_LOG_RES, 943 902 XFS_WRITE_LOG_COUNT); ··· 956 915 XFS_BMAP_INIT(&free_list, &firstfsb); 957 916 nimaps = 1; 958 917 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 959 - XFS_BMAPI_WRITE, &firstfsb, 918 + XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, 960 919 1, &imap, &nimaps, &free_list); 961 920 if (error) 962 921 goto error_on_bmapi_transaction; ··· 970 929 xfs_iunlock(ip, XFS_ILOCK_EXCL); 971 930 if (error) 972 931 goto error0; 973 - 932 + 974 933 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) { 975 934 cmn_err(CE_PANIC,"Access to block zero: fs <%s> " 976 935 "inode: %lld start_block : %llx start_off : " 977 936 "%llx blkcnt : %llx extent-state : %x \n", 978 937 (ip->i_mount)->m_fsname, 979 938 (long long)ip->i_ino, 980 - imap.br_startblock,imap.br_startoff, 981 - imap.br_blockcount,imap.br_state); 939 + (unsigned long long)imap.br_startblock, 940 + (unsigned long long)imap.br_startoff, 941 + (unsigned long long)imap.br_blockcount, 942 + imap.br_state); 982 943 } 983 944 984 945 if ((numblks_fsb = imap.br_blockcount) == 0) {
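The reworked reservation math in xfs_iomap_write_direct()/xfs_iomap_write_delay() above pads the requested block range at both ends so it covers whole extent-size (extsz) chunks before data blocks or realtime extents are reserved. A minimal, self-contained sketch of that rounding follows; it uses plain C arithmetic in place of the kernel's do_mod()/do_div() 64-bit helpers, and the function name is only illustrative.

    /*
     * Illustration only -- not the kernel code.  Pad the block range
     * [offset_fsb, offset_fsb + count_fsb) so the reservation covers whole
     * extsz-aligned extents, mirroring the "resaligned" calculation above.
     */
    #include <stdint.h>

    static uint64_t aligned_reservation(uint64_t offset_fsb,
                                        uint64_t count_fsb,
                                        uint32_t extsz)
    {
        uint64_t resaligned = count_fsb;
        uint64_t temp;

        if (extsz) {
            temp = offset_fsb % extsz;          /* pad down at the start */
            if (temp)
                resaligned += temp;
            temp = resaligned % extsz;          /* pad up at the end */
            if (temp)
                resaligned += extsz - temp;
        }
        return resaligned;
    }

    /*
     * Example: offset_fsb = 5, count_fsb = 5, extsz = 4 gives resaligned = 8,
     * i.e. the two extents [4,8) and [8,12) that cover blocks [5,10).
     */

For the realtime case the padded figure is then divided by sb_rextsize to obtain the rt-extent reservation, as the hunk does.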
+3 -2
fs/xfs/xfs_itable.c
··· 56 56 { 57 57 xfs_dinode_core_t *dic; /* dinode core info pointer */ 58 58 xfs_inode_t *ip; /* incore inode pointer */ 59 + vnode_t *vp; 59 60 int error; 60 61 61 62 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno); ··· 73 72 goto out_iput; 74 73 } 75 74 75 + vp = XFS_ITOV(ip); 76 76 dic = &ip->i_d; 77 77 78 78 /* xfs_iget returns the following without needing ··· 86 84 buf->bs_uid = dic->di_uid; 87 85 buf->bs_gid = dic->di_gid; 88 86 buf->bs_size = dic->di_size; 89 - buf->bs_atime.tv_sec = dic->di_atime.t_sec; 90 - buf->bs_atime.tv_nsec = dic->di_atime.t_nsec; 87 + vn_atime_to_bstime(vp, &buf->bs_atime); 91 88 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; 92 89 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; 93 90 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
+96 -27
fs/xfs/xfs_log.c
··· 178 178 #define xlog_trace_iclog(iclog,state) 179 179 #endif /* XFS_LOG_TRACE */ 180 180 181 + 182 + static void 183 + xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) 184 + { 185 + if (*qp) { 186 + tic->t_next = (*qp); 187 + tic->t_prev = (*qp)->t_prev; 188 + (*qp)->t_prev->t_next = tic; 189 + (*qp)->t_prev = tic; 190 + } else { 191 + tic->t_prev = tic->t_next = tic; 192 + *qp = tic; 193 + } 194 + 195 + tic->t_flags |= XLOG_TIC_IN_Q; 196 + } 197 + 198 + static void 199 + xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) 200 + { 201 + if (tic == tic->t_next) { 202 + *qp = NULL; 203 + } else { 204 + *qp = tic->t_next; 205 + tic->t_next->t_prev = tic->t_prev; 206 + tic->t_prev->t_next = tic->t_next; 207 + } 208 + 209 + tic->t_next = tic->t_prev = NULL; 210 + tic->t_flags &= ~XLOG_TIC_IN_Q; 211 + } 212 + 213 + static void 214 + xlog_grant_sub_space(struct log *log, int bytes) 215 + { 216 + log->l_grant_write_bytes -= bytes; 217 + if (log->l_grant_write_bytes < 0) { 218 + log->l_grant_write_bytes += log->l_logsize; 219 + log->l_grant_write_cycle--; 220 + } 221 + 222 + log->l_grant_reserve_bytes -= bytes; 223 + if ((log)->l_grant_reserve_bytes < 0) { 224 + log->l_grant_reserve_bytes += log->l_logsize; 225 + log->l_grant_reserve_cycle--; 226 + } 227 + 228 + } 229 + 230 + static void 231 + xlog_grant_add_space_write(struct log *log, int bytes) 232 + { 233 + log->l_grant_write_bytes += bytes; 234 + if (log->l_grant_write_bytes > log->l_logsize) { 235 + log->l_grant_write_bytes -= log->l_logsize; 236 + log->l_grant_write_cycle++; 237 + } 238 + } 239 + 240 + static void 241 + xlog_grant_add_space_reserve(struct log *log, int bytes) 242 + { 243 + log->l_grant_reserve_bytes += bytes; 244 + if (log->l_grant_reserve_bytes > log->l_logsize) { 245 + log->l_grant_reserve_bytes -= log->l_logsize; 246 + log->l_grant_reserve_cycle++; 247 + } 248 + } 249 + 250 + static inline void 251 + xlog_grant_add_space(struct log *log, int bytes) 252 + { 253 + xlog_grant_add_space_write(log, bytes); 254 + xlog_grant_add_space_reserve(log, bytes); 255 + } 256 + 257 + 181 258 /* 182 259 * NOTES: 183 260 * ··· 505 428 if (readonly) 506 429 vfsp->vfs_flag &= ~VFS_RDONLY; 507 430 508 - error = xlog_recover(mp->m_log, readonly); 431 + error = xlog_recover(mp->m_log); 509 432 510 433 if (readonly) 511 434 vfsp->vfs_flag |= VFS_RDONLY; ··· 1397 1320 1398 1321 /* move grant heads by roundoff in sync */ 1399 1322 s = GRANT_LOCK(log); 1400 - XLOG_GRANT_ADD_SPACE(log, roundoff, 'w'); 1401 - XLOG_GRANT_ADD_SPACE(log, roundoff, 'r'); 1323 + xlog_grant_add_space(log, roundoff); 1402 1324 GRANT_UNLOCK(log, s); 1403 1325 1404 1326 /* put cycle number in every block */ ··· 1591 1515 * print out info relating to regions written which consume 1592 1516 * the reservation 1593 1517 */ 1594 - #if defined(XFS_LOG_RES_DEBUG) 1595 1518 STATIC void 1596 1519 xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket) 1597 1520 { ··· 1680 1605 ticket->t_res_arr_sum, ticket->t_res_o_flow, 1681 1606 ticket->t_res_num_ophdrs, ophdr_spc, 1682 1607 ticket->t_res_arr_sum + 1683 - ticket->t_res_o_flow + ophdr_spc, 1608 + ticket->t_res_o_flow + ophdr_spc, 1684 1609 ticket->t_res_num); 1685 1610 1686 1611 for (i = 0; i < ticket->t_res_num; i++) { 1687 - uint r_type = ticket->t_res_arr[i].r_type; 1612 + uint r_type = ticket->t_res_arr[i].r_type; 1688 1613 cmn_err(CE_WARN, 1689 1614 "region[%u]: %s - %u bytes\n", 1690 1615 i, ··· 1693 1618 ticket->t_res_arr[i].r_len); 1694 1619 } 1695 1620 } 1696 - #else 1697 - #define 
xlog_print_tic_res(mp, ticket) 1698 - #endif 1699 1621 1700 1622 /* 1701 1623 * Write some region out to in-core log ··· 2461 2389 2462 2390 /* something is already sleeping; insert new transaction at end */ 2463 2391 if (log->l_reserve_headq) { 2464 - XLOG_INS_TICKETQ(log->l_reserve_headq, tic); 2392 + xlog_ins_ticketq(&log->l_reserve_headq, tic); 2465 2393 xlog_trace_loggrant(log, tic, 2466 2394 "xlog_grant_log_space: sleep 1"); 2467 2395 /* ··· 2494 2422 log->l_grant_reserve_bytes); 2495 2423 if (free_bytes < need_bytes) { 2496 2424 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2497 - XLOG_INS_TICKETQ(log->l_reserve_headq, tic); 2425 + xlog_ins_ticketq(&log->l_reserve_headq, tic); 2498 2426 xlog_trace_loggrant(log, tic, 2499 2427 "xlog_grant_log_space: sleep 2"); 2500 2428 XFS_STATS_INC(xs_sleep_logspace); ··· 2511 2439 s = GRANT_LOCK(log); 2512 2440 goto redo; 2513 2441 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2514 - XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); 2442 + xlog_del_ticketq(&log->l_reserve_headq, tic); 2515 2443 2516 2444 /* we've got enough space */ 2517 - XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); 2518 - XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r'); 2445 + xlog_grant_add_space(log, need_bytes); 2519 2446 #ifdef DEBUG 2520 2447 tail_lsn = log->l_tail_lsn; 2521 2448 /* ··· 2535 2464 2536 2465 error_return: 2537 2466 if (tic->t_flags & XLOG_TIC_IN_Q) 2538 - XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); 2467 + xlog_del_ticketq(&log->l_reserve_headq, tic); 2539 2468 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); 2540 2469 /* 2541 2470 * If we are failing, make sure the ticket doesn't have any ··· 2604 2533 2605 2534 if (ntic != log->l_write_headq) { 2606 2535 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2607 - XLOG_INS_TICKETQ(log->l_write_headq, tic); 2536 + xlog_ins_ticketq(&log->l_write_headq, tic); 2608 2537 2609 2538 xlog_trace_loggrant(log, tic, 2610 2539 "xlog_regrant_write_log_space: sleep 1"); ··· 2636 2565 log->l_grant_write_bytes); 2637 2566 if (free_bytes < need_bytes) { 2638 2567 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2639 - XLOG_INS_TICKETQ(log->l_write_headq, tic); 2568 + xlog_ins_ticketq(&log->l_write_headq, tic); 2640 2569 XFS_STATS_INC(xs_sleep_logspace); 2641 2570 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2642 2571 ··· 2652 2581 s = GRANT_LOCK(log); 2653 2582 goto redo; 2654 2583 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2655 - XLOG_DEL_TICKETQ(log->l_write_headq, tic); 2584 + xlog_del_ticketq(&log->l_write_headq, tic); 2656 2585 2657 - XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */ 2586 + /* we've got enough space */ 2587 + xlog_grant_add_space_write(log, need_bytes); 2658 2588 #ifdef DEBUG 2659 2589 tail_lsn = log->l_tail_lsn; 2660 2590 if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { ··· 2672 2600 2673 2601 error_return: 2674 2602 if (tic->t_flags & XLOG_TIC_IN_Q) 2675 - XLOG_DEL_TICKETQ(log->l_reserve_headq, tic); 2603 + xlog_del_ticketq(&log->l_reserve_headq, tic); 2676 2604 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); 2677 2605 /* 2678 2606 * If we are failing, make sure the ticket doesn't have any ··· 2705 2633 ticket->t_cnt--; 2706 2634 2707 2635 s = GRANT_LOCK(log); 2708 - XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); 2709 - XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); 2636 + xlog_grant_sub_space(log, ticket->t_curr_res); 2710 2637 ticket->t_curr_res = ticket->t_unit_res; 2711 2638 XLOG_TIC_RESET_RES(ticket); 2712 2639 xlog_trace_loggrant(log, ticket, 
··· 2718 2647 return; 2719 2648 } 2720 2649 2721 - XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r'); 2650 + xlog_grant_add_space_reserve(log, ticket->t_unit_res); 2722 2651 xlog_trace_loggrant(log, ticket, 2723 2652 "xlog_regrant_reserve_log_space: exit"); 2724 2653 xlog_verify_grant_head(log, 0); ··· 2754 2683 s = GRANT_LOCK(log); 2755 2684 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); 2756 2685 2757 - XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); 2758 - XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); 2686 + xlog_grant_sub_space(log, ticket->t_curr_res); 2759 2687 2760 2688 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); 2761 2689 ··· 2763 2693 */ 2764 2694 if (ticket->t_cnt > 0) { 2765 2695 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 2766 - XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w'); 2767 - XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r'); 2696 + xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt); 2768 2697 } 2769 2698 2770 2699 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
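The XLOG_GRANT_{ADD,SUB}_SPACE macros are replaced above by the xlog_grant_*_space() helpers, which treat each grant head as a (cycle, bytes) pair that wraps around the physical log: moving past l_logsize bumps the cycle, moving below zero drops it. A self-contained sketch of the same arithmetic with made-up numbers (struct and function names are illustrative):

    /*
     * Demo of the grant-head wraparound used by xlog_grant_add/sub_space()
     * above.  The log is logsize bytes; a head is (cycle, bytes).
     */
    #include <assert.h>

    struct demo_grant_head {
        int cycle;
        int bytes;
    };

    static void grant_add(struct demo_grant_head *h, int bytes, int logsize)
    {
        h->bytes += bytes;
        if (h->bytes > logsize) {       /* wrapped past the end of the log */
            h->bytes -= logsize;
            h->cycle++;
        }
    }

    static void grant_sub(struct demo_grant_head *h, int bytes, int logsize)
    {
        h->bytes -= bytes;
        if (h->bytes < 0) {             /* wrapped back past the start */
            h->bytes += logsize;
            h->cycle--;
        }
    }

    int main(void)
    {
        struct demo_grant_head head = { .cycle = 1, .bytes = 900 };

        grant_add(&head, 300, 1000);    /* 900 + 300 wraps: cycle 2, 200 left */
        assert(head.cycle == 2 && head.bytes == 200);
        grant_sub(&head, 500, 1000);    /* 200 - 500 wraps back: cycle 1, 700 */
        assert(head.cycle == 1 && head.bytes == 700);
        return 0;
    }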
+1 -10
fs/xfs/xfs_log.h
··· 96 96 97 97 98 98 /* Region types for iovec's i_type */ 99 - #if defined(XFS_LOG_RES_DEBUG) 100 99 #define XLOG_REG_TYPE_BFORMAT 1 101 100 #define XLOG_REG_TYPE_BCHUNK 2 102 101 #define XLOG_REG_TYPE_EFI_FORMAT 3 ··· 116 117 #define XLOG_REG_TYPE_COMMIT 18 117 118 #define XLOG_REG_TYPE_TRANSHDR 19 118 119 #define XLOG_REG_TYPE_MAX 19 119 - #endif 120 120 121 - #if defined(XFS_LOG_RES_DEBUG) 122 121 #define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t)) 123 - #else 124 - #define XLOG_VEC_SET_TYPE(vecp, t) 125 - #endif 126 - 127 122 128 123 typedef struct xfs_log_iovec { 129 124 xfs_caddr_t i_addr; /* beginning address of region */ 130 125 int i_len; /* length in bytes of region */ 131 - #if defined(XFS_LOG_RES_DEBUG) 132 - uint i_type; /* type of region */ 133 - #endif 126 + uint i_type; /* type of region */ 134 127 } xfs_log_iovec_t; 135 128 136 129 typedef void* xfs_log_ticket_t;
+5 -72
fs/xfs/xfs_log_priv.h
··· 253 253 254 254 255 255 /* Ticket reservation region accounting */ 256 - #if defined(XFS_LOG_RES_DEBUG) 257 256 #define XLOG_TIC_LEN_MAX 15 258 257 #define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \ 259 258 (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0) ··· 277 278 * we don't care about. 278 279 */ 279 280 typedef struct xlog_res { 280 - uint r_len; 281 - uint r_type; 281 + uint r_len; /* region length :4 */ 282 + uint r_type; /* region's transaction type :4 */ 282 283 } xlog_res_t; 283 - #else 284 - #define XLOG_TIC_RESET_RES(t) 285 - #define XLOG_TIC_ADD_OPHDR(t) 286 - #define XLOG_TIC_ADD_REGION(t, len, type) 287 - #endif 288 - 289 284 290 285 typedef struct xlog_ticket { 291 286 sv_t t_sema; /* sleep on this semaphore : 20 */ ··· 294 301 char t_flags; /* properties of reservation : 1 */ 295 302 uint t_trans_type; /* transaction type : 4 */ 296 303 297 - #if defined (XFS_LOG_RES_DEBUG) 298 304 /* reservation array fields */ 299 305 uint t_res_num; /* num in array : 4 */ 300 - xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : X */ 301 306 uint t_res_num_ophdrs; /* num op hdrs : 4 */ 302 307 uint t_res_arr_sum; /* array sum : 4 */ 303 308 uint t_res_o_flow; /* sum overflow : 4 */ 304 - #endif 309 + xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : 8 * 15 */ 305 310 } xlog_ticket_t; 306 311 307 312 #endif ··· 485 494 486 495 #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) 487 496 488 - #define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ 489 - { \ 490 - if (type == 'w') { \ 491 - (log)->l_grant_write_bytes -= (bytes); \ 492 - if ((log)->l_grant_write_bytes < 0) { \ 493 - (log)->l_grant_write_bytes += (log)->l_logsize; \ 494 - (log)->l_grant_write_cycle--; \ 495 - } \ 496 - } else { \ 497 - (log)->l_grant_reserve_bytes -= (bytes); \ 498 - if ((log)->l_grant_reserve_bytes < 0) { \ 499 - (log)->l_grant_reserve_bytes += (log)->l_logsize;\ 500 - (log)->l_grant_reserve_cycle--; \ 501 - } \ 502 - } \ 503 - } 504 - #define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ 505 - { \ 506 - if (type == 'w') { \ 507 - (log)->l_grant_write_bytes += (bytes); \ 508 - if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ 509 - (log)->l_grant_write_bytes -= (log)->l_logsize; \ 510 - (log)->l_grant_write_cycle++; \ 511 - } \ 512 - } else { \ 513 - (log)->l_grant_reserve_bytes += (bytes); \ 514 - if ((log)->l_grant_reserve_bytes > (log)->l_logsize) { \ 515 - (log)->l_grant_reserve_bytes -= (log)->l_logsize;\ 516 - (log)->l_grant_reserve_cycle++; \ 517 - } \ 518 - } \ 519 - } 520 - #define XLOG_INS_TICKETQ(q, tic) \ 521 - { \ 522 - if (q) { \ 523 - (tic)->t_next = (q); \ 524 - (tic)->t_prev = (q)->t_prev; \ 525 - (q)->t_prev->t_next = (tic); \ 526 - (q)->t_prev = (tic); \ 527 - } else { \ 528 - (tic)->t_prev = (tic)->t_next = (tic); \ 529 - (q) = (tic); \ 530 - } \ 531 - (tic)->t_flags |= XLOG_TIC_IN_Q; \ 532 - } 533 - #define XLOG_DEL_TICKETQ(q, tic) \ 534 - { \ 535 - if ((tic) == (tic)->t_next) { \ 536 - (q) = NULL; \ 537 - } else { \ 538 - (q) = (tic)->t_next; \ 539 - (tic)->t_next->t_prev = (tic)->t_prev; \ 540 - (tic)->t_prev->t_next = (tic)->t_next; \ 541 - } \ 542 - (tic)->t_next = (tic)->t_prev = NULL; \ 543 - (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ 544 - } 545 497 546 498 /* common routines */ 547 499 extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); 548 500 extern int xlog_find_tail(xlog_t *log, 549 501 xfs_daddr_t *head_blk, 550 - xfs_daddr_t *tail_blk, 551 - int readonly); 552 - extern int xlog_recover(xlog_t *log, int readonly); 502 + xfs_daddr_t *tail_blk); 503 + 
extern int xlog_recover(xlog_t *log); 553 504 extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); 554 505 extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 555 506 extern void xlog_recover_process_iunlinks(xlog_t *log);
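The XLOG_INS_TICKETQ()/XLOG_DEL_TICKETQ() macros removed above now live as the xlog_ins_ticketq()/xlog_del_ticketq() functions added to xfs_log.c earlier in this diff. The queue is a circular doubly-linked list whose head pointer names the first ticket; new entries are linked in just before the head, i.e. at the tail. A tiny stand-alone demo of the same insert/delete logic, with illustrative names only:

    /*
     * Demo of the circular doubly-linked ticket queue scheme used by
     * xlog_ins_ticketq()/xlog_del_ticketq().  Not the kernel structures.
     */
    #include <assert.h>
    #include <stddef.h>

    struct demo_tic {
        struct demo_tic *next;
        struct demo_tic *prev;
    };

    static void ticketq_ins(struct demo_tic **qp, struct demo_tic *tic)
    {
        if (*qp) {
            tic->next = *qp;
            tic->prev = (*qp)->prev;
            (*qp)->prev->next = tic;
            (*qp)->prev = tic;
        } else {
            tic->prev = tic->next = tic;    /* first entry points at itself */
            *qp = tic;
        }
    }

    static void ticketq_del(struct demo_tic **qp, struct demo_tic *tic)
    {
        if (tic == tic->next) {
            *qp = NULL;                     /* last entry: queue is now empty */
        } else {
            *qp = tic->next;
            tic->next->prev = tic->prev;
            tic->prev->next = tic->next;
        }
        tic->next = tic->prev = NULL;
    }

    int main(void)
    {
        struct demo_tic a, b, *q = NULL;

        ticketq_ins(&q, &a);
        ticketq_ins(&q, &b);                /* circular order: a <-> b */
        assert(q == &a && a.next == &b && b.next == &a);
        ticketq_del(&q, &a);
        assert(q == &b && b.next == &b && b.prev == &b);
        ticketq_del(&q, &b);
        assert(q == NULL);
        return 0;
    }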
+6 -6
fs/xfs/xfs_log_recover.c
··· 783 783 xlog_find_tail( 784 784 xlog_t *log, 785 785 xfs_daddr_t *head_blk, 786 - xfs_daddr_t *tail_blk, 787 - int readonly) 786 + xfs_daddr_t *tail_blk) 788 787 { 789 788 xlog_rec_header_t *rhead; 790 789 xlog_op_header_t *op_head; ··· 2562 2563 2563 2564 /* 2564 2565 * The logitem format's flag tells us if this was user quotaoff, 2565 - * group quotaoff or both. 2566 + * group/project quotaoff or both. 2566 2567 */ 2567 2568 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) 2568 2569 log->l_quotaoffs_flag |= XFS_DQ_USER; 2570 + if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) 2571 + log->l_quotaoffs_flag |= XFS_DQ_PROJ; 2569 2572 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 2570 2573 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 2571 2574 ··· 3891 3890 */ 3892 3891 int 3893 3892 xlog_recover( 3894 - xlog_t *log, 3895 - int readonly) 3893 + xlog_t *log) 3896 3894 { 3897 3895 xfs_daddr_t head_blk, tail_blk; 3898 3896 int error; 3899 3897 3900 3898 /* find the tail of the log */ 3901 - if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly))) 3899 + if ((error = xlog_find_tail(log, &head_blk, &tail_blk))) 3902 3900 return error; 3903 3901 3904 3902 if (tail_blk != head_blk) {
+2 -3
fs/xfs/xfs_mount.c
··· 51 51 STATIC void xfs_uuid_unmount(xfs_mount_t *mp); 52 52 STATIC void xfs_unmountfs_wait(xfs_mount_t *); 53 53 54 - static struct { 54 + static const struct { 55 55 short offset; 56 56 short type; /* 0 = integer 57 57 * 1 = binary / string (no translation) ··· 1077 1077 1078 1078 xfs_iflush_all(mp); 1079 1079 1080 - XFS_QM_DQPURGEALL(mp, 1081 - XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING); 1080 + XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); 1082 1081 1083 1082 /* 1084 1083 * Flush out the log synchronously so that we know for sure
+1 -2
fs/xfs/xfs_mount.h
··· 308 308 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ 309 309 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ 310 310 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ 311 - #define m_dev m_ddev_targp->pbr_dev 312 311 __uint8_t m_dircook_elog; /* log d-cookie entry bits */ 313 312 __uint8_t m_blkbit_log; /* blocklog + NBBY */ 314 313 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ ··· 392 393 user */ 393 394 #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment 394 395 allocations */ 395 - #define XFS_MOUNT_COMPAT_ATTR (1ULL << 8) /* do not use attr2 format */ 396 + #define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */ 396 397 /* (1ULL << 9) -- currently unused */ 397 398 #define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ 398 399 #define XFS_MOUNT_SHARED (1ULL << 11) /* shared mount */
+2 -5
fs/xfs/xfs_rename.c
··· 243 243 xfs_inode_t *inodes[4]; 244 244 int target_ip_dropped = 0; /* dropped target_ip link? */ 245 245 vnode_t *src_dir_vp; 246 - bhv_desc_t *target_dir_bdp; 247 246 int spaceres; 248 247 int target_link_zero = 0; 249 248 int num_inodes; ··· 259 260 * Find the XFS behavior descriptor for the target directory 260 261 * vnode since it was not handed to us. 261 262 */ 262 - target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp), 263 - &xfs_vnodeops); 264 - if (target_dir_bdp == NULL) { 263 + target_dp = xfs_vtoi(target_dir_vp); 264 + if (target_dp == NULL) { 265 265 return XFS_ERROR(EXDEV); 266 266 } 267 267 268 268 src_dp = XFS_BHVTOI(src_dir_bdp); 269 - target_dp = XFS_BHVTOI(target_dir_bdp); 270 269 mp = src_dp->i_mount; 271 270 272 271 if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
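The behaviour-descriptor lookup on the target directory (vn_bhv_lookup_unlocked() plus XFS_BHVTOI()) is collapsed above into a single xfs_vtoi() call, as it also is in xfs_utils.c below. The helper's body is not part of this diff, so the sketch below is only an assumption about its shape: it simply composes the two calls the removed code used.

    /*
     * Assumed shape of the xfs_vtoi() wrapper -- a sketch for illustration,
     * not the actual helper: look up the XFS behaviour descriptor on the
     * vnode and convert it to the in-core inode, returning NULL when the
     * vnode has no XFS behaviour attached.
     */
    STATIC xfs_inode_t *
    sketch_vtoi(
        vnode_t     *vp)
    {
        bhv_desc_t  *bdp;

        bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
        if (!bdp)
            return NULL;
        return XFS_BHVTOI(bdp);
    }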
+4 -5
fs/xfs/xfs_rw.c
··· 238 238 } 239 239 return (EIO); 240 240 } 241 + 241 242 /* 242 243 * Prints out an ALERT message about I/O error. 243 244 */ ··· 253 252 "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx" 254 253 " (\"%s\") error %d buf count %zd", 255 254 (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, 256 - XFS_BUFTARG_NAME(bp->pb_target), 257 - (__uint64_t)blkno, 258 - func, 259 - XFS_BUF_GETERROR(bp), 260 - XFS_BUF_COUNT(bp)); 255 + XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), 256 + (__uint64_t)blkno, func, 257 + XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp)); 261 258 } 262 259 263 260 /*
-17
fs/xfs/xfs_sb.h
··· 68 68 (XFS_SB_VERSION_NUMBITS | \ 69 69 XFS_SB_VERSION_OKREALFBITS | \ 70 70 XFS_SB_VERSION_OKSASHFBITS) 71 - #define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits) \ 72 - (((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \ 73 - (morebits)) ? \ 74 - (XFS_SB_VERSION_4 | \ 75 - ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \ 76 - ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \ 77 - ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \ 78 - ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \ 79 - ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \ 80 - ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \ 81 - ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \ 82 - XFS_SB_VERSION_1) 83 71 84 72 /* 85 73 * There are two words to hold XFS "feature" bits: the original ··· 92 104 #define XFS_SB_VERSION2_OKREALBITS \ 93 105 (XFS_SB_VERSION2_OKREALFBITS | \ 94 106 XFS_SB_VERSION2_OKSASHFBITS ) 95 - 96 - /* 97 - * mkfs macro to set up sb_features2 word 98 - */ 99 - #define XFS_SB_VERSION2_MKFS(resvd1, sbcntr) 0 100 107 101 108 typedef struct xfs_sb 102 109 {
+8 -6
fs/xfs/xfs_trans.c
··· 1014 1014 xfs_log_item_t *lip; 1015 1015 int i; 1016 1016 #endif 1017 + xfs_mount_t *mp = tp->t_mountp; 1017 1018 1018 1019 /* 1019 1020 * See if the caller is being too lazy to figure out if ··· 1027 1026 * filesystem. This happens in paths where we detect 1028 1027 * corruption and decide to give up. 1029 1028 */ 1030 - if ((tp->t_flags & XFS_TRANS_DIRTY) && 1031 - !XFS_FORCED_SHUTDOWN(tp->t_mountp)) 1032 - xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE); 1029 + if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) { 1030 + XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp); 1031 + xfs_force_shutdown(mp, XFS_CORRUPT_INCORE); 1032 + } 1033 1033 #ifdef DEBUG 1034 1034 if (!(flags & XFS_TRANS_ABORT)) { 1035 1035 licp = &(tp->t_items); ··· 1042 1040 } 1043 1041 1044 1042 lip = lidp->lid_item; 1045 - if (!XFS_FORCED_SHUTDOWN(tp->t_mountp)) 1043 + if (!XFS_FORCED_SHUTDOWN(mp)) 1046 1044 ASSERT(!(lip->li_type == XFS_LI_EFD)); 1047 1045 } 1048 1046 licp = licp->lic_next; ··· 1050 1048 } 1051 1049 #endif 1052 1050 xfs_trans_unreserve_and_mod_sb(tp); 1053 - XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); 1051 + XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp); 1054 1052 1055 1053 if (tp->t_ticket) { 1056 1054 if (flags & XFS_TRANS_RELEASE_LOG_RES) { ··· 1059 1057 } else { 1060 1058 log_flags = 0; 1061 1059 } 1062 - xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags); 1060 + xfs_log_done(mp, tp->t_ticket, NULL, log_flags); 1063 1061 } 1064 1062 1065 1063 /* mark this thread as no longer being in a transaction */
-1
fs/xfs/xfs_trans.h
··· 973 973 void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *); 974 974 void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); 975 975 void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 976 - void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 977 976 void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); 978 977 void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); 979 978 void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
+3 -6
fs/xfs/xfs_utils.c
··· 55 55 xfs_inode_t **ipp) 56 56 { 57 57 vnode_t *vp; 58 - bhv_desc_t *bdp; 59 58 60 59 vp = VNAME_TO_VNODE(dentry); 61 - bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops); 62 - if (!bdp) { 63 - *ipp = NULL; 60 + 61 + *ipp = xfs_vtoi(vp); 62 + if (!*ipp) 64 63 return XFS_ERROR(ENOENT); 65 - } 66 64 VN_HOLD(vp); 67 - *ipp = XFS_BHVTOI(bdp); 68 65 return 0; 69 66 } 70 67
+28 -22
fs/xfs/xfs_vfsops.c
··· 53 53 #include "xfs_acl.h" 54 54 #include "xfs_attr.h" 55 55 #include "xfs_clnt.h" 56 + #include "xfs_fsops.h" 56 57 57 58 STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); 58 59 ··· 291 290 mp->m_flags |= XFS_MOUNT_IDELETE; 292 291 if (ap->flags & XFSMNT_DIRSYNC) 293 292 mp->m_flags |= XFS_MOUNT_DIRSYNC; 294 - if (ap->flags & XFSMNT_COMPAT_ATTR) 295 - mp->m_flags |= XFS_MOUNT_COMPAT_ATTR; 293 + if (ap->flags & XFSMNT_ATTR2) 294 + mp->m_flags |= XFS_MOUNT_ATTR2; 296 295 297 296 if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE) 298 297 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; ··· 313 312 mp->m_flags |= XFS_MOUNT_NOUUID; 314 313 if (ap->flags & XFSMNT_BARRIER) 315 314 mp->m_flags |= XFS_MOUNT_BARRIER; 315 + else 316 + mp->m_flags &= ~XFS_MOUNT_BARRIER; 316 317 317 318 return 0; 318 319 } ··· 333 330 334 331 /* Fail a mount where the logbuf is smaller then the log stripe */ 335 332 if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { 336 - if ((ap->logbufsize == -1) && 333 + if ((ap->logbufsize <= 0) && 337 334 (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) { 338 335 mp->m_logbsize = mp->m_sb.sb_logsunit; 339 - } else if (ap->logbufsize < mp->m_sb.sb_logsunit) { 336 + } else if (ap->logbufsize > 0 && 337 + ap->logbufsize < mp->m_sb.sb_logsunit) { 340 338 cmn_err(CE_WARN, 341 339 "XFS: logbuf size must be greater than or equal to log stripe size"); 342 340 return XFS_ERROR(EINVAL); ··· 349 345 "XFS: logbuf size for version 1 logs must be 16K or 32K"); 350 346 return XFS_ERROR(EINVAL); 351 347 } 348 + } 349 + 350 + if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { 351 + mp->m_flags |= XFS_MOUNT_ATTR2; 352 352 } 353 353 354 354 /* ··· 388 380 */ 389 381 if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) 390 382 return XFS_ERROR(EINVAL); 391 - } 392 - 393 - if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { 394 - mp->m_flags &= ~XFS_MOUNT_COMPAT_ATTR; 395 383 } 396 384 397 385 return 0; ··· 508 504 if (error) 509 505 goto error2; 510 506 507 + if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) 508 + xfs_mountfs_check_barriers(mp); 509 + 511 510 error = XFS_IOINIT(vfsp, args, flags); 512 511 if (error) 513 512 goto error2; 514 513 515 - if ((args->flags & XFSMNT_BARRIER) && 516 - !(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)) 517 - xfs_mountfs_check_barriers(mp); 518 514 return 0; 519 515 520 516 error2: ··· 658 654 mp->m_flags |= XFS_MOUNT_NOATIME; 659 655 else 660 656 mp->m_flags &= ~XFS_MOUNT_NOATIME; 657 + 658 + if (args->flags & XFSMNT_BARRIER) 659 + mp->m_flags |= XFS_MOUNT_BARRIER; 660 + else 661 + mp->m_flags &= ~XFS_MOUNT_BARRIER; 661 662 662 663 if ((vfsp->vfs_flag & VFS_RDONLY) && 663 664 !(*flags & MS_RDONLY)) { ··· 1643 1634 #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 1644 1635 #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and 1645 1636 * unwritten extent conversion */ 1637 + #define MNTOPT_NOBARRIER "nobarrier" /* .. 
disable */ 1646 1638 #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ 1647 1639 #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ 1648 1640 #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ ··· 1690 1680 int iosize; 1691 1681 1692 1682 args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1693 - args->flags |= XFSMNT_COMPAT_ATTR; 1694 1683 1695 1684 #if 0 /* XXX: off by default, until some remaining issues ironed out */ 1696 1685 args->flags |= XFSMNT_IDELETE; /* default to on */ ··· 1815 1806 args->flags |= XFSMNT_NOUUID; 1816 1807 } else if (!strcmp(this_char, MNTOPT_BARRIER)) { 1817 1808 args->flags |= XFSMNT_BARRIER; 1809 + } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { 1810 + args->flags &= ~XFSMNT_BARRIER; 1818 1811 } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 1819 1812 args->flags &= ~XFSMNT_IDELETE; 1820 1813 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { ··· 1826 1815 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { 1827 1816 args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1828 1817 } else if (!strcmp(this_char, MNTOPT_ATTR2)) { 1829 - args->flags &= ~XFSMNT_COMPAT_ATTR; 1818 + args->flags |= XFSMNT_ATTR2; 1830 1819 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 1831 - args->flags |= XFSMNT_COMPAT_ATTR; 1820 + args->flags &= ~XFSMNT_ATTR2; 1832 1821 } else if (!strcmp(this_char, "osyncisdsync")) { 1833 1822 /* no-op, this is now the default */ 1834 1823 printk("XFS: osyncisdsync is now the default, option is deprecated.\n"); ··· 1903 1892 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 1904 1893 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 1905 1894 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, 1906 - { XFS_MOUNT_BARRIER, "," MNTOPT_BARRIER }, 1907 1895 { XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP }, 1908 1896 { 0, NULL } 1909 1897 }; ··· 1924 1914 1925 1915 if (mp->m_logbufs > 0) 1926 1916 seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); 1927 - 1928 1917 if (mp->m_logbsize > 0) 1929 1918 seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); 1930 1919 1931 1920 if (mp->m_logname) 1932 1921 seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); 1933 - 1934 1922 if (mp->m_rtname) 1935 1923 seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); 1936 1924 1937 1925 if (mp->m_dalign > 0) 1938 1926 seq_printf(m, "," MNTOPT_SUNIT "=%d", 1939 1927 (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 1940 - 1941 1928 if (mp->m_swidth > 0) 1942 1929 seq_printf(m, "," MNTOPT_SWIDTH "=%d", 1943 1930 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 1944 1931 1945 - if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) 1946 - seq_printf(m, "," MNTOPT_ATTR2); 1947 - 1948 1932 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) 1949 1933 seq_printf(m, "," MNTOPT_LARGEIO); 1934 + if (mp->m_flags & XFS_MOUNT_BARRIER) 1935 + seq_printf(m, "," MNTOPT_BARRIER); 1950 1936 1951 1937 if (!(vfsp->vfs_flag & VFS_32BITINODES)) 1952 1938 seq_printf(m, "," MNTOPT_64BITINODE); 1953 - 1954 1939 if (vfsp->vfs_flag & VFS_GRPID) 1955 1940 seq_printf(m, "," MNTOPT_GRPID); 1956 1941 ··· 1964 1959 /* Push the superblock and write an unmount record */ 1965 1960 xfs_log_unmount_write(mp); 1966 1961 xfs_unmountfs_writesb(mp); 1962 + xfs_fs_log_dummy(mp); 1967 1963 } 1968 1964 1969 1965
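The option handling above adds a "nobarrier" keyword and switches both the mount and remount paths to deriving XFS_MOUNT_BARRIER from the parsed arguments with an explicit set-or-clear, instead of only ever setting it. A self-contained illustration of that pattern follows; the flag name, values and the initial state are made up for the demo and are not taken from the diff.

    /*
     * Demo: derive a flag from the option string every time it is parsed,
     * so a remount can turn it off and the last of "barrier"/"nobarrier"
     * wins.  Purely illustrative.
     */
    #include <assert.h>
    #include <string.h>

    #define DEMO_MNT_BARRIER (1u << 0)

    static unsigned int demo_apply_options(unsigned int flags, char *opts)
    {
        char *opt;

        for (opt = strtok(opts, ","); opt; opt = strtok(NULL, ",")) {
            if (!strcmp(opt, "barrier"))
                flags |= DEMO_MNT_BARRIER;
            else if (!strcmp(opt, "nobarrier"))
                flags &= ~DEMO_MNT_BARRIER;
        }
        return flags;
    }

    int main(void)
    {
        char mount_opts[]   = "rw,barrier";
        char remount_opts[] = "rw,nobarrier";
        char last_wins[]    = "nobarrier,barrier";
        unsigned int flags = 0;

        flags = demo_apply_options(flags, mount_opts);
        assert(flags & DEMO_MNT_BARRIER);

        flags = demo_apply_options(flags, remount_opts);    /* remount clears it */
        assert(!(flags & DEMO_MNT_BARRIER));

        flags = demo_apply_options(flags, last_wins);       /* later option wins */
        assert(flags & DEMO_MNT_BARRIER);
        return 0;
    }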
+93 -104
fs/xfs/xfs_vnodeops.c
··· 185 185 break; 186 186 } 187 187 188 - vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; 189 - vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec; 188 + vn_atime_to_timespec(vp, &vap->va_atime); 190 189 vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec; 191 190 vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 192 191 vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec; ··· 543 544 } 544 545 545 546 /* 546 - * Can't set extent size unless the file is marked, or 547 - * about to be marked as a realtime file. 548 - * 549 - * This check will be removed when fixed size extents 550 - * with buffered data writes is implemented. 551 - * 552 - */ 553 - if ((mask & XFS_AT_EXTSIZE) && 554 - ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != 555 - vap->va_extsize) && 556 - (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || 557 - ((mask & XFS_AT_XFLAGS) && 558 - (vap->va_xflags & XFS_XFLAG_REALTIME))))) { 559 - code = XFS_ERROR(EINVAL); 560 - goto error_return; 561 - } 562 - 563 - /* 564 547 * Can't change realtime flag if any extents are allocated. 565 548 */ 566 549 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && ··· 804 823 di_flags |= XFS_DIFLAG_RTINHERIT; 805 824 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS) 806 825 di_flags |= XFS_DIFLAG_NOSYMLINKS; 807 - } else { 826 + if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT) 827 + di_flags |= XFS_DIFLAG_EXTSZINHERIT; 828 + } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 808 829 if (vap->va_xflags & XFS_XFLAG_REALTIME) { 809 830 di_flags |= XFS_DIFLAG_REALTIME; 810 831 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 811 832 } else { 812 833 ip->i_iocore.io_flags &= ~XFS_IOCORE_RT; 813 834 } 835 + if (vap->va_xflags & XFS_XFLAG_EXTSIZE) 836 + di_flags |= XFS_DIFLAG_EXTSIZE; 814 837 } 815 838 ip->i_d.di_flags = di_flags; 816 839 } ··· 982 997 if (count <= 0) { 983 998 error = 0; 984 999 goto error_return; 985 - } 986 - 987 - if (!(ioflags & IO_INVIS)) { 988 - xfs_ichgtime(ip, XFS_ICHGTIME_ACC); 989 1000 } 990 1001 991 1002 /* ··· 1215 1234 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1216 1235 1217 1236 if (!error && (nimaps != 0) && 1218 - (imap.br_startblock != HOLESTARTBLOCK)) { 1237 + (imap.br_startblock != HOLESTARTBLOCK || 1238 + ip->i_delayed_blks)) { 1219 1239 /* 1220 1240 * Attach the dquots to the inode up front. 1221 1241 */ ··· 1551 1569 1552 1570 if (ip->i_d.di_nlink != 0) { 1553 1571 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1554 - ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1572 + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 || 1573 + ip->i_delayed_blks > 0)) && 1555 1574 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1556 - (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) { 1575 + (!(ip->i_d.di_flags & 1576 + (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { 1557 1577 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1558 1578 return (error); 1559 1579 /* Update linux inode block count after free above */ ··· 1612 1628 * only one with a reference to the inode. 
1613 1629 */ 1614 1630 truncate = ((ip->i_d.di_nlink == 0) && 1615 - ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) && 1631 + ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) || 1632 + (ip->i_delayed_blks > 0)) && 1616 1633 ((ip->i_d.di_mode & S_IFMT) == S_IFREG)); 1617 1634 1618 1635 mp = ip->i_mount; ··· 1631 1646 1632 1647 if (ip->i_d.di_nlink != 0) { 1633 1648 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1634 - ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1635 - (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1636 - (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) || 1637 - (ip->i_delayed_blks != 0))) { 1649 + ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 || 1650 + ip->i_delayed_blks > 0)) && 1651 + (ip->i_df.if_flags & XFS_IFEXTENTS) && 1652 + (!(ip->i_d.di_flags & 1653 + (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || 1654 + (ip->i_delayed_blks != 0)))) { 1638 1655 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1639 1656 return (VN_INACTIVE_CACHE); 1640 1657 /* Update linux inode block count after free above */ ··· 2580 2593 int cancel_flags; 2581 2594 int committed; 2582 2595 vnode_t *target_dir_vp; 2583 - bhv_desc_t *src_bdp; 2584 2596 int resblks; 2585 2597 char *target_name = VNAME(dentry); 2586 2598 int target_namelen; ··· 2592 2606 if (VN_ISDIR(src_vp)) 2593 2607 return XFS_ERROR(EPERM); 2594 2608 2595 - src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); 2596 - sip = XFS_BHVTOI(src_bdp); 2609 + sip = xfs_vtoi(src_vp); 2597 2610 tdp = XFS_BHVTOI(target_dir_bdp); 2598 2611 mp = tdp->i_mount; 2599 2612 if (XFS_FORCED_SHUTDOWN(mp)) ··· 3225 3240 xfs_trans_t *tp = NULL; 3226 3241 int error = 0; 3227 3242 uint lock_mode; 3228 - xfs_off_t start_offset; 3229 3243 3230 3244 vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__, 3231 3245 (inst_t *)__return_address); ··· 3235 3251 } 3236 3252 3237 3253 lock_mode = xfs_ilock_map_shared(dp); 3238 - start_offset = uiop->uio_offset; 3239 3254 error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp); 3240 - if (start_offset != uiop->uio_offset) { 3241 - xfs_ichgtime(dp, XFS_ICHGTIME_ACC); 3242 - } 3243 3255 xfs_iunlock_map_shared(dp, lock_mode); 3244 3256 return error; 3245 3257 } ··· 3812 3832 vn_iowait(vp); 3813 3833 3814 3834 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); 3815 - ASSERT(VN_CACHED(vp) == 0); 3835 + 3836 + /* 3837 + * Make sure the atime in the XFS inode is correct before freeing the 3838 + * Linux inode. 
3839 + */ 3840 + xfs_synchronize_atime(ip); 3816 3841 3817 3842 /* If we have nothing to flush with this inode then complete the 3818 3843 * teardown now, otherwise break the link between the xfs inode ··· 3987 4002 int alloc_type, 3988 4003 int attr_flags) 3989 4004 { 4005 + xfs_mount_t *mp = ip->i_mount; 4006 + xfs_off_t count; 3990 4007 xfs_filblks_t allocated_fsb; 3991 4008 xfs_filblks_t allocatesize_fsb; 3992 - int committed; 3993 - xfs_off_t count; 3994 - xfs_filblks_t datablocks; 3995 - int error; 3996 - xfs_fsblock_t firstfsb; 3997 - xfs_bmap_free_t free_list; 3998 - xfs_bmbt_irec_t *imapp; 3999 - xfs_bmbt_irec_t imaps[1]; 4000 - xfs_mount_t *mp; 4001 - int numrtextents; 4002 - int reccount; 4003 - uint resblks; 4004 - int rt; 4005 - int rtextsize; 4009 + xfs_extlen_t extsz, temp; 4006 4010 xfs_fileoff_t startoffset_fsb; 4011 + xfs_fsblock_t firstfsb; 4012 + int nimaps; 4013 + int bmapi_flag; 4014 + int quota_flag; 4015 + int rt; 4007 4016 xfs_trans_t *tp; 4008 - int xfs_bmapi_flags; 4017 + xfs_bmbt_irec_t imaps[1], *imapp; 4018 + xfs_bmap_free_t free_list; 4019 + uint qblocks, resblks, resrtextents; 4020 + int committed; 4021 + int error; 4009 4022 4010 4023 vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); 4011 - mp = ip->i_mount; 4012 4024 4013 4025 if (XFS_FORCED_SHUTDOWN(mp)) 4014 4026 return XFS_ERROR(EIO); 4015 4027 4016 - /* 4017 - * determine if this is a realtime file 4018 - */ 4019 - if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { 4020 - if (ip->i_d.di_extsize) 4021 - rtextsize = ip->i_d.di_extsize; 4022 - else 4023 - rtextsize = mp->m_sb.sb_rextsize; 4024 - } else 4025 - rtextsize = 0; 4028 + rt = XFS_IS_REALTIME_INODE(ip); 4029 + if (unlikely(rt)) { 4030 + if (!(extsz = ip->i_d.di_extsize)) 4031 + extsz = mp->m_sb.sb_rextsize; 4032 + } else { 4033 + extsz = ip->i_d.di_extsize; 4034 + } 4026 4035 4027 4036 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 4028 4037 return error; ··· 4027 4048 count = len; 4028 4049 error = 0; 4029 4050 imapp = &imaps[0]; 4030 - reccount = 1; 4031 - xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); 4051 + nimaps = 1; 4052 + bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); 4032 4053 startoffset_fsb = XFS_B_TO_FSBT(mp, offset); 4033 4054 allocatesize_fsb = XFS_B_TO_FSB(mp, count); 4034 4055 ··· 4049 4070 } 4050 4071 4051 4072 /* 4052 - * allocate file space until done or until there is an error 4073 + * Allocate file space until done or until there is an error 4053 4074 */ 4054 4075 retry: 4055 4076 while (allocatesize_fsb && !error) { 4056 - /* 4057 - * determine if reserving space on 4058 - * the data or realtime partition. 4059 - */ 4060 - if (rt) { 4061 - xfs_fileoff_t s, e; 4077 + xfs_fileoff_t s, e; 4062 4078 4079 + /* 4080 + * Determine space reservations for data/realtime. 
4081 + */ 4082 + if (unlikely(extsz)) { 4063 4083 s = startoffset_fsb; 4064 - do_div(s, rtextsize); 4065 - s *= rtextsize; 4066 - e = roundup_64(startoffset_fsb + allocatesize_fsb, 4067 - rtextsize); 4068 - numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; 4069 - datablocks = 0; 4084 + do_div(s, extsz); 4085 + s *= extsz; 4086 + e = startoffset_fsb + allocatesize_fsb; 4087 + if ((temp = do_mod(startoffset_fsb, extsz))) 4088 + e += temp; 4089 + if ((temp = do_mod(e, extsz))) 4090 + e += extsz - temp; 4070 4091 } else { 4071 - datablocks = allocatesize_fsb; 4072 - numrtextents = 0; 4092 + s = 0; 4093 + e = allocatesize_fsb; 4094 + } 4095 + 4096 + if (unlikely(rt)) { 4097 + resrtextents = qblocks = (uint)(e - s); 4098 + resrtextents /= mp->m_sb.sb_rextsize; 4099 + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 4100 + quota_flag = XFS_QMOPT_RES_RTBLKS; 4101 + } else { 4102 + resrtextents = 0; 4103 + resblks = qblocks = \ 4104 + XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s)); 4105 + quota_flag = XFS_QMOPT_RES_REGBLKS; 4073 4106 } 4074 4107 4075 4108 /* 4076 - * allocate and setup the transaction 4109 + * Allocate and setup the transaction. 4077 4110 */ 4078 4111 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 4079 - resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); 4080 - error = xfs_trans_reserve(tp, 4081 - resblks, 4082 - XFS_WRITE_LOG_RES(mp), 4083 - numrtextents, 4112 + error = xfs_trans_reserve(tp, resblks, 4113 + XFS_WRITE_LOG_RES(mp), resrtextents, 4084 4114 XFS_TRANS_PERM_LOG_RES, 4085 4115 XFS_WRITE_LOG_COUNT); 4086 - 4087 4116 /* 4088 - * check for running out of space 4117 + * Check for running out of space 4089 4118 */ 4090 4119 if (error) { 4091 4120 /* ··· 4104 4117 break; 4105 4118 } 4106 4119 xfs_ilock(ip, XFS_ILOCK_EXCL); 4107 - error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4108 - ip->i_udquot, ip->i_gdquot, resblks, 0, 0); 4120 + error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, 4121 + qblocks, 0, quota_flag); 4109 4122 if (error) 4110 4123 goto error1; 4111 4124 ··· 4113 4126 xfs_trans_ihold(tp, ip); 4114 4127 4115 4128 /* 4116 - * issue the bmapi() call to allocate the blocks 4129 + * Issue the xfs_bmapi() call to allocate the blocks 4117 4130 */ 4118 4131 XFS_BMAP_INIT(&free_list, &firstfsb); 4119 4132 error = xfs_bmapi(tp, ip, startoffset_fsb, 4120 - allocatesize_fsb, xfs_bmapi_flags, 4121 - &firstfsb, 0, imapp, &reccount, 4133 + allocatesize_fsb, bmapi_flag, 4134 + &firstfsb, 0, imapp, &nimaps, 4122 4135 &free_list); 4123 4136 if (error) { 4124 4137 goto error0; 4125 4138 } 4126 4139 4127 4140 /* 4128 - * complete the transaction 4141 + * Complete the transaction 4129 4142 */ 4130 4143 error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); 4131 4144 if (error) { ··· 4140 4153 4141 4154 allocated_fsb = imapp->br_blockcount; 4142 4155 4143 - if (reccount == 0) { 4156 + if (nimaps == 0) { 4144 4157 error = XFS_ERROR(ENOSPC); 4145 4158 break; 4146 4159 } ··· 4163 4176 4164 4177 return error; 4165 4178 4166 - error0: 4179 + error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ 4167 4180 xfs_bmap_cancel(&free_list); 4168 - error1: 4181 + XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag); 4182 + 4183 + error1: /* Just cancel transaction */ 4169 4184 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 4170 4185 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4171 4186 goto dmapi_enospc_check; ··· 4412 4423 } 4413 4424 xfs_ilock(ip, XFS_ILOCK_EXCL); 4414 4425 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4415 - ip->i_udquot, ip->i_gdquot, resblks, 0, rt 
? 4416 - XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4426 + ip->i_udquot, ip->i_gdquot, resblks, 0, 4427 + XFS_QMOPT_RES_REGBLKS); 4417 4428 if (error) 4418 4429 goto error1; 4419 4430
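The reworked error path in xfs_alloc_file_space() above uses two labels: error0 cancels the bmap work and unreserves the quota blocks, then falls through to error1, which only cancels the transaction and unlocks the inode; a quota-reservation failure jumps straight to error1 since there is nothing to unreserve yet. A generic, self-contained sketch of that staged goto-unwind idiom (the resources and names here are made up):

    /*
     * Demo of staged goto unwinding: a deeper failure jumps to a deeper
     * label that undoes more state and falls through to the shallower one.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int do_transaction(int fail_early, int fail_late)
    {
        char *trans = NULL, *quota = NULL;
        int error = 0;

        trans = malloc(16);                 /* stage 1: "transaction" */
        if (!trans)
            return -1;

        if (fail_early) {                   /* e.g. quota reservation fails */
            error = -1;
            goto error1;
        }
        quota = malloc(16);                 /* stage 2: "quota reservation" */
        if (!quota) {
            error = -1;
            goto error1;
        }

        if (fail_late) {                    /* e.g. the allocation itself fails */
            error = -1;
            goto error0;
        }

        free(quota);
        free(trans);
        return 0;

    error0:                                 /* undo stage 2, then fall through */
        free(quota);
    error1:                                 /* undo stage 1 */
        free(trans);
        return error;
    }

    int main(void)
    {
        printf("%d %d %d\n", do_transaction(0, 0),
               do_transaction(1, 0), do_transaction(0, 1));
        return 0;
    }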
+2
mm/swap.c
··· 384 384 return pagevec_count(pvec); 385 385 } 386 386 387 + EXPORT_SYMBOL(pagevec_lookup); 388 + 387 389 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, 388 390 pgoff_t *index, int tag, unsigned nr_pages) 389 391 {