Linux kernel mirror
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fs/ext3/inode.c at v3.15-rc4
/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/highuid.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include "ext3.h"
#include "xattr.h"
#include "acl.h"

static int ext3_writepage_trans_blocks(struct inode *inode);
static int ext3_block_truncate_page(struct inode *inode, loff_t from);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext3_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT3_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
			struct buffer_head *bh, ext3_fsblk_t blocknr)
{
	int err;

	might_sleep();

	trace_ext3_forget(inode, is_metadata, blocknr);
	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
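/*
 * For concreteness, the outcome of the test above under the three
 * mount-time journaling modes (assuming the per-inode journal-data
 * attribute is not set, which would make data blocks behave like the
 * is_metadata case):
 *
 *	data=journal:	forget, for metadata and data alike
 *	data=ordered:	data blocks forget, metadata blocks revoke
 *	data=writeback:	data blocks forget, metadata blocks revoke
 */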
/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext3 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
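/*
 * Illustrative arithmetic, assuming a 4KB block size (s_blocksize_bits
 * == 12): an inode with i_blocks == 80 (units of 512-byte sectors)
 * yields needed = 80 >> (12 - 9) = 10, so the estimate is
 * EXT3_DATA_TRANS_BLOCKS(sb) + 10 credits; needed itself is clamped to
 * the range [2, EXT3_MAX_TRANS_DATA] before the addition.
 */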
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext3_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
{
	int ret;

	jbd_debug(2, "restarting handle %p\n", handle);
	/*
	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle.
	 * At this moment, get_block can be called only for blocks inside
	 * i_size since page cache has been already dropped and writes are
	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
	 */
	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
	mutex_lock(&EXT3_I(inode)->truncate_mutex);
	return ret;
}

/*
 * Called at inode eviction from icache
 */
void ext3_evict_inode(struct inode *inode)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct ext3_block_alloc_info *rsv;
	handle_t *handle;
	int want_delete = 0;

	trace_ext3_evict_inode(inode);
	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);
		want_delete = 1;
	}

	/*
	 * When journalling data dirty buffers are tracked only in the journal.
	 * So although mm thinks everything is clean and ready for reaping the
	 * inode might still have some pages to write in the running
	 * transaction or waiting to be checkpointed. Thus calling
	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
	 * these buffers can cause data loss. Also even if we did not discard
	 * these buffers, we would have no way to find them after the inode
	 * is reaped and thus user could see stale data if he tries to read
	 * them before the transaction is checkpointed. So be careful and
	 * force everything to disk here... We use ei->i_datasync_tid to
	 * store the newest transaction containing inode's data.
	 *
	 * Note that directories do not have this problem because they don't
	 * use page cache.
	 *
	 * The s_journal check handles the case when ext3_get_journal() fails
	 * and puts the journal inode.
	 */
	if (inode->i_nlink && ext3_should_journal_data(inode) &&
	    EXT3_SB(inode->i_sb)->s_journal &&
	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
	    inode->i_ino != EXT3_JOURNAL_INO) {
		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;

		log_start_commit(journal, commit_tid);
		log_wait_commit(journal, commit_tid);
		filemap_write_and_wait(&inode->i_data);
	}
	truncate_inode_pages_final(&inode->i_data);

	ext3_discard_reservation(inode);
	rsv = ei->i_block_alloc_info;
	ei->i_block_alloc_info = NULL;
	if (unlikely(rsv))
		kfree(rsv);

	if (!want_delete)
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);
	/*
	 * Kill off the orphan record created when the inode lost the last
	 * link.  Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - ext3_truncate() could
	 * have removed the record.
	 */
	ext3_orphan_del(handle, inode);
	ei->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode)) {
		/* If that failed, just dquot_drop() and be done with that */
		dquot_drop(inode);
		clear_inode(inode);
	} else {
		ext3_xattr_delete_inode(handle, inode);
		dquot_free_inode(inode);
		dquot_drop(inode);
		clear_inode(inode);
		ext3_free_inode(handle, inode);
	}
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);
	dquot_drop(inode);
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
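/*
 * Illustrative picture of a filled chain, assuming a doubly-indirect
 * lookup (depth 3): chain[0].p points at a slot of the inode's
 * i_data[] with chain[0].bh == NULL, while chain[1].p and chain[2].p
 * point into the b_data of the two indirect-block buffer_heads, and
 * each .key caches the little-endian block number read through .p.
 * verify_chain() just re-reads every *p and compares it against the
 * cached key, so a concurrent truncate that zeroes a pointer makes the
 * check fail and the lookup is retried by the callers.
 */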
/**
 * ext3_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext3 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one. If @block is out of range
 * (negative or too large) warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
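/*
 * Worked example, assuming a 4KB block size: ptrs ==
 * EXT3_ADDR_PER_BLOCK == 1024 and ptrs_bits == 10.  For i_block ==
 * 70000: subtracting the 12 direct blocks leaves 69988, and
 * subtracting the 1024 singly-indirect slots leaves 68964, which is
 * below 1024*1024, so the block lives under the double-indirect tree.
 * The function returns depth 3 with offsets == { EXT3_DIND_BLOCK,
 * 68964 >> 10 == 67, 68964 & 1023 == 356 }, and *boundary ==
 * 1023 - 356 == 667 blocks remain before the next indirect boundary.
 */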
/**
 * ext3_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it notices that chain had been changed while it was reading
 *	(ditto, *@err == -EAGAIN)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 * ext3_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32 *)ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
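/*
 * Illustrative numbers for the colouring above, assuming 32768 blocks
 * per group (the default with 4KB blocks): each PID bucket is
 * 32768 / 16 == 2048 blocks wide, so a process with pid 1234
 * (1234 % 16 == 2) gets a goal of bg_start + 4096, keeping concurrent
 * allocators for different inodes out of each other's way.
 */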
/**
 * ext3_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */

static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
				   Indirect *partial)
{
	struct ext3_block_alloc_info *block_i;

	block_i = EXT3_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext3_find_near(inode, partial);
}

/**
 * ext3_blks_to_allocate - Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
				 int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) along this path have not
	 * been allocated yet, so clearly no blocks on that path have been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
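/*
 * Example of the k == 0 case above: the indirect block already exists
 * and branch[0].p points at the slot of the first missing block.  With
 * blks == 8 requested, blocks_to_boundary == 5, and the following
 * slots in the indirect block reading {0, 0, 1234, ...}, the scan
 * counts the first missing block plus two more empty slots and returns
 * 3 - only contiguous, still-unmapped slots up to the boundary are
 * counted.
 */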
/**
 * ext3_alloc_blocks - multiple allocate blocks needed for a branch
 * @handle: handle for this transaction
 * @inode: owner
 * @goal: preferred place for allocation
 * @indirect_blks: the number of blocks that need to be allocated for
 *	indirect blocks
 * @blks: the number of blocks that need to be allocated for direct blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 * @err: here we store the error value
 *
 * return the number of direct blocks allocated
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
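/*
 * One possible run of the allocation loop above, for illustration:
 * with indirect_blks == 2 and blks == 4, target starts at 6.  If
 * ext3_new_blocks() returns a 3-block extent on the first pass, the
 * two indirect blocks consume two of them, count drops to 1, and the
 * loop exits; the function returns 1, i.e. the caller gets fewer
 * direct blocks than it asked for rather than an error.
 */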
/**
 * ext3_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext3_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext3_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			int indirect_blks, int *blks, ext3_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext3_fsblk_t new_blocks[4];
	ext3_fsblk_t current_block;

	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext3_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);

	ext3_free_blocks(handle, inode, new_blocks[i], num);

	return err;
}
/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;
	struct ext3_inode_info *ei = EXT3_I(inode);
	struct timespec now;

	block_i = ei->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */
	now = CURRENT_TIME_SEC;
	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
		inode->i_ctime = now;
		ext3_mark_inode_dirty(handle, inode);
	}
	/* ext3_mark_inode_dirty already updated i_sync_tid */
	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
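/*
 * For orientation: when ext3_get_blocks_handle() below calls this with
 * num == 2, where[0] is the pre-existing triple whose *p slot was
 * still zero, where[1] and where[2] carry the buffer_heads of the two
 * freshly allocated indirect blocks, and where[num].key names the
 * first of the blks data blocks - which is why the error path above
 * frees where[num].key with length blks.
 */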
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext3_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int count = 0;
	ext3_fsblk_t first_block = 0;


	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
	J_ASSERT(handle != NULL || create == 0);
	depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext3_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now. Flag the err as EAGAIN, so it
				 * will reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	mutex_lock(&ei->truncate_mutex);

	/*
	 * If the indirect block is missing while we are reading
	 * the chain(ext3_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext3_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	trace_ext3_get_blocks_exit(inode, iblock,
				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
				   count, err);
	return err;
}
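/*
 * Reading that return convention with concrete values: a caller asking
 * for maxblocks == 8 at some iblock might get back 5, meaning five
 * contiguous blocks were mapped into bh_result (with the buffer's
 * "new" bit saying whether they were freshly allocated, and "boundary"
 * saying the run reaches the end of an indirect block), 0 for a hole
 * on a plain lookup (create == 0), or a negative errno such as -EIO.
 */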
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25
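/*
 * Spelling out that arithmetic: with B == 4096 and A == 256,
 * B/A == 16 gives 16 + 2 == 18 credits for singly-indirect blocks,
 * B/A/A == 0 gives 0 + 2 == 2 for doubly-indirect, plus 1 for the
 * triple-indirect block: 18 + 2 + 1 == 21, and adding the 4 blocks
 * for sb + group descriptor + bitmap + inode makes 25.
 */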
static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {	/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext3_journal_start(inode, DIO_CREDITS +
				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext3_journal_stop(handle);
out:
	return ret;
}

int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext3_get_block);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create);
	/*
	 * ext3_get_blocks_handle() returns number of blocks
	 * mapped. 0 in case of a HOLE.
	 */
	if (err > 0) {
		WARN_ON(err > 1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (unlikely(!bh)) {
			*errp = -ENOMEM;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext3_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
			       int block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext3_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (bh_uptodate_or_lock(bh))
		return bh;
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from, unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_prepare_write() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_prepare_write() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext3_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext3_journal_dirty_metadata(handle, bh);
	return ret;
}

/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext3_truncate_failed_write(struct inode *inode)
{
	truncate_inode_pages(inode->i_mapping, inode->i_size);
	ext3_truncate(inode);
}

/*
 * Truncate blocks that were not used by direct IO write. We have to zero out
 * the last file block as well because direct IO might have written to it.
 */
static void ext3_truncate_failed_direct_write(struct inode *inode)
{
	ext3_block_truncate_page(inode, inode->i_size);
	ext3_truncate(inode);
}
static int ext3_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	/* Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason */
	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;

	trace_ext3_write_begin(inode, pos, len, flags);

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = __block_write_begin(page, pos, len, ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before truncate
		 * finishes. Do this only if ext3_can_truncate() agrees so
		 * that orphan processing code is happy.
		 */
		if (pos + len > inode->i_size && ext3_can_truncate(inode))
			ext3_orphan_add(handle, inode);
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
		if (pos + len > inode->i_size)
			ext3_truncate_failed_write(inode);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}


int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__func__, __func__,
						bh, handle, err);
	return err;
}

/* For ordered writepage and write_end functions */
static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
	/*
	 * Write could have mapped the buffer but it didn't copy the data in
	 * yet. So avoid filing such buffer into a transaction.
	 */
	if (buffer_mapped(bh) && buffer_uptodate(bh))
		return ext3_journal_dirty_data(handle, bh);
	return 0;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext3_journal_dirty_metadata(handle, bh);
}
/*
 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
 * for the whole page but later we failed to copy the data in. Update inode
 * size according to what we managed to copy. The rest is going to be
 * truncated in write_end function.
 */
static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
{
	/* What matters to us is i_disksize. We don't write i_size anywhere */
	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	if (pos + copied > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = pos + copied;
		mark_inode_dirty(inode);
	}
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	trace_ext3_ordered_write_end(inode, pos, len, copied);
	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + copied;
	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, journal_dirty_data_fn);

	if (ret == 0)
		update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}

static int ext3_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret;

	trace_ext3_writeback_write_end(inode, pos, len, copied);
	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	update_file_sizes(inode, pos, copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ret = ext3_journal_stop(handle);
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}
static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	trace_ext3_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from + copied, to);
		to = from + copied;
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	/*
	 * There may be allocated blocks outside of i_size because
	 * we failed to copy some data. Prepare for truncate.
	 */
	if (pos + len > inode->i_size && ext3_can_truncate(inode))
		ext3_orphan_add(handle, inode);
	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
	if (inode->i_size > ei->i_disksize) {
		ei->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	if (pos + len > inode->i_size)
		ext3_truncate_failed_write(inode);
	return ret ? ret : copied;
}

/*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeroes written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}
/*
 * Note that whenever we need to map blocks we start a transaction even if
 * we're not journalling data.  This is to preserve ordering: any hole
 * instantiation within __block_write_full_page -> ext3_get_block() should be
 * journalled along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	/*
	 * We don't want to warn for emergency remount. The condition is
	 * ordered to avoid dereferencing inode->i_sb in non-error case to
	 * avoid slow-downs.
	 */
	WARN_ON_ONCE(IS_RDONLY(inode) &&
		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	trace_ext3_ordered_writepage(page);
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
		page_bufs = page_buffers(page);
	} else {
		page_bufs = page_buffers(page);
		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
				       NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}
	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0)
		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	/*
	 * We don't want to warn for emergency remount. The condition is
	 * ordered to avoid dereferencing inode->i_sb in non-error case to
	 * avoid slow-downs.
	 */
	WARN_ON_ONCE(IS_RDONLY(inode) &&
		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));

	if (ext3_journal_current_handle())
		goto out_fail;

	trace_ext3_writeback_writepage(page);
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0,
				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
			/* Provide NULL get_block() to catch bugs if buffers
			 * weren't really mapped */
			return block_write_full_page(page, NULL, wbc);
		}
	}

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));
	/*
	 * We don't want to warn for emergency remount. The condition is
	 * ordered to avoid dereferencing inode->i_sb in non-error case to
	 * avoid slow-downs.
	 */
	WARN_ON_ONCE(IS_RDONLY(inode) &&
		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));

	if (ext3_journal_current_handle())
		goto no_write;

	trace_ext3_journalled_writepage(page);
	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
					  ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
		atomic_set(&EXT3_I(inode)->i_datasync_tid,
			   handle->h_transaction->t_tid);
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}

static int ext3_readpage(struct file *file, struct page *page)
{
	trace_ext3_readpage(page);
	return mpage_readpage(page, ext3_get_block);
}

static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}
1821 */ 1822static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, 1823 const struct iovec *iov, loff_t offset, 1824 unsigned long nr_segs) 1825{ 1826 struct file *file = iocb->ki_filp; 1827 struct inode *inode = file->f_mapping->host; 1828 struct ext3_inode_info *ei = EXT3_I(inode); 1829 handle_t *handle; 1830 ssize_t ret; 1831 int orphan = 0; 1832 size_t count = iov_length(iov, nr_segs); 1833 int retries = 0; 1834 1835 trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 1836 1837 if (rw == WRITE) { 1838 loff_t final_size = offset + count; 1839 1840 if (final_size > inode->i_size) { 1841 /* Credits for sb + inode write */ 1842 handle = ext3_journal_start(inode, 2); 1843 if (IS_ERR(handle)) { 1844 ret = PTR_ERR(handle); 1845 goto out; 1846 } 1847 ret = ext3_orphan_add(handle, inode); 1848 if (ret) { 1849 ext3_journal_stop(handle); 1850 goto out; 1851 } 1852 orphan = 1; 1853 ei->i_disksize = inode->i_size; 1854 ext3_journal_stop(handle); 1855 } 1856 } 1857 1858retry: 1859 ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 1860 ext3_get_block); 1861 /* 1862 * In case of error extending write may have instantiated a few 1863 * blocks outside i_size. Trim these off again. 1864 */ 1865 if (unlikely((rw & WRITE) && ret < 0)) { 1866 loff_t isize = i_size_read(inode); 1867 loff_t end = offset + iov_length(iov, nr_segs); 1868 1869 if (end > isize) 1870 ext3_truncate_failed_direct_write(inode); 1871 } 1872 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) 1873 goto retry; 1874 1875 if (orphan) { 1876 int err; 1877 1878 /* Credits for sb + inode write */ 1879 handle = ext3_journal_start(inode, 2); 1880 if (IS_ERR(handle)) { 1881 /* This is really bad luck. We've written the data 1882 * but cannot extend i_size. Truncate allocated blocks 1883 * and pretend the write failed... */ 1884 ext3_truncate_failed_direct_write(inode); 1885 ret = PTR_ERR(handle); 1886 if (inode->i_nlink) 1887 ext3_orphan_del(NULL, inode); 1888 goto out; 1889 } 1890 if (inode->i_nlink) 1891 ext3_orphan_del(handle, inode); 1892 if (ret > 0) { 1893 loff_t end = offset + ret; 1894 if (end > inode->i_size) { 1895 ei->i_disksize = end; 1896 i_size_write(inode, end); 1897 /* 1898 * We're going to return a positive `ret' 1899 * here due to non-zero-length I/O, so there's 1900 * no way of reporting error returns from 1901 * ext3_mark_inode_dirty() to userspace. So 1902 * ignore it. 1903 */ 1904 ext3_mark_inode_dirty(handle, inode); 1905 } 1906 } 1907 err = ext3_journal_stop(handle); 1908 if (ret == 0) 1909 ret = err; 1910 } 1911out: 1912 trace_ext3_direct_IO_exit(inode, offset, 1913 iov_length(iov, nr_segs), rw, ret); 1914 return ret; 1915} 1916 1917/* 1918 * Pages can be marked dirty completely asynchronously from ext3's journalling 1919 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 1920 * much here because ->set_page_dirty is called under VFS locks. The page is 1921 * not necessarily locked. 1922 * 1923 * We cannot just dirty the page and leave attached buffers clean, because the 1924 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 1925 * or jbddirty because all the journalling code will explode. 1926 * 1927 * So what we do is to mark the page "pending dirty" and next time writepage 1928 * is called, propagate that into the buffers appropriately. 
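 *
 * In miniature, the pairing looks like this (ext3 reuses the page's
 * PageChecked bit as the "pending dirty" flag; see ext3_invalidatepage()
 * and ext3_journalled_writepage() above):
 *
 *	set_page_dirty:	SetPageChecked(page);
 *			__set_page_dirty_nobuffers(page);
 *	writepage:	if (PageChecked(page)) {
 *				ClearPageChecked(page);
 *				... add buffers and journal them ...
 *			}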
1929	 */
1930	static int ext3_journalled_set_page_dirty(struct page *page)
1931	{
1932		SetPageChecked(page);
1933		return __set_page_dirty_nobuffers(page);
1934	}
1935
1936	static const struct address_space_operations ext3_ordered_aops = {
1937		.readpage		= ext3_readpage,
1938		.readpages		= ext3_readpages,
1939		.writepage		= ext3_ordered_writepage,
1940		.write_begin		= ext3_write_begin,
1941		.write_end		= ext3_ordered_write_end,
1942		.bmap			= ext3_bmap,
1943		.invalidatepage		= ext3_invalidatepage,
1944		.releasepage		= ext3_releasepage,
1945		.direct_IO		= ext3_direct_IO,
1946		.migratepage		= buffer_migrate_page,
1947		.is_partially_uptodate	= block_is_partially_uptodate,
1948		.is_dirty_writeback	= buffer_check_dirty_writeback,
1949		.error_remove_page	= generic_error_remove_page,
1950	};
1951
1952	static const struct address_space_operations ext3_writeback_aops = {
1953		.readpage		= ext3_readpage,
1954		.readpages		= ext3_readpages,
1955		.writepage		= ext3_writeback_writepage,
1956		.write_begin		= ext3_write_begin,
1957		.write_end		= ext3_writeback_write_end,
1958		.bmap			= ext3_bmap,
1959		.invalidatepage		= ext3_invalidatepage,
1960		.releasepage		= ext3_releasepage,
1961		.direct_IO		= ext3_direct_IO,
1962		.migratepage		= buffer_migrate_page,
1963		.is_partially_uptodate	= block_is_partially_uptodate,
1964		.error_remove_page	= generic_error_remove_page,
1965	};
1966
1967	static const struct address_space_operations ext3_journalled_aops = {
1968		.readpage		= ext3_readpage,
1969		.readpages		= ext3_readpages,
1970		.writepage		= ext3_journalled_writepage,
1971		.write_begin		= ext3_write_begin,
1972		.write_end		= ext3_journalled_write_end,
1973		.set_page_dirty		= ext3_journalled_set_page_dirty,
1974		.bmap			= ext3_bmap,
1975		.invalidatepage		= ext3_invalidatepage,
1976		.releasepage		= ext3_releasepage,
1977		.is_partially_uptodate	= block_is_partially_uptodate,
1978		.error_remove_page	= generic_error_remove_page,
1979	};
1980
1981	void ext3_set_aops(struct inode *inode)
1982	{
1983		if (ext3_should_order_data(inode))
1984			inode->i_mapping->a_ops = &ext3_ordered_aops;
1985		else if (ext3_should_writeback_data(inode))
1986			inode->i_mapping->a_ops = &ext3_writeback_aops;
1987		else
1988			inode->i_mapping->a_ops = &ext3_journalled_aops;
1989	}
1990
1991	/*
1992	 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1993	 * up to the end of the block which corresponds to `from'.
1994	 * This is required during truncate. We need to physically zero the tail end
1995	 * of that block so it doesn't yield old data if the file is later grown.
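	 *
	 * A worked example with hypothetical numbers: with 4k pages and 1k
	 * blocks, truncating to from = 5000 gives offset = 5000 & 4095 = 904
	 * within page 1, and length = 1024 - (904 & 1023) = 120, so bytes
	 * 904..1023 of the partial block are zeroed below.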
1996	 */
1997	static int ext3_block_truncate_page(struct inode *inode, loff_t from)
1998	{
1999		ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2000		unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2001		unsigned blocksize, iblock, length, pos;
2002		struct page *page;
2003		handle_t *handle = NULL;
2004		struct buffer_head *bh;
2005		int err = 0;
2006
2007		/* Truncated on block boundary - nothing to do */
2008		blocksize = inode->i_sb->s_blocksize;
2009		if ((from & (blocksize - 1)) == 0)
2010			return 0;
2011
2012		page = grab_cache_page(inode->i_mapping, index);
2013		if (!page)
2014			return -ENOMEM;
2015		length = blocksize - (offset & (blocksize - 1));
2016		iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2017
2018		if (!page_has_buffers(page))
2019			create_empty_buffers(page, blocksize, 0);
2020
2021		/* Find the buffer that contains "offset" */
2022		bh = page_buffers(page);
2023		pos = blocksize;
2024		while (offset >= pos) {
2025			bh = bh->b_this_page;
2026			iblock++;
2027			pos += blocksize;
2028		}
2029
2030		err = 0;
2031		if (buffer_freed(bh)) {
2032			BUFFER_TRACE(bh, "freed: skip");
2033			goto unlock;
2034		}
2035
2036		if (!buffer_mapped(bh)) {
2037			BUFFER_TRACE(bh, "unmapped");
2038			ext3_get_block(inode, iblock, bh, 0);
2039			/* unmapped? It's a hole - nothing to do */
2040			if (!buffer_mapped(bh)) {
2041				BUFFER_TRACE(bh, "still unmapped");
2042				goto unlock;
2043			}
2044		}
2045
2046		/* Ok, it's mapped. Make sure it's up-to-date */
2047		if (PageUptodate(page))
2048			set_buffer_uptodate(bh);
2049
2050		if (!bh_uptodate_or_lock(bh)) {
2051			err = bh_submit_read(bh);
2052			/* Uhhuh. Read error. Complain and punt. */
2053			if (err)
2054				goto unlock;
2055		}
2056
2057		/* data=writeback mode doesn't need transaction to zero-out data */
2058		if (!ext3_should_writeback_data(inode)) {
2059			/* We journal at most one block */
2060			handle = ext3_journal_start(inode, 1);
2061			if (IS_ERR(handle)) {
2062				clear_highpage(page);
2063				flush_dcache_page(page);
2064				err = PTR_ERR(handle);
2065				goto unlock;
2066			}
2067		}
2068
2069		if (ext3_should_journal_data(inode)) {
2070			BUFFER_TRACE(bh, "get write access");
2071			err = ext3_journal_get_write_access(handle, bh);
2072			if (err)
2073				goto stop;
2074		}
2075
2076		zero_user(page, offset, length);
2077		BUFFER_TRACE(bh, "zeroed end of block");
2078
2079		err = 0;
2080		if (ext3_should_journal_data(inode)) {
2081			err = ext3_journal_dirty_metadata(handle, bh);
2082		} else {
2083			if (ext3_should_order_data(inode))
2084				err = ext3_journal_dirty_data(handle, bh);
2085			mark_buffer_dirty(bh);
2086		}
2087	stop:
2088		if (handle)
2089			ext3_journal_stop(handle);
2090
2091	unlock:
2092		unlock_page(page);
2093		page_cache_release(page);
2094		return err;
2095	}
2096
2097	/*
2098	 * Probably it should be a library function... search for first non-zero word
2099	 * or memcmp with zero_page, whatever is better for particular architecture.
2100	 * Linus?
2101	 */
2102	static inline int all_zeroes(__le32 *p, __le32 *q)
2103	{
2104		while (p < q)
2105			if (*p++)
2106				return 0;
2107		return 1;
2108	}
2109
2110	/**
2111	 * ext3_find_shared - find the indirect blocks for partial truncation.
2112	 * @inode: inode in question
2113	 * @depth: depth of the affected branch
2114	 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
2115	 * @chain: place to store the pointers to partial indirect blocks
2116	 * @top: place to store the (detached) top of branch
2117	 *
2118	 * This is a helper function used by ext3_truncate().
2119	 *
2120	 * When we do truncate() we may have to clean the ends of several
2121	 * indirect blocks but leave the blocks themselves alive. A block is
2122	 * partially truncated if some data below the new i_size is referred
2123	 * from it (and it is on the path to the first completely truncated
2124	 * data block, indeed). We have to free the top of that path along
2125	 * with everything to the right of the path. Since no allocation
2126	 * past the truncation point is possible until ext3_truncate()
2127	 * finishes, we may safely do the latter, but the top of the branch may
2128	 * require special attention - pageout below the truncation point
2129	 * might try to populate it.
2130	 *
2131	 * We atomically detach the top of the branch from the tree, store the
2132	 * block number of its root in *@top, pointers to buffer_heads of
2133	 * partially truncated blocks - in @chain[].bh and pointers to
2134	 * their last elements that should not be removed - in
2135	 * @chain[].p. Return value is the pointer to the last filled element
2136	 * of @chain.
2137	 *
2138	 * The work of actually freeing the subtrees is left to the caller:
2139	 * a) free the subtree starting from *@top
2140	 * b) free the subtrees whose roots are stored in
2141	 *	(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2142	 * c) free the subtrees growing from the inode past the @chain[0].
2143	 *	(no partially truncated stuff there). */
2144
2145	static Indirect *ext3_find_shared(struct inode *inode, int depth,
2146				int offsets[4], Indirect chain[4], __le32 *top)
2147	{
2148		Indirect *partial, *p;
2149		int k, err;
2150
2151		*top = 0;
2152		/* Make k index the deepest non-null offset + 1 */
2153		for (k = depth; k > 1 && !offsets[k-1]; k--)
2154			;
2155		partial = ext3_get_branch(inode, k, offsets, chain, &err);
2156		/* Writer: pointers */
2157		if (!partial)
2158			partial = chain + k-1;
2159		/*
2160		 * If the branch acquired continuation since we've looked at it -
2161		 * fine, it should all survive and (new) top doesn't belong to us.
2162		 */
2163		if (!partial->key && *partial->p)
2164			/* Writer: end */
2165			goto no_top;
2166		for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2167			;
2168		/*
2169		 * OK, we've found the last block that must survive. The rest of our
2170		 * branch should be detached before unlocking. However, if that rest
2171		 * of branch is all ours and does not grow immediately from the inode
2172		 * it's easier to cheat and just decrement partial->p.
2173		 */
2174		if (p == chain + k - 1 && p > chain) {
2175			p->p--;
2176		} else {
2177			*top = *p->p;
2178			/* Nope, don't do this in ext3. Must leave the tree intact */
2179	#if 0
2180			*p->p = 0;
2181	#endif
2182		}
2183		/* Writer: end */
2184
2185		while(partial > p) {
2186			brelse(partial->bh);
2187			partial--;
2188		}
2189	no_top:
2190		return partial;
2191	}
2192
2193	/*
2194	 * Zero a number of block pointers in either an inode or an indirect block.
2195	 * If we restart the transaction we must again get write access to the
2196	 * indirect block for further modification.
2197	 *
2198	 * We release `count' blocks on disk, but (last - first) may be greater
2199	 * than `count' because there can be holes in there.
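	 *
	 * A hypothetical example: if the pointer array handed to
	 * ext3_clear_blocks() below is {810, 811, 0, 0, 812}, the caller
	 * (ext3_free_data()) has accumulated the run block_to_free = 810,
	 * count = 3, while first..last spans all five slots - the two holes
	 * are walked but are already zero, and last - first = 5 > count = 3.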
2200 */ 2201static void ext3_clear_blocks(handle_t *handle, struct inode *inode, 2202 struct buffer_head *bh, ext3_fsblk_t block_to_free, 2203 unsigned long count, __le32 *first, __le32 *last) 2204{ 2205 __le32 *p; 2206 if (try_to_extend_transaction(handle, inode)) { 2207 if (bh) { 2208 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 2209 if (ext3_journal_dirty_metadata(handle, bh)) 2210 return; 2211 } 2212 ext3_mark_inode_dirty(handle, inode); 2213 truncate_restart_transaction(handle, inode); 2214 if (bh) { 2215 BUFFER_TRACE(bh, "retaking write access"); 2216 if (ext3_journal_get_write_access(handle, bh)) 2217 return; 2218 } 2219 } 2220 2221 /* 2222 * Any buffers which are on the journal will be in memory. We find 2223 * them on the hash table so journal_revoke() will run journal_forget() 2224 * on them. We've already detached each block from the file, so 2225 * bforget() in journal_forget() should be safe. 2226 * 2227 * AKPM: turn on bforget in journal_forget()!!! 2228 */ 2229 for (p = first; p < last; p++) { 2230 u32 nr = le32_to_cpu(*p); 2231 if (nr) { 2232 struct buffer_head *bh; 2233 2234 *p = 0; 2235 bh = sb_find_get_block(inode->i_sb, nr); 2236 ext3_forget(handle, 0, inode, bh, nr); 2237 } 2238 } 2239 2240 ext3_free_blocks(handle, inode, block_to_free, count); 2241} 2242 2243/** 2244 * ext3_free_data - free a list of data blocks 2245 * @handle: handle for this transaction 2246 * @inode: inode we are dealing with 2247 * @this_bh: indirect buffer_head which contains *@first and *@last 2248 * @first: array of block numbers 2249 * @last: points immediately past the end of array 2250 * 2251 * We are freeing all blocks referred from that array (numbers are stored as 2252 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 2253 * 2254 * We accumulate contiguous runs of blocks to free. Conveniently, if these 2255 * blocks are contiguous then releasing them at one time will only affect one 2256 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 2257 * actually use a lot of journal space. 2258 * 2259 * @this_bh will be %NULL if @first and @last point into the inode's direct 2260 * block pointers. 2261 */ 2262static void ext3_free_data(handle_t *handle, struct inode *inode, 2263 struct buffer_head *this_bh, 2264 __le32 *first, __le32 *last) 2265{ 2266 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ 2267 unsigned long count = 0; /* Number of blocks in the run */ 2268 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 2269 corresponding to 2270 block_to_free */ 2271 ext3_fsblk_t nr; /* Current block # */ 2272 __le32 *p; /* Pointer into inode/ind 2273 for current block */ 2274 int err; 2275 2276 if (this_bh) { /* For indirect block */ 2277 BUFFER_TRACE(this_bh, "get_write_access"); 2278 err = ext3_journal_get_write_access(handle, this_bh); 2279 /* Important: if we can't update the indirect pointers 2280 * to the blocks, we can't free them. 
		 */
2281		if (err)
2282			return;
2283	}
2284
2285	for (p = first; p < last; p++) {
2286		nr = le32_to_cpu(*p);
2287		if (nr) {
2288			/* accumulate blocks to free if they're contiguous */
2289			if (count == 0) {
2290				block_to_free = nr;
2291				block_to_free_p = p;
2292				count = 1;
2293			} else if (nr == block_to_free + count) {
2294				count++;
2295			} else {
2296				ext3_clear_blocks(handle, inode, this_bh,
2297						  block_to_free,
2298						  count, block_to_free_p, p);
2299				block_to_free = nr;
2300				block_to_free_p = p;
2301				count = 1;
2302			}
2303		}
2304	}
2305
2306	if (count > 0)
2307		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2308				  count, block_to_free_p, p);
2309
2310	if (this_bh) {
2311		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2312
2313		/*
2314		 * The buffer head should have an attached journal head at this
2315		 * point. However, if the data is corrupted and an indirect
2316		 * block pointed to itself, it would have been detached when
2317		 * the block was cleared. Check for this instead of OOPSing.
2318		 */
2319		if (bh2jh(this_bh))
2320			ext3_journal_dirty_metadata(handle, this_bh);
2321		else
2322			ext3_error(inode->i_sb, "ext3_free_data",
2323				   "circular indirect block detected, "
2324				   "inode=%lu, block=%llu",
2325				   inode->i_ino,
2326				   (unsigned long long)this_bh->b_blocknr);
2327	}
2328	}
2329
2330	/**
2331	 * ext3_free_branches - free an array of branches
2332	 * @handle: JBD handle for this transaction
2333	 * @inode: inode we are dealing with
2334	 * @parent_bh: the buffer_head which contains *@first and *@last
2335	 * @first: array of block numbers
2336	 * @last: pointer immediately past the end of array
2337	 * @depth: depth of the branches to free
2338	 *
2339	 * We are freeing all blocks referred from these branches (numbers are
2340	 * stored as little-endian 32-bit) and updating @inode->i_blocks
2341	 * appropriately.
2342	 */
2343	static void ext3_free_branches(handle_t *handle, struct inode *inode,
2344				       struct buffer_head *parent_bh,
2345				       __le32 *first, __le32 *last, int depth)
2346	{
2347		ext3_fsblk_t nr;
2348		__le32 *p;
2349
2350		if (is_handle_aborted(handle))
2351			return;
2352
2353		if (depth--) {
2354			struct buffer_head *bh;
2355			int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2356			p = last;
2357			while (--p >= first) {
2358				nr = le32_to_cpu(*p);
2359				if (!nr)
2360					continue;		/* A hole */
2361
2362				/* Go read the buffer for the next level down */
2363				bh = sb_bread(inode->i_sb, nr);
2364
2365				/*
2366				 * A read failure? Report error and clear slot
2367				 * (should be rare).
2368				 */
2369				if (!bh) {
2370					ext3_error(inode->i_sb, "ext3_free_branches",
2371						   "Read failure, inode=%lu, block="E3FSBLK,
2372						   inode->i_ino, nr);
2373					continue;
2374				}
2375
2376				/* This zaps the entire block. Bottom up. */
2377				BUFFER_TRACE(bh, "free child branches");
2378				ext3_free_branches(handle, inode, bh,
2379						   (__le32*)bh->b_data,
2380						   (__le32*)bh->b_data + addr_per_block,
2381						   depth);
2382
2383				/*
2384				 * Everything below this pointer has been
2385				 * released. Now let this top-of-subtree go.
2386				 *
2387				 * We want the freeing of this indirect block to be
2388				 * atomic in the journal with the updating of the
2389				 * bitmap block which owns it. So make some room in
2390				 * the journal.
2391				 *
2392				 * We zero the parent pointer *after* freeing its
2393				 * pointee in the bitmaps, so if extend_transaction()
2394				 * for some reason fails to put the bitmap changes and
2395				 * the release into the same transaction, recovery
2396				 * will merely complain about releasing a free block,
2397				 * rather than leaking blocks.
2398 */ 2399 if (is_handle_aborted(handle)) 2400 return; 2401 if (try_to_extend_transaction(handle, inode)) { 2402 ext3_mark_inode_dirty(handle, inode); 2403 truncate_restart_transaction(handle, inode); 2404 } 2405 2406 /* 2407 * We've probably journalled the indirect block several 2408 * times during the truncate. But it's no longer 2409 * needed and we now drop it from the transaction via 2410 * journal_revoke(). 2411 * 2412 * That's easy if it's exclusively part of this 2413 * transaction. But if it's part of the committing 2414 * transaction then journal_forget() will simply 2415 * brelse() it. That means that if the underlying 2416 * block is reallocated in ext3_get_block(), 2417 * unmap_underlying_metadata() will find this block 2418 * and will try to get rid of it. damn, damn. Thus 2419 * we don't allow a block to be reallocated until 2420 * a transaction freeing it has fully committed. 2421 * 2422 * We also have to make sure journal replay after a 2423 * crash does not overwrite non-journaled data blocks 2424 * with old metadata when the block got reallocated for 2425 * data. Thus we have to store a revoke record for a 2426 * block in the same transaction in which we free the 2427 * block. 2428 */ 2429 ext3_forget(handle, 1, inode, bh, bh->b_blocknr); 2430 2431 ext3_free_blocks(handle, inode, nr, 1); 2432 2433 if (parent_bh) { 2434 /* 2435 * The block which we have just freed is 2436 * pointed to by an indirect block: journal it 2437 */ 2438 BUFFER_TRACE(parent_bh, "get_write_access"); 2439 if (!ext3_journal_get_write_access(handle, 2440 parent_bh)){ 2441 *p = 0; 2442 BUFFER_TRACE(parent_bh, 2443 "call ext3_journal_dirty_metadata"); 2444 ext3_journal_dirty_metadata(handle, 2445 parent_bh); 2446 } 2447 } 2448 } 2449 } else { 2450 /* We have reached the bottom of the tree. */ 2451 BUFFER_TRACE(parent_bh, "free data blocks"); 2452 ext3_free_data(handle, inode, parent_bh, first, last); 2453 } 2454} 2455 2456int ext3_can_truncate(struct inode *inode) 2457{ 2458 if (S_ISREG(inode->i_mode)) 2459 return 1; 2460 if (S_ISDIR(inode->i_mode)) 2461 return 1; 2462 if (S_ISLNK(inode->i_mode)) 2463 return !ext3_inode_is_fast_symlink(inode); 2464 return 0; 2465} 2466 2467/* 2468 * ext3_truncate() 2469 * 2470 * We block out ext3_get_block() block instantiations across the entire 2471 * transaction, and VFS/VM ensures that ext3_truncate() cannot run 2472 * simultaneously on behalf of the same inode. 2473 * 2474 * As we work through the truncate and commit bits of it to the journal there 2475 * is one core, guiding principle: the file's tree must always be consistent on 2476 * disk. We must be able to restart the truncate after a crash. 2477 * 2478 * The file's tree may be transiently inconsistent in memory (although it 2479 * probably isn't), but whenever we close off and commit a journal transaction, 2480 * the contents of (the filesystem + the journal) must be consistent and 2481 * restartable. It's pretty simple, really: bottom up, right to left (although 2482 * left-to-right works OK too). 2483 * 2484 * Note that at recovery time, journal replay occurs *before* the restart of 2485 * truncate against the orphan inode list. 2486 * 2487 * The committed inode has the new, desired i_size (which is the same as 2488 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see 2489 * that this inode's truncate did not complete and it will again call 2490 * ext3_truncate() to have another go. 
So there will be instantiated blocks 2491 * to the right of the truncation point in a crashed ext3 filesystem. But 2492 * that's fine - as long as they are linked from the inode, the post-crash 2493 * ext3_truncate() run will find them and release them. 2494 */ 2495void ext3_truncate(struct inode *inode) 2496{ 2497 handle_t *handle; 2498 struct ext3_inode_info *ei = EXT3_I(inode); 2499 __le32 *i_data = ei->i_data; 2500 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); 2501 int offsets[4]; 2502 Indirect chain[4]; 2503 Indirect *partial; 2504 __le32 nr = 0; 2505 int n; 2506 long last_block; 2507 unsigned blocksize = inode->i_sb->s_blocksize; 2508 2509 trace_ext3_truncate_enter(inode); 2510 2511 if (!ext3_can_truncate(inode)) 2512 goto out_notrans; 2513 2514 if (inode->i_size == 0 && ext3_should_writeback_data(inode)) 2515 ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE); 2516 2517 handle = start_transaction(inode); 2518 if (IS_ERR(handle)) 2519 goto out_notrans; 2520 2521 last_block = (inode->i_size + blocksize-1) 2522 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb); 2523 n = ext3_block_to_path(inode, last_block, offsets, NULL); 2524 if (n == 0) 2525 goto out_stop; /* error */ 2526 2527 /* 2528 * OK. This truncate is going to happen. We add the inode to the 2529 * orphan list, so that if this truncate spans multiple transactions, 2530 * and we crash, we will resume the truncate when the filesystem 2531 * recovers. It also marks the inode dirty, to catch the new size. 2532 * 2533 * Implication: the file must always be in a sane, consistent 2534 * truncatable state while each transaction commits. 2535 */ 2536 if (ext3_orphan_add(handle, inode)) 2537 goto out_stop; 2538 2539 /* 2540 * The orphan list entry will now protect us from any crash which 2541 * occurs before the truncate completes, so it is now safe to propagate 2542 * the new, shorter inode size (held for now in i_size) into the 2543 * on-disk inode. We do this via i_disksize, which is the value which 2544 * ext3 *really* writes onto the disk inode. 2545 */ 2546 ei->i_disksize = inode->i_size; 2547 2548 /* 2549 * From here we block out all ext3_get_block() callers who want to 2550 * modify the block allocation tree. 2551 */ 2552 mutex_lock(&ei->truncate_mutex); 2553 2554 if (n == 1) { /* direct blocks */ 2555 ext3_free_data(handle, inode, NULL, i_data+offsets[0], 2556 i_data + EXT3_NDIR_BLOCKS); 2557 goto do_indirects; 2558 } 2559 2560 partial = ext3_find_shared(inode, n, offsets, chain, &nr); 2561 /* Kill the top of shared branch (not detached) */ 2562 if (nr) { 2563 if (partial == chain) { 2564 /* Shared branch grows from the inode */ 2565 ext3_free_branches(handle, inode, NULL, 2566 &nr, &nr+1, (chain+n-1) - partial); 2567 *partial->p = 0; 2568 /* 2569 * We mark the inode dirty prior to restart, 2570 * and prior to stop. No need for it here. 
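			 * (Both restart paths - ext3_clear_blocks() and
			 * ext3_free_branches() - call ext3_mark_inode_dirty()
			 * before truncate_restart_transaction(), and
			 * ext3_truncate() marks the inode dirty again just
			 * before ext3_journal_stop(), so the cleared pointer
			 * reaches the journal without an extra call here.)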
2571 */ 2572 } else { 2573 /* Shared branch grows from an indirect block */ 2574 ext3_free_branches(handle, inode, partial->bh, 2575 partial->p, 2576 partial->p+1, (chain+n-1) - partial); 2577 } 2578 } 2579 /* Clear the ends of indirect blocks on the shared branch */ 2580 while (partial > chain) { 2581 ext3_free_branches(handle, inode, partial->bh, partial->p + 1, 2582 (__le32*)partial->bh->b_data+addr_per_block, 2583 (chain+n-1) - partial); 2584 BUFFER_TRACE(partial->bh, "call brelse"); 2585 brelse (partial->bh); 2586 partial--; 2587 } 2588do_indirects: 2589 /* Kill the remaining (whole) subtrees */ 2590 switch (offsets[0]) { 2591 default: 2592 nr = i_data[EXT3_IND_BLOCK]; 2593 if (nr) { 2594 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 2595 i_data[EXT3_IND_BLOCK] = 0; 2596 } 2597 case EXT3_IND_BLOCK: 2598 nr = i_data[EXT3_DIND_BLOCK]; 2599 if (nr) { 2600 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 2601 i_data[EXT3_DIND_BLOCK] = 0; 2602 } 2603 case EXT3_DIND_BLOCK: 2604 nr = i_data[EXT3_TIND_BLOCK]; 2605 if (nr) { 2606 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 2607 i_data[EXT3_TIND_BLOCK] = 0; 2608 } 2609 case EXT3_TIND_BLOCK: 2610 ; 2611 } 2612 2613 ext3_discard_reservation(inode); 2614 2615 mutex_unlock(&ei->truncate_mutex); 2616 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2617 ext3_mark_inode_dirty(handle, inode); 2618 2619 /* 2620 * In a multi-transaction truncate, we only make the final transaction 2621 * synchronous 2622 */ 2623 if (IS_SYNC(inode)) 2624 handle->h_sync = 1; 2625out_stop: 2626 /* 2627 * If this was a simple ftruncate(), and the file will remain alive 2628 * then we need to clear up the orphan record which we created above. 2629 * However, if this was a real unlink then we were called by 2630 * ext3_evict_inode(), and we allow that function to clean up the 2631 * orphan info for us. 2632 */ 2633 if (inode->i_nlink) 2634 ext3_orphan_del(handle, inode); 2635 2636 ext3_journal_stop(handle); 2637 trace_ext3_truncate_exit(inode); 2638 return; 2639out_notrans: 2640 /* 2641 * Delete the inode from orphan list so that it doesn't stay there 2642 * forever and trigger assertion on umount. 2643 */ 2644 if (inode->i_nlink) 2645 ext3_orphan_del(NULL, inode); 2646 trace_ext3_truncate_exit(inode); 2647} 2648 2649static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, 2650 unsigned long ino, struct ext3_iloc *iloc) 2651{ 2652 unsigned long block_group; 2653 unsigned long offset; 2654 ext3_fsblk_t block; 2655 struct ext3_group_desc *gdp; 2656 2657 if (!ext3_valid_inum(sb, ino)) { 2658 /* 2659 * This error is already checked for in namei.c unless we are 2660 * looking at an NFS filehandle, in which case no error 2661 * report is needed 2662 */ 2663 return 0; 2664 } 2665 2666 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2667 gdp = ext3_get_group_desc(sb, block_group, NULL); 2668 if (!gdp) 2669 return 0; 2670 /* 2671 * Figure out the offset within the block group inode table 2672 */ 2673 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) * 2674 EXT3_INODE_SIZE(sb); 2675 block = le32_to_cpu(gdp->bg_inode_table) + 2676 (offset >> EXT3_BLOCK_SIZE_BITS(sb)); 2677 2678 iloc->block_group = block_group; 2679 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1); 2680 return block; 2681} 2682 2683/* 2684 * ext3_get_inode_loc returns with an extra refcount against the inode's 2685 * underlying buffer_head on success. If 'in_mem' is true, we have all 2686 * data in memory that is needed to recreate the on-disk version of this 2687 * inode. 
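 *
 * (Concretely, ext3_get_inode_loc() below passes
 * in_mem = !ext3_test_inode_state(inode, EXT3_STATE_XATTR): in-inode
 * extended attributes are the one part of the raw inode that cannot be
 * rebuilt from the in-core inode.)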
2688 */ 2689static int __ext3_get_inode_loc(struct inode *inode, 2690 struct ext3_iloc *iloc, int in_mem) 2691{ 2692 ext3_fsblk_t block; 2693 struct buffer_head *bh; 2694 2695 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc); 2696 if (!block) 2697 return -EIO; 2698 2699 bh = sb_getblk(inode->i_sb, block); 2700 if (unlikely(!bh)) { 2701 ext3_error (inode->i_sb, "ext3_get_inode_loc", 2702 "unable to read inode block - " 2703 "inode=%lu, block="E3FSBLK, 2704 inode->i_ino, block); 2705 return -ENOMEM; 2706 } 2707 if (!buffer_uptodate(bh)) { 2708 lock_buffer(bh); 2709 2710 /* 2711 * If the buffer has the write error flag, we have failed 2712 * to write out another inode in the same block. In this 2713 * case, we don't have to read the block because we may 2714 * read the old inode data successfully. 2715 */ 2716 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 2717 set_buffer_uptodate(bh); 2718 2719 if (buffer_uptodate(bh)) { 2720 /* someone brought it uptodate while we waited */ 2721 unlock_buffer(bh); 2722 goto has_buffer; 2723 } 2724 2725 /* 2726 * If we have all information of the inode in memory and this 2727 * is the only valid inode in the block, we need not read the 2728 * block. 2729 */ 2730 if (in_mem) { 2731 struct buffer_head *bitmap_bh; 2732 struct ext3_group_desc *desc; 2733 int inodes_per_buffer; 2734 int inode_offset, i; 2735 int block_group; 2736 int start; 2737 2738 block_group = (inode->i_ino - 1) / 2739 EXT3_INODES_PER_GROUP(inode->i_sb); 2740 inodes_per_buffer = bh->b_size / 2741 EXT3_INODE_SIZE(inode->i_sb); 2742 inode_offset = ((inode->i_ino - 1) % 2743 EXT3_INODES_PER_GROUP(inode->i_sb)); 2744 start = inode_offset & ~(inodes_per_buffer - 1); 2745 2746 /* Is the inode bitmap in cache? */ 2747 desc = ext3_get_group_desc(inode->i_sb, 2748 block_group, NULL); 2749 if (!desc) 2750 goto make_io; 2751 2752 bitmap_bh = sb_getblk(inode->i_sb, 2753 le32_to_cpu(desc->bg_inode_bitmap)); 2754 if (unlikely(!bitmap_bh)) 2755 goto make_io; 2756 2757 /* 2758 * If the inode bitmap isn't in cache then the 2759 * optimisation may end up performing two reads instead 2760 * of one, so skip it. 2761 */ 2762 if (!buffer_uptodate(bitmap_bh)) { 2763 brelse(bitmap_bh); 2764 goto make_io; 2765 } 2766 for (i = start; i < start + inodes_per_buffer; i++) { 2767 if (i == inode_offset) 2768 continue; 2769 if (ext3_test_bit(i, bitmap_bh->b_data)) 2770 break; 2771 } 2772 brelse(bitmap_bh); 2773 if (i == start + inodes_per_buffer) { 2774 /* all other inodes are free, so skip I/O */ 2775 memset(bh->b_data, 0, bh->b_size); 2776 set_buffer_uptodate(bh); 2777 unlock_buffer(bh); 2778 goto has_buffer; 2779 } 2780 } 2781 2782make_io: 2783 /* 2784 * There are other valid inodes in the buffer, this inode 2785 * has in-inode xattrs, or we don't have this inode in memory. 2786 * Read the block from disk. 2787 */ 2788 trace_ext3_load_inode(inode); 2789 get_bh(bh); 2790 bh->b_end_io = end_buffer_read_sync; 2791 submit_bh(READ | REQ_META | REQ_PRIO, bh); 2792 wait_on_buffer(bh); 2793 if (!buffer_uptodate(bh)) { 2794 ext3_error(inode->i_sb, "ext3_get_inode_loc", 2795 "unable to read inode block - " 2796 "inode=%lu, block="E3FSBLK, 2797 inode->i_ino, block); 2798 brelse(bh); 2799 return -EIO; 2800 } 2801 } 2802has_buffer: 2803 iloc->bh = bh; 2804 return 0; 2805} 2806 2807int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc) 2808{ 2809 /* We have all inode data except xattrs in memory here. 
*/ 2810 return __ext3_get_inode_loc(inode, iloc, 2811 !ext3_test_inode_state(inode, EXT3_STATE_XATTR)); 2812} 2813 2814void ext3_set_inode_flags(struct inode *inode) 2815{ 2816 unsigned int flags = EXT3_I(inode)->i_flags; 2817 2818 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 2819 if (flags & EXT3_SYNC_FL) 2820 inode->i_flags |= S_SYNC; 2821 if (flags & EXT3_APPEND_FL) 2822 inode->i_flags |= S_APPEND; 2823 if (flags & EXT3_IMMUTABLE_FL) 2824 inode->i_flags |= S_IMMUTABLE; 2825 if (flags & EXT3_NOATIME_FL) 2826 inode->i_flags |= S_NOATIME; 2827 if (flags & EXT3_DIRSYNC_FL) 2828 inode->i_flags |= S_DIRSYNC; 2829} 2830 2831/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */ 2832void ext3_get_inode_flags(struct ext3_inode_info *ei) 2833{ 2834 unsigned int flags = ei->vfs_inode.i_flags; 2835 2836 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| 2837 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); 2838 if (flags & S_SYNC) 2839 ei->i_flags |= EXT3_SYNC_FL; 2840 if (flags & S_APPEND) 2841 ei->i_flags |= EXT3_APPEND_FL; 2842 if (flags & S_IMMUTABLE) 2843 ei->i_flags |= EXT3_IMMUTABLE_FL; 2844 if (flags & S_NOATIME) 2845 ei->i_flags |= EXT3_NOATIME_FL; 2846 if (flags & S_DIRSYNC) 2847 ei->i_flags |= EXT3_DIRSYNC_FL; 2848} 2849 2850struct inode *ext3_iget(struct super_block *sb, unsigned long ino) 2851{ 2852 struct ext3_iloc iloc; 2853 struct ext3_inode *raw_inode; 2854 struct ext3_inode_info *ei; 2855 struct buffer_head *bh; 2856 struct inode *inode; 2857 journal_t *journal = EXT3_SB(sb)->s_journal; 2858 transaction_t *transaction; 2859 long ret; 2860 int block; 2861 uid_t i_uid; 2862 gid_t i_gid; 2863 2864 inode = iget_locked(sb, ino); 2865 if (!inode) 2866 return ERR_PTR(-ENOMEM); 2867 if (!(inode->i_state & I_NEW)) 2868 return inode; 2869 2870 ei = EXT3_I(inode); 2871 ei->i_block_alloc_info = NULL; 2872 2873 ret = __ext3_get_inode_loc(inode, &iloc, 0); 2874 if (ret < 0) 2875 goto bad_inode; 2876 bh = iloc.bh; 2877 raw_inode = ext3_raw_inode(&iloc); 2878 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 2879 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 2880 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 2881 if(!(test_opt (inode->i_sb, NO_UID32))) { 2882 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 2883 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 2884 } 2885 i_uid_write(inode, i_uid); 2886 i_gid_write(inode, i_gid); 2887 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 2888 inode->i_size = le32_to_cpu(raw_inode->i_size); 2889 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); 2890 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); 2891 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); 2892 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; 2893 2894 ei->i_state_flags = 0; 2895 ei->i_dir_start_lookup = 0; 2896 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 2897 /* We now have enough fields to check if the inode was active or not. 
2898	 * This is needed because nfsd might try to access dead inodes;
2899	 * the test is the same one that e2fsck uses.
2900	 * NeilBrown 1999oct15
2901	 */
2902		if (inode->i_nlink == 0) {
2903			if (inode->i_mode == 0 ||
2904			    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2905				/* this inode is deleted */
2906				brelse (bh);
2907				ret = -ESTALE;
2908				goto bad_inode;
2909			}
2910			/* The only unlinked inodes we let through here have
2911			 * valid i_mode and are being read by the orphan
2912			 * recovery code: that's fine, we're about to complete
2913			 * the process of deleting those. */
2914		}
2915		inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2916		ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2917	#ifdef EXT3_FRAGMENTS
2918		ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2919		ei->i_frag_no = raw_inode->i_frag;
2920		ei->i_frag_size = raw_inode->i_fsize;
2921	#endif
2922		ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2923		if (!S_ISREG(inode->i_mode)) {
2924			ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2925		} else {
2926			inode->i_size |=
2927				((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2928		}
2929		ei->i_disksize = inode->i_size;
2930		inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2931		ei->i_block_group = iloc.block_group;
2932		/*
2933		 * NOTE! The in-memory inode i_data array is in little-endian order
2934		 * even on big-endian machines: we do NOT byteswap the block numbers!
2935		 */
2936		for (block = 0; block < EXT3_N_BLOCKS; block++)
2937			ei->i_data[block] = raw_inode->i_block[block];
2938		INIT_LIST_HEAD(&ei->i_orphan);
2939
2940		/*
2941		 * Set transaction id's of transactions that have to be committed
2942		 * to finish f[data]sync. We set them to currently running transaction
2943		 * as we cannot be sure that the inode or some of its metadata isn't
2944		 * part of the transaction - the inode could have been reclaimed and
2945		 * now it is reread from disk.
2946		 */
2947		if (journal) {
2948			tid_t tid;
2949
2950			spin_lock(&journal->j_state_lock);
2951			if (journal->j_running_transaction)
2952				transaction = journal->j_running_transaction;
2953			else
2954				transaction = journal->j_committing_transaction;
2955			if (transaction)
2956				tid = transaction->t_tid;
2957			else
2958				tid = journal->j_commit_sequence;
2959			spin_unlock(&journal->j_state_lock);
2960			atomic_set(&ei->i_sync_tid, tid);
2961			atomic_set(&ei->i_datasync_tid, tid);
2962		}
2963
2964		if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2965		    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2966			/*
2967			 * When mke2fs creates big inodes it does not zero out
2968			 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2969			 * so ignore those first few inodes.
2970			 */
2971			ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2972			if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2973			    EXT3_INODE_SIZE(inode->i_sb)) {
2974				brelse (bh);
2975				ret = -EIO;
2976				goto bad_inode;
2977			}
2978			if (ei->i_extra_isize == 0) {
2979				/* The extra space is currently unused. Use it.
			 */
2980				ei->i_extra_isize = sizeof(struct ext3_inode) -
2981						    EXT3_GOOD_OLD_INODE_SIZE;
2982			} else {
2983				__le32 *magic = (void *)raw_inode +
2984						EXT3_GOOD_OLD_INODE_SIZE +
2985						ei->i_extra_isize;
2986				if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2987					 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
2988			}
2989		} else
2990			ei->i_extra_isize = 0;
2991
2992		if (S_ISREG(inode->i_mode)) {
2993			inode->i_op = &ext3_file_inode_operations;
2994			inode->i_fop = &ext3_file_operations;
2995			ext3_set_aops(inode);
2996		} else if (S_ISDIR(inode->i_mode)) {
2997			inode->i_op = &ext3_dir_inode_operations;
2998			inode->i_fop = &ext3_dir_operations;
2999		} else if (S_ISLNK(inode->i_mode)) {
3000			if (ext3_inode_is_fast_symlink(inode)) {
3001				inode->i_op = &ext3_fast_symlink_inode_operations;
3002				nd_terminate_link(ei->i_data, inode->i_size,
3003					sizeof(ei->i_data) - 1);
3004			} else {
3005				inode->i_op = &ext3_symlink_inode_operations;
3006				ext3_set_aops(inode);
3007			}
3008		} else {
3009			inode->i_op = &ext3_special_inode_operations;
3010			if (raw_inode->i_block[0])
3011				init_special_inode(inode, inode->i_mode,
3012					old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3013			else
3014				init_special_inode(inode, inode->i_mode,
3015					new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3016		}
3017		brelse (iloc.bh);
3018		ext3_set_inode_flags(inode);
3019		unlock_new_inode(inode);
3020		return inode;
3021
3022	bad_inode:
3023		iget_failed(inode);
3024		return ERR_PTR(ret);
3025	}
3026
3027	/*
3028	 * Post the struct inode info into an on-disk inode location in the
3029	 * buffer-cache. This gobbles the caller's reference to the
3030	 * buffer_head in the inode location struct.
3031	 *
3032	 * The caller must have write access to iloc->bh.
3033	 */
3034	static int ext3_do_update_inode(handle_t *handle,
3035					struct inode *inode,
3036					struct ext3_iloc *iloc)
3037	{
3038		struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3039		struct ext3_inode_info *ei = EXT3_I(inode);
3040		struct buffer_head *bh = iloc->bh;
3041		int err = 0, rc, block;
3042		int need_datasync = 0;
3043		__le32 disksize;
3044		uid_t i_uid;
3045		gid_t i_gid;
3046
3047	again:
3048		/* we can't allow multiple procs in here at once, it's a bit racy */
3049		lock_buffer(bh);
3050
3051		/* For fields not tracked in the in-memory inode,
3052		 * initialise them to zero for new inodes. */
3053		if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3054			memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3055
3056		ext3_get_inode_flags(ei);
3057		raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3058		i_uid = i_uid_read(inode);
3059		i_gid = i_gid_read(inode);
3060		if(!(test_opt(inode->i_sb, NO_UID32))) {
3061			raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
3062			raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3063	/*
3064	 * Fix up interoperability with old kernels.
Otherwise, old inodes get 3065 * re-used with the upper 16 bits of the uid/gid intact 3066 */ 3067 if(!ei->i_dtime) { 3068 raw_inode->i_uid_high = 3069 cpu_to_le16(high_16_bits(i_uid)); 3070 raw_inode->i_gid_high = 3071 cpu_to_le16(high_16_bits(i_gid)); 3072 } else { 3073 raw_inode->i_uid_high = 0; 3074 raw_inode->i_gid_high = 0; 3075 } 3076 } else { 3077 raw_inode->i_uid_low = 3078 cpu_to_le16(fs_high2lowuid(i_uid)); 3079 raw_inode->i_gid_low = 3080 cpu_to_le16(fs_high2lowgid(i_gid)); 3081 raw_inode->i_uid_high = 0; 3082 raw_inode->i_gid_high = 0; 3083 } 3084 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 3085 disksize = cpu_to_le32(ei->i_disksize); 3086 if (disksize != raw_inode->i_size) { 3087 need_datasync = 1; 3088 raw_inode->i_size = disksize; 3089 } 3090 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); 3091 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); 3092 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); 3093 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); 3094 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 3095 raw_inode->i_flags = cpu_to_le32(ei->i_flags); 3096#ifdef EXT3_FRAGMENTS 3097 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); 3098 raw_inode->i_frag = ei->i_frag_no; 3099 raw_inode->i_fsize = ei->i_frag_size; 3100#endif 3101 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); 3102 if (!S_ISREG(inode->i_mode)) { 3103 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); 3104 } else { 3105 disksize = cpu_to_le32(ei->i_disksize >> 32); 3106 if (disksize != raw_inode->i_size_high) { 3107 raw_inode->i_size_high = disksize; 3108 need_datasync = 1; 3109 } 3110 if (ei->i_disksize > 0x7fffffffULL) { 3111 struct super_block *sb = inode->i_sb; 3112 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, 3113 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) || 3114 EXT3_SB(sb)->s_es->s_rev_level == 3115 cpu_to_le32(EXT3_GOOD_OLD_REV)) { 3116 /* If this is the first large file 3117 * created, add a flag to the superblock. 
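				 * (The buffer lock is dropped before taking
				 * write access to the superblock buffer, so
				 * once the feature flag is set we `goto again'
				 * and redo the raw inode from scratch.)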
3118 */ 3119 unlock_buffer(bh); 3120 err = ext3_journal_get_write_access(handle, 3121 EXT3_SB(sb)->s_sbh); 3122 if (err) 3123 goto out_brelse; 3124 3125 ext3_update_dynamic_rev(sb); 3126 EXT3_SET_RO_COMPAT_FEATURE(sb, 3127 EXT3_FEATURE_RO_COMPAT_LARGE_FILE); 3128 handle->h_sync = 1; 3129 err = ext3_journal_dirty_metadata(handle, 3130 EXT3_SB(sb)->s_sbh); 3131 /* get our lock and start over */ 3132 goto again; 3133 } 3134 } 3135 } 3136 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 3137 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 3138 if (old_valid_dev(inode->i_rdev)) { 3139 raw_inode->i_block[0] = 3140 cpu_to_le32(old_encode_dev(inode->i_rdev)); 3141 raw_inode->i_block[1] = 0; 3142 } else { 3143 raw_inode->i_block[0] = 0; 3144 raw_inode->i_block[1] = 3145 cpu_to_le32(new_encode_dev(inode->i_rdev)); 3146 raw_inode->i_block[2] = 0; 3147 } 3148 } else for (block = 0; block < EXT3_N_BLOCKS; block++) 3149 raw_inode->i_block[block] = ei->i_data[block]; 3150 3151 if (ei->i_extra_isize) 3152 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 3153 3154 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 3155 unlock_buffer(bh); 3156 rc = ext3_journal_dirty_metadata(handle, bh); 3157 if (!err) 3158 err = rc; 3159 ext3_clear_inode_state(inode, EXT3_STATE_NEW); 3160 3161 atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid); 3162 if (need_datasync) 3163 atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid); 3164out_brelse: 3165 brelse (bh); 3166 ext3_std_error(inode->i_sb, err); 3167 return err; 3168} 3169 3170/* 3171 * ext3_write_inode() 3172 * 3173 * We are called from a few places: 3174 * 3175 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. 3176 * Here, there will be no transaction running. We wait for any running 3177 * transaction to commit. 3178 * 3179 * - Within flush work (for sys_sync(), kupdate and such). 3180 * We wait on commit, if told to. 3181 * 3182 * - Within iput_final() -> write_inode_now() 3183 * We wait on commit, if told to. 3184 * 3185 * In all cases it is actually safe for us to return without doing anything, 3186 * because the inode has been copied into a raw inode buffer in 3187 * ext3_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL 3188 * writeback. 3189 * 3190 * Note that we are absolutely dependent upon all inode dirtiers doing the 3191 * right thing: they *must* call mark_inode_dirty() after dirtying info in 3192 * which we are interested. 3193 * 3194 * It would be a bug for them to not do this. The code: 3195 * 3196 * mark_inode_dirty(inode) 3197 * stuff(); 3198 * inode->i_size = expr; 3199 * 3200 * is in error because write_inode() could occur while `stuff()' is running, 3201 * and the new i_size will be lost. Plus the inode will no longer be on the 3202 * superblock's dirty inode list. 3203 */ 3204int ext3_write_inode(struct inode *inode, struct writeback_control *wbc) 3205{ 3206 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 3207 return 0; 3208 3209 if (ext3_journal_current_handle()) { 3210 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 3211 dump_stack(); 3212 return -EIO; 3213 } 3214 3215 /* 3216 * No need to force transaction in WB_SYNC_NONE mode. Also 3217 * ext3_sync_fs() will force the commit after everything is 3218 * written. 3219 */ 3220 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) 3221 return 0; 3222 3223 return ext3_force_commit(inode->i_sb); 3224} 3225 3226/* 3227 * ext3_setattr() 3228 * 3229 * Called from notify_change. 
3230 * 3231 * We want to trap VFS attempts to truncate the file as soon as 3232 * possible. In particular, we want to make sure that when the VFS 3233 * shrinks i_size, we put the inode on the orphan list and modify 3234 * i_disksize immediately, so that during the subsequent flushing of 3235 * dirty pages and freeing of disk blocks, we can guarantee that any 3236 * commit will leave the blocks being flushed in an unused state on 3237 * disk. (On recovery, the inode will get truncated and the blocks will 3238 * be freed, so we have a strong guarantee that no future commit will 3239 * leave these blocks visible to the user.) 3240 * 3241 * Called with inode->sem down. 3242 */ 3243int ext3_setattr(struct dentry *dentry, struct iattr *attr) 3244{ 3245 struct inode *inode = dentry->d_inode; 3246 int error, rc = 0; 3247 const unsigned int ia_valid = attr->ia_valid; 3248 3249 error = inode_change_ok(inode, attr); 3250 if (error) 3251 return error; 3252 3253 if (is_quota_modification(inode, attr)) 3254 dquot_initialize(inode); 3255 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 3256 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 3257 handle_t *handle; 3258 3259 /* (user+group)*(old+new) structure, inode write (sb, 3260 * inode block, ? - but truncate inode update has it) */ 3261 handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 3262 EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3); 3263 if (IS_ERR(handle)) { 3264 error = PTR_ERR(handle); 3265 goto err_out; 3266 } 3267 error = dquot_transfer(inode, attr); 3268 if (error) { 3269 ext3_journal_stop(handle); 3270 return error; 3271 } 3272 /* Update corresponding info in inode so that everything is in 3273 * one transaction */ 3274 if (attr->ia_valid & ATTR_UID) 3275 inode->i_uid = attr->ia_uid; 3276 if (attr->ia_valid & ATTR_GID) 3277 inode->i_gid = attr->ia_gid; 3278 error = ext3_mark_inode_dirty(handle, inode); 3279 ext3_journal_stop(handle); 3280 } 3281 3282 if (attr->ia_valid & ATTR_SIZE) 3283 inode_dio_wait(inode); 3284 3285 if (S_ISREG(inode->i_mode) && 3286 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 3287 handle_t *handle; 3288 3289 handle = ext3_journal_start(inode, 3); 3290 if (IS_ERR(handle)) { 3291 error = PTR_ERR(handle); 3292 goto err_out; 3293 } 3294 3295 error = ext3_orphan_add(handle, inode); 3296 if (error) { 3297 ext3_journal_stop(handle); 3298 goto err_out; 3299 } 3300 EXT3_I(inode)->i_disksize = attr->ia_size; 3301 error = ext3_mark_inode_dirty(handle, inode); 3302 ext3_journal_stop(handle); 3303 if (error) { 3304 /* Some hard fs error must have happened. Bail out. 
			 */
3305			ext3_orphan_del(NULL, inode);
3306			goto err_out;
3307		}
3308		rc = ext3_block_truncate_page(inode, attr->ia_size);
3309		if (rc) {
3310			/* Cleanup orphan list and exit */
3311			handle = ext3_journal_start(inode, 3);
3312			if (IS_ERR(handle)) {
3313				ext3_orphan_del(NULL, inode);
3314				goto err_out;
3315			}
3316			ext3_orphan_del(handle, inode);
3317			ext3_journal_stop(handle);
3318			goto err_out;
3319		}
3320	}
3321
3322	if ((attr->ia_valid & ATTR_SIZE) &&
3323	    attr->ia_size != i_size_read(inode)) {
3324		truncate_setsize(inode, attr->ia_size);
3325		ext3_truncate(inode);
3326	}
3327
3328	setattr_copy(inode, attr);
3329	mark_inode_dirty(inode);
3330
3331	if (ia_valid & ATTR_MODE)
3332		rc = posix_acl_chmod(inode, inode->i_mode);
3333
3334	err_out:
3335		ext3_std_error(inode->i_sb, error);
3336		if (!error)
3337			error = rc;
3338		return error;
3339	}
3340
3341
3342	/*
3343	 * How many blocks doth make a writepage()?
3344	 *
3345	 * With N blocks per page, it may be:
3346	 * N data blocks
3347	 * 2 indirect blocks
3348	 * 2 dindirect
3349	 * 1 tindirect
3350	 * N+5 bitmap blocks (from the above)
3351	 * N+5 group descriptor summary blocks
3352	 * 1 inode block
3353	 * 1 superblock.
3354	 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3355	 *
3356	 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3357	 *
3358	 * With ordered or writeback data it's the same, less the N data blocks.
3359	 *
3360	 * If the inode's direct blocks can hold an integral number of pages then a
3361	 * page cannot straddle two indirect blocks, and we can only touch one indirect
3362	 * and dindirect block, and the "5" above becomes "3".
3363	 *
3364	 * This still overestimates under most circumstances. If we were to pass the
3365	 * start and end offsets in here as well we could do block_to_path() on each
3366	 * block and work out the exact number of indirects which are touched. Pah.
3367	 */
3368
3369	static int ext3_writepage_trans_blocks(struct inode *inode)
3370	{
3371		int bpp = ext3_journal_blocks_per_page(inode);
3372		int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3373		int ret;
3374
3375		if (ext3_should_journal_data(inode))
3376			ret = 3 * (bpp + indirects) + 2;
3377		else
3378			ret = 2 * (bpp + indirects) + indirects + 2;
3379
3380	#ifdef CONFIG_QUOTA
3381		/* We know that structure was already allocated during dquot_initialize so
3382		 * we will be updating only the data blocks + inodes */
3383		ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3384	#endif
3385
3386		return ret;
3387	}
3388
3389	/*
3390	 * The caller must have previously called ext3_reserve_inode_write().
3391	 * Given this, we know that the caller already has write access to iloc->bh.
3392	 */
3393	int ext3_mark_iloc_dirty(handle_t *handle,
3394				 struct inode *inode, struct ext3_iloc *iloc)
3395	{
3396		int err = 0;
3397
3398		/* the do_update_inode consumes one bh->b_count */
3399		get_bh(iloc->bh);
3400
3401		/* ext3_do_update_inode() does journal_dirty_metadata */
3402		err = ext3_do_update_inode(handle, inode, iloc);
3403		put_bh(iloc->bh);
3404		return err;
3405	}
3406
3407	/*
3408	 * On success, we end up with an outstanding reference count against
3409	 * iloc->bh. This _must_ be cleaned up later.
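	 *
	 * The usual calling pattern (it is exactly what ext3_mark_inode_dirty()
	 * below does) is:
	 *
	 *	err = ext3_reserve_inode_write(handle, inode, &iloc);
	 *	if (!err)
	 *		err = ext3_mark_iloc_dirty(handle, inode, &iloc);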
3410 */ 3411 3412int 3413ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 3414 struct ext3_iloc *iloc) 3415{ 3416 int err = 0; 3417 if (handle) { 3418 err = ext3_get_inode_loc(inode, iloc); 3419 if (!err) { 3420 BUFFER_TRACE(iloc->bh, "get_write_access"); 3421 err = ext3_journal_get_write_access(handle, iloc->bh); 3422 if (err) { 3423 brelse(iloc->bh); 3424 iloc->bh = NULL; 3425 } 3426 } 3427 } 3428 ext3_std_error(inode->i_sb, err); 3429 return err; 3430} 3431 3432/* 3433 * What we do here is to mark the in-core inode as clean with respect to inode 3434 * dirtiness (it may still be data-dirty). 3435 * This means that the in-core inode may be reaped by prune_icache 3436 * without having to perform any I/O. This is a very good thing, 3437 * because *any* task may call prune_icache - even ones which 3438 * have a transaction open against a different journal. 3439 * 3440 * Is this cheating? Not really. Sure, we haven't written the 3441 * inode out, but prune_icache isn't a user-visible syncing function. 3442 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 3443 * we start and wait on commits. 3444 */ 3445int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) 3446{ 3447 struct ext3_iloc iloc; 3448 int err; 3449 3450 might_sleep(); 3451 trace_ext3_mark_inode_dirty(inode, _RET_IP_); 3452 err = ext3_reserve_inode_write(handle, inode, &iloc); 3453 if (!err) 3454 err = ext3_mark_iloc_dirty(handle, inode, &iloc); 3455 return err; 3456} 3457 3458/* 3459 * ext3_dirty_inode() is called from __mark_inode_dirty() 3460 * 3461 * We're really interested in the case where a file is being extended. 3462 * i_size has been changed by generic_commit_write() and we thus need 3463 * to include the updated inode in the current transaction. 3464 * 3465 * Also, dquot_alloc_space() will always dirty the inode when blocks 3466 * are allocated to the file. 3467 * 3468 * If the inode is marked synchronous, we don't honour that here - doing 3469 * so would cause a commit on atime updates, which we don't bother doing. 3470 * We handle synchronous inodes at the highest possible level. 3471 */ 3472void ext3_dirty_inode(struct inode *inode, int flags) 3473{ 3474 handle_t *current_handle = ext3_journal_current_handle(); 3475 handle_t *handle; 3476 3477 handle = ext3_journal_start(inode, 2); 3478 if (IS_ERR(handle)) 3479 goto out; 3480 if (current_handle && 3481 current_handle->h_transaction != handle->h_transaction) { 3482 /* This task has a transaction open against a different fs */ 3483 printk(KERN_EMERG "%s: transactions do not match!\n", 3484 __func__); 3485 } else { 3486 jbd_debug(5, "marking dirty. outer handle=%p\n", 3487 current_handle); 3488 ext3_mark_inode_dirty(handle, inode); 3489 } 3490 ext3_journal_stop(handle); 3491out: 3492 return; 3493} 3494 3495#if 0 3496/* 3497 * Bind an inode's backing buffer_head into this transaction, to prevent 3498 * it from being flushed to disk early. Unlike 3499 * ext3_reserve_inode_write, this leaves behind no bh reference and 3500 * returns no iloc structure, so the caller needs to repeat the iloc 3501 * lookup to mark the inode dirty later. 
3502 */ 3503static int ext3_pin_inode(handle_t *handle, struct inode *inode) 3504{ 3505 struct ext3_iloc iloc; 3506 3507 int err = 0; 3508 if (handle) { 3509 err = ext3_get_inode_loc(inode, &iloc); 3510 if (!err) { 3511 BUFFER_TRACE(iloc.bh, "get_write_access"); 3512 err = journal_get_write_access(handle, iloc.bh); 3513 if (!err) 3514 err = ext3_journal_dirty_metadata(handle, 3515 iloc.bh); 3516 brelse(iloc.bh); 3517 } 3518 } 3519 ext3_std_error(inode->i_sb, err); 3520 return err; 3521} 3522#endif 3523 3524int ext3_change_inode_journal_flag(struct inode *inode, int val) 3525{ 3526 journal_t *journal; 3527 handle_t *handle; 3528 int err; 3529 3530 /* 3531 * We have to be very careful here: changing a data block's 3532 * journaling status dynamically is dangerous. If we write a 3533 * data block to the journal, change the status and then delete 3534 * that block, we risk forgetting to revoke the old log record 3535 * from the journal and so a subsequent replay can corrupt data. 3536 * So, first we make sure that the journal is empty and that 3537 * nobody is changing anything. 3538 */ 3539 3540 journal = EXT3_JOURNAL(inode); 3541 if (is_journal_aborted(journal)) 3542 return -EROFS; 3543 3544 journal_lock_updates(journal); 3545 journal_flush(journal); 3546 3547 /* 3548 * OK, there are no updates running now, and all cached data is 3549 * synced to disk. We are now in a completely consistent state 3550 * which doesn't have anything in the journal, and we know that 3551 * no filesystem updates are running, so it is safe to modify 3552 * the inode's in-core data-journaling state flag now. 3553 */ 3554 3555 if (val) 3556 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL; 3557 else 3558 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL; 3559 ext3_set_aops(inode); 3560 3561 journal_unlock_updates(journal); 3562 3563 /* Finally we can mark the inode as dirty. */ 3564 3565 handle = ext3_journal_start(inode, 1); 3566 if (IS_ERR(handle)) 3567 return PTR_ERR(handle); 3568 3569 err = ext3_mark_inode_dirty(handle, inode); 3570 handle->h_sync = 1; 3571 ext3_journal_stop(handle); 3572 ext3_std_error(inode->i_sb, err); 3573 3574 return err; 3575}
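
#if 0
/*
 * Illustrative only, not built (cf. the ext3_pin_inode() stub above): a
 * minimal sketch of the transaction pattern used throughout this file -
 * start a handle with enough credits, log the inode, stop the handle.
 * Two credits ("sb + inode write") match what ext3_dirty_inode() above
 * reserves; the function name is hypothetical.
 */
static int example_touch_inode(struct inode *inode)
{
	handle_t *handle = ext3_journal_start(inode, 2);
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	err = ext3_mark_inode_dirty(handle, inode);
	ext3_journal_stop(handle);
	return err;
}
#endif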