/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * 	(sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * 	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include "xattr.h"
#include "acl.h"

static int ext3_writepage_trans_blocks(struct inode *inode);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT3_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

/* The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext3_forget(handle_t *handle, int is_metadata,
		struct inode *inode, struct buffer_head *bh,
		int blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext3 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext3_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext3_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/* If we're going to skip the normal cleanup, we still
		 * need to make sure that the in-core orphan linked list
		 * is properly cleaned up. */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);
	/*
	 * Kill off the orphan record which ext3_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext3_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

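/*
 * Thin wrapper around ext3_new_block(): allocate one block as close to
 * @goal as the allocator manages, returning the new block number, or 0
 * on failure with the reason stored in *err.
 */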
static int ext3_alloc_block (handle_t *handle,
			struct inode * inode, unsigned long goal, int *err)
{
	unsigned long result;

	result = ext3_new_block(handle, inode, goal, err);
	return result;
}


typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

/**
 *	ext3_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext3 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes.  This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one.  If
 *	@i_block is out of range (negative or too large) a warning is printed
 *	and zero is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
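/*
 * Worked example (illustrative): with 4KB blocks, ptrs == 1024 and
 * EXT3_NDIR_BLOCKS == 12, so i_block == 70000 falls through to the
 * double-indirect case with i_block reduced to 70000 - 12 - 1024 == 68964,
 * giving the path {EXT3_DIND_BLOCK, 68964 >> 10 == 67, 68964 & 1023 == 356}
 * and a returned depth of 3.
 */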
static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = (i_block & (ptrs - 1)) == (final - 1);
	return n;
}

/**
 *	ext3_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.  In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
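/*
 * Continuing the example above (illustrative): a successful walk to that
 * double-indirect data block fills chain[] as follows: chain[0].p points at
 * EXT3_I(inode)->i_data[EXT3_DIND_BLOCK] with chain[0].bh == NULL;
 * chain[1].p points at slot 67 inside the double-indirect block, whose
 * buffer_head is chain[1].bh; chain[2].p points at slot 356 inside the
 * indirect block, whose buffer_head is chain[2].bh.
 */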
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext3_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the caller's PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group.  The PID is used here so that
 *	functionally related files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	unsigned long bg_start;
	unsigned long colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}

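/*
 * For instance (illustrative, assuming the common 32768 blocks per group):
 * a caller with pid 4097 that reaches the cylinder-group case above gets
 * colour == (4097 % 16) * (32768 / 16) == 2048, i.e. its allocations start
 * 2048 blocks past the group's first data block.
 */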
/**
 *	ext3_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@chain:  chain of indirect blocks
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block
 *	allocation and returns it.
 */
static unsigned long ext3_find_goal(struct inode *inode, long block,
		Indirect chain[4], Indirect *partial)
{
	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext3_find_near(inode, partial);
}

/**
 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@num: depth of the chain (number of blocks to allocate)
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates @num blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode.  It stores the information about that chain in the branch[], in
 *	the same format as ext3_get_branch() would do.  We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key).  Upon the exit we have the same
 *	picture as after the successful ext3_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext3_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			     int num,
			     unsigned long goal,
			     int *offsets,
			     Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0, keys = 0;
	int err = 0;
	int i;
	int parent = ext3_alloc_block(handle, inode, goal, &err);

	branch[0].key = cpu_to_le32(parent);
	if (parent) {
		for (n = 1; n < num; n++) {
			struct buffer_head *bh;
			/* Allocate the next block */
			int nr = ext3_alloc_block(handle, inode, parent, &err);
			if (!nr)
				break;
			branch[n].key = cpu_to_le32(nr);

			/*
			 * Get buffer_head for parent block, zero it out
			 * and set the pointer to new one, then send
			 * parent to disk.
			 */
			bh = sb_getblk(inode->i_sb, parent);
			if (!bh)
				break;
			keys = n+1;
			branch[n].bh = bh;
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			err = ext3_journal_get_create_access(handle, bh);
			if (err) {
				unlock_buffer(bh);
				brelse(bh);
				break;
			}

			memset(bh->b_data, 0, blocksize);
			branch[n].p = (__le32*) bh->b_data + offsets[n];
			*branch[n].p = branch[n].key;
			BUFFER_TRACE(bh, "marking uptodate");
			set_buffer_uptodate(bh);
			unlock_buffer(bh);

			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (err)
				break;

			parent = nr;
		}
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < keys; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < keys; i++)
		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
	return err;
}

/**
 *	ext3_splice_branch - splice the allocated branch onto inode.
 *	@inode: owner
 *	@block: (logical) number of block we are adding
 *	@chain: chain of indirect blocks (with a missing link - see
 *		ext3_alloc_branch)
 *	@where: location of missing link
 *	@num:   number of blocks we are adding
 *
 *	This function fills the missing link and does all housekeeping needed
 *	in inode (->i_blocks, etc.).  In case of success we end up with the
 *	full chain to the new block and return 0.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
			      Indirect chain[4], Indirect *where, int num)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation.
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block;
		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * akpm: If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i < num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
	}
	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */
static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create, int extend_disksize)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int boundary = 0;
	const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
	struct ext3_inode_info *ei = EXT3_I(inode);

	J_ASSERT(handle != NULL || create == 0);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		clear_buffer_new(bh_result);
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	down(&ei->truncate_sem);

	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext3_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see
	 * if the requested block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			up(&ei->truncate_sem);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, chain, partial);

	left = (chain + depth) - partial;

	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	err = ext3_alloc_branch(handle, inode, left, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock, chain,
					 partial, left);
	/*
	 * i_disksize growing is protected by truncate_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext3_get_block() -bzzz
	 */
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	up(&ei->truncate_sem);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (boundary)
		set_buffer_boundary(bh_result);
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = NULL;
	int ret;

	if (create) {
		handle = ext3_journal_current_handle();
		J_ASSERT(handle != 0);
	}
	ret = ext3_get_block_handle(handle, inode, iblock,
				bh_result, create, 1);
	return ret;
}

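/*
 * ext3_get_block() is the get_block_t callback this file hands to the
 * generic helpers below - block_prepare_write(), block_write_full_page(),
 * mpage_readpage() and friends - so when it allocates, it relies on the
 * caller having already started a transaction (hence the J_ASSERT above).
 */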
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
		unsigned long max_blocks, struct buffer_head *bh_result,
		int create)
{
	handle_t *handle = journal_current_handle();
	int ret = 0;

	if (!handle)
		goto get_block;		/* A read */

	if (handle->h_transaction->t_state == T_LOCKED) {
		/*
		 * Huge direct-io writes can hold off commits for long
		 * periods of time.  Let this commit run.
		 */
		ext3_journal_stop(handle);
		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		goto get_block;
	}

	if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
		/*
		 * Getting low on buffer credits...
		 */
		ret = ext3_journal_extend(handle, DIO_CREDITS);
		if (ret > 0) {
			/*
			 * Couldn't extend the transaction.  Start a new one.
			 */
			ret = ext3_journal_restart(handle, DIO_CREDITS);
		}
	}

get_block:
	if (ret == 0)
		ret = ext3_get_block_handle(handle, inode, iblock,
					bh_result, create, 0);
	bh_result->b_size = (1 << inode->i_blkbits);
	return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
				long block, int create, int * errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
	if (!*errp && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != 0);

			/* Now that we do not always journal data, we
			   should keep in mind whether this should
			   always journal the new buffer as metadata.
			   For now, regular file writes use
			   ext3_get_block instead, so it's not a
			   problem. */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;

	bh = ext3_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

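/*
 * Apply fn() to each buffer of a page that overlaps the byte range
 * [from, to).  Buffers outside that range are skipped, except that when
 * @partial is non-NULL it is set if any skipped buffer is not uptodate,
 * telling the caller the page cannot be marked uptodate as a whole.
 * The walk stops at, and returns, the first error fn() reports.
 */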
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;

retry:
	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	if (test_opt(inode->i_sb, NOBH))
		ret = nobh_prepare_write(page, from, to, ext3_get_block);
	else
		ret = block_prepare_write(page, from, to, ext3_get_block);
	if (ret)
		goto prepare_write_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
prepare_write_failed:
	if (ret)
		ext3_journal_stop(handle);
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
						bh, handle, err);
	return err;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext3_ordered_commit_write(struct file *file, struct page *page,
				     unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext3_journal_dirty_data);

	if (ret == 0) {
		/*
		 * generic_commit_write() will run mark_inode_dirty() if i_size
		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
		 * into that.
		 */
		loff_t new_i_size;

		new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
		if (new_i_size > EXT3_I(inode)->i_disksize)
			EXT3_I(inode)->i_disksize = new_i_size;
		ret = generic_commit_write(file, page, from, to);
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

static int ext3_writeback_commit_write(struct file *file, struct page *page,
				       unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	if (new_i_size > EXT3_I(inode)->i_disksize)
		EXT3_I(inode)->i_disksize = new_i_size;

	if (test_opt(inode->i_sb, NOBH))
		ret = nobh_commit_write(file, page, from, to);
	else
		ret = generic_commit_write(file, page, from, to);

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

static int ext3_journalled_commit_write(struct file *file,
			struct page *page, unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	loff_t pos;

	/*
	 * Here we duplicate the generic_commit_write() functionality
	 */
	pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, commit_write_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.
		 * If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext3_get_block);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
	if (buffer_mapped(bh))
		return ext3_journal_dirty_data(handle, bh);
	return 0;
}

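/*
 * The three helpers above are walk_page_buffers() callbacks for
 * ext3_ordered_writepage() below: bget_one()/bput_one() pin and release
 * each buffer_head so the buffers remain usable after
 * block_write_full_page() unlocks the page, and journal_dirty_data_fn()
 * files each mapped buffer to the transaction as ordered data.
 */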
/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext3_writepage()
 *
 * Similar for:
 *
 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero.  We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied.  So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (test_opt(inode->i_sb, NOBH))
		ret = nobh_writepage(page, ext3_get_block, wbc);
	else
		ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}

static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, commit_write_fn);
		if (ret == 0)
			ret = err;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}

static int ext3_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext3_get_block);
}

static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}

static int ext3_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	return journal_invalidatepage(journal, page, offset);
}

static int ext3_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}

/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle = NULL;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		if (final_size > inode->i_size) {
			ret = ext3_orphan_add(handle, inode);
			if (ret)
				goto out_stop;
			orphan = 1;
			ei->i_disksize = inode->i_size;
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_direct_io_get_blocks, NULL);

	/*
	 * Reacquire the handle: ext3_direct_io_get_blocks() can restart the
	 * transaction
	 */
	handle = journal_current_handle();

out_stop:
	if (handle) {
		int err;

		if (orphan && inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (orphan && ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext3_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}

/*
 * Pages can be marked dirty completely asynchronously from ext3's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static struct address_space_operations ext3_ordered_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_ordered_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_ordered_commit_write,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
};

static struct address_space_operations ext3_writeback_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_writeback_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_writeback_commit_write,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
};

static struct address_space_operations ext3_journalled_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_journalled_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= ext3_prepare_write,
	.commit_write	= ext3_journalled_commit_write,
	.set_page_dirty	= ext3_journalled_set_page_dirty,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
};

void ext3_set_aops(struct inode *inode)
{
	if (ext3_should_order_data(inode))
		inode->i_mapping->a_ops = &ext3_ordered_aops;
	else if (ext3_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext3_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext3_journalled_aops;
}

/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
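/*
 * Worked example (illustrative, assuming 4KB pages and 1KB blocks): for
 * from == 5500, index == 1, offset == 5500 & 4095 == 1404 and
 * length == 1024 - (1404 & 1023) == 644, so bytes 1404..2047 of the page -
 * the tail of that page's second 1KB block - get zeroed.
 */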
static int ext3_block_truncate_page(handle_t *handle, struct page *page,
		struct address_space *mapping, loff_t from)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	int err = 0;
	void *kaddr;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For "nobh" option, we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH)) {
		if (PageUptodate(page)) {
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + offset, 0, length);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			set_page_dirty(page);
			goto unlock;
		}
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext3_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext3_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext3_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive.  A block is
 *	partially truncated if some data below the new i_size is referred to
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path.  Since no allocation
 *	past the truncation point is possible until ext3_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p.  Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to the caller is to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).
 */
static Indirect *ext3_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; p > chain && all_zeroes((__le32*)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
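/*
 * For example (illustrative): if the pointer range holds {500, 501, 0, 502},
 * ext3_free_data() below accumulates a single run with block_to_free == 500
 * and count == 3 - the zero entry is simply skipped, and 502 == 500 + 2
 * still extends the run - yet last - first == 4.
 */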
1813 */ 1814static void 1815ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, 1816 unsigned long block_to_free, unsigned long count, 1817 __le32 *first, __le32 *last) 1818{ 1819 __le32 *p; 1820 if (try_to_extend_transaction(handle, inode)) { 1821 if (bh) { 1822 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 1823 ext3_journal_dirty_metadata(handle, bh); 1824 } 1825 ext3_mark_inode_dirty(handle, inode); 1826 ext3_journal_test_restart(handle, inode); 1827 if (bh) { 1828 BUFFER_TRACE(bh, "retaking write access"); 1829 ext3_journal_get_write_access(handle, bh); 1830 } 1831 } 1832 1833 /* 1834 * Any buffers which are on the journal will be in memory. We find 1835 * them on the hash table so journal_revoke() will run journal_forget() 1836 * on them. We've already detached each block from the file, so 1837 * bforget() in journal_forget() should be safe. 1838 * 1839 * AKPM: turn on bforget in journal_forget()!!! 1840 */ 1841 for (p = first; p < last; p++) { 1842 u32 nr = le32_to_cpu(*p); 1843 if (nr) { 1844 struct buffer_head *bh; 1845 1846 *p = 0; 1847 bh = sb_find_get_block(inode->i_sb, nr); 1848 ext3_forget(handle, 0, inode, bh, nr); 1849 } 1850 } 1851 1852 ext3_free_blocks(handle, inode, block_to_free, count); 1853} 1854 1855/** 1856 * ext3_free_data - free a list of data blocks 1857 * @handle: handle for this transaction 1858 * @inode: inode we are dealing with 1859 * @this_bh: indirect buffer_head which contains *@first and *@last 1860 * @first: array of block numbers 1861 * @last: points immediately past the end of array 1862 * 1863 * We are freeing all blocks referred to from that array (numbers are stored as 1864 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 1865 * 1866 * We accumulate contiguous runs of blocks to free. Conveniently, if these 1867 * blocks are contiguous then releasing them at one time will only affect one 1868 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 1869 * actually use a lot of journal space. 1870 * 1871 * @this_bh will be %NULL if @first and @last point into the inode's direct 1872 * block pointers. 1873 */ 1874static void ext3_free_data(handle_t *handle, struct inode *inode, 1875 struct buffer_head *this_bh, 1876 __le32 *first, __le32 *last) 1877{ 1878 unsigned long block_to_free = 0; /* Starting block # of a run */ 1879 unsigned long count = 0; /* Number of blocks in the run */ 1880 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 1881 corresponding to 1882 block_to_free */ 1883 unsigned long nr; /* Current block # */ 1884 __le32 *p; /* Pointer into inode/ind 1885 for current block */ 1886 int err; 1887 1888 if (this_bh) { /* For indirect block */ 1889 BUFFER_TRACE(this_bh, "get_write_access"); 1890 err = ext3_journal_get_write_access(handle, this_bh); 1891 /* Important: if we can't update the indirect pointers 1892 * to the blocks, we can't free them. 
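 * Bailing out here is the conservative failure mode: the blocks stay
 * allocated and still linked from the tree (at worst they are reclaimed
 * by a later truncate retry or by fsck), whereas freeing blocks that
 * the on-disk tree still points at would corrupt the filesystem.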
*/ 1893 if (err) 1894 return; 1895 } 1896 1897 for (p = first; p < last; p++) { 1898 nr = le32_to_cpu(*p); 1899 if (nr) { 1900 /* accumulate blocks to free if they're contiguous */ 1901 if (count == 0) { 1902 block_to_free = nr; 1903 block_to_free_p = p; 1904 count = 1; 1905 } else if (nr == block_to_free + count) { 1906 count++; 1907 } else { 1908 ext3_clear_blocks(handle, inode, this_bh, 1909 block_to_free, 1910 count, block_to_free_p, p); 1911 block_to_free = nr; 1912 block_to_free_p = p; 1913 count = 1; 1914 } 1915 } 1916 } 1917 1918 if (count > 0) 1919 ext3_clear_blocks(handle, inode, this_bh, block_to_free, 1920 count, block_to_free_p, p); 1921 1922 if (this_bh) { 1923 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata"); 1924 ext3_journal_dirty_metadata(handle, this_bh); 1925 } 1926} 1927 1928/** 1929 * ext3_free_branches - free an array of branches 1930 * @handle: JBD handle for this transaction 1931 * @inode: inode we are dealing with 1932 * @parent_bh: the buffer_head which contains *@first and *@last 1933 * @first: array of block numbers 1934 * @last: pointer immediately past the end of array 1935 * @depth: depth of the branches to free 1936 * 1937 * We are freeing all blocks referred to from these branches (numbers are 1938 * stored as little-endian 32-bit) and updating @inode->i_blocks 1939 * appropriately. 1940 */ 1941static void ext3_free_branches(handle_t *handle, struct inode *inode, 1942 struct buffer_head *parent_bh, 1943 __le32 *first, __le32 *last, int depth) 1944{ 1945 unsigned long nr; 1946 __le32 *p; 1947 1948 if (is_handle_aborted(handle)) 1949 return; 1950 1951 if (depth--) { 1952 struct buffer_head *bh; 1953 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); 1954 p = last; 1955 while (--p >= first) { 1956 nr = le32_to_cpu(*p); 1957 if (!nr) 1958 continue; /* A hole */ 1959 1960 /* Go read the buffer for the next level down */ 1961 bh = sb_bread(inode->i_sb, nr); 1962 1963 /* 1964 * A read failure? Report error and clear slot 1965 * (should be rare). 1966 */ 1967 if (!bh) { 1968 ext3_error(inode->i_sb, "ext3_free_branches", 1969 "Read failure, inode=%ld, block=%ld", 1970 inode->i_ino, nr); 1971 continue; 1972 } 1973 1974 /* This zaps the entire block. Bottom up. */ 1975 BUFFER_TRACE(bh, "free child branches"); 1976 ext3_free_branches(handle, inode, bh, 1977 (__le32*)bh->b_data, 1978 (__le32*)bh->b_data + addr_per_block, 1979 depth); 1980 1981 /* 1982 * We've probably journalled the indirect block several 1983 * times during the truncate. But it's no longer 1984 * needed and we now drop it from the transaction via 1985 * journal_revoke(). 1986 * 1987 * That's easy if it's exclusively part of this 1988 * transaction. But if it's part of the committing 1989 * transaction then journal_forget() will simply 1990 * brelse() it. That means that if the underlying 1991 * block is reallocated in ext3_get_block(), 1992 * unmap_underlying_metadata() will find this block 1993 * and will try to get rid of it. damn, damn. 1994 * 1995 * If this block has already been committed to the 1996 * journal, a revoke record will be written. And 1997 * revoke records must be emitted *before* clearing 1998 * this block's bit in the bitmaps. 1999 */ 2000 ext3_forget(handle, 1, inode, bh, bh->b_blocknr); 2001 2002 /* 2003 * Everything below this pointer has been 2004 * released. Now let this top-of-subtree go. 2005 * 2006 * We want the freeing of this indirect block to be 2007 * atomic in the journal with the updating of the 2008 * bitmap block which owns it. 
So make some room in 2009 * the journal. 2010 * 2011 * We zero the parent pointer *after* freeing its 2012 * pointee in the bitmaps, so if extend_transaction() 2013 * for some reason fails to put the bitmap changes and 2014 * the release into the same transaction, recovery 2015 * will merely complain about releasing a free block, 2016 * rather than leaking blocks. 2017 */ 2018 if (is_handle_aborted(handle)) 2019 return; 2020 if (try_to_extend_transaction(handle, inode)) { 2021 ext3_mark_inode_dirty(handle, inode); 2022 ext3_journal_test_restart(handle, inode); 2023 } 2024 2025 ext3_free_blocks(handle, inode, nr, 1); 2026 2027 if (parent_bh) { 2028 /* 2029 * The block which we have just freed is 2030 * pointed to by an indirect block: journal it 2031 */ 2032 BUFFER_TRACE(parent_bh, "get_write_access"); 2033 if (!ext3_journal_get_write_access(handle, 2034 parent_bh)){ 2035 *p = 0; 2036 BUFFER_TRACE(parent_bh, 2037 "call ext3_journal_dirty_metadata"); 2038 ext3_journal_dirty_metadata(handle, 2039 parent_bh); 2040 } 2041 } 2042 } 2043 } else { 2044 /* We have reached the bottom of the tree. */ 2045 BUFFER_TRACE(parent_bh, "free data blocks"); 2046 ext3_free_data(handle, inode, parent_bh, first, last); 2047 } 2048} 2049 2050/* 2051 * ext3_truncate() 2052 * 2053 * We block out ext3_get_block() block instantiations across the entire 2054 * transaction, and VFS/VM ensures that ext3_truncate() cannot run 2055 * simultaneously on behalf of the same inode. 2056 * 2057 * As we work through the truncate and commit bits of it to the journal there 2058 * is one core, guiding principle: the file's tree must always be consistent on 2059 * disk. We must be able to restart the truncate after a crash. 2060 * 2061 * The file's tree may be transiently inconsistent in memory (although it 2062 * probably isn't), but whenever we close off and commit a journal transaction, 2063 * the contents of (the filesystem + the journal) must be consistent and 2064 * restartable. It's pretty simple, really: bottom up, right to left (although 2065 * left-to-right works OK too). 2066 * 2067 * Note that at recovery time, journal replay occurs *before* the restart of 2068 * truncate against the orphan inode list. 2069 * 2070 * The committed inode has the new, desired i_size (which is the same as 2071 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see 2072 * that this inode's truncate did not complete and it will again call 2073 * ext3_truncate() to have another go. So there will be instantiated blocks 2074 * to the right of the truncation point in a crashed ext3 filesystem. But 2075 * that's fine - as long as they are linked from the inode, the post-crash 2076 * ext3_truncate() run will find them and release them. 
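 *
 * (Illustrative arithmetic, assuming a 1K block size: truncating to
 * i_size=5000 gives last_block = (5000 + 1023) >> 10 = 5, so
 * ext3_block_truncate_page() zeroes the tail of block 4 -- file bytes
 * 5000..5119 -- and everything from block 5 onwards is freed.)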
2077 */ 2078 2079void ext3_truncate(struct inode * inode) 2080{ 2081 handle_t *handle; 2082 struct ext3_inode_info *ei = EXT3_I(inode); 2083 __le32 *i_data = ei->i_data; 2084 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); 2085 struct address_space *mapping = inode->i_mapping; 2086 int offsets[4]; 2087 Indirect chain[4]; 2088 Indirect *partial; 2089 __le32 nr = 0; 2090 int n; 2091 long last_block; 2092 unsigned blocksize = inode->i_sb->s_blocksize; 2093 struct page *page; 2094 2095 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 2096 S_ISLNK(inode->i_mode))) 2097 return; 2098 if (ext3_inode_is_fast_symlink(inode)) 2099 return; 2100 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 2101 return; 2102 2103 /* 2104 * We have to lock the EOF page here, because lock_page() nests 2105 * outside journal_start(). 2106 */ 2107 if ((inode->i_size & (blocksize - 1)) == 0) { 2108 /* Block boundary? Nothing to do */ 2109 page = NULL; 2110 } else { 2111 page = grab_cache_page(mapping, 2112 inode->i_size >> PAGE_CACHE_SHIFT); 2113 if (!page) 2114 return; 2115 } 2116 2117 handle = start_transaction(inode); 2118 if (IS_ERR(handle)) { 2119 if (page) { 2120 clear_highpage(page); 2121 flush_dcache_page(page); 2122 unlock_page(page); 2123 page_cache_release(page); 2124 } 2125 return; /* AKPM: return what? */ 2126 } 2127 2128 last_block = (inode->i_size + blocksize-1) 2129 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb); 2130 2131 if (page) 2132 ext3_block_truncate_page(handle, page, mapping, inode->i_size); 2133 2134 n = ext3_block_to_path(inode, last_block, offsets, NULL); 2135 if (n == 0) 2136 goto out_stop; /* error */ 2137 2138 /* 2139 * OK. This truncate is going to happen. We add the inode to the 2140 * orphan list, so that if this truncate spans multiple transactions, 2141 * and we crash, we will resume the truncate when the filesystem 2142 * recovers. It also marks the inode dirty, to catch the new size. 2143 * 2144 * Implication: the file must always be in a sane, consistent 2145 * truncatable state while each transaction commits. 2146 */ 2147 if (ext3_orphan_add(handle, inode)) 2148 goto out_stop; 2149 2150 /* 2151 * The orphan list entry will now protect us from any crash which 2152 * occurs before the truncate completes, so it is now safe to propagate 2153 * the new, shorter inode size (held for now in i_size) into the 2154 * on-disk inode. We do this via i_disksize, which is the value which 2155 * ext3 *really* writes onto the disk inode. 2156 */ 2157 ei->i_disksize = inode->i_size; 2158 2159 /* 2160 * From here we block out all ext3_get_block() callers who want to 2161 * modify the block allocation tree. 2162 */ 2163 down(&ei->truncate_sem); 2164 2165 if (n == 1) { /* direct blocks */ 2166 ext3_free_data(handle, inode, NULL, i_data+offsets[0], 2167 i_data + EXT3_NDIR_BLOCKS); 2168 goto do_indirects; 2169 } 2170 2171 partial = ext3_find_shared(inode, n, offsets, chain, &nr); 2172 /* Kill the top of shared branch (not detached) */ 2173 if (nr) { 2174 if (partial == chain) { 2175 /* Shared branch grows from the inode */ 2176 ext3_free_branches(handle, inode, NULL, 2177 &nr, &nr+1, (chain+n-1) - partial); 2178 *partial->p = 0; 2179 /* 2180 * We mark the inode dirty prior to restart, 2181 * and prior to stop. No need for it here. 
2182 */ 2183 } else { 2184 /* Shared branch grows from an indirect block */ 2185 BUFFER_TRACE(partial->bh, "get_write_access"); 2186 ext3_free_branches(handle, inode, partial->bh, 2187 partial->p, 2188 partial->p+1, (chain+n-1) - partial); 2189 } 2190 } 2191 /* Clear the ends of indirect blocks on the shared branch */ 2192 while (partial > chain) { 2193 ext3_free_branches(handle, inode, partial->bh, partial->p + 1, 2194 (__le32*)partial->bh->b_data+addr_per_block, 2195 (chain+n-1) - partial); 2196 BUFFER_TRACE(partial->bh, "call brelse"); 2197 brelse (partial->bh); 2198 partial--; 2199 } 2200do_indirects: 2201 /* Kill the remaining (whole) subtrees */ 2202 switch (offsets[0]) { 2203 default: 2204 nr = i_data[EXT3_IND_BLOCK]; 2205 if (nr) { 2206 ext3_free_branches(handle, inode, NULL, 2207 &nr, &nr+1, 1); 2208 i_data[EXT3_IND_BLOCK] = 0; 2209 } 2210 case EXT3_IND_BLOCK: 2211 nr = i_data[EXT3_DIND_BLOCK]; 2212 if (nr) { 2213 ext3_free_branches(handle, inode, NULL, 2214 &nr, &nr+1, 2); 2215 i_data[EXT3_DIND_BLOCK] = 0; 2216 } 2217 case EXT3_DIND_BLOCK: 2218 nr = i_data[EXT3_TIND_BLOCK]; 2219 if (nr) { 2220 ext3_free_branches(handle, inode, NULL, 2221 &nr, &nr+1, 3); 2222 i_data[EXT3_TIND_BLOCK] = 0; 2223 } 2224 case EXT3_TIND_BLOCK: 2225 ; 2226 } 2227 2228 ext3_discard_reservation(inode); 2229 2230 up(&ei->truncate_sem); 2231 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2232 ext3_mark_inode_dirty(handle, inode); 2233 2234 /* In a multi-transaction truncate, we only make the final 2235 * transaction synchronous */ 2236 if (IS_SYNC(inode)) 2237 handle->h_sync = 1; 2238out_stop: 2239 /* 2240 * If this was a simple ftruncate(), and the file will remain alive 2241 * then we need to clear up the orphan record which we created above. 2242 * However, if this was a real unlink then we were called by 2243 * ext3_delete_inode(), and we allow that function to clean up the 2244 * orphan info for us. 
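 * (The two cases are told apart below by i_nlink: non-zero means the
 * file survives, zero means a real unlink.)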
2245 */ 2246 if (inode->i_nlink) 2247 ext3_orphan_del(handle, inode); 2248 2249 ext3_journal_stop(handle); 2250} 2251 2252static unsigned long ext3_get_inode_block(struct super_block *sb, 2253 unsigned long ino, struct ext3_iloc *iloc) 2254{ 2255 unsigned long desc, group_desc, block_group; 2256 unsigned long offset, block; 2257 struct buffer_head *bh; 2258 struct ext3_group_desc * gdp; 2259 2260 2261 if ((ino != EXT3_ROOT_INO && 2262 ino != EXT3_JOURNAL_INO && 2263 ino != EXT3_RESIZE_INO && 2264 ino < EXT3_FIRST_INO(sb)) || 2265 ino > le32_to_cpu( 2266 EXT3_SB(sb)->s_es->s_inodes_count)) { 2267 ext3_error (sb, "ext3_get_inode_block", 2268 "bad inode number: %lu", ino); 2269 return 0; 2270 } 2271 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2272 if (block_group >= EXT3_SB(sb)->s_groups_count) { 2273 ext3_error (sb, "ext3_get_inode_block", 2274 "group >= groups count"); 2275 return 0; 2276 } 2277 smp_rmb(); 2278 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb); 2279 desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1); 2280 bh = EXT3_SB(sb)->s_group_desc[group_desc]; 2281 if (!bh) { 2282 ext3_error (sb, "ext3_get_inode_block", 2283 "Descriptor not loaded"); 2284 return 0; 2285 } 2286 2287 gdp = (struct ext3_group_desc *) bh->b_data; 2288 /* 2289 * Figure out the offset within the block group inode table 2290 */ 2291 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) * 2292 EXT3_INODE_SIZE(sb); 2293 block = le32_to_cpu(gdp[desc].bg_inode_table) + 2294 (offset >> EXT3_BLOCK_SIZE_BITS(sb)); 2295 2296 iloc->block_group = block_group; 2297 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1); 2298 return block; 2299} 2300 2301/* 2302 * ext3_get_inode_loc returns with an extra refcount against the inode's 2303 * underlying buffer_head on success. If 'in_mem' is true, we have all 2304 * data in memory that is needed to recreate the on-disk version of this 2305 * inode. 2306 */ 2307static int __ext3_get_inode_loc(struct inode *inode, 2308 struct ext3_iloc *iloc, int in_mem) 2309{ 2310 unsigned long block; 2311 struct buffer_head *bh; 2312 2313 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc); 2314 if (!block) 2315 return -EIO; 2316 2317 bh = sb_getblk(inode->i_sb, block); 2318 if (!bh) { 2319 ext3_error (inode->i_sb, "ext3_get_inode_loc", 2320 "unable to read inode block - " 2321 "inode=%lu, block=%lu", inode->i_ino, block); 2322 return -EIO; 2323 } 2324 if (!buffer_uptodate(bh)) { 2325 lock_buffer(bh); 2326 if (buffer_uptodate(bh)) { 2327 /* someone brought it uptodate while we waited */ 2328 unlock_buffer(bh); 2329 goto has_buffer; 2330 } 2331 2332 /* 2333 * If we have all information of the inode in memory and this 2334 * is the only valid inode in the block, we need not read the 2335 * block. 2336 */ 2337 if (in_mem) { 2338 struct buffer_head *bitmap_bh; 2339 struct ext3_group_desc *desc; 2340 int inodes_per_buffer; 2341 int inode_offset, i; 2342 int block_group; 2343 int start; 2344 2345 block_group = (inode->i_ino - 1) / 2346 EXT3_INODES_PER_GROUP(inode->i_sb); 2347 inodes_per_buffer = bh->b_size / 2348 EXT3_INODE_SIZE(inode->i_sb); 2349 inode_offset = ((inode->i_ino - 1) % 2350 EXT3_INODES_PER_GROUP(inode->i_sb)); 2351 start = inode_offset & ~(inodes_per_buffer - 1); 2352 2353 /* Is the inode bitmap in cache? 
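 * If it is, we can check below whether every other inode in this
 * piece of the inode table is free, and skip reading the block
 * entirely when they are.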
*/ 2354 desc = ext3_get_group_desc(inode->i_sb, 2355 block_group, NULL); 2356 if (!desc) 2357 goto make_io; 2358 2359 bitmap_bh = sb_getblk(inode->i_sb, 2360 le32_to_cpu(desc->bg_inode_bitmap)); 2361 if (!bitmap_bh) 2362 goto make_io; 2363 2364 /* 2365 * If the inode bitmap isn't in cache then the 2366 * optimisation may end up performing two reads instead 2367 * of one, so skip it. 2368 */ 2369 if (!buffer_uptodate(bitmap_bh)) { 2370 brelse(bitmap_bh); 2371 goto make_io; 2372 } 2373 for (i = start; i < start + inodes_per_buffer; i++) { 2374 if (i == inode_offset) 2375 continue; 2376 if (ext3_test_bit(i, bitmap_bh->b_data)) 2377 break; 2378 } 2379 brelse(bitmap_bh); 2380 if (i == start + inodes_per_buffer) { 2381 /* all other inodes are free, so skip I/O */ 2382 memset(bh->b_data, 0, bh->b_size); 2383 set_buffer_uptodate(bh); 2384 unlock_buffer(bh); 2385 goto has_buffer; 2386 } 2387 } 2388 2389make_io: 2390 /* 2391 * There are other valid inodes in the buffer, this inode 2392 * has in-inode xattrs, or we don't have this inode in memory. 2393 * Read the block from disk. 2394 */ 2395 get_bh(bh); 2396 bh->b_end_io = end_buffer_read_sync; 2397 submit_bh(READ, bh); 2398 wait_on_buffer(bh); 2399 if (!buffer_uptodate(bh)) { 2400 ext3_error(inode->i_sb, "ext3_get_inode_loc", 2401 "unable to read inode block - " 2402 "inode=%lu, block=%lu", 2403 inode->i_ino, block); 2404 brelse(bh); 2405 return -EIO; 2406 } 2407 } 2408has_buffer: 2409 iloc->bh = bh; 2410 return 0; 2411} 2412 2413int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc) 2414{ 2415 /* We have all inode data except xattrs in memory here. */ 2416 return __ext3_get_inode_loc(inode, iloc, 2417 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR)); 2418} 2419 2420void ext3_set_inode_flags(struct inode *inode) 2421{ 2422 unsigned int flags = EXT3_I(inode)->i_flags; 2423 2424 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 2425 if (flags & EXT3_SYNC_FL) 2426 inode->i_flags |= S_SYNC; 2427 if (flags & EXT3_APPEND_FL) 2428 inode->i_flags |= S_APPEND; 2429 if (flags & EXT3_IMMUTABLE_FL) 2430 inode->i_flags |= S_IMMUTABLE; 2431 if (flags & EXT3_NOATIME_FL) 2432 inode->i_flags |= S_NOATIME; 2433 if (flags & EXT3_DIRSYNC_FL) 2434 inode->i_flags |= S_DIRSYNC; 2435} 2436 2437void ext3_read_inode(struct inode * inode) 2438{ 2439 struct ext3_iloc iloc; 2440 struct ext3_inode *raw_inode; 2441 struct ext3_inode_info *ei = EXT3_I(inode); 2442 struct buffer_head *bh; 2443 int block; 2444 2445#ifdef CONFIG_EXT3_FS_POSIX_ACL 2446 ei->i_acl = EXT3_ACL_NOT_CACHED; 2447 ei->i_default_acl = EXT3_ACL_NOT_CACHED; 2448#endif 2449 ei->i_block_alloc_info = NULL; 2450 2451 if (__ext3_get_inode_loc(inode, &iloc, 0)) 2452 goto bad_inode; 2453 bh = iloc.bh; 2454 raw_inode = ext3_raw_inode(&iloc); 2455 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 2456 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 2457 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 2458 if(!(test_opt (inode->i_sb, NO_UID32))) { 2459 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 2460 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 2461 } 2462 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 2463 inode->i_size = le32_to_cpu(raw_inode->i_size); 2464 inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime); 2465 inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime); 2466 inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime); 2467 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; 2468 2469 ei->i_state 
= 0; 2470 ei->i_dir_start_lookup = 0; 2471 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 2472 /* We now have enough fields to check if the inode was active or not. 2473 * This is needed because nfsd might try to access dead inodes; 2474 * the test is the same one that e2fsck uses. 2475 * NeilBrown 1999oct15 2476 */ 2477 if (inode->i_nlink == 0) { 2478 if (inode->i_mode == 0 || 2479 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { 2480 /* this inode is deleted */ 2481 brelse (bh); 2482 goto bad_inode; 2483 } 2484 /* The only unlinked inodes we let through here have 2485 * valid i_mode and are being read by the orphan 2486 * recovery code: that's fine, we're about to complete 2487 * the process of deleting those. */ 2488 } 2489 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size 2490 * (for stat), not the fs block 2491 * size */ 2492 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 2493 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 2494#ifdef EXT3_FRAGMENTS 2495 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); 2496 ei->i_frag_no = raw_inode->i_frag; 2497 ei->i_frag_size = raw_inode->i_fsize; 2498#endif 2499 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 2500 if (!S_ISREG(inode->i_mode)) { 2501 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl); 2502 } else { 2503 inode->i_size |= 2504 ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; 2505 } 2506 ei->i_disksize = inode->i_size; 2507 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 2508 ei->i_block_group = iloc.block_group; 2509 /* 2510 * NOTE! The in-memory inode i_data array is in little-endian order 2511 * even on big-endian machines: we do NOT byteswap the block numbers! 2512 */ 2513 for (block = 0; block < EXT3_N_BLOCKS; block++) 2514 ei->i_data[block] = raw_inode->i_block[block]; 2515 INIT_LIST_HEAD(&ei->i_orphan); 2516 2517 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && 2518 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { 2519 /* 2520 * When mke2fs creates big inodes it does not zero out 2521 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE, 2522 * so ignore those first few inodes. 2523 */ 2524 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 2525 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 2526 EXT3_INODE_SIZE(inode->i_sb)) 2527 goto bad_inode; 2528 if (ei->i_extra_isize == 0) { 2529 /* The extra space is currently unused. Use it. 
*/ 2530 ei->i_extra_isize = sizeof(struct ext3_inode) - 2531 EXT3_GOOD_OLD_INODE_SIZE; 2532 } else { 2533 __le32 *magic = (void *)raw_inode + 2534 EXT3_GOOD_OLD_INODE_SIZE + 2535 ei->i_extra_isize; 2536 if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC)) 2537 ei->i_state |= EXT3_STATE_XATTR; 2538 } 2539 } else 2540 ei->i_extra_isize = 0; 2541 2542 if (S_ISREG(inode->i_mode)) { 2543 inode->i_op = &ext3_file_inode_operations; 2544 inode->i_fop = &ext3_file_operations; 2545 ext3_set_aops(inode); 2546 } else if (S_ISDIR(inode->i_mode)) { 2547 inode->i_op = &ext3_dir_inode_operations; 2548 inode->i_fop = &ext3_dir_operations; 2549 } else if (S_ISLNK(inode->i_mode)) { 2550 if (ext3_inode_is_fast_symlink(inode)) 2551 inode->i_op = &ext3_fast_symlink_inode_operations; 2552 else { 2553 inode->i_op = &ext3_symlink_inode_operations; 2554 ext3_set_aops(inode); 2555 } 2556 } else { 2557 inode->i_op = &ext3_special_inode_operations; 2558 if (raw_inode->i_block[0]) 2559 init_special_inode(inode, inode->i_mode, 2560 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 2561 else 2562 init_special_inode(inode, inode->i_mode, 2563 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 2564 } 2565 brelse (iloc.bh); 2566 ext3_set_inode_flags(inode); 2567 return; 2568 2569bad_inode: 2570 make_bad_inode(inode); 2571 return; 2572} 2573 2574/* 2575 * Post the struct inode info into an on-disk inode location in the 2576 * buffer-cache. This gobbles the caller's reference to the 2577 * buffer_head in the inode location struct. 2578 * 2579 * The caller must have write access to iloc->bh. 2580 */ 2581static int ext3_do_update_inode(handle_t *handle, 2582 struct inode *inode, 2583 struct ext3_iloc *iloc) 2584{ 2585 struct ext3_inode *raw_inode = ext3_raw_inode(iloc); 2586 struct ext3_inode_info *ei = EXT3_I(inode); 2587 struct buffer_head *bh = iloc->bh; 2588 int err = 0, rc, block; 2589 2590 /* For fields not tracked in the in-memory inode, 2591 * initialise them to zero for new inodes. */ 2592 if (ei->i_state & EXT3_STATE_NEW) 2593 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); 2594 2595 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 2596 if(!(test_opt(inode->i_sb, NO_UID32))) { 2597 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 2598 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 2599/* 2600 * Fix up interoperability with old kernels. 
Otherwise, old inodes get 2601 * re-used with the upper 16 bits of the uid/gid intact 2602 */ 2603 if(!ei->i_dtime) { 2604 raw_inode->i_uid_high = 2605 cpu_to_le16(high_16_bits(inode->i_uid)); 2606 raw_inode->i_gid_high = 2607 cpu_to_le16(high_16_bits(inode->i_gid)); 2608 } else { 2609 raw_inode->i_uid_high = 0; 2610 raw_inode->i_gid_high = 0; 2611 } 2612 } else { 2613 raw_inode->i_uid_low = 2614 cpu_to_le16(fs_high2lowuid(inode->i_uid)); 2615 raw_inode->i_gid_low = 2616 cpu_to_le16(fs_high2lowgid(inode->i_gid)); 2617 raw_inode->i_uid_high = 0; 2618 raw_inode->i_gid_high = 0; 2619 } 2620 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 2621 raw_inode->i_size = cpu_to_le32(ei->i_disksize); 2622 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); 2623 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); 2624 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); 2625 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); 2626 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 2627 raw_inode->i_flags = cpu_to_le32(ei->i_flags); 2628#ifdef EXT3_FRAGMENTS 2629 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); 2630 raw_inode->i_frag = ei->i_frag_no; 2631 raw_inode->i_fsize = ei->i_frag_size; 2632#endif 2633 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); 2634 if (!S_ISREG(inode->i_mode)) { 2635 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); 2636 } else { 2637 raw_inode->i_size_high = 2638 cpu_to_le32(ei->i_disksize >> 32); 2639 if (ei->i_disksize > 0x7fffffffULL) { 2640 struct super_block *sb = inode->i_sb; 2641 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, 2642 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) || 2643 EXT3_SB(sb)->s_es->s_rev_level == 2644 cpu_to_le32(EXT3_GOOD_OLD_REV)) { 2645 /* If this is the first large file 2646 * created, add a flag to the superblock. 2647 */ 2648 err = ext3_journal_get_write_access(handle, 2649 EXT3_SB(sb)->s_sbh); 2650 if (err) 2651 goto out_brelse; 2652 ext3_update_dynamic_rev(sb); 2653 EXT3_SET_RO_COMPAT_FEATURE(sb, 2654 EXT3_FEATURE_RO_COMPAT_LARGE_FILE); 2655 sb->s_dirt = 1; 2656 handle->h_sync = 1; 2657 err = ext3_journal_dirty_metadata(handle, 2658 EXT3_SB(sb)->s_sbh); 2659 } 2660 } 2661 } 2662 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 2663 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 2664 if (old_valid_dev(inode->i_rdev)) { 2665 raw_inode->i_block[0] = 2666 cpu_to_le32(old_encode_dev(inode->i_rdev)); 2667 raw_inode->i_block[1] = 0; 2668 } else { 2669 raw_inode->i_block[0] = 0; 2670 raw_inode->i_block[1] = 2671 cpu_to_le32(new_encode_dev(inode->i_rdev)); 2672 raw_inode->i_block[2] = 0; 2673 } 2674 } else for (block = 0; block < EXT3_N_BLOCKS; block++) 2675 raw_inode->i_block[block] = ei->i_data[block]; 2676 2677 if (ei->i_extra_isize) 2678 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 2679 2680 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 2681 rc = ext3_journal_dirty_metadata(handle, bh); 2682 if (!err) 2683 err = rc; 2684 ei->i_state &= ~EXT3_STATE_NEW; 2685 2686out_brelse: 2687 brelse (bh); 2688 ext3_std_error(inode->i_sb, err); 2689 return err; 2690} 2691 2692/* 2693 * ext3_write_inode() 2694 * 2695 * We are called from a few places: 2696 * 2697 * - Within generic_file_write() for O_SYNC files. 2698 * Here, there will be no transaction running. We wait for any running 2699 * transaction to commit. 2700 * 2701 * - Within sys_sync(), kupdate and such. 2702 * We wait on commit, if told to. 2703 * 2704 * - Within prune_icache() (PF_MEMALLOC == true) 2705 * Here we simply return. 
We can't afford to block kswapd on the 2706 * journal commit. 2707 * 2708 * In all cases it is actually safe for us to return without doing anything, 2709 * because the inode has been copied into a raw inode buffer in 2710 * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 2711 * knfsd. 2712 * 2713 * Note that we are absolutely dependent upon all inode dirtiers doing the 2714 * right thing: they *must* call mark_inode_dirty() after dirtying info in 2715 * which we are interested. 2716 * 2717 * It would be a bug for them to not do this. The code: 2718 * 2719 * mark_inode_dirty(inode) 2720 * stuff(); 2721 * inode->i_size = expr; 2722 * 2723 * is in error because a kswapd-driven write_inode() could occur while 2724 * `stuff()' is running, and the new i_size will be lost. Plus the inode 2725 * will no longer be on the superblock's dirty inode list. 2726 */ 2727int ext3_write_inode(struct inode *inode, int wait) 2728{ 2729 if (current->flags & PF_MEMALLOC) 2730 return 0; 2731 2732 if (ext3_journal_current_handle()) { 2733 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n"); 2734 dump_stack(); 2735 return -EIO; 2736 } 2737 2738 if (!wait) 2739 return 0; 2740 2741 return ext3_force_commit(inode->i_sb); 2742} 2743 2744/* 2745 * ext3_setattr() 2746 * 2747 * Called from notify_change. 2748 * 2749 * We want to trap VFS attempts to truncate the file as soon as 2750 * possible. In particular, we want to make sure that when the VFS 2751 * shrinks i_size, we put the inode on the orphan list and modify 2752 * i_disksize immediately, so that during the subsequent flushing of 2753 * dirty pages and freeing of disk blocks, we can guarantee that any 2754 * commit will leave the blocks being flushed in an unused state on 2755 * disk. (On recovery, the inode will get truncated and the blocks will 2756 * be freed, so we have a strong guarantee that no future commit will 2757 * leave these blocks visible to the user.) 2758 * 2759 * Called with inode->sem down. 2760 */ 2761int ext3_setattr(struct dentry *dentry, struct iattr *attr) 2762{ 2763 struct inode *inode = dentry->d_inode; 2764 int error, rc = 0; 2765 const unsigned int ia_valid = attr->ia_valid; 2766 2767 error = inode_change_ok(inode, attr); 2768 if (error) 2769 return error; 2770 2771 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 2772 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 2773 handle_t *handle; 2774 2775 /* (user+group)*(old+new) structure, inode write (sb, 2776 * inode block, ? - but truncate inode update has it) */ 2777 handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ 2778 EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); 2779 if (IS_ERR(handle)) { 2780 error = PTR_ERR(handle); 2781 goto err_out; 2782 } 2783 error = DQUOT_TRANSFER(inode, attr) ? 
-EDQUOT : 0; 2784 if (error) { 2785 ext3_journal_stop(handle); 2786 return error; 2787 } 2788 /* Update corresponding info in inode so that everything is in 2789 * one transaction */ 2790 if (attr->ia_valid & ATTR_UID) 2791 inode->i_uid = attr->ia_uid; 2792 if (attr->ia_valid & ATTR_GID) 2793 inode->i_gid = attr->ia_gid; 2794 error = ext3_mark_inode_dirty(handle, inode); 2795 ext3_journal_stop(handle); 2796 } 2797 2798 if (S_ISREG(inode->i_mode) && 2799 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 2800 handle_t *handle; 2801 2802 handle = ext3_journal_start(inode, 3); 2803 if (IS_ERR(handle)) { 2804 error = PTR_ERR(handle); 2805 goto err_out; 2806 } 2807 2808 error = ext3_orphan_add(handle, inode); 2809 EXT3_I(inode)->i_disksize = attr->ia_size; 2810 rc = ext3_mark_inode_dirty(handle, inode); 2811 if (!error) 2812 error = rc; 2813 ext3_journal_stop(handle); 2814 } 2815 2816 rc = inode_setattr(inode, attr); 2817 2818 /* If inode_setattr's call to ext3_truncate failed to get a 2819 * transaction handle at all, we need to clean up the in-core 2820 * orphan list manually. */ 2821 if (inode->i_nlink) 2822 ext3_orphan_del(NULL, inode); 2823 2824 if (!rc && (ia_valid & ATTR_MODE)) 2825 rc = ext3_acl_chmod(inode); 2826 2827err_out: 2828 ext3_std_error(inode->i_sb, error); 2829 if (!error) 2830 error = rc; 2831 return error; 2832} 2833 2834 2835/* 2836 * akpm: how many blocks doth make a writepage()? 2837 * 2838 * With N blocks per page, it may be: 2839 * N data blocks 2840 * 2 indirect blocks 2841 * 2 dindirect 2842 * 1 tindirect 2843 * N+5 bitmap blocks (from the above) 2844 * N+5 group descriptor summary blocks 2845 * 1 inode block 2846 * 1 superblock. 2847 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files 2848 * 2849 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS 2850 * 2851 * With ordered or writeback data it's the same, less the N data blocks. 2852 * 2853 * If the inode's direct blocks can hold an integral number of pages then a 2854 * page cannot straddle two indirect blocks, and we can only touch one indirect 2855 * and dindirect block, and the "5" above becomes "3". 2856 * 2857 * This still overestimates under most circumstances. If we were to pass the 2858 * start and end offsets in here as well we could do block_to_path() on each 2859 * block and work out the exact number of indirects which are touched. Pah. 2860 */ 2861 2862static int ext3_writepage_trans_blocks(struct inode *inode) 2863{ 2864 int bpp = ext3_journal_blocks_per_page(inode); 2865 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3; 2866 int ret; 2867 2868 if (ext3_should_journal_data(inode)) 2869 ret = 3 * (bpp + indirects) + 2; 2870 else 2871 ret = 2 * (bpp + indirects) + 2; 2872 2873#ifdef CONFIG_QUOTA 2874 /* We know that structure was already allocated during DQUOT_INIT so 2875 * we will be updating only the data blocks + inodes */ 2876 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); 2877#endif 2878 2879 return ret; 2880} 2881 2882/* 2883 * The caller must have previously called ext3_reserve_inode_write(). 2884 * Given this, we know that the caller already has write access to iloc->bh. 
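 *
 * (Sketch of the usual calling pattern, for orientation only:
 *
 *	err = ext3_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode ...
 *		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * which is exactly what ext3_mark_inode_dirty() below does.)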
2885 */ 2886int ext3_mark_iloc_dirty(handle_t *handle, 2887 struct inode *inode, struct ext3_iloc *iloc) 2888{ 2889 int err = 0; 2890 2891 /* the do_update_inode consumes one bh->b_count */ 2892 get_bh(iloc->bh); 2893 2894 /* ext3_do_update_inode() does journal_dirty_metadata */ 2895 err = ext3_do_update_inode(handle, inode, iloc); 2896 put_bh(iloc->bh); 2897 return err; 2898} 2899 2900/* 2901 * On success, we end up with an outstanding reference count against 2902 * iloc->bh. This _must_ be cleaned up later. 2903 */ 2904 2905int 2906ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 2907 struct ext3_iloc *iloc) 2908{ 2909 int err = 0; 2910 if (handle) { 2911 err = ext3_get_inode_loc(inode, iloc); 2912 if (!err) { 2913 BUFFER_TRACE(iloc->bh, "get_write_access"); 2914 err = ext3_journal_get_write_access(handle, iloc->bh); 2915 if (err) { 2916 brelse(iloc->bh); 2917 iloc->bh = NULL; 2918 } 2919 } 2920 } 2921 ext3_std_error(inode->i_sb, err); 2922 return err; 2923} 2924 2925/* 2926 * akpm: What we do here is to mark the in-core inode as clean 2927 * with respect to inode dirtiness (it may still be data-dirty). 2928 * This means that the in-core inode may be reaped by prune_icache 2929 * without having to perform any I/O. This is a very good thing, 2930 * because *any* task may call prune_icache - even ones which 2931 * have a transaction open against a different journal. 2932 * 2933 * Is this cheating? Not really. Sure, we haven't written the 2934 * inode out, but prune_icache isn't a user-visible syncing function. 2935 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 2936 * we start and wait on commits. 2937 * 2938 * Is this efficient/effective? Well, we're being nice to the system 2939 * by cleaning up our inodes proactively so they can be reaped 2940 * without I/O. But we are potentially leaving up to five seconds' 2941 * worth of inodes floating about which prune_icache wants us to 2942 * write out. One way to fix that would be to get prune_icache() 2943 * to do a write_super() to free up some memory. It has the desired 2944 * effect. 2945 */ 2946int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) 2947{ 2948 struct ext3_iloc iloc; 2949 int err; 2950 2951 might_sleep(); 2952 err = ext3_reserve_inode_write(handle, inode, &iloc); 2953 if (!err) 2954 err = ext3_mark_iloc_dirty(handle, inode, &iloc); 2955 return err; 2956} 2957 2958/* 2959 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() 2960 * 2961 * We're really interested in the case where a file is being extended. 2962 * i_size has been changed by generic_commit_write() and we thus need 2963 * to include the updated inode in the current transaction. 2964 * 2965 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 2966 * are allocated to the file. 2967 * 2968 * If the inode is marked synchronous, we don't honour that here - doing 2969 * so would cause a commit on atime updates, which we don't bother doing. 2970 * We handle synchronous inodes at the highest possible level. 
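 *
 * (Note that if this task already has a handle open on this journal,
 * the ext3_journal_start() below nests inside it -- JBD just takes an
 * extra reference on the current handle -- so the inode is dirtied in
 * the transaction the caller already has open, the "outer handle" of
 * the jbd_debug() message below.)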
2971 */ 2972void ext3_dirty_inode(struct inode *inode) 2973{ 2974 handle_t *current_handle = ext3_journal_current_handle(); 2975 handle_t *handle; 2976 2977 handle = ext3_journal_start(inode, 2); 2978 if (IS_ERR(handle)) 2979 goto out; 2980 if (current_handle && 2981 current_handle->h_transaction != handle->h_transaction) { 2982 /* This task has a transaction open against a different fs */ 2983 printk(KERN_EMERG "%s: transactions do not match!\n", 2984 __FUNCTION__); 2985 } else { 2986 jbd_debug(5, "marking dirty. outer handle=%p\n", 2987 current_handle); 2988 ext3_mark_inode_dirty(handle, inode); 2989 } 2990 ext3_journal_stop(handle); 2991out: 2992 return; 2993} 2994 2995#ifdef AKPM 2996/* 2997 * Bind an inode's backing buffer_head into this transaction, to prevent 2998 * it from being flushed to disk early. Unlike 2999 * ext3_reserve_inode_write, this leaves behind no bh reference and 3000 * returns no iloc structure, so the caller needs to repeat the iloc 3001 * lookup to mark the inode dirty later. 3002 */ 3003static inline int 3004ext3_pin_inode(handle_t *handle, struct inode *inode) 3005{ 3006 struct ext3_iloc iloc; 3007 3008 int err = 0; 3009 if (handle) { 3010 err = ext3_get_inode_loc(inode, &iloc); 3011 if (!err) { 3012 BUFFER_TRACE(iloc.bh, "get_write_access"); 3013 err = journal_get_write_access(handle, iloc.bh); 3014 if (!err) 3015 err = ext3_journal_dirty_metadata(handle, 3016 iloc.bh); 3017 brelse(iloc.bh); 3018 } 3019 } 3020 ext3_std_error(inode->i_sb, err); 3021 return err; 3022} 3023#endif 3024 3025int ext3_change_inode_journal_flag(struct inode *inode, int val) 3026{ 3027 journal_t *journal; 3028 handle_t *handle; 3029 int err; 3030 3031 /* 3032 * We have to be very careful here: changing a data block's 3033 * journaling status dynamically is dangerous. If we write a 3034 * data block to the journal, change the status and then delete 3035 * that block, we risk forgetting to revoke the old log record 3036 * from the journal and so a subsequent replay can corrupt data. 3037 * So, first we make sure that the journal is empty and that 3038 * nobody is changing anything. 3039 */ 3040 3041 journal = EXT3_JOURNAL(inode); 3042 if (is_journal_aborted(journal) || IS_RDONLY(inode)) 3043 return -EROFS; 3044 3045 journal_lock_updates(journal); 3046 journal_flush(journal); 3047 3048 /* 3049 * OK, there are no updates running now, and all cached data is 3050 * synced to disk. We are now in a completely consistent state 3051 * which doesn't have anything in the journal, and we know that 3052 * no filesystem updates are running, so it is safe to modify 3053 * the inode's in-core data-journaling state flag now. 3054 */ 3055 3056 if (val) 3057 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL; 3058 else 3059 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL; 3060 ext3_set_aops(inode); 3061 3062 journal_unlock_updates(journal); 3063 3064 /* Finally we can mark the inode as dirty. */ 3065 3066 handle = ext3_journal_start(inode, 1); 3067 if (IS_ERR(handle)) 3068 return PTR_ERR(handle); 3069 3070 err = ext3_mark_inode_dirty(handle, inode); 3071 handle->h_sync = 1; 3072 ext3_journal_stop(handle); 3073 ext3_std_error(inode->i_sb, err); 3074 3075 return err; 3076}