/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Written by Alexander Zarochentcev.
 *
 * The kernel part of the (on-line) reiserfs resizer.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_fs_sb.h>
#include <linux/buffer_head.h>

int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
{
	int err = 0;
	struct reiserfs_super_block *sb;
	struct reiserfs_bitmap_info *bitmap;
	struct reiserfs_bitmap_info *old_bitmap = SB_AP_BITMAP(s);
	struct buffer_head *bh;
	struct reiserfs_transaction_handle th;
	unsigned int bmap_nr_new, bmap_nr;
	unsigned int block_r_new, block_r;

	struct reiserfs_list_bitmap *jb;
	struct reiserfs_list_bitmap jbitmap[JOURNAL_NUM_BITMAPS];

	unsigned long int block_count, free_blocks;
	int i;
	int copy_size;

	sb = SB_DISK_SUPER_BLOCK(s);

	if (SB_BLOCK_COUNT(s) >= block_count_new) {
		printk("can\'t shrink filesystem on-line\n");
		return -EINVAL;
	}

	/* check the device size */
	bh = sb_bread(s, block_count_new - 1);
	if (!bh) {
		printk("reiserfs_resize: can\'t read last block\n");
		return -EINVAL;
	}
	bforget(bh);

	/* old disk layout detection; those partitions can be mounted, but
	 * cannot be resized */
	if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
	    != REISERFS_DISK_OFFSET_IN_BYTES) {
		printk("reiserfs_resize: unable to resize a reiserfs without distributed bitmap (fs version < 3.5.12)\n");
		return -ENOTSUPP;
	}

	/* count used bits in last bitmap block */
	block_r = SB_BLOCK_COUNT(s) - (SB_BMAP_NR(s) - 1) * s->s_blocksize * 8;

	/* count bitmap blocks in new fs */
	bmap_nr_new = block_count_new / (s->s_blocksize * 8);
	block_r_new = block_count_new - bmap_nr_new * s->s_blocksize * 8;
	if (block_r_new)
		bmap_nr_new++;
	else
		block_r_new = s->s_blocksize * 8;
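	/*
	 * Worked example (assuming a 4096-byte block size, so each bitmap
	 * block covers 4096 * 8 = 32768 fs blocks): for block_count_new =
	 * 1000000, the division above gives 30 with a remainder of 16960,
	 * so bmap_nr_new becomes 31 and block_r_new = 16960 bits are in use
	 * in the last bitmap block.
	 */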
	/* save old values */
	block_count = SB_BLOCK_COUNT(s);
	bmap_nr = SB_BMAP_NR(s);

	/* resizing of reiserfs bitmaps (journal and real), if needed */
	if (bmap_nr_new > bmap_nr) {
		/* reallocate journal bitmaps */
		if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
			printk("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
			unlock_super(s);
			return -ENOMEM;
		}
		/* the new journal bitmaps are zero filled, now we copy in the bitmap
		 ** node pointers from the old journal bitmap structs, and then
		 ** transfer the new data structures into the journal struct.
		 **
		 ** using the copy_size var below allows this code to work for
		 ** both shrinking and expanding the FS.
		 */
		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
		copy_size =
		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			struct reiserfs_bitmap_node **node_tmp;
			jb = SB_JOURNAL(s)->j_list_bitmap + i;
			memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);

			/* just in case vfree schedules on us, copy the new
			 ** pointer into the journal struct before freeing the
			 ** old one
			 */
			node_tmp = jb->bitmaps;
			jb->bitmaps = jbitmap[i].bitmaps;
			vfree(node_tmp);
		}

		/* allocate additional bitmap blocks, reallocate array of bitmap
		 * block pointers */
		bitmap = vmalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
		if (!bitmap) {
			/* Journal bitmaps are still supersized, but the memory isn't
			 * leaked, so I guess it's ok */
			printk("reiserfs_resize: unable to allocate memory.\n");
			return -ENOMEM;
		}
		memset(bitmap, 0,
		       sizeof(struct reiserfs_bitmap_info) * SB_BMAP_NR(s));
		for (i = 0; i < bmap_nr; i++)
			bitmap[i] = old_bitmap[i];

		/* This doesn't go through the journal, but it doesn't have to.
		 * The changes are still atomic: We're synced up when the journal
		 * transaction begins, and the new bitmaps don't matter if the
		 * transaction fails. */
		for (i = bmap_nr; i < bmap_nr_new; i++) {
			bitmap[i].bh = sb_getblk(s, i * s->s_blocksize * 8);
			memset(bitmap[i].bh->b_data, 0, sb_blocksize(sb));
			reiserfs_test_and_set_le_bit(0, bitmap[i].bh->b_data);

			set_buffer_uptodate(bitmap[i].bh);
			mark_buffer_dirty(bitmap[i].bh);
			sync_dirty_buffer(bitmap[i].bh);
			/* update bitmap_info stuff */
			bitmap[i].first_zero_hint = 1;
			bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
		}
		/* free old bitmap blocks array */
		SB_AP_BITMAP(s) = bitmap;
		vfree(old_bitmap);
	}

	/* begin transaction, if there was an error, it's fine. Yes, we have
	 * incorrect bitmaps now, but none of it is ever going to touch the
	 * disk anyway. */
	err = journal_begin(&th, s, 10);
	if (err)
		return err;

	/* correct last bitmap blocks in old and new disk layout */
	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr - 1].bh, 1);
	for (i = block_r; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_clear_le_bit(i,
				SB_AP_BITMAP(s)[bmap_nr - 1].bh->b_data);
	SB_AP_BITMAP(s)[bmap_nr - 1].free_count += s->s_blocksize * 8 - block_r;
	if (!SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint)
		SB_AP_BITMAP(s)[bmap_nr - 1].first_zero_hint = block_r;

	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr - 1].bh);

	reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh, 1);
	for (i = block_r_new; i < s->s_blocksize * 8; i++)
		reiserfs_test_and_set_le_bit(i,
				SB_AP_BITMAP(s)[bmap_nr_new - 1].bh->b_data);
	journal_mark_dirty(&th, s, SB_AP_BITMAP(s)[bmap_nr_new - 1].bh);

	SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count -=
	    s->s_blocksize * 8 - block_r_new;
	/* Extreme case where last bitmap is the only valid block in itself. */
	if (!SB_AP_BITMAP(s)[bmap_nr_new - 1].free_count)
		SB_AP_BITMAP(s)[bmap_nr_new - 1].first_zero_hint = 0;
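	/*
	 * Superblock accounting for the update below: the free block count
	 * grows by the number of newly added blocks minus the number of new
	 * bitmap blocks, since each new bitmap block marked itself as used
	 * (bit 0 set above).
	 */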
	/* update super */
	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	free_blocks = SB_FREE_BLOCKS(s);
	PUT_SB_FREE_BLOCKS(s,
			   free_blocks + (block_count_new - block_count -
					  (bmap_nr_new - bmap_nr)));
	PUT_SB_BLOCK_COUNT(s, block_count_new);
	PUT_SB_BMAP_NR(s, bmap_nr_new);
	s->s_dirt = 1;

	journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));

	SB_JOURNAL(s)->j_must_wait = 1;
	return journal_end(&th, s, 10);
}