fs/logfs/dev_bdev.c at v2.6.34-rc2
/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);

static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio); /* FIXME: handle this */

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}


static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio); /* FIXME: handle this */

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}

static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct super_block *sb)
{
	close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

int logfs_get_sb_bdev(struct file_system_type *type, int flags,
		const char *devname, struct vfsmount *mnt)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		return logfs_get_sb_mtd(type, flags, mtdnr, mnt);
	}

	return logfs_get_sb_device(type, flags, NULL, bdev, &bd_devops, mnt);
}