/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 * - added bitmap daemon (to asynchronously clear bitmap bits from disk)
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 * wait if count gets too high, wake when it drops to half.
 * allow bitmap to be mirrored with superblock (before or after...)
 * allow hot-add to re-instate a current device.
 * allow hot-add of bitmap after quiescing device
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>

/* debug macros */

#define DEBUG 0

#if DEBUG
/* these are for debugging purposes only! */

/* define one and only one of these */
#define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
#define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
#define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
#define INJECT_FAULTS_4 0 /* undef */
#define INJECT_FAULTS_5 0 /* undef */
#define INJECT_FAULTS_6 0

/* if these are defined, the driver will fail! debug only */
#define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
#define INJECT_FATAL_FAULT_2 0 /* undef */
#define INJECT_FATAL_FAULT_3 0 /* undef */
#endif

//#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
#define DPRINTK(x...) do { } while(0)

#ifndef PRINTK
#  if DEBUG > 0
#    define PRINTK(x...) printk(KERN_DEBUG x)
#  else
#    define PRINTK(x...)
#  endif
#endif

static inline char * bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * test if the bitmap is active
 */
int bitmap_active(struct bitmap *bitmap)
{
	unsigned long flags;
	int res = 0;

	if (!bitmap)
		return res;
	spin_lock_irqsave(&bitmap->lock, flags);
	res = bitmap->flags & BITMAP_ACTIVE;
	spin_unlock_irqrestore(&bitmap->lock, flags);
	return res;
}

#define WRITE_POOL_SIZE 256
/* mempool for queueing pending writes on the bitmap file */
static void *write_pool_alloc(gfp_t gfp_flags, void *data)
{
	return kmalloc(sizeof(struct page_list), gfp_flags);
}

static void write_pool_free(void *ptr, void *data)
{
	kfree(ptr);
}

/*
 * just a placeholder - calls kmalloc for bitmap pages
 */
static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
{
	unsigned char *page;

#ifdef INJECT_FAULTS_1
	page = NULL;
#else
	page = kmalloc(PAGE_SIZE, GFP_NOIO);
#endif
	if (!page)
		printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
	else
		PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
			bmname(bitmap), page);
	return page;
}

/*
 * for now just a placeholder -- just calls kfree for bitmap pages
 */
static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
{
	PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
	kfree(page);
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		printk(KERN_ALERT
			"%s: invalid bitmap page request: %lu (> %lu)\n",
			bmname(bitmap), page, bitmap->pages-1);
		return -EINVAL;
	}


	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	spin_unlock_irq(&bitmap->lock);

	/* this page has not been allocated yet */

	if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
		PRINTK("%s: bitmap map page allocation failed, hijacking\n",
			bmname(bitmap));
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		spin_lock_irq(&bitmap->lock);
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
		goto out;
	}

	/* got a page */

	spin_lock_irq(&bitmap->lock);

	/* recheck the page */

	if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		bitmap_free_page(bitmap, mappage);
		return 0;
	}

	/* no page was in place and we have one, so install it */

	memset(mappage, 0, PAGE_SIZE);
	bitmap->bp[page].map = mappage;
	bitmap->missing_pages--;
out:
	return 0;
}
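
/*
 * A note on the hijacked case above, as the code reads (see the hijacked
 * branch of bitmap_get_counter() below): when the counter page cannot be
 * allocated, the bp[page].map pointer field itself is reused as storage
 * for two bitmap_counter_t values, each covering half of the chunks the
 * page would normally track.  Accounting becomes much coarser, but the
 * array keeps working under memory pressure.  This assumes
 * bitmap_counter_t (from bitmap.h) is small enough for two of them to
 * fit in one pointer.
 */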

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static inline void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
		return;
	}

	/* normal case, free the page */

#if 0
/* actually ... let's not.  We will probably need the page again exactly when
 * memory is tight and we are flushing to disk
 */
	return;
#else
	ptr = bitmap->bp[page].map;
	bitmap->bp[page].map = NULL;
	bitmap->missing_pages++;
	bitmap_free_page(bitmap, ptr);
	return;
#endif
}


/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/* copy the pathname of a file to a buffer */
char *file_path(struct file *file, char *buf, int count)
{
	struct dentry *d;
	struct vfsmount *v;

	if (!buf)
		return NULL;

	d = file->f_dentry;
	v = file->f_vfsmnt;

	buf = d_path(d, v, buf, count);

	return IS_ERR(buf) ? NULL : buf;
}

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
{
	/* choose a good rdev and read the page from there */

	mdk_rdev_t *rdev;
	struct list_head *tmp;
	struct page *page = alloc_page(GFP_KERNEL);
	sector_t target;

	if (!page)
		return ERR_PTR(-ENOMEM);

	ITERATE_RDEV(mddev, rdev, tmp) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;

		target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
			page->index = index;
			return page;
		}
	}
	return ERR_PTR(-EIO);

}

static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wait)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev, rdev, tmp)
		if (test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			md_super_write(mddev, rdev,
				       (rdev->sb_offset<<1) + offset
				       + page->index * (PAGE_SIZE/512),
				       PAGE_SIZE,
				       page);

	if (wait)
		md_super_wait(mddev);
	return 0;
}

/*
 * write out a page to a file
 */
static int write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	int ret = -ENOMEM;

	if (bitmap->file == NULL)
		return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);

	if (wait)
		lock_page(page);
	else {
		if (TestSetPageLocked(page))
			return -EAGAIN; /* already locked */
		if (PageWriteback(page)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}

	ret = page->mapping->a_ops->prepare_write(bitmap->file, page, 0, PAGE_SIZE);
	if (!ret)
		ret = page->mapping->a_ops->commit_write(bitmap->file, page, 0,
			PAGE_SIZE);
	if (ret) {
		unlock_page(page);
		return ret;
	}

	set_page_dirty(page); /* force it to be written out */

	if (!wait) {
		/* add to list to be waited for by daemon */
		struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
		item->page = page;
		page_cache_get(page);
		spin_lock(&bitmap->write_lock);
		list_add(&item->list, &bitmap->complete_pages);
		spin_unlock(&bitmap->write_lock);
		md_wakeup_thread(bitmap->writeback_daemon);
	}
	return write_one_page(page, wait);
}
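
/*
 * A note on the asynchronous path above, as the code reads: with
 * wait == 0 the page is only queued on bitmap->complete_pages and handed
 * to write_one_page() without waiting.  The writeback daemon
 * (bitmap_writeback_daemon(), below) later walks that list, sleeps in
 * wait_on_page_writeback() for each entry, and kicks the bitmap file if
 * a page comes back with an error.  Callers that must not proceed before
 * the bits are safely on disk -- bitmap_unplug() for BITMAP_PAGE_DIRTY
 * pages, bitmap_update_sb() -- pass wait == 1 instead.
 */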

/* read a page from a file, pinning it into cache, and return bytes_read */
static struct page *read_page(struct file *file, unsigned long index,
					unsigned long *bytes_read)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = NULL;
	loff_t isize = i_size_read(inode);
	unsigned long end_index = isize >> PAGE_CACHE_SHIFT;

	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
			(unsigned long long)index << PAGE_CACHE_SHIFT);

	page = read_cache_page(inode->i_mapping, index,
			(filler_t *)inode->i_mapping->a_ops->readpage, file);
	if (IS_ERR(page))
		goto out;
	wait_on_page_locked(page);
	if (!PageUptodate(page) || PageError(page)) {
		page_cache_release(page);
		page = ERR_PTR(-EIO);
		goto out;
	}

	if (index > end_index) /* we have read beyond EOF */
		*bytes_read = 0;
	else if (index == end_index) /* possible short read */
		*bytes_read = isize & ~PAGE_CACHE_MASK;
	else
		*bytes_read = PAGE_CACHE_SIZE; /* got a full page */
out:
	if (IS_ERR(page))
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
			(int)PAGE_CACHE_SIZE,
			(unsigned long long)index << PAGE_CACHE_SHIFT,
			PTR_ERR(page));
	return page;
}

/*
 * bitmap file superblock operations
 */

/* update the event counter and sync the superblock to disk */
int bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long flags;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return 0;
	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->sb_page) { /* no superblock */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (!bitmap->mddev->degraded)
		sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	kunmap(bitmap->sb_page);
	return write_page(bitmap, bitmap->sb_page, 1);
}

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->sb_page)
		return;
	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
					*(__u32 *)(sb->uuid+0),
					*(__u32 *)(sb->uuid+4),
					*(__u32 *)(sb->uuid+8),
					*(__u32 *)(sb->uuid+12));
	printk(KERN_DEBUG "        events: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events));
	printk(KERN_DEBUG "events cleared: %llu\n",
			(unsigned long long) le64_to_cpu(sb->events_cleared));
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap(bitmap->sb_page);
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long bytes_read;
	unsigned long long events;
	int err = -EINVAL;

	/* page 0 is the superblock, read it... */
	if (bitmap->file)
		bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read);
	else {
		bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
		bytes_read = PAGE_SIZE;
	}
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}

	sb = (bitmap_super_t *)kmap(bitmap->sb_page);

	if (bytes_read < sizeof(*sb)) { /* short read */
		printk(KERN_INFO "%s: bitmap file superblock truncated\n",
			bmname(bitmap));
		err = -ENOSPC;
		goto out;
	}

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep);
	write_behind = le32_to_cpu(sb->write_behind);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
		reason = "unrecognized superblock version";
	else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
		reason = "bitmap chunksize out of range (512B - 4MB)";
	else if ((1 << ffz(~chunksize)) != chunksize)
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > 15)
		reason = "daemon sleep period out of range (1-15s)";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (!bitmap->mddev->persistent)
		goto success;

	/*
	 * if we have a persistent array superblock, compare the
	 * bitmap's UUID and event counter to the mddev's
	 */
	if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
		printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
			bmname(bitmap));
		goto out;
	}
	events = le64_to_cpu(sb->events);
	if (events < bitmap->mddev->events) {
		printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
			"-- forcing full recovery\n", bmname(bitmap), events,
			(unsigned long long) bitmap->mddev->events);
		sb->state |= BITMAP_STALE;
	}
success:
	/* assign fields using values from superblock */
	bitmap->chunksize = chunksize;
	bitmap->daemon_sleep = daemon_sleep;
	bitmap->daemon_lastrun = jiffies;
	bitmap->max_write_behind = write_behind;
	bitmap->flags |= sb->state;
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		bitmap->flags |= BITMAP_HOSTENDIAN;
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	if (sb->state & BITMAP_STALE)
		bitmap->events_cleared = bitmap->mddev->events;
	err = 0;
out:
	kunmap(bitmap->sb_page);
	if (err)
		bitmap_print_sb(bitmap);
	return err;
}
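
/*
 * A worked example for the power-of-two test above: ffz(~chunksize) is
 * the index of the lowest set bit of chunksize, so (1 << ffz(~chunksize))
 * equals chunksize only when exactly one bit is set.  For chunksize =
 * 65536 (64 KiB) the lowest set bit is 16 and 1 << 16 == 65536, so the
 * check passes; for 65536 + 512 the lowest set bit is still 9 and
 * 1 << 9 != 66048, so the superblock is rejected.  The 16383 in the
 * write-behind message is COUNTER_MAX, the largest value the 14-bit
 * per-chunk write counter can hold (assuming the COUNTER_* definitions
 * in bitmap.h).
 */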

enum bitmap_mask_op {
	MASK_SET,
	MASK_UNSET
};

/* record the state of the bitmap in the superblock */
static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
				enum bitmap_mask_op op)
{
	bitmap_super_t *sb;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap || !bitmap->sb_page) { /* can't set the state */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return;
	}
	page_cache_get(bitmap->sb_page);
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = (bitmap_super_t *)kmap(bitmap->sb_page);
	switch (op) {
		case MASK_SET: sb->state |= bits;
				break;
		case MASK_UNSET: sb->state &= ~bits;
				break;
		default: BUG();
	}
	kunmap(bitmap->sb_page);
	page_cache_release(bitmap->sb_page);
}

/*
 * general bitmap file operations
 */

/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(unsigned long chunk)
{
	return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(unsigned long chunk)
{
	return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 * this lookup is complicated by the fact that the bitmap sb might be exactly
 * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
 * 0 or page 1
 */
static inline struct page *filemap_get_page(struct bitmap *bitmap,
					unsigned long chunk)
{
	return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
}


static void bitmap_file_unmap(struct bitmap *bitmap)
{
	struct page **map, *sb_page;
	unsigned long *attr;
	int pages;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	map = bitmap->filemap;
	bitmap->filemap = NULL;
	attr = bitmap->filemap_attr;
	bitmap->filemap_attr = NULL;
	pages = bitmap->file_pages;
	bitmap->file_pages = 0;
	sb_page = bitmap->sb_page;
	bitmap->sb_page = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	while (pages--)
		if (map[pages]->index != 0) /* 0 is sb_page, release it below */
			page_cache_release(map[pages]);
	kfree(map);
	kfree(attr);

	if (sb_page)
		page_cache_release(sb_page);
}

static void bitmap_stop_daemon(struct bitmap *bitmap);

/* dequeue the next item in a page list -- don't call from irq context */
static struct page_list *dequeue_page(struct bitmap *bitmap)
{
	struct page_list *item = NULL;
	struct list_head *head = &bitmap->complete_pages;

	spin_lock(&bitmap->write_lock);
	if (list_empty(head))
		goto out;
	item = list_entry(head->prev, struct page_list, list);
	list_del(head->prev);
out:
	spin_unlock(&bitmap->write_lock);
	return item;
}

static void drain_write_queues(struct bitmap *bitmap)
{
	struct page_list *item;

	while ((item = dequeue_page(bitmap))) {
		/* don't bother to wait */
		page_cache_release(item->page);
		mempool_free(item, bitmap->write_pool);
	}

	wake_up(&bitmap->write_wait);
}

static void bitmap_file_put(struct bitmap *bitmap)
{
	struct file *file;
	struct inode *inode;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	file = bitmap->file;
	bitmap->file = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	bitmap_stop_daemon(bitmap);

	drain_write_queues(bitmap);

	bitmap_file_unmap(bitmap);

	if (file) {
		inode = file->f_mapping->host;
		spin_lock(&inode->i_lock);
		atomic_set(&inode->i_writecount, 1); /* allow writes again */
		spin_unlock(&inode->i_lock);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
	bitmap_update_sb(bitmap);

	if (bitmap->file) {
		path = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (path)
			ptr = file_path(bitmap->file, path, PAGE_SIZE);

		printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
		       bmname(bitmap), ptr ? ptr : "");

		kfree(path);
	}

	bitmap_file_put(bitmap);

	return;
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 1, // there are set bits that need to be synced
	BITMAP_PAGE_CLEAN = 2, // there are bits that might need to be cleared
	BITMAP_PAGE_NEEDWRITE=4, // there are cleared bits that need to be synced
};

static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
				enum bitmap_page_attr attr)
{
	bitmap->filemap_attr[page->index] |= attr;
}

static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
				enum bitmap_page_attr attr)
{
	bitmap->filemap_attr[page->index] &= ~attr;
}

static inline unsigned long get_page_attr(struct bitmap *bitmap, struct page *page)
{
	return bitmap->filemap_attr[page->index];
}

/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);

	if (!bitmap->filemap) {
		return;
	}

	page = filemap_get_page(bitmap, chunk);
	bit = file_page_offset(chunk);


	/* make sure the page stays cached until it gets written out */
	if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
		page_cache_get(page);

	/* set the bit */
	kaddr = kmap_atomic(page, KM_USER0);
	if (bitmap->flags & BITMAP_HOSTENDIAN)
		set_bit(bit, kaddr);
	else
		ext2_set_bit(bit, kaddr);
	kunmap_atomic(kaddr, KM_USER0);
	PRINTK("set file bit %lu page %lu\n", bit, page->index);

	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);

}
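
/*
 * On the two bit-setting variants above, as the code reads: the default
 * layout uses ext2_set_bit()/ext2_test_bit(), so the on-disk bitmap is
 * stored with little-endian bit numbering and can be read back on a host
 * of either endianness.  A superblock whose version is
 * BITMAP_MAJOR_HOSTENDIAN selects the native set_bit() ordering instead,
 * which avoids the swizzling but ties the bitmap file to hosts with the
 * same bit ordering.
 */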

/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
int bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i, attr, flags;
	struct page *page;
	int wait = 0;
	int err;

	if (!bitmap)
		return 0;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->file_pages; i++) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (!bitmap->filemap) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return 0;
		}
		page = bitmap->filemap[i];
		attr = get_page_attr(bitmap, page);
		clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		if ((attr & BITMAP_PAGE_DIRTY))
			wait = 1;
		spin_unlock_irqrestore(&bitmap->lock, flags);

		if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE)) {
			err = write_page(bitmap, page, 0);
			if (err == -EAGAIN) {
				if (attr & BITMAP_PAGE_DIRTY)
					err = write_page(bitmap, page, 1);
				else
					err = 0;
			}
			if (err)
				return 1;
		}
	}
	if (wait) { /* if any writes were performed, we need to wait on them */
		if (bitmap->file) {
			spin_lock_irq(&bitmap->write_lock);
			wait_event_lock_irq(bitmap->write_wait,
				list_empty(&bitmap->complete_pages), bitmap->write_lock,
				wake_up_process(bitmap->writeback_daemon->tsk));
			spin_unlock_irq(&bitmap->write_lock);
		} else
			md_super_wait(bitmap->mddev);
	}
	return 0;
}
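
/*
 * The asymmetry between the two attributes in bitmap_unplug() above is
 * deliberate, as the code reads: BITMAP_PAGE_DIRTY means a bit was set
 * ahead of a data write, so the page must reach disk before that write is
 * released -- hence the retry with wait == 1 and the final wait.
 * BITMAP_PAGE_NEEDWRITE only covers bits being cleared by the daemon;
 * losing such a write merely leaves a stale dirty bit and a little extra
 * resync work, so -EAGAIN is simply ignored for those pages.
 */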

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);

/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit;
	struct page *page = NULL, *oldpage = NULL;
	unsigned long num_pages, bit_cnt = 0;
	struct file *file;
	unsigned long bytes, offset, dummy;
	int outofdate;
	int ret = -ENOSPC;

	chunks = bitmap->chunks;
	file = bitmap->file;

	BUG_ON(!file && !bitmap->offset);

#ifdef INJECT_FAULTS_3
	outofdate = 1;
#else
	outofdate = bitmap->flags & BITMAP_STALE;
#endif
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

	bytes = (chunks + 7) / 8;

	num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;

	if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			bytes + sizeof(bitmap_super_t));
		goto out;
	}

	ret = -ENOMEM;

	bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!bitmap->filemap)
		goto out;

	bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL);
	if (!bitmap->filemap_attr)
		goto out;

	memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);

	oldindex = ~0L;

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(i);
		bit = file_page_offset(i);
		if (index != oldindex) { /* this is a new page, read it in */
			/* unmap the old page, we're done with it */
			if (oldpage != NULL)
				kunmap(oldpage);
			if (index == 0) {
				/*
				 * if we're here then the superblock page
				 * contains some bits (PAGE_SIZE != sizeof sb)
				 * we've already read it in, so just use it
				 */
				page = bitmap->sb_page;
				offset = sizeof(bitmap_super_t);
			} else if (file) {
				page = read_page(file, index, &dummy);
				offset = 0;
			} else {
				page = read_sb_page(bitmap->mddev, bitmap->offset, index);
				offset = 0;
			}
			if (IS_ERR(page)) { /* read error */
				ret = PTR_ERR(page);
				goto out;
			}

			oldindex = index;
			oldpage = page;
			kmap(page);

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				memset(page_address(page) + offset, 0xff,
					PAGE_SIZE - offset);
				ret = write_page(bitmap, page, 1);
				if (ret) {
					kunmap(page);
					/* release, page not in filemap yet */
					page_cache_release(page);
					goto out;
				}
			}

			bitmap->filemap[bitmap->file_pages++] = page;
		}
		if (bitmap->flags & BITMAP_HOSTENDIAN)
			b = test_bit(bit, page_address(page));
		else
			b = ext2_test_bit(bit, page_address(page));
		if (b) {
			/* if the disk bit is set, set the memory bit */
			bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
					       ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start)
				);
			bit_cnt++;
			set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
		}
	}

	/* everything went OK */
	ret = 0;
	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);

	if (page) /* unmap the last page */
		kunmap(page);

	if (bit_cnt) { /* Kick recovery if any bits were set */
		set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
		md_wakeup_thread(bitmap->mddev->thread);
	}

out:
	printk(KERN_INFO "%s: bitmap initialized from disk: "
		"read %lu/%lu pages, set %lu bits, status: %d\n",
		bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);

	return ret;
}

void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */

	unsigned long chunks = bitmap->chunks;
	unsigned long bytes = (chunks+7)/8 + sizeof(bitmap_super_t);
	unsigned long num_pages = (bytes + PAGE_SIZE-1) / PAGE_SIZE;
	while (num_pages--)
		bitmap->filemap_attr[num_pages] |= BITMAP_PAGE_NEEDWRITE;
}


static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
/*
	if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
			      (unsigned long long)offset, inc, bitmap->bp[page].count);
*/
	bitmap_checkfree(bitmap, page);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, int *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

int bitmap_daemon_work(struct bitmap *bitmap)
{
	unsigned long j;
	unsigned long flags;
	struct page *page = NULL, *lastpage = NULL;
	int err = 0;
	int blocks;
	int attr;

	if (bitmap == NULL)
		return 0;
	if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
		return 0;
	bitmap->daemon_lastrun = jiffies;

	for (j = 0; j < bitmap->chunks; j++) {
		bitmap_counter_t *bmc;
		spin_lock_irqsave(&bitmap->lock, flags);
		if (!bitmap->filemap) {
			/* error or shutdown */
			spin_unlock_irqrestore(&bitmap->lock, flags);
			break;
		}

		page = filemap_get_page(bitmap, j);

		if (page != lastpage) {
			/* skip this page unless it's marked as needing cleaning */
			if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
				if (attr & BITMAP_PAGE_NEEDWRITE) {
					page_cache_get(page);
					clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
				}
				spin_unlock_irqrestore(&bitmap->lock, flags);
				if (attr & BITMAP_PAGE_NEEDWRITE) {
					switch (write_page(bitmap, page, 0)) {
					case -EAGAIN:
						set_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
						break;
					case 0:
						break;
					default:
						bitmap_file_kick(bitmap);
					}
					page_cache_release(page);
				}
				continue;
			}

			/* grab the new page, sync and release the old */
			page_cache_get(page);
			if (lastpage != NULL) {
				if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
					clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
					spin_unlock_irqrestore(&bitmap->lock, flags);
					err = write_page(bitmap, lastpage, 0);
					if (err == -EAGAIN) {
						err = 0;
						set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
					}
				} else {
					set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
					spin_unlock_irqrestore(&bitmap->lock, flags);
				}
				kunmap(lastpage);
				page_cache_release(lastpage);
				if (err)
					bitmap_file_kick(bitmap);
			} else
				spin_unlock_irqrestore(&bitmap->lock, flags);
			lastpage = page;
			kmap(page);
/*
			printk("bitmap clean at page %lu\n", j);
*/
			spin_lock_irqsave(&bitmap->lock, flags);
			clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
		}
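
		/*
		 * Per-chunk counter decay, as the code below reads: a count
		 * of 2 means the chunk saw writes recently, so this pass only
		 * drops it to 1 and re-marks the page CLEAN; a count of 1
		 * means a whole daemon period passed with no new writes, so
		 * it goes to 0 and the bit in the file is cleared.  The
		 * cleared bit still has to reach disk, which is what the
		 * BITMAP_PAGE_NEEDWRITE handling above arranges.
		 */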
		bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
					&blocks, 0);
		if (bmc) {
/*
  if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
*/
			if (*bmc == 2) {
				*bmc=1; /* maybe clear the bit next time */
				set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
			} else if (*bmc == 1) {
				/* we can clear the bit */
				*bmc = 0;
				bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
						  -1);

				/* clear the bit */
				if (bitmap->flags & BITMAP_HOSTENDIAN)
					clear_bit(file_page_offset(j), page_address(page));
				else
					ext2_clear_bit(file_page_offset(j), page_address(page));
			}
		}
		spin_unlock_irqrestore(&bitmap->lock, flags);
	}

	/* now sync the final page */
	if (lastpage != NULL) {
		kunmap(lastpage);
		spin_lock_irqsave(&bitmap->lock, flags);
		if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			spin_unlock_irqrestore(&bitmap->lock, flags);
			err = write_page(bitmap, lastpage, 0);
			if (err == -EAGAIN) {
				set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
				err = 0;
			}
		} else {
			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		page_cache_release(lastpage);
	}

	return err;
}

static void daemon_exit(struct bitmap *bitmap, mdk_thread_t **daemon)
{
	mdk_thread_t *dmn;
	unsigned long flags;

	/* if no one is waiting on us, we'll free the md thread struct
	 * and exit, otherwise we let the waiter clean things up */
	spin_lock_irqsave(&bitmap->lock, flags);
	if ((dmn = *daemon)) { /* no one is waiting, cleanup and exit */
		*daemon = NULL;
		spin_unlock_irqrestore(&bitmap->lock, flags);
		kfree(dmn);
		complete_and_exit(NULL, 0); /* do_exit not exported */
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
}

static void bitmap_writeback_daemon(mddev_t *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	struct page *page;
	struct page_list *item;
	int err = 0;

	if (signal_pending(current)) {
		printk(KERN_INFO
		       "%s: bitmap writeback daemon got signal, exiting...\n",
		       bmname(bitmap));
		err = -EINTR;
		goto out;
	}
	if (bitmap == NULL)
		/* about to be stopped. */
		return;

	PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
	/* wait on bitmap page writebacks */
	while ((item = dequeue_page(bitmap))) {
		page = item->page;
		mempool_free(item, bitmap->write_pool);
		PRINTK("wait on page writeback: %p\n", page);
		wait_on_page_writeback(page);
		PRINTK("finished page writeback: %p\n", page);

		err = PageError(page);
		page_cache_release(page);
		if (err) {
			printk(KERN_WARNING "%s: bitmap file writeback "
			       "failed (page %lu): %d\n",
			       bmname(bitmap), page->index, err);
			bitmap_file_kick(bitmap);
			goto out;
		}
	}
 out:
	wake_up(&bitmap->write_wait);
	if (err) {
		printk(KERN_INFO "%s: bitmap writeback daemon exiting (%d)\n",
		       bmname(bitmap), err);
		daemon_exit(bitmap, &bitmap->writeback_daemon);
	}
}

static mdk_thread_t *bitmap_start_daemon(struct bitmap *bitmap,
					 void (*func)(mddev_t *), char *name)
{
	mdk_thread_t *daemon;
	char namebuf[32];

#ifdef INJECT_FATAL_FAULT_2
	daemon = NULL;
#else
	sprintf(namebuf, "%%s_%s", name);
	daemon = md_register_thread(func, bitmap->mddev, namebuf);
#endif
	if (!daemon) {
		printk(KERN_ERR "%s: failed to start bitmap daemon\n",
			bmname(bitmap));
		return ERR_PTR(-ECHILD);
	}

	md_wakeup_thread(daemon); /* start it running */

	PRINTK("%s: %s daemon (pid %d) started...\n",
		bmname(bitmap), name, daemon->tsk->pid);

	return daemon;
}

static void bitmap_stop_daemon(struct bitmap *bitmap)
{
	/* the daemon can't stop itself... it'll just exit instead... */
	if (bitmap->writeback_daemon && ! IS_ERR(bitmap->writeback_daemon) &&
	    current->pid != bitmap->writeback_daemon->tsk->pid) {
		mdk_thread_t *daemon;
		unsigned long flags;

		spin_lock_irqsave(&bitmap->lock, flags);
		daemon = bitmap->writeback_daemon;
		bitmap->writeback_daemon = NULL;
		spin_unlock_irqrestore(&bitmap->lock, flags);
		if (daemon && ! IS_ERR(daemon))
			md_unregister_thread(daemon); /* destroy the thread */
	}
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, int *blocks,
					    int create)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;

	if (bitmap_checkpage(bitmap, page, create) < 0) {
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
		*blocks = csize - (offset & (csize- 1));
		return NULL;
	}
	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
					  PAGE_COUNTER_SHIFT - 1);
		*blocks = csize - (offset & (csize- 1));
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else { /* page is allocated */
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
		*blocks = csize - (offset & (csize- 1));
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
	}
}

int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap) return 0;

	if (behind) {
		atomic_inc(&bitmap->behind_writes);
		PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
		  atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
	}

	while (sectors) {
		int blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->lock);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->lock);
			return 0;
		}

		switch(*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(bitmap,offset, 1);
			blk_plug_device(bitmap->mddev->queue);
			/* fall through */
		case 1:
			*bmc = 2;
		}
		if ((*bmc & COUNTER_MAX) == COUNTER_MAX) BUG();
		(*bmc)++;

		spin_unlock_irq(&bitmap->lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else sectors = 0;
	}
	return 0;
}
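
/*
 * Counter lifecycle across bitmap_startwrite()/bitmap_endwrite(), as the
 * code reads: 0, 1 and 2 are the "no write in flight" states -- 0 means
 * the file bit is clear, 1 and 2 are the decay steps the daemon walks
 * through before clearing it -- so a chunk with writes in flight holds 2
 * plus the number of pending writes.  bitmap_startwrite() bumps an idle
 * chunk to 2 (setting the file bit if it was 0) and then increments;
 * bitmap_endwrite() decrements and, once the count is back at or below 2,
 * marks the page BITMAP_PAGE_CLEAN so bitmap_daemon_work() can eventually
 * clear the bit again.
 */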

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap) return;
	if (behind) {
		atomic_dec(&bitmap->behind_writes);
		PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
		  atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
	}

	while (sectors) {
		int blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->lock, flags);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}

		if (!success && ! (*bmc & NEEDED_MASK))
			*bmc |= NEEDED_MASK;

		(*bmc)--;
		if (*bmc <= 2) {
			set_page_attr(bitmap,
				      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
				      BITMAP_PAGE_CLEAN);
		}
		spin_unlock_irqrestore(&bitmap->lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else sectors = 0;
	}
}

int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
			int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->lock);
	return rv;
}

void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;
/*
	if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
*/
	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->lock, flags);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
/*
	if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
*/
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				set_page_attr(bitmap,
					      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
					      BITMAP_PAGE_CLEAN);
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->lock, flags);
}

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	int blocks;
	if (!bitmap) return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
/*
		if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
					 (unsigned long long)sector, blocks);
*/
		sector += blocks;
	}
}
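
/*
 * How the two flag bits in each counter drive resync, as the code above
 * reads: NEEDED_MASK marks a chunk that must be resynced --
 * bitmap_endwrite() sets it on a failed write, and bitmap_init_from_disk()
 * sets it for dirty chunks found on disk.  bitmap_start_sync() converts
 * NEEDED into RESYNC (resync in progress) unless the array is degraded,
 * and bitmap_end_sync() clears RESYNC again, either putting NEEDED back
 * when that chunk's sync was aborted or marking the page CLEAN so the
 * daemon can clear the on-disk bit.
 */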

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 1 and set resync_needed.  They should all
	 * be 0 at this point
	 */

	int secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->lock);
		return;
	}
	if (! *bmc) {
		struct page *page;
		*bmc = 1 | (needed?NEEDED_MASK:0);
		bitmap_count_page(bitmap, offset, 1);
		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
		set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
	}
	spin_unlock_irq(&bitmap->lock);

}

/*
 * flush out any pending updates
 */
void bitmap_flush(mddev_t *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	int sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
	sleep = bitmap->daemon_sleep;
	bitmap->daemon_sleep = 0;
	bitmap_daemon_work(bitmap);
	bitmap_daemon_work(bitmap);
	bitmap_daemon_work(bitmap);
	bitmap->daemon_sleep = sleep;
	bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
static void bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	/* release the bitmap file and kill the daemon */
	bitmap_file_put(bitmap);

	bp = bitmap->bp;
	pages = bitmap->pages;

	/* free all allocated memory */

	mempool_destroy(bitmap->write_pool);

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
void bitmap_destroy(mddev_t *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mddev->bitmap = NULL; /* disconnect from the md device */

	bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
int bitmap_create(mddev_t *mddev)
{
	struct bitmap *bitmap;
	unsigned long blocks = mddev->resync_max_sectors;
	unsigned long chunks;
	unsigned long pages;
	struct file *file = mddev->bitmap_file;
	int err;
	sector_t start;

	BUG_ON(sizeof(bitmap_super_t) != 256);

	if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */
		return 0;

	BUG_ON(file && mddev->bitmap_offset);

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	memset(bitmap, 0, sizeof(*bitmap));

	spin_lock_init(&bitmap->lock);
	bitmap->mddev = mddev;

	spin_lock_init(&bitmap->write_lock);
	INIT_LIST_HEAD(&bitmap->complete_pages);
	init_waitqueue_head(&bitmap->write_wait);
	bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
				write_pool_free, NULL);
	err = -ENOMEM;
	if (!bitmap->write_pool)
		goto error;

	bitmap->file = file;
	bitmap->offset = mddev->bitmap_offset;
	if (file) get_file(file);
	/* read superblock from bitmap file (this sets bitmap->chunksize) */
	err = bitmap_read_sb(bitmap);
	if (err)
		goto error;

	bitmap->chunkshift = find_first_bit(&bitmap->chunksize,
					sizeof(bitmap->chunksize));

	/* now that chunksize and chunkshift are set, we can use these macros */
	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
			CHUNK_BLOCK_RATIO(bitmap);
	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

	BUG_ON(!pages);
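
	/*
	 * Rough orders of magnitude for the two values just computed,
	 * assuming 4 KiB pages and the 16-bit counters from bitmap.h (so
	 * PAGE_COUNTER_RATIO is 2048 counters per page): a 1 TiB array
	 * (2^31 512-byte blocks) with a 64 KiB chunk (128 blocks per chunk)
	 * gives 2^24 chunks, i.e. a 2 MiB on-disk bitmap and 8192 counter
	 * pages.  The counter pages are not allocated here; bp[] starts out
	 * empty and bitmap_checkpage() fills it in on demand, which is what
	 * missing_pages tracks.
	 */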

	bitmap->chunks = chunks;
	bitmap->pages = pages;
	bitmap->missing_pages = pages;
	bitmap->counter_bits = COUNTER_BITS;

	bitmap->syncchunk = ~0UL;

#ifdef INJECT_FATAL_FAULT_1
	bitmap->bp = NULL;
#else
	bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
#endif
	err = -ENOMEM;
	if (!bitmap->bp)
		goto error;
	memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));

	bitmap->flags |= BITMAP_ACTIVE;

	/* now that we have some pages available, initialize the in-memory
	 * bitmap from the on-disk bitmap */
	start = 0;
	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a re-add of a missing device */
		start = mddev->recovery_cp;
	err = bitmap_init_from_disk(bitmap, start);

	if (err)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
		pages, bmname(bitmap));

	mddev->bitmap = bitmap;

	if (file)
		/* kick off the bitmap writeback daemon */
		bitmap->writeback_daemon =
			bitmap_start_daemon(bitmap,
					    bitmap_writeback_daemon,
					    "bitmap_wb");

	if (IS_ERR(bitmap->writeback_daemon))
		return PTR_ERR(bitmap->writeback_daemon);
	return bitmap_update_sb(bitmap);

 error:
	bitmap_free(bitmap);
	return err;
}

/* the bitmap API -- for raid personalities */
EXPORT_SYMBOL(bitmap_startwrite);
EXPORT_SYMBOL(bitmap_endwrite);
EXPORT_SYMBOL(bitmap_start_sync);
EXPORT_SYMBOL(bitmap_end_sync);
EXPORT_SYMBOL(bitmap_unplug);
EXPORT_SYMBOL(bitmap_close_sync);
EXPORT_SYMBOL(bitmap_daemon_work);