/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
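/*
 * A pending exception represents a chunk whose copy to the COW device
 * is still in flight.  Bios that touch the chunk are queued on it and
 * only released once the copy has completed and the exception has been
 * promoted into the 'complete' table.
 */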
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions. It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
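/*
 * Completed exceptions are stored run-length encoded: a single
 * dm_snap_exception can describe a run of consecutive chunks (see
 * dm_consecutive_chunk_count()), which is why lookup_exception()
 * below matches against a range rather than a single chunk.
 */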
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
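/*
 * Worked example for the merging done below: if the table already holds
 * an exception covering old chunks 10-12 -> new chunks 20-22
 * (consecutive count 2), then adding old 13 -> new 23 just bumps the
 * count to 3, and adding old 9 -> new 19 extends the run downwards, so
 * neither case allocates a new entry.
 */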
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->store->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path;
	struct dm_exception_store *store;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad_args;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_args;
	}

	argv += args_used;
	argc -= args_used;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad_snap;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->store = store;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->store->chunk_size;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad_snap:
	dm_exception_store_destroy(store);

bad_args:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min(s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->store->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
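/*
 * Callers allocate a pending exception with alloc_pending_exception()
 * before taking snap->lock, because the mempool allocation may block.
 * If another thread installed one for the same chunk in the meantime,
 * the spare passed in here is freed and the existing one is returned.
 */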
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->store->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
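	/*
	 * If there is no pending exception for this chunk yet, the lock
	 * is dropped while one is allocated (the mempool may block), so
	 * the snapshot validity and the completed table are rechecked
	 * once the lock has been retaken.
	 */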
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				DMEMIT("%llu/%llu",
				       (unsigned long long)numerator,
				       (unsigned long long)denominator);
			}
			else
				DMEMIT("Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s", snap->origin->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */
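/*
 * Illustrative table lines for the two targets (arguments as parsed by
 * snapshot_ctr() and origin_ctr() above; device names are examples):
 *
 *   origin:   0 <origin_sectors> snapshot-origin /dev/vg/base
 *   snapshot: 0 <origin_sectors> snapshot /dev/vg/base /dev/vg/cow P 16
 *
 * where <p/n> selects a persistent (P) or non-persistent (N) exception
 * store and the final argument is the chunk size in sectors.
 */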
/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");