/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(void *data);

struct pending_exception {
	struct exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Slab caches and mempool for the exception structures.
 */
static kmem_cache_t *exception_cache;
static kmem_cache_t *pending_cache;
static mempool_t *pending_pool;
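/*
 * How the pieces above fit together, in outline: a write to the origin
 * raises one pending_exception per snapshot that still shares the chunk.
 * The first one seen becomes the primary_pe for the group; it collects
 * the origin bios and its ref_count covers every sibling.  Each sibling
 * drops that count as its copy completes, and whoever takes it to zero
 * releases the origin bios (see __origin_write() and
 * put_pending_exception() below).
 */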
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes.  If we make this
 * the size of the minors list then it should be nearly perfect.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

/*
 * Hash table mapping origin volumes to lists of snapshots, and
 * a lock to protect it.
 */
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static inline unsigned int origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
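/*
 * Two exception tables are kept per snapshot (both members of
 * struct dm_snapshot in dm-snap.h): s->complete holds exceptions whose
 * data has already been copied to the COW device, so a lookup there
 * yields a final remap; s->pending holds chunks whose copy is still in
 * flight.  Both are open-hashed on the old (origin) chunk number.
 */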
/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
{
	struct list_head *slot;
	struct exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh, struct exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static inline void remove_exception(struct exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct exception *lookup_exception(struct exception_table *et,
					  chunk_t chunk)
{
	struct list_head *slot;
	struct exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}

static inline struct exception *alloc_exception(void)
{
	struct exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static inline void free_exception(struct exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static inline struct pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static inline void free_pending_exception(struct pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);
	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Rounds a number down to a power of 2.
 */
static inline uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
	return n;
}
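/*
 * round_down() works by repeatedly clearing the lowest set bit
 * (n & (n - 1)) until a single bit remains, i.e. the largest power of 2
 * not exceeding n.  For example, round_down(1000) leaves 512, since
 * 1000 is 0b1111101000 and the loop strips every bit below the top one.
 */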
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static inline ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (chunk_size & (chunk_size - 1)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
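/*
 * For illustration, a table line matching the constructor below might be
 * loaded from userspace with (device names hypothetical):
 *
 *	dmsetup create snap --table \
 *	  "0 `blockdev --getsz /dev/vg/base` snapshot /dev/vg/base /dev/vg/cow P 16"
 *
 * i.e. a persistent ('P') snapshot with 16-sector (8 KiB) chunks; the
 * chunk size argument is in 512-byte sectors, as parsed by
 * set_chunk_size() above.
 */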
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad6:
	kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	kcopyd_client_destroy(s->kcopyd_client);

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	/* Deallocate memory used */
	s->store.destroy(&s->store);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(void *data)
{
	struct dm_snapshot *s = (struct dm_snapshot *) data;
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}
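/*
 * Note: flush_bios() and error_bios() below run with no snapshot lock
 * held; pending_complete() detaches the relevant bio lists while holding
 * s->lock and only resubmits (or fails) them after dropping it.
 */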
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio, bio->bi_size);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->table);
}

static void get_pending_exception(struct pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct pending_exception *pe)
{
	struct pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

static void pending_complete(struct pending_exception *pe, int success)
{
	struct exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	pending_complete(pe, success);
}
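/*
 * The life of one chunk copy, as wired up below:
 *
 *	start_copy() -> kcopyd_copy() ... copy_callback()
 *	    -> store.commit_exception() ... commit_callback()
 *	        -> pending_complete()
 *
 * For a transient store the commit step completes immediately; for the
 * persistent store it is deferred until the on-disk metadata has been
 * updated.
 */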
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct exception *e;
	struct pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception; we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	/* Recheck: someone may have raced us while the lock was dropped */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
				   struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}
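/*
 * Worked example of the remap arithmetic (values hypothetical): with a
 * 16-sector chunk (chunk_shift 4, chunk_mask 0xf), a bio at sector 37
 * sits in origin chunk 2 (37 >> 4).  If that chunk's exception has
 * new_chunk 5, the bio is redirected to sector (5 << 4) + (37 & 0xf)
 * = 85 on the COW device.
 */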
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct exception *e;
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	int r = 1;
	chunk_t chunk;
	struct pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = 0;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}
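/*
 * Sample status output (values hypothetical): for a healthy persistent
 * snapshot, STATUSTYPE_INFO yields something like "2048/81920" (the
 * numerator/denominator pair reported by store.fraction_full(), i.e.
 * COW space used vs. total), and STATUSTYPE_TABLE yields the
 * constructor arguments back, e.g. "8:3 8:4 P 16".
 */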
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = 1, first = 0;
	struct dm_snapshot *snap;
	struct exception *e;
	struct pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = 0;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = 1;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	dm_put_device(ti, dev);
}
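/*
 * Return-value convention shared by __origin_write(), do_origin() and
 * origin_map() below: 1 tells the dm core the bio has been remapped and
 * should be submitted now; 0 means it was queued on a pending exception
 * and will be resubmitted from pending_complete() once the chunk has
 * been copied out.
 */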
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_barrier(bio)))
		return -EOPNOTSUPP;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = kmem_cache_create("dm-snapshot-ex",
					    sizeof(struct exception),
					    __alignof__(struct exception),
					    0, NULL, NULL);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache =
	    kmem_cache_create("dm-snapshot-in",
			      sizeof(struct pending_exception),
			      __alignof__(struct pending_exception),
			      0, NULL, NULL);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

 bad6:
	mempool_destroy(pending_pool);
 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}
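/*
 * Teardown in dm_snapshot_exit() below runs broadly in the reverse order
 * of the allocations above; the badN labels give the same unwinding for
 * a partially completed init.
 */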
DMERR("snapshot unregister failed %d", r); 1307 1308 r = dm_unregister_target(&origin_target); 1309 if (r) 1310 DMERR("origin unregister failed %d", r); 1311 1312 exit_origin_hash(); 1313 mempool_destroy(pending_pool); 1314 kmem_cache_destroy(pending_cache); 1315 kmem_cache_destroy(exception_cache); 1316} 1317 1318/* Module hooks */ 1319module_init(dm_snapshot_init); 1320module_exit(dm_snapshot_exit); 1321 1322MODULE_DESCRIPTION(DM_NAME " snapshot target"); 1323MODULE_AUTHOR("Joe Thornber"); 1324MODULE_LICENSE("GPL");