Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.27 (1714 lines)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_ALL,
		DM_WQ_FLUSH_DEFERRED,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};

struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;

static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	r = dm_uevent_init();
	if (r) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		dm_uevent_exit();
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0;	/* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that is just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &md->disk->dev.kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md)
{
	int r = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct bio *c;

	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))
			bio_io_error(c);
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);
}

static void __merge_pushback_list(struct mapped_device *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
}

static void dm_wq_work(struct work_struct *work)
{
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_ALL:
		__merge_pushback_list(md);
		/* pass through */
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);
		break;
	default:
		DMERR("dm_wq_work: unrecognised work type %d", req->type);
		BUG();
	}
	up_write(&md->io_lock);
}

static void
dm_wq_queue(struct mapped_device *md, int type, void *context,
	    struct dm_wq_req *req)
{
	req->type = type;
	req->md = md;
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);
}

static void dm_queue_flush(struct mapped_device *md, int type, void *context)
{
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush)
		__merge_pushback_list(md);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

flush_and_out:
	if (r && noflush)
		/*
		 * Because there may be already I/Os in the pushback list,
		 * flush them before return.
		 */
		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");