/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "multipath.h"

#define MAX_WORK_PER_DISK 128

#define NR_RESERVED_BUFS 32


static int multipath_map (struct mpconf *conf)
{
        int i, disks = conf->raid_disks;

        /*
         * Read balancing could be added later; for now we simply use
         * the first available disk.
         */

        rcu_read_lock();
        for (i = 0; i < disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
                if (rdev && test_bit(In_sync, &rdev->flags)) {
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        return i;
                }
        }
        rcu_read_unlock();

        printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
        return (-1);
}
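/*
 * [Added commentary, not in the original file] multipath_map() above is
 * the single path-selection point: it returns the index of the first
 * In_sync path, or -1 when none is operational, and it takes a reference
 * on the chosen rdev via nr_pending.  Whoever issues I/O on the returned
 * path must drop that reference with rdev_dec_pending() once the request
 * completes -- see multipath_end_request() below.  A caller therefore
 * follows this shape (sketch only; issue_bio_on_path() is a hypothetical
 * placeholder, not a function in this driver):
 *
 *      int path = multipath_map(conf);
 *      if (path < 0)
 *              return -EIO;    (no operational path left)
 *      issue_bio_on_path(conf, path);
 *      ...
 *      rdev_dec_pending(conf->multipaths[path].rdev, conf->mddev);
 */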
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
        unsigned long flags;
        struct mddev *mddev = mp_bh->mddev;
        struct mpconf *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&mp_bh->retry_list, &conf->retry_list);
        spin_unlock_irqrestore(&conf->device_lock, flags);
        md_wakeup_thread(mddev->thread);
}


/*
 * multipath_end_bh_io() is called when we have finished servicing a
 * multipathed operation and are ready to return a success/failure code
 * to the buffer cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
        struct bio *bio = mp_bh->master_bio;
        struct mpconf *conf = mp_bh->mddev->private;

        bio_endio(bio, err);
        mempool_free(mp_bh, conf->pool);
}

static void multipath_end_request(struct bio *bio, int error)
{
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct multipath_bh *mp_bh = bio->bi_private;
        struct mpconf *conf = mp_bh->mddev->private;
        struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

        if (uptodate)
                multipath_end_bh_io(mp_bh, 0);
        else if (!(bio->bi_rw & REQ_RAHEAD)) {
                /*
                 * oops, IO error:
                 */
                char b[BDEVNAME_SIZE];
                md_error (mp_bh->mddev, rdev);
                printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
                       bdevname(rdev->bdev, b),
                       (unsigned long long)bio->bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
                multipath_end_bh_io(mp_bh, error);
        rdev_dec_pending(rdev, conf->mddev);
}

static void multipath_make_request(struct mddev *mddev, struct bio *bio)
{
        struct mpconf *conf = mddev->private;
        struct multipath_bh *mp_bh;
        struct multipath_info *multipath;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }

        mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

        mp_bh->master_bio = bio;
        mp_bh->mddev = mddev;

        mp_bh->path = multipath_map(conf);
        if (mp_bh->path < 0) {
                bio_endio(bio, -EIO);
                mempool_free(mp_bh, conf->pool);
                return;
        }
        multipath = conf->multipaths + mp_bh->path;

        mp_bh->bio = *bio;
        mp_bh->bio.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
        mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
        return;
}

static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
        struct mpconf *conf = mddev->private;
        int i;

        seq_printf (seq, " [%d/%d] [", conf->raid_disks,
                    conf->raid_disks - mddev->degraded);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf (seq, "%s",
                            conf->multipaths[i].rdev &&
                            test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
        seq_printf (seq, "]");
}

static int multipath_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        struct mpconf *conf = mddev->private;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        rcu_read_lock();
        for (i = 0; i < mddev->raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);

                        ret |= bdi_congested(&q->backing_dev_info, bits);
                        /* Just like multipath_map, we just check the
                         * first available device
                         */
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}
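/*
 * [Added overview] Failure handling in this driver is a loop between
 * multipath_end_request() above and multipathd() below: a failed request
 * (other than read-ahead, which just completes with its error) marks the
 * path bad via md_error() -- unless it is the last path, which
 * multipath_error() deliberately keeps alive -- and is queued on
 * conf->retry_list.  The multipathd thread then re-maps the request onto
 * another In_sync path and resubmits it.  Only when multipath_map()
 * finds no operational path at all is the master bio completed with -EIO.
 */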
/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
        struct mpconf *conf = mddev->private;
        char b[BDEVNAME_SIZE];

        if (conf->raid_disks - mddev->degraded <= 1) {
                /*
                 * Uh oh, we can do nothing if this is our last path, but
                 * first check if this is a queued request for a device
                 * which has just failed.
                 */
                printk(KERN_ALERT
                       "multipath: only one IO path left and IO error.\n");
                /* leave it active... it's all we have */
                return;
        }
        /*
         * Mark disk as unusable
         */
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        printk(KERN_ALERT "multipath: IO failure on %s,"
               " disabling IO path.\n"
               "multipath: Operation continuing"
               " on %d IO paths.\n",
               bdevname(rdev->bdev, b),
               conf->raid_disks - mddev->degraded);
}

static void print_multipath_conf (struct mpconf *conf)
{
        int i;
        struct multipath_info *tmp;

        printk("MULTIPATH conf printout:\n");
        if (!conf) {
                printk("(conf==NULL)\n");
                return;
        }
        printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
               conf->raid_disks);

        for (i = 0; i < conf->raid_disks; i++) {
                char b[BDEVNAME_SIZE];
                tmp = conf->multipaths + i;
                if (tmp->rdev)
                        printk(" disk%d, o:%d, dev:%s\n",
                               i, !test_bit(Faulty, &tmp->rdev->flags),
                               bdevname(tmp->rdev->bdev, b));
        }
}


static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
        struct mpconf *conf = mddev->private;
        struct request_queue *q;
        int err = -EEXIST;
        int path;
        struct multipath_info *p;
        int first = 0;
        int last = mddev->raid_disks - 1;

        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;

        print_multipath_conf(conf);

        for (path = first; path <= last; path++)
                if ((p = conf->multipaths + path)->rdev == NULL) {
                        q = rdev->bdev->bd_disk->queue;
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);

                        /* as we don't honour merge_bvec_fn, we must never risk
                         * violating it, so limit ->max_segments to one, lying
                         * within a single page.
                         * (Note: it is very unlikely that a device with
                         * merge_bvec_fn will be involved in multipath.)
                         */
                        if (q->merge_bvec_fn) {
                                blk_queue_max_segments(mddev->queue, 1);
                                blk_queue_segment_boundary(mddev->queue,
                                                           PAGE_CACHE_SIZE - 1);
                        }

                        spin_lock_irq(&conf->device_lock);
                        mddev->degraded--;
                        rdev->raid_disk = path;
                        set_bit(In_sync, &rdev->flags);
                        spin_unlock_irq(&conf->device_lock);
                        rcu_assign_pointer(p->rdev, rdev);
                        err = 0;
                        md_integrity_add_rdev(rdev, mddev);
                        break;
                }

        print_multipath_conf(conf);

        return err;
}

static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
        struct mpconf *conf = mddev->private;
        int err = 0;
        int number = rdev->raid_disk;
        struct multipath_info *p = conf->multipaths + number;

        print_multipath_conf(conf);

        if (rdev == p->rdev) {
                if (test_bit(In_sync, &rdev->flags) ||
                    atomic_read(&rdev->nr_pending)) {
                        printk(KERN_ERR "hot-remove-disk, slot %d is identified"
                               " but is still operational!\n", number);
                        err = -EBUSY;
                        goto abort;
                }
                p->rdev = NULL;
                synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
                        p->rdev = rdev;
                        goto abort;
                }
                err = md_integrity_register(mddev);
        }
abort:

        print_multipath_conf(conf);
        return err;
}
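/*
 * [Added note on the removal path above] Clearing p->rdev followed by
 * synchronize_rcu() guarantees that any multipath_map() caller which
 * obtained the old pointer under rcu_read_lock() has either finished or
 * has already bumped nr_pending.  The nr_pending recheck after the grace
 * period therefore closes the race with in-flight I/O: if we lost, the
 * pointer is restored and the caller gets -EBUSY to try again later.
 */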
/*
 * This is a kernel thread which:
 *
 *      1.      Retries failed read operations on working multipaths.
 *      2.      Updates the raid superblock when problems are encountered.
 *      3.      Performs writes following reads for array synchronising.
 */

static void multipathd(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct multipath_bh *mp_bh;
        struct bio *bio;
        unsigned long flags;
        struct mpconf *conf = mddev->private;
        struct list_head *head = &conf->retry_list;

        md_check_recovery(mddev);
        for (;;) {
                char b[BDEVNAME_SIZE];
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head))
                        break;
                mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
                list_del(head->prev);
                spin_unlock_irqrestore(&conf->device_lock, flags);

                bio = &mp_bh->bio;
                bio->bi_sector = mp_bh->master_bio->bi_sector;

                if ((mp_bh->path = multipath_map (conf)) < 0) {
                        printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
                               " error for block %llu\n",
                               bdevname(bio->bi_bdev, b),
                               (unsigned long long)bio->bi_sector);
                        multipath_end_bh_io(mp_bh, -EIO);
                } else {
                        printk(KERN_ERR "multipath: %s: redirecting sector %llu"
                               " to another IO path\n",
                               bdevname(bio->bi_bdev, b),
                               (unsigned long long)bio->bi_sector);
                        *bio = *(mp_bh->master_bio);
                        bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
                        bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
                        generic_make_request(bio);
                }
        }
        spin_unlock_irqrestore(&conf->device_lock, flags);
}

static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        return mddev->dev_sectors;
}
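/*
 * [Added usage sketch; the exact invocation is an assumption, not part
 * of this file] An md-multipath array over two routes to the same LUN
 * would typically be assembled from userspace with mdadm, e.g.
 *
 *      mdadm --create /dev/md0 --level=multipath --raid-devices=2 \
 *              /dev/sdc /dev/sdd
 *
 * which lands in multipath_run() below when the array is started.
 */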
static int multipath_run (struct mddev *mddev)
{
        struct mpconf *conf;
        int disk_idx;
        struct multipath_info *disk;
        struct md_rdev *rdev;
        int working_disks;

        if (md_check_no_bitmap(mddev))
                return -EINVAL;

        if (mddev->level != LEVEL_MULTIPATH) {
                printk("multipath: %s: raid level not set to multipath IO (%d)\n",
                       mdname(mddev), mddev->level);
                goto out;
        }
        /*
         * copy the already verified devices into our private MULTIPATH
         * bookkeeping area. [whatever we allocate in multipath_run(),
         * should be freed in multipath_stop()]
         */

        conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
        mddev->private = conf;
        if (!conf) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out;
        }

        conf->multipaths = kzalloc(sizeof(struct multipath_info) * mddev->raid_disks,
                                   GFP_KERNEL);
        if (!conf->multipaths) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }

        working_disks = 0;
        rdev_for_each(rdev, mddev) {
                disk_idx = rdev->raid_disk;
                if (disk_idx < 0 ||
                    disk_idx >= mddev->raid_disks)
                        continue;

                disk = conf->multipaths + disk_idx;
                disk->rdev = rdev;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);

                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, not that we ever expect a device with
                 * a merge_bvec_fn to be involved in multipath */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
                        blk_queue_max_segments(mddev->queue, 1);
                        blk_queue_segment_boundary(mddev->queue,
                                                   PAGE_CACHE_SIZE - 1);
                }

                if (!test_bit(Faulty, &rdev->flags))
                        working_disks++;
        }

        conf->raid_disks = mddev->raid_disks;
        conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);

        if (!working_disks) {
                printk(KERN_ERR "multipath: no operational IO paths for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }
        mddev->degraded = conf->raid_disks - working_disks;

        conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
                                                 sizeof(struct multipath_bh));
        if (conf->pool == NULL) {
                printk(KERN_ERR
                       "multipath: couldn't allocate memory for %s\n",
                       mdname(mddev));
                goto out_free_conf;
        }

        {
                mddev->thread = md_register_thread(multipathd, mddev,
                                                   "multipath");
                if (!mddev->thread) {
                        printk(KERN_ERR "multipath: couldn't allocate thread"
                               " for %s\n", mdname(mddev));
                        goto out_free_conf;
                }
        }

        printk(KERN_INFO
               "multipath: array %s active with %d out of %d IO paths\n",
               mdname(mddev), conf->raid_disks - mddev->degraded,
               mddev->raid_disks);
        /*
         * Ok, everything is just fine now
         */
        md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

        mddev->queue->backing_dev_info.congested_fn = multipath_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        if (md_integrity_register(mddev))
                goto out_free_conf;

        return 0;

out_free_conf:
        if (conf->pool)
                mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
out:
        return -EIO;
}


static int multipath_stop (struct mddev *mddev)
{
        struct mpconf *conf = mddev->private;

        md_unregister_thread(&mddev->thread);
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
        return 0;
}

static struct md_personality multipath_personality =
{
        .name           = "multipath",
        .level          = LEVEL_MULTIPATH,
        .owner          = THIS_MODULE,
        .make_request   = multipath_make_request,
        .run            = multipath_run,
        .stop           = multipath_stop,
        .status         = multipath_status,
        .error_handler  = multipath_error,
        .hot_add_disk   = multipath_add_disk,
        .hot_remove_disk = multipath_remove_disk,
        .size           = multipath_size,
};
static int __init multipath_init (void)
{
        return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
        unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
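/*
 * [Added note] The aliases above let modprobe pull this module in by
 * personality number or by level: MULTIPATH is personality 7 in the
 * classic md numbering (per the comment above), and LEVEL_MULTIPATH is
 * defined as -4 in <linux/raid/md_u.h>, hence the double hyphen in
 * "md-level--4".
 */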