/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <asm/atomic.h>
#include "raid6.h"

#include <linux/raid/bitmap.h>
#include <linux/async_tx.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif

static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi,
			      test_bit(BIO_UPTODATE, &bi->bi_flags)
				? 0 : -EIO);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(sh->ops.pending);
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it.
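 * The caller must hold conf->device_lock; CHECK_DEVLOCK() asserts this
 * when RAID5_PARANOIA is set on SMP builds.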
*/ 191static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 192{ 193 struct stripe_head *sh = NULL; 194 struct list_head *first; 195 196 CHECK_DEVLOCK(); 197 if (list_empty(&conf->inactive_list)) 198 goto out; 199 first = conf->inactive_list.next; 200 sh = list_entry(first, struct stripe_head, lru); 201 list_del_init(first); 202 remove_hash(sh); 203 atomic_inc(&conf->active_stripes); 204out: 205 return sh; 206} 207 208static void shrink_buffers(struct stripe_head *sh, int num) 209{ 210 struct page *p; 211 int i; 212 213 for (i=0; i<num ; i++) { 214 p = sh->dev[i].page; 215 if (!p) 216 continue; 217 sh->dev[i].page = NULL; 218 put_page(p); 219 } 220} 221 222static int grow_buffers(struct stripe_head *sh, int num) 223{ 224 int i; 225 226 for (i=0; i<num; i++) { 227 struct page *page; 228 229 if (!(page = alloc_page(GFP_KERNEL))) { 230 return 1; 231 } 232 sh->dev[i].page = page; 233 } 234 return 0; 235} 236 237static void raid5_build_block (struct stripe_head *sh, int i); 238 239static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) 240{ 241 raid5_conf_t *conf = sh->raid_conf; 242 int i; 243 244 BUG_ON(atomic_read(&sh->count) != 0); 245 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 246 BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete); 247 248 CHECK_DEVLOCK(); 249 pr_debug("init_stripe called, stripe %llu\n", 250 (unsigned long long)sh->sector); 251 252 remove_hash(sh); 253 254 sh->sector = sector; 255 sh->pd_idx = pd_idx; 256 sh->state = 0; 257 258 sh->disks = disks; 259 260 for (i = sh->disks; i--; ) { 261 struct r5dev *dev = &sh->dev[i]; 262 263 if (dev->toread || dev->read || dev->towrite || dev->written || 264 test_bit(R5_LOCKED, &dev->flags)) { 265 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 266 (unsigned long long)sh->sector, i, dev->toread, 267 dev->read, dev->towrite, dev->written, 268 test_bit(R5_LOCKED, &dev->flags)); 269 BUG(); 270 } 271 dev->flags = 0; 272 raid5_build_block(sh, i); 273 } 274 insert_hash(conf, sh); 275} 276 277static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) 278{ 279 struct stripe_head *sh; 280 struct hlist_node *hn; 281 282 CHECK_DEVLOCK(); 283 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 284 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 285 if (sh->sector == sector && sh->disks == disks) 286 return sh; 287 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 288 return NULL; 289} 290 291static void unplug_slaves(mddev_t *mddev); 292static void raid5_unplug_device(struct request_queue *q); 293 294static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 295 int pd_idx, int noblock) 296{ 297 struct stripe_head *sh; 298 299 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 300 301 spin_lock_irq(&conf->device_lock); 302 303 do { 304 wait_event_lock_irq(conf->wait_for_stripe, 305 conf->quiesce == 0, 306 conf->device_lock, /* nothing */); 307 sh = __find_stripe(conf, sector, disks); 308 if (!sh) { 309 if (!conf->inactive_blocked) 310 sh = get_free_stripe(conf); 311 if (noblock && sh == NULL) 312 break; 313 if (!sh) { 314 conf->inactive_blocked = 1; 315 wait_event_lock_irq(conf->wait_for_stripe, 316 !list_empty(&conf->inactive_list) && 317 (atomic_read(&conf->active_stripes) 318 < (conf->max_nr_stripes *3/4) 319 || !conf->inactive_blocked), 320 conf->device_lock, 321 raid5_unplug_device(conf->mddev->queue) 322 ); 323 conf->inactive_blocked = 0; 324 } else 325 
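				/* a free, unhashed stripe was taken off the
				 * inactive list: (re)initialise it for this
				 * sector before use
				 */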
init_stripe(sh, sector, pd_idx, disks); 326 } else { 327 if (atomic_read(&sh->count)) { 328 BUG_ON(!list_empty(&sh->lru)); 329 } else { 330 if (!test_bit(STRIPE_HANDLE, &sh->state)) 331 atomic_inc(&conf->active_stripes); 332 if (list_empty(&sh->lru) && 333 !test_bit(STRIPE_EXPANDING, &sh->state)) 334 BUG(); 335 list_del_init(&sh->lru); 336 } 337 } 338 } while (sh == NULL); 339 340 if (sh) 341 atomic_inc(&sh->count); 342 343 spin_unlock_irq(&conf->device_lock); 344 return sh; 345} 346 347/* test_and_ack_op() ensures that we only dequeue an operation once */ 348#define test_and_ack_op(op, pend) \ 349do { \ 350 if (test_bit(op, &sh->ops.pending) && \ 351 !test_bit(op, &sh->ops.complete)) { \ 352 if (test_and_set_bit(op, &sh->ops.ack)) \ 353 clear_bit(op, &pend); \ 354 else \ 355 ack++; \ 356 } else \ 357 clear_bit(op, &pend); \ 358} while (0) 359 360/* find new work to run, do not resubmit work that is already 361 * in flight 362 */ 363static unsigned long get_stripe_work(struct stripe_head *sh) 364{ 365 unsigned long pending; 366 int ack = 0; 367 368 pending = sh->ops.pending; 369 370 test_and_ack_op(STRIPE_OP_BIOFILL, pending); 371 test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending); 372 test_and_ack_op(STRIPE_OP_PREXOR, pending); 373 test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 374 test_and_ack_op(STRIPE_OP_POSTXOR, pending); 375 test_and_ack_op(STRIPE_OP_CHECK, pending); 376 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending)) 377 ack++; 378 379 sh->ops.count -= ack; 380 if (unlikely(sh->ops.count < 0)) { 381 printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx " 382 "ops.complete: %#lx\n", pending, sh->ops.pending, 383 sh->ops.ack, sh->ops.complete); 384 BUG(); 385 } 386 387 return pending; 388} 389 390static void 391raid5_end_read_request(struct bio *bi, int error); 392static void 393raid5_end_write_request(struct bio *bi, int error); 394 395static void ops_run_io(struct stripe_head *sh) 396{ 397 raid5_conf_t *conf = sh->raid_conf; 398 int i, disks = sh->disks; 399 400 might_sleep(); 401 402 set_bit(STRIPE_IO_STARTED, &sh->state); 403 for (i = disks; i--; ) { 404 int rw; 405 struct bio *bi; 406 mdk_rdev_t *rdev; 407 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 408 rw = WRITE; 409 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 410 rw = READ; 411 else 412 continue; 413 414 bi = &sh->dev[i].req; 415 416 bi->bi_rw = rw; 417 if (rw == WRITE) 418 bi->bi_end_io = raid5_end_write_request; 419 else 420 bi->bi_end_io = raid5_end_read_request; 421 422 rcu_read_lock(); 423 rdev = rcu_dereference(conf->disks[i].rdev); 424 if (rdev && test_bit(Faulty, &rdev->flags)) 425 rdev = NULL; 426 if (rdev) 427 atomic_inc(&rdev->nr_pending); 428 rcu_read_unlock(); 429 430 if (rdev) { 431 if (test_bit(STRIPE_SYNCING, &sh->state) || 432 test_bit(STRIPE_EXPAND_SOURCE, &sh->state) || 433 test_bit(STRIPE_EXPAND_READY, &sh->state)) 434 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 435 436 bi->bi_bdev = rdev->bdev; 437 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 438 __func__, (unsigned long long)sh->sector, 439 bi->bi_rw, i); 440 atomic_inc(&sh->count); 441 bi->bi_sector = sh->sector + rdev->data_offset; 442 bi->bi_flags = 1 << BIO_UPTODATE; 443 bi->bi_vcnt = 1; 444 bi->bi_max_vecs = 1; 445 bi->bi_idx = 0; 446 bi->bi_io_vec = &sh->dev[i].vec; 447 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 448 bi->bi_io_vec[0].bv_offset = 0; 449 bi->bi_size = STRIPE_SIZE; 450 bi->bi_next = NULL; 451 if (rw == WRITE && 452 test_bit(R5_ReWrite, &sh->dev[i].flags)) 453 atomic_add(STRIPE_SECTORS, 454 
&rdev->corrected_errors); 455 generic_make_request(bi); 456 } else { 457 if (rw == WRITE) 458 set_bit(STRIPE_DEGRADED, &sh->state); 459 pr_debug("skip op %ld on disc %d for sector %llu\n", 460 bi->bi_rw, i, (unsigned long long)sh->sector); 461 clear_bit(R5_LOCKED, &sh->dev[i].flags); 462 set_bit(STRIPE_HANDLE, &sh->state); 463 } 464 } 465} 466 467static struct dma_async_tx_descriptor * 468async_copy_data(int frombio, struct bio *bio, struct page *page, 469 sector_t sector, struct dma_async_tx_descriptor *tx) 470{ 471 struct bio_vec *bvl; 472 struct page *bio_page; 473 int i; 474 int page_offset; 475 476 if (bio->bi_sector >= sector) 477 page_offset = (signed)(bio->bi_sector - sector) * 512; 478 else 479 page_offset = (signed)(sector - bio->bi_sector) * -512; 480 bio_for_each_segment(bvl, bio, i) { 481 int len = bio_iovec_idx(bio, i)->bv_len; 482 int clen; 483 int b_offset = 0; 484 485 if (page_offset < 0) { 486 b_offset = -page_offset; 487 page_offset += b_offset; 488 len -= b_offset; 489 } 490 491 if (len > 0 && page_offset + len > STRIPE_SIZE) 492 clen = STRIPE_SIZE - page_offset; 493 else 494 clen = len; 495 496 if (clen > 0) { 497 b_offset += bio_iovec_idx(bio, i)->bv_offset; 498 bio_page = bio_iovec_idx(bio, i)->bv_page; 499 if (frombio) 500 tx = async_memcpy(page, bio_page, page_offset, 501 b_offset, clen, 502 ASYNC_TX_DEP_ACK, 503 tx, NULL, NULL); 504 else 505 tx = async_memcpy(bio_page, page, b_offset, 506 page_offset, clen, 507 ASYNC_TX_DEP_ACK, 508 tx, NULL, NULL); 509 } 510 if (clen < len) /* hit end of page */ 511 break; 512 page_offset += len; 513 } 514 515 return tx; 516} 517 518static void ops_complete_biofill(void *stripe_head_ref) 519{ 520 struct stripe_head *sh = stripe_head_ref; 521 struct bio *return_bi = NULL; 522 raid5_conf_t *conf = sh->raid_conf; 523 int i; 524 525 pr_debug("%s: stripe %llu\n", __func__, 526 (unsigned long long)sh->sector); 527 528 /* clear completed biofills */ 529 for (i = sh->disks; i--; ) { 530 struct r5dev *dev = &sh->dev[i]; 531 532 /* acknowledge completion of a biofill operation */ 533 /* and check if we need to reply to a read request, 534 * new R5_Wantfill requests are held off until 535 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) 536 */ 537 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 538 struct bio *rbi, *rbi2; 539 540 /* The access to dev->read is outside of the 541 * spin_lock_irq(&conf->device_lock), but is protected 542 * by the STRIPE_OP_BIOFILL pending bit 543 */ 544 BUG_ON(!dev->read); 545 rbi = dev->read; 546 dev->read = NULL; 547 while (rbi && rbi->bi_sector < 548 dev->sector + STRIPE_SECTORS) { 549 rbi2 = r5_next_bio(rbi, dev->sector); 550 spin_lock_irq(&conf->device_lock); 551 if (--rbi->bi_phys_segments == 0) { 552 rbi->bi_next = return_bi; 553 return_bi = rbi; 554 } 555 spin_unlock_irq(&conf->device_lock); 556 rbi = rbi2; 557 } 558 } 559 } 560 set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 561 562 return_io(return_bi); 563 564 set_bit(STRIPE_HANDLE, &sh->state); 565 release_stripe(sh); 566} 567 568static void ops_run_biofill(struct stripe_head *sh) 569{ 570 struct dma_async_tx_descriptor *tx = NULL; 571 raid5_conf_t *conf = sh->raid_conf; 572 int i; 573 574 pr_debug("%s: stripe %llu\n", __func__, 575 (unsigned long long)sh->sector); 576 577 for (i = sh->disks; i--; ) { 578 struct r5dev *dev = &sh->dev[i]; 579 if (test_bit(R5_Wantfill, &dev->flags)) { 580 struct bio *rbi; 581 spin_lock_irq(&conf->device_lock); 582 dev->read = rbi = dev->toread; 583 dev->toread = NULL; 584 spin_unlock_irq(&conf->device_lock); 585 
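			/* satisfy the queued reads that overlap this device:
			 * copy from the stripe cache page out to each bio,
			 * chaining the copies on 'tx' so the biofill
			 * completion callback only runs once they all finish
			 */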
while (rbi && rbi->bi_sector < 586 dev->sector + STRIPE_SECTORS) { 587 tx = async_copy_data(0, rbi, dev->page, 588 dev->sector, tx); 589 rbi = r5_next_bio(rbi, dev->sector); 590 } 591 } 592 } 593 594 atomic_inc(&sh->count); 595 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 596 ops_complete_biofill, sh); 597} 598 599static void ops_complete_compute5(void *stripe_head_ref) 600{ 601 struct stripe_head *sh = stripe_head_ref; 602 int target = sh->ops.target; 603 struct r5dev *tgt = &sh->dev[target]; 604 605 pr_debug("%s: stripe %llu\n", __func__, 606 (unsigned long long)sh->sector); 607 608 set_bit(R5_UPTODATE, &tgt->flags); 609 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 610 clear_bit(R5_Wantcompute, &tgt->flags); 611 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 612 set_bit(STRIPE_HANDLE, &sh->state); 613 release_stripe(sh); 614} 615 616static struct dma_async_tx_descriptor * 617ops_run_compute5(struct stripe_head *sh, unsigned long pending) 618{ 619 /* kernel stack size limits the total number of disks */ 620 int disks = sh->disks; 621 struct page *xor_srcs[disks]; 622 int target = sh->ops.target; 623 struct r5dev *tgt = &sh->dev[target]; 624 struct page *xor_dest = tgt->page; 625 int count = 0; 626 struct dma_async_tx_descriptor *tx; 627 int i; 628 629 pr_debug("%s: stripe %llu block: %d\n", 630 __func__, (unsigned long long)sh->sector, target); 631 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 632 633 for (i = disks; i--; ) 634 if (i != target) 635 xor_srcs[count++] = sh->dev[i].page; 636 637 atomic_inc(&sh->count); 638 639 if (unlikely(count == 1)) 640 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 641 0, NULL, ops_complete_compute5, sh); 642 else 643 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 644 ASYNC_TX_XOR_ZERO_DST, NULL, 645 ops_complete_compute5, sh); 646 647 /* ack now if postxor is not set to be run */ 648 if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending)) 649 async_tx_ack(tx); 650 651 return tx; 652} 653 654static void ops_complete_prexor(void *stripe_head_ref) 655{ 656 struct stripe_head *sh = stripe_head_ref; 657 658 pr_debug("%s: stripe %llu\n", __func__, 659 (unsigned long long)sh->sector); 660 661 set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 662} 663 664static struct dma_async_tx_descriptor * 665ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 666{ 667 /* kernel stack size limits the total number of disks */ 668 int disks = sh->disks; 669 struct page *xor_srcs[disks]; 670 int count = 0, pd_idx = sh->pd_idx, i; 671 672 /* existing parity data subtracted */ 673 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 674 675 pr_debug("%s: stripe %llu\n", __func__, 676 (unsigned long long)sh->sector); 677 678 for (i = disks; i--; ) { 679 struct r5dev *dev = &sh->dev[i]; 680 /* Only process blocks that are known to be uptodate */ 681 if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags)) 682 xor_srcs[count++] = dev->page; 683 } 684 685 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 686 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 687 ops_complete_prexor, sh); 688 689 return tx; 690} 691 692static struct dma_async_tx_descriptor * 693ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 694 unsigned long pending) 695{ 696 int disks = sh->disks; 697 int pd_idx = sh->pd_idx, i; 698 699 /* check if prexor is active which means only process blocks 700 * that are part of a read-modify-write (Wantprexor) 701 */ 702 int prexor = test_bit(STRIPE_OP_PREXOR, 
&pending); 703 704 pr_debug("%s: stripe %llu\n", __func__, 705 (unsigned long long)sh->sector); 706 707 for (i = disks; i--; ) { 708 struct r5dev *dev = &sh->dev[i]; 709 struct bio *chosen; 710 int towrite; 711 712 towrite = 0; 713 if (prexor) { /* rmw */ 714 if (dev->towrite && 715 test_bit(R5_Wantprexor, &dev->flags)) 716 towrite = 1; 717 } else { /* rcw */ 718 if (i != pd_idx && dev->towrite && 719 test_bit(R5_LOCKED, &dev->flags)) 720 towrite = 1; 721 } 722 723 if (towrite) { 724 struct bio *wbi; 725 726 spin_lock(&sh->lock); 727 chosen = dev->towrite; 728 dev->towrite = NULL; 729 BUG_ON(dev->written); 730 wbi = dev->written = chosen; 731 spin_unlock(&sh->lock); 732 733 while (wbi && wbi->bi_sector < 734 dev->sector + STRIPE_SECTORS) { 735 tx = async_copy_data(1, wbi, dev->page, 736 dev->sector, tx); 737 wbi = r5_next_bio(wbi, dev->sector); 738 } 739 } 740 } 741 742 return tx; 743} 744 745static void ops_complete_postxor(void *stripe_head_ref) 746{ 747 struct stripe_head *sh = stripe_head_ref; 748 749 pr_debug("%s: stripe %llu\n", __func__, 750 (unsigned long long)sh->sector); 751 752 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 753 set_bit(STRIPE_HANDLE, &sh->state); 754 release_stripe(sh); 755} 756 757static void ops_complete_write(void *stripe_head_ref) 758{ 759 struct stripe_head *sh = stripe_head_ref; 760 int disks = sh->disks, i, pd_idx = sh->pd_idx; 761 762 pr_debug("%s: stripe %llu\n", __func__, 763 (unsigned long long)sh->sector); 764 765 for (i = disks; i--; ) { 766 struct r5dev *dev = &sh->dev[i]; 767 if (dev->written || i == pd_idx) 768 set_bit(R5_UPTODATE, &dev->flags); 769 } 770 771 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 772 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 773 774 set_bit(STRIPE_HANDLE, &sh->state); 775 release_stripe(sh); 776} 777 778static void 779ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 780 unsigned long pending) 781{ 782 /* kernel stack size limits the total number of disks */ 783 int disks = sh->disks; 784 struct page *xor_srcs[disks]; 785 786 int count = 0, pd_idx = sh->pd_idx, i; 787 struct page *xor_dest; 788 int prexor = test_bit(STRIPE_OP_PREXOR, &pending); 789 unsigned long flags; 790 dma_async_tx_callback callback; 791 792 pr_debug("%s: stripe %llu\n", __func__, 793 (unsigned long long)sh->sector); 794 795 /* check if prexor is active which means only process blocks 796 * that are part of a read-modify-write (written) 797 */ 798 if (prexor) { 799 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 800 for (i = disks; i--; ) { 801 struct r5dev *dev = &sh->dev[i]; 802 if (dev->written) 803 xor_srcs[count++] = dev->page; 804 } 805 } else { 806 xor_dest = sh->dev[pd_idx].page; 807 for (i = disks; i--; ) { 808 struct r5dev *dev = &sh->dev[i]; 809 if (i != pd_idx) 810 xor_srcs[count++] = dev->page; 811 } 812 } 813 814 /* check whether this postxor is part of a write */ 815 callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ? 816 ops_complete_write : ops_complete_postxor; 817 818 /* 1/ if we prexor'd then the dest is reused as a source 819 * 2/ if we did not prexor then we are redoing the parity 820 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 821 * for the synchronous xor case 822 */ 823 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 824 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 825 826 atomic_inc(&sh->count); 827 828 if (unlikely(count == 1)) { 829 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 830 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 831 flags, tx, callback, sh); 832 } else 833 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 834 flags, tx, callback, sh); 835} 836 837static void ops_complete_check(void *stripe_head_ref) 838{ 839 struct stripe_head *sh = stripe_head_ref; 840 int pd_idx = sh->pd_idx; 841 842 pr_debug("%s: stripe %llu\n", __func__, 843 (unsigned long long)sh->sector); 844 845 if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && 846 sh->ops.zero_sum_result == 0) 847 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 848 849 set_bit(STRIPE_OP_CHECK, &sh->ops.complete); 850 set_bit(STRIPE_HANDLE, &sh->state); 851 release_stripe(sh); 852} 853 854static void ops_run_check(struct stripe_head *sh) 855{ 856 /* kernel stack size limits the total number of disks */ 857 int disks = sh->disks; 858 struct page *xor_srcs[disks]; 859 struct dma_async_tx_descriptor *tx; 860 861 int count = 0, pd_idx = sh->pd_idx, i; 862 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 863 864 pr_debug("%s: stripe %llu\n", __func__, 865 (unsigned long long)sh->sector); 866 867 for (i = disks; i--; ) { 868 struct r5dev *dev = &sh->dev[i]; 869 if (i != pd_idx) 870 xor_srcs[count++] = dev->page; 871 } 872 873 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 874 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 875 876 if (tx) 877 set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 878 else 879 clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 880 881 atomic_inc(&sh->count); 882 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 883 ops_complete_check, sh); 884} 885 886static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) 887{ 888 int overlap_clear = 0, i, disks = sh->disks; 889 struct dma_async_tx_descriptor *tx = NULL; 890 891 if (test_bit(STRIPE_OP_BIOFILL, &pending)) { 892 ops_run_biofill(sh); 893 overlap_clear++; 894 } 895 896 if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending)) 897 tx = ops_run_compute5(sh, pending); 898 899 if (test_bit(STRIPE_OP_PREXOR, &pending)) 900 tx = ops_run_prexor(sh, tx); 901 902 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 903 tx = ops_run_biodrain(sh, tx, pending); 904 overlap_clear++; 905 } 906 907 if (test_bit(STRIPE_OP_POSTXOR, &pending)) 908 ops_run_postxor(sh, tx, pending); 909 910 if (test_bit(STRIPE_OP_CHECK, &pending)) 911 ops_run_check(sh); 912 913 if (test_bit(STRIPE_OP_IO, &pending)) 914 ops_run_io(sh); 915 916 if (overlap_clear) 917 for (i = disks; i--; ) { 918 struct r5dev *dev = &sh->dev[i]; 919 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 920 wake_up(&sh->raid_conf->wait_for_overlap); 921 } 922} 923 924static int grow_one_stripe(raid5_conf_t *conf) 925{ 926 struct stripe_head *sh; 927 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 928 if (!sh) 929 return 0; 930 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 931 sh->raid_conf = conf; 932 spin_lock_init(&sh->lock); 933 934 if (grow_buffers(sh, conf->raid_disks)) { 935 shrink_buffers(sh, conf->raid_disks); 936 kmem_cache_free(conf->slab_cache, sh); 937 return 0; 938 } 939 sh->disks = conf->raid_disks; 940 /* we just created an active stripe so... 
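 * set count to 1 and let release_stripe() drop it onto the inactive list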
*/ 941 atomic_set(&sh->count, 1); 942 atomic_inc(&conf->active_stripes); 943 INIT_LIST_HEAD(&sh->lru); 944 release_stripe(sh); 945 return 1; 946} 947 948static int grow_stripes(raid5_conf_t *conf, int num) 949{ 950 struct kmem_cache *sc; 951 int devs = conf->raid_disks; 952 953 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev)); 954 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev)); 955 conf->active_name = 0; 956 sc = kmem_cache_create(conf->cache_name[conf->active_name], 957 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 958 0, 0, NULL); 959 if (!sc) 960 return 1; 961 conf->slab_cache = sc; 962 conf->pool_size = devs; 963 while (num--) 964 if (!grow_one_stripe(conf)) 965 return 1; 966 return 0; 967} 968 969#ifdef CONFIG_MD_RAID5_RESHAPE 970static int resize_stripes(raid5_conf_t *conf, int newsize) 971{ 972 /* Make all the stripes able to hold 'newsize' devices. 973 * New slots in each stripe get 'page' set to a new page. 974 * 975 * This happens in stages: 976 * 1/ create a new kmem_cache and allocate the required number of 977 * stripe_heads. 978 * 2/ gather all the old stripe_heads and tranfer the pages across 979 * to the new stripe_heads. This will have the side effect of 980 * freezing the array as once all stripe_heads have been collected, 981 * no IO will be possible. Old stripe heads are freed once their 982 * pages have been transferred over, and the old kmem_cache is 983 * freed when all stripes are done. 984 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 985 * we simple return a failre status - no need to clean anything up. 986 * 4/ allocate new pages for the new slots in the new stripe_heads. 987 * If this fails, we don't bother trying the shrink the 988 * stripe_heads down again, we just leave them as they are. 989 * As each stripe_head is processed the new one is released into 990 * active service. 991 * 992 * Once step2 is started, we cannot afford to wait for a write, 993 * so we use GFP_NOIO allocations. 994 */ 995 struct stripe_head *osh, *nsh; 996 LIST_HEAD(newstripes); 997 struct disk_info *ndisks; 998 int err = 0; 999 struct kmem_cache *sc; 1000 int i; 1001 1002 if (newsize <= conf->pool_size) 1003 return 0; /* never bother to shrink */ 1004 1005 md_allow_write(conf->mddev); 1006 1007 /* Step 1 */ 1008 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1009 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1010 0, 0, NULL); 1011 if (!sc) 1012 return -ENOMEM; 1013 1014 for (i = conf->max_nr_stripes; i; i--) { 1015 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 1016 if (!nsh) 1017 break; 1018 1019 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1020 1021 nsh->raid_conf = conf; 1022 spin_lock_init(&nsh->lock); 1023 1024 list_add(&nsh->lru, &newstripes); 1025 } 1026 if (i) { 1027 /* didn't get enough, give up */ 1028 while (!list_empty(&newstripes)) { 1029 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1030 list_del(&nsh->lru); 1031 kmem_cache_free(sc, nsh); 1032 } 1033 kmem_cache_destroy(sc); 1034 return -ENOMEM; 1035 } 1036 /* Step 2 - Must use GFP_NOIO now. 
1037 * OK, we have enough stripes, start collecting inactive 1038 * stripes and copying them over 1039 */ 1040 list_for_each_entry(nsh, &newstripes, lru) { 1041 spin_lock_irq(&conf->device_lock); 1042 wait_event_lock_irq(conf->wait_for_stripe, 1043 !list_empty(&conf->inactive_list), 1044 conf->device_lock, 1045 unplug_slaves(conf->mddev) 1046 ); 1047 osh = get_free_stripe(conf); 1048 spin_unlock_irq(&conf->device_lock); 1049 atomic_set(&nsh->count, 1); 1050 for(i=0; i<conf->pool_size; i++) 1051 nsh->dev[i].page = osh->dev[i].page; 1052 for( ; i<newsize; i++) 1053 nsh->dev[i].page = NULL; 1054 kmem_cache_free(conf->slab_cache, osh); 1055 } 1056 kmem_cache_destroy(conf->slab_cache); 1057 1058 /* Step 3. 1059 * At this point, we are holding all the stripes so the array 1060 * is completely stalled, so now is a good time to resize 1061 * conf->disks. 1062 */ 1063 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1064 if (ndisks) { 1065 for (i=0; i<conf->raid_disks; i++) 1066 ndisks[i] = conf->disks[i]; 1067 kfree(conf->disks); 1068 conf->disks = ndisks; 1069 } else 1070 err = -ENOMEM; 1071 1072 /* Step 4, return new stripes to service */ 1073 while(!list_empty(&newstripes)) { 1074 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1075 list_del_init(&nsh->lru); 1076 for (i=conf->raid_disks; i < newsize; i++) 1077 if (nsh->dev[i].page == NULL) { 1078 struct page *p = alloc_page(GFP_NOIO); 1079 nsh->dev[i].page = p; 1080 if (!p) 1081 err = -ENOMEM; 1082 } 1083 release_stripe(nsh); 1084 } 1085 /* critical section pass, GFP_NOIO no longer needed */ 1086 1087 conf->slab_cache = sc; 1088 conf->active_name = 1-conf->active_name; 1089 conf->pool_size = newsize; 1090 return err; 1091} 1092#endif 1093 1094static int drop_one_stripe(raid5_conf_t *conf) 1095{ 1096 struct stripe_head *sh; 1097 1098 spin_lock_irq(&conf->device_lock); 1099 sh = get_free_stripe(conf); 1100 spin_unlock_irq(&conf->device_lock); 1101 if (!sh) 1102 return 0; 1103 BUG_ON(atomic_read(&sh->count)); 1104 shrink_buffers(sh, conf->pool_size); 1105 kmem_cache_free(conf->slab_cache, sh); 1106 atomic_dec(&conf->active_stripes); 1107 return 1; 1108} 1109 1110static void shrink_stripes(raid5_conf_t *conf) 1111{ 1112 while (drop_one_stripe(conf)) 1113 ; 1114 1115 if (conf->slab_cache) 1116 kmem_cache_destroy(conf->slab_cache); 1117 conf->slab_cache = NULL; 1118} 1119 1120static void raid5_end_read_request(struct bio * bi, int error) 1121{ 1122 struct stripe_head *sh = bi->bi_private; 1123 raid5_conf_t *conf = sh->raid_conf; 1124 int disks = sh->disks, i; 1125 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1126 char b[BDEVNAME_SIZE]; 1127 mdk_rdev_t *rdev; 1128 1129 1130 for (i=0 ; i<disks; i++) 1131 if (bi == &sh->dev[i].req) 1132 break; 1133 1134 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1135 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1136 uptodate); 1137 if (i == disks) { 1138 BUG(); 1139 return; 1140 } 1141 1142 if (uptodate) { 1143 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1144 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1145 rdev = conf->disks[i].rdev; 1146 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 1147 mdname(conf->mddev), STRIPE_SECTORS, 1148 (unsigned long long)(sh->sector + rdev->data_offset), 1149 bdevname(rdev->bdev, b)); 1150 clear_bit(R5_ReadError, &sh->dev[i].flags); 1151 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1152 } 1153 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1154 
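			/* a successful read clears this device's accumulated
			 * read error count
			 */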
atomic_set(&conf->disks[i].rdev->read_errors, 0); 1155 } else { 1156 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1157 int retry = 0; 1158 rdev = conf->disks[i].rdev; 1159 1160 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1161 atomic_inc(&rdev->read_errors); 1162 if (conf->mddev->degraded) 1163 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", 1164 mdname(conf->mddev), 1165 (unsigned long long)(sh->sector + rdev->data_offset), 1166 bdn); 1167 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1168 /* Oh, no!!! */ 1169 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", 1170 mdname(conf->mddev), 1171 (unsigned long long)(sh->sector + rdev->data_offset), 1172 bdn); 1173 else if (atomic_read(&rdev->read_errors) 1174 > conf->max_nr_stripes) 1175 printk(KERN_WARNING 1176 "raid5:%s: Too many read errors, failing device %s.\n", 1177 mdname(conf->mddev), bdn); 1178 else 1179 retry = 1; 1180 if (retry) 1181 set_bit(R5_ReadError, &sh->dev[i].flags); 1182 else { 1183 clear_bit(R5_ReadError, &sh->dev[i].flags); 1184 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1185 md_error(conf->mddev, rdev); 1186 } 1187 } 1188 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1189 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1190 set_bit(STRIPE_HANDLE, &sh->state); 1191 release_stripe(sh); 1192} 1193 1194static void raid5_end_write_request (struct bio *bi, int error) 1195{ 1196 struct stripe_head *sh = bi->bi_private; 1197 raid5_conf_t *conf = sh->raid_conf; 1198 int disks = sh->disks, i; 1199 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1200 1201 for (i=0 ; i<disks; i++) 1202 if (bi == &sh->dev[i].req) 1203 break; 1204 1205 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1206 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1207 uptodate); 1208 if (i == disks) { 1209 BUG(); 1210 return; 1211 } 1212 1213 if (!uptodate) 1214 md_error(conf->mddev, conf->disks[i].rdev); 1215 1216 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1217 1218 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1219 set_bit(STRIPE_HANDLE, &sh->state); 1220 release_stripe(sh); 1221} 1222 1223 1224static sector_t compute_blocknr(struct stripe_head *sh, int i); 1225 1226static void raid5_build_block (struct stripe_head *sh, int i) 1227{ 1228 struct r5dev *dev = &sh->dev[i]; 1229 1230 bio_init(&dev->req); 1231 dev->req.bi_io_vec = &dev->vec; 1232 dev->req.bi_vcnt++; 1233 dev->req.bi_max_vecs++; 1234 dev->vec.bv_page = dev->page; 1235 dev->vec.bv_len = STRIPE_SIZE; 1236 dev->vec.bv_offset = 0; 1237 1238 dev->req.bi_sector = sh->sector; 1239 dev->req.bi_private = sh; 1240 1241 dev->flags = 0; 1242 dev->sector = compute_blocknr(sh, i); 1243} 1244 1245static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1246{ 1247 char b[BDEVNAME_SIZE]; 1248 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1249 pr_debug("raid5: error called\n"); 1250 1251 if (!test_bit(Faulty, &rdev->flags)) { 1252 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1253 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1254 unsigned long flags; 1255 spin_lock_irqsave(&conf->device_lock, flags); 1256 mddev->degraded++; 1257 spin_unlock_irqrestore(&conf->device_lock, flags); 1258 /* 1259 * if recovery was running, make sure it aborts. 
1260 */ 1261 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1262 } 1263 set_bit(Faulty, &rdev->flags); 1264 printk (KERN_ALERT 1265 "raid5: Disk failure on %s, disabling device.\n" 1266 "raid5: Operation continuing on %d devices.\n", 1267 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1268 } 1269} 1270 1271/* 1272 * Input: a 'big' sector number, 1273 * Output: index of the data and parity disk, and the sector # in them. 1274 */ 1275static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 1276 unsigned int data_disks, unsigned int * dd_idx, 1277 unsigned int * pd_idx, raid5_conf_t *conf) 1278{ 1279 long stripe; 1280 unsigned long chunk_number; 1281 unsigned int chunk_offset; 1282 sector_t new_sector; 1283 int sectors_per_chunk = conf->chunk_size >> 9; 1284 1285 /* First compute the information on this sector */ 1286 1287 /* 1288 * Compute the chunk number and the sector offset inside the chunk 1289 */ 1290 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1291 chunk_number = r_sector; 1292 BUG_ON(r_sector != chunk_number); 1293 1294 /* 1295 * Compute the stripe number 1296 */ 1297 stripe = chunk_number / data_disks; 1298 1299 /* 1300 * Compute the data disk and parity disk indexes inside the stripe 1301 */ 1302 *dd_idx = chunk_number % data_disks; 1303 1304 /* 1305 * Select the parity disk based on the user selected algorithm. 1306 */ 1307 switch(conf->level) { 1308 case 4: 1309 *pd_idx = data_disks; 1310 break; 1311 case 5: 1312 switch (conf->algorithm) { 1313 case ALGORITHM_LEFT_ASYMMETRIC: 1314 *pd_idx = data_disks - stripe % raid_disks; 1315 if (*dd_idx >= *pd_idx) 1316 (*dd_idx)++; 1317 break; 1318 case ALGORITHM_RIGHT_ASYMMETRIC: 1319 *pd_idx = stripe % raid_disks; 1320 if (*dd_idx >= *pd_idx) 1321 (*dd_idx)++; 1322 break; 1323 case ALGORITHM_LEFT_SYMMETRIC: 1324 *pd_idx = data_disks - stripe % raid_disks; 1325 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1326 break; 1327 case ALGORITHM_RIGHT_SYMMETRIC: 1328 *pd_idx = stripe % raid_disks; 1329 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1330 break; 1331 default: 1332 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1333 conf->algorithm); 1334 } 1335 break; 1336 case 6: 1337 1338 /**** FIX THIS ****/ 1339 switch (conf->algorithm) { 1340 case ALGORITHM_LEFT_ASYMMETRIC: 1341 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1342 if (*pd_idx == raid_disks-1) 1343 (*dd_idx)++; /* Q D D D P */ 1344 else if (*dd_idx >= *pd_idx) 1345 (*dd_idx) += 2; /* D D P Q D */ 1346 break; 1347 case ALGORITHM_RIGHT_ASYMMETRIC: 1348 *pd_idx = stripe % raid_disks; 1349 if (*pd_idx == raid_disks-1) 1350 (*dd_idx)++; /* Q D D D P */ 1351 else if (*dd_idx >= *pd_idx) 1352 (*dd_idx) += 2; /* D D P Q D */ 1353 break; 1354 case ALGORITHM_LEFT_SYMMETRIC: 1355 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1356 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1357 break; 1358 case ALGORITHM_RIGHT_SYMMETRIC: 1359 *pd_idx = stripe % raid_disks; 1360 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1361 break; 1362 default: 1363 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1364 conf->algorithm); 1365 } 1366 break; 1367 } 1368 1369 /* 1370 * Finally, compute the new sector number 1371 */ 1372 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1373 return new_sector; 1374} 1375 1376 1377static sector_t compute_blocknr(struct stripe_head *sh, int i) 1378{ 1379 raid5_conf_t *conf = sh->raid_conf; 1380 int raid_disks = sh->disks; 1381 int data_disks = raid_disks - conf->max_degraded; 1382 
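	/* Worked example with hypothetical numbers (not taken from any real
	 * array): 5 devices, 64KiB chunks (sectors_per_chunk == 128),
	 * ALGORITHM_LEFT_SYMMETRIC.  raid5_compute_sector() maps logical
	 * sector 1000 as follows:
	 *   chunk_offset = 1000 % 128 = 104, chunk_number = 7,
	 *   stripe = 7 / 4 = 1, dd_idx = 7 % 4 = 3,
	 *   pd_idx = 4 - (1 % 5) = 3, dd_idx = (3 + 1 + 3) % 5 = 2,
	 *   new_sector = 1 * 128 + 104 = 232.
	 * compute_blocknr() performs the inverse: (sector 232, i == 2,
	 * pd_idx == 3) maps back to r_sector 1000, and the result is
	 * cross-checked against raid5_compute_sector() at the end.
	 */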
sector_t new_sector = sh->sector, check; 1383 int sectors_per_chunk = conf->chunk_size >> 9; 1384 sector_t stripe; 1385 int chunk_offset; 1386 int chunk_number, dummy1, dummy2, dd_idx = i; 1387 sector_t r_sector; 1388 1389 1390 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1391 stripe = new_sector; 1392 BUG_ON(new_sector != stripe); 1393 1394 if (i == sh->pd_idx) 1395 return 0; 1396 switch(conf->level) { 1397 case 4: break; 1398 case 5: 1399 switch (conf->algorithm) { 1400 case ALGORITHM_LEFT_ASYMMETRIC: 1401 case ALGORITHM_RIGHT_ASYMMETRIC: 1402 if (i > sh->pd_idx) 1403 i--; 1404 break; 1405 case ALGORITHM_LEFT_SYMMETRIC: 1406 case ALGORITHM_RIGHT_SYMMETRIC: 1407 if (i < sh->pd_idx) 1408 i += raid_disks; 1409 i -= (sh->pd_idx + 1); 1410 break; 1411 default: 1412 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1413 conf->algorithm); 1414 } 1415 break; 1416 case 6: 1417 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 1418 return 0; /* It is the Q disk */ 1419 switch (conf->algorithm) { 1420 case ALGORITHM_LEFT_ASYMMETRIC: 1421 case ALGORITHM_RIGHT_ASYMMETRIC: 1422 if (sh->pd_idx == raid_disks-1) 1423 i--; /* Q D D D P */ 1424 else if (i > sh->pd_idx) 1425 i -= 2; /* D D P Q D */ 1426 break; 1427 case ALGORITHM_LEFT_SYMMETRIC: 1428 case ALGORITHM_RIGHT_SYMMETRIC: 1429 if (sh->pd_idx == raid_disks-1) 1430 i--; /* Q D D D P */ 1431 else { 1432 /* D D P Q D */ 1433 if (i < sh->pd_idx) 1434 i += raid_disks; 1435 i -= (sh->pd_idx + 2); 1436 } 1437 break; 1438 default: 1439 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1440 conf->algorithm); 1441 } 1442 break; 1443 } 1444 1445 chunk_number = stripe * data_disks + i; 1446 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1447 1448 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 1449 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 1450 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1451 return 0; 1452 } 1453 return r_sector; 1454} 1455 1456 1457 1458/* 1459 * Copy data between a page in the stripe cache, and one or more bion 1460 * The page could align with the middle of the bio, or there could be 1461 * several bion, each with several bio_vecs, which cover part of the page 1462 * Multiple bion are linked together on bi_next. There may be extras 1463 * at the end of this list. We ignore them. 
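 * (async_copy_data() above performs the same copy through the async_tx API
 * instead of kmap + memcpy)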
1464 */ 1465static void copy_data(int frombio, struct bio *bio, 1466 struct page *page, 1467 sector_t sector) 1468{ 1469 char *pa = page_address(page); 1470 struct bio_vec *bvl; 1471 int i; 1472 int page_offset; 1473 1474 if (bio->bi_sector >= sector) 1475 page_offset = (signed)(bio->bi_sector - sector) * 512; 1476 else 1477 page_offset = (signed)(sector - bio->bi_sector) * -512; 1478 bio_for_each_segment(bvl, bio, i) { 1479 int len = bio_iovec_idx(bio,i)->bv_len; 1480 int clen; 1481 int b_offset = 0; 1482 1483 if (page_offset < 0) { 1484 b_offset = -page_offset; 1485 page_offset += b_offset; 1486 len -= b_offset; 1487 } 1488 1489 if (len > 0 && page_offset + len > STRIPE_SIZE) 1490 clen = STRIPE_SIZE - page_offset; 1491 else clen = len; 1492 1493 if (clen > 0) { 1494 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1495 if (frombio) 1496 memcpy(pa+page_offset, ba+b_offset, clen); 1497 else 1498 memcpy(ba+b_offset, pa+page_offset, clen); 1499 __bio_kunmap_atomic(ba, KM_USER0); 1500 } 1501 if (clen < len) /* hit end of page */ 1502 break; 1503 page_offset += len; 1504 } 1505} 1506 1507#define check_xor() do { \ 1508 if (count == MAX_XOR_BLOCKS) { \ 1509 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1510 count = 0; \ 1511 } \ 1512 } while(0) 1513 1514static void compute_parity6(struct stripe_head *sh, int method) 1515{ 1516 raid6_conf_t *conf = sh->raid_conf; 1517 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1518 struct bio *chosen; 1519 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1520 void *ptrs[disks]; 1521 1522 qd_idx = raid6_next_disk(pd_idx, disks); 1523 d0_idx = raid6_next_disk(qd_idx, disks); 1524 1525 pr_debug("compute_parity, stripe %llu, method %d\n", 1526 (unsigned long long)sh->sector, method); 1527 1528 switch(method) { 1529 case READ_MODIFY_WRITE: 1530 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1531 case RECONSTRUCT_WRITE: 1532 for (i= disks; i-- ;) 1533 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1534 chosen = sh->dev[i].towrite; 1535 sh->dev[i].towrite = NULL; 1536 1537 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1538 wake_up(&conf->wait_for_overlap); 1539 1540 BUG_ON(sh->dev[i].written); 1541 sh->dev[i].written = chosen; 1542 } 1543 break; 1544 case CHECK_PARITY: 1545 BUG(); /* Not implemented yet */ 1546 } 1547 1548 for (i = disks; i--;) 1549 if (sh->dev[i].written) { 1550 sector_t sector = sh->dev[i].sector; 1551 struct bio *wbi = sh->dev[i].written; 1552 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1553 copy_data(1, wbi, sh->dev[i].page, sector); 1554 wbi = r5_next_bio(wbi, sector); 1555 } 1556 1557 set_bit(R5_LOCKED, &sh->dev[i].flags); 1558 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1559 } 1560 1561// switch(method) { 1562// case RECONSTRUCT_WRITE: 1563// case CHECK_PARITY: 1564// case UPDATE_PARITY: 1565 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 1566 /* FIX: Is this ordering of drives even remotely optimal? 
*/ 1567 count = 0; 1568 i = d0_idx; 1569 do { 1570 ptrs[count++] = page_address(sh->dev[i].page); 1571 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1572 printk("block %d/%d not uptodate on parity calc\n", i,count); 1573 i = raid6_next_disk(i, disks); 1574 } while ( i != d0_idx ); 1575// break; 1576// } 1577 1578 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 1579 1580 switch(method) { 1581 case RECONSTRUCT_WRITE: 1582 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1583 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1584 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1585 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1586 break; 1587 case UPDATE_PARITY: 1588 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1589 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1590 break; 1591 } 1592} 1593 1594 1595/* Compute one missing block */ 1596static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1597{ 1598 int i, count, disks = sh->disks; 1599 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1600 int pd_idx = sh->pd_idx; 1601 int qd_idx = raid6_next_disk(pd_idx, disks); 1602 1603 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1604 (unsigned long long)sh->sector, dd_idx); 1605 1606 if ( dd_idx == qd_idx ) { 1607 /* We're actually computing the Q drive */ 1608 compute_parity6(sh, UPDATE_PARITY); 1609 } else { 1610 dest = page_address(sh->dev[dd_idx].page); 1611 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1612 count = 0; 1613 for (i = disks ; i--; ) { 1614 if (i == dd_idx || i == qd_idx) 1615 continue; 1616 p = page_address(sh->dev[i].page); 1617 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1618 ptr[count++] = p; 1619 else 1620 printk("compute_block() %d, stripe %llu, %d" 1621 " not present\n", dd_idx, 1622 (unsigned long long)sh->sector, i); 1623 1624 check_xor(); 1625 } 1626 if (count) 1627 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1628 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1629 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1630 } 1631} 1632 1633/* Compute two missing blocks */ 1634static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1635{ 1636 int i, count, disks = sh->disks; 1637 int pd_idx = sh->pd_idx; 1638 int qd_idx = raid6_next_disk(pd_idx, disks); 1639 int d0_idx = raid6_next_disk(qd_idx, disks); 1640 int faila, failb; 1641 1642 /* faila and failb are disk numbers relative to d0_idx */ 1643 /* pd_idx become disks-2 and qd_idx become disks-1 */ 1644 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 1645 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 1646 1647 BUG_ON(faila == failb); 1648 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1649 1650 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1651 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 1652 1653 if ( failb == disks-1 ) { 1654 /* Q disk is one of the missing disks */ 1655 if ( faila == disks-2 ) { 1656 /* Missing P+Q, just recompute */ 1657 compute_parity6(sh, UPDATE_PARITY); 1658 return; 1659 } else { 1660 /* We're missing D+Q; recompute D from P */ 1661 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 1662 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? 
*/ 1663 return; 1664 } 1665 } 1666 1667 /* We're missing D+P or D+D; build pointer table */ 1668 { 1669 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1670 void *ptrs[disks]; 1671 1672 count = 0; 1673 i = d0_idx; 1674 do { 1675 ptrs[count++] = page_address(sh->dev[i].page); 1676 i = raid6_next_disk(i, disks); 1677 if (i != dd_idx1 && i != dd_idx2 && 1678 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1679 printk("compute_2 with missing block %d/%d\n", count, i); 1680 } while ( i != d0_idx ); 1681 1682 if ( failb == disks-2 ) { 1683 /* We're missing D+P. */ 1684 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 1685 } else { 1686 /* We're missing D+D. */ 1687 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 1688 } 1689 1690 /* Both the above update both missing blocks */ 1691 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1692 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1693 } 1694} 1695 1696static int 1697handle_write_operations5(struct stripe_head *sh, int rcw, int expand) 1698{ 1699 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1700 int locked = 0; 1701 1702 if (rcw) { 1703 /* if we are not expanding this is a proper write request, and 1704 * there will be bios with new data to be drained into the 1705 * stripe cache 1706 */ 1707 if (!expand) { 1708 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1709 sh->ops.count++; 1710 } 1711 1712 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1713 sh->ops.count++; 1714 1715 for (i = disks; i--; ) { 1716 struct r5dev *dev = &sh->dev[i]; 1717 1718 if (dev->towrite) { 1719 set_bit(R5_LOCKED, &dev->flags); 1720 if (!expand) 1721 clear_bit(R5_UPTODATE, &dev->flags); 1722 locked++; 1723 } 1724 } 1725 if (locked + 1 == disks) 1726 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1727 atomic_inc(&sh->raid_conf->pending_full_writes); 1728 } else { 1729 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1730 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1731 1732 set_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 1733 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1734 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1735 1736 sh->ops.count += 3; 1737 1738 for (i = disks; i--; ) { 1739 struct r5dev *dev = &sh->dev[i]; 1740 if (i == pd_idx) 1741 continue; 1742 1743 /* For a read-modify write there may be blocks that are 1744 * locked for reading while others are ready to be 1745 * written so we distinguish these blocks by the 1746 * R5_Wantprexor bit 1747 */ 1748 if (dev->towrite && 1749 (test_bit(R5_UPTODATE, &dev->flags) || 1750 test_bit(R5_Wantcompute, &dev->flags))) { 1751 set_bit(R5_Wantprexor, &dev->flags); 1752 set_bit(R5_LOCKED, &dev->flags); 1753 clear_bit(R5_UPTODATE, &dev->flags); 1754 locked++; 1755 } 1756 } 1757 } 1758 1759 /* keep the parity disk locked while asynchronous operations 1760 * are in flight 1761 */ 1762 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1763 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1764 locked++; 1765 1766 pr_debug("%s: stripe %llu locked: %d pending: %lx\n", 1767 __func__, (unsigned long long)sh->sector, 1768 locked, sh->ops.pending); 1769 1770 return locked; 1771} 1772 1773/* 1774 * Each stripe/dev can have one or more bion attached. 1775 * toread/towrite point to the first in a chain. 1776 * The bi_next chain must be in order. 
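 * add_stripe_bio() below returns 1 on success.  If the new bio would overlap
 * one already queued on this stripe+device it sets R5_Overlap and returns 0,
 * and the caller is expected to wait on conf->wait_for_overlap before
 * retrying.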
1777 */ 1778static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1779{ 1780 struct bio **bip; 1781 raid5_conf_t *conf = sh->raid_conf; 1782 int firstwrite=0; 1783 1784 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1785 (unsigned long long)bi->bi_sector, 1786 (unsigned long long)sh->sector); 1787 1788 1789 spin_lock(&sh->lock); 1790 spin_lock_irq(&conf->device_lock); 1791 if (forwrite) { 1792 bip = &sh->dev[dd_idx].towrite; 1793 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1794 firstwrite = 1; 1795 } else 1796 bip = &sh->dev[dd_idx].toread; 1797 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1798 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1799 goto overlap; 1800 bip = & (*bip)->bi_next; 1801 } 1802 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1803 goto overlap; 1804 1805 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1806 if (*bip) 1807 bi->bi_next = *bip; 1808 *bip = bi; 1809 bi->bi_phys_segments ++; 1810 spin_unlock_irq(&conf->device_lock); 1811 spin_unlock(&sh->lock); 1812 1813 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1814 (unsigned long long)bi->bi_sector, 1815 (unsigned long long)sh->sector, dd_idx); 1816 1817 if (conf->mddev->bitmap && firstwrite) { 1818 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1819 STRIPE_SECTORS, 0); 1820 sh->bm_seq = conf->seq_flush+1; 1821 set_bit(STRIPE_BIT_DELAY, &sh->state); 1822 } 1823 1824 if (forwrite) { 1825 /* check if page is covered */ 1826 sector_t sector = sh->dev[dd_idx].sector; 1827 for (bi=sh->dev[dd_idx].towrite; 1828 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1829 bi && bi->bi_sector <= sector; 1830 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1831 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1832 sector = bi->bi_sector + (bi->bi_size>>9); 1833 } 1834 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1835 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1836 } 1837 return 1; 1838 1839 overlap: 1840 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1841 spin_unlock_irq(&conf->device_lock); 1842 spin_unlock(&sh->lock); 1843 return 0; 1844} 1845 1846static void end_reshape(raid5_conf_t *conf); 1847 1848static int page_is_zero(struct page *p) 1849{ 1850 char *a = page_address(p); 1851 return ((*(u32*)a) == 0 && 1852 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1853} 1854 1855static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1856{ 1857 int sectors_per_chunk = conf->chunk_size >> 9; 1858 int pd_idx, dd_idx; 1859 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1860 1861 raid5_compute_sector(stripe * (disks - conf->max_degraded) 1862 *sectors_per_chunk + chunk_offset, 1863 disks, disks - conf->max_degraded, 1864 &dd_idx, &pd_idx, conf); 1865 return pd_idx; 1866} 1867 1868static void 1869handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh, 1870 struct stripe_head_state *s, int disks, 1871 struct bio **return_bi) 1872{ 1873 int i; 1874 for (i = disks; i--; ) { 1875 struct bio *bi; 1876 int bitmap_end = 0; 1877 1878 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1879 mdk_rdev_t *rdev; 1880 rcu_read_lock(); 1881 rdev = rcu_dereference(conf->disks[i].rdev); 1882 if (rdev && test_bit(In_sync, &rdev->flags)) 1883 /* multiple read failures in one stripe */ 1884 md_error(conf->mddev, rdev); 1885 rcu_read_unlock(); 1886 } 1887 spin_lock_irq(&conf->device_lock); 1888 /* fail all writes first */ 1889 bi = sh->dev[i].towrite; 1890 sh->dev[i].towrite = NULL; 1891 if (bi) { 1892 
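			/* writes were queued on this device: drop it from the
			 * to_write count and remember to complete the bitmap
			 * write once the bios have been failed back
			 */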
s->to_write--; 1893 bitmap_end = 1; 1894 } 1895 1896 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1897 wake_up(&conf->wait_for_overlap); 1898 1899 while (bi && bi->bi_sector < 1900 sh->dev[i].sector + STRIPE_SECTORS) { 1901 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1902 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1903 if (--bi->bi_phys_segments == 0) { 1904 md_write_end(conf->mddev); 1905 bi->bi_next = *return_bi; 1906 *return_bi = bi; 1907 } 1908 bi = nextbi; 1909 } 1910 /* and fail all 'written' */ 1911 bi = sh->dev[i].written; 1912 sh->dev[i].written = NULL; 1913 if (bi) bitmap_end = 1; 1914 while (bi && bi->bi_sector < 1915 sh->dev[i].sector + STRIPE_SECTORS) { 1916 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1917 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1918 if (--bi->bi_phys_segments == 0) { 1919 md_write_end(conf->mddev); 1920 bi->bi_next = *return_bi; 1921 *return_bi = bi; 1922 } 1923 bi = bi2; 1924 } 1925 1926 /* fail any reads if this device is non-operational and 1927 * the data has not reached the cache yet. 1928 */ 1929 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 1930 (!test_bit(R5_Insync, &sh->dev[i].flags) || 1931 test_bit(R5_ReadError, &sh->dev[i].flags))) { 1932 bi = sh->dev[i].toread; 1933 sh->dev[i].toread = NULL; 1934 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1935 wake_up(&conf->wait_for_overlap); 1936 if (bi) s->to_read--; 1937 while (bi && bi->bi_sector < 1938 sh->dev[i].sector + STRIPE_SECTORS) { 1939 struct bio *nextbi = 1940 r5_next_bio(bi, sh->dev[i].sector); 1941 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1942 if (--bi->bi_phys_segments == 0) { 1943 bi->bi_next = *return_bi; 1944 *return_bi = bi; 1945 } 1946 bi = nextbi; 1947 } 1948 } 1949 spin_unlock_irq(&conf->device_lock); 1950 if (bitmap_end) 1951 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1952 STRIPE_SECTORS, 0, 0); 1953 } 1954 1955 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 1956 if (atomic_dec_and_test(&conf->pending_full_writes)) 1957 md_wakeup_thread(conf->mddev->thread); 1958} 1959 1960/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks 1961 * to process 1962 */ 1963static int __handle_issuing_new_read_requests5(struct stripe_head *sh, 1964 struct stripe_head_state *s, int disk_idx, int disks) 1965{ 1966 struct r5dev *dev = &sh->dev[disk_idx]; 1967 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 1968 1969 /* don't schedule compute operations or reads on the parity block while 1970 * a check is in flight 1971 */ 1972 if ((disk_idx == sh->pd_idx) && 1973 test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 1974 return ~0; 1975 1976 /* is the data in this block needed, and can we get it? */ 1977 if (!test_bit(R5_LOCKED, &dev->flags) && 1978 !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || 1979 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1980 s->syncing || s->expanding || (s->failed && 1981 (failed_dev->toread || (failed_dev->towrite && 1982 !test_bit(R5_OVERWRITE, &failed_dev->flags) 1983 ))))) { 1984 /* 1/ We would like to get this block, possibly by computing it, 1985 * but we might not be able to. 1986 * 1987 * 2/ Since parity check operations potentially make the parity 1988 * block !uptodate it will need to be refreshed before any 1989 * compute operations on data disks are scheduled. 1990 * 1991 * 3/ We hold off parity block re-reads until check operations 1992 * have quiesced. 
1993 */ 1994 if ((s->uptodate == disks - 1) && 1995 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 1996 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1997 set_bit(R5_Wantcompute, &dev->flags); 1998 sh->ops.target = disk_idx; 1999 s->req_compute = 1; 2000 sh->ops.count++; 2001 /* Careful: from this point on 'uptodate' is in the eye 2002 * of raid5_run_ops which services 'compute' operations 2003 * before writes. R5_Wantcompute flags a block that will 2004 * be R5_UPTODATE by the time it is needed for a 2005 * subsequent operation. 2006 */ 2007 s->uptodate++; 2008 return 0; /* uptodate + compute == disks */ 2009 } else if ((s->uptodate < disks - 1) && 2010 test_bit(R5_Insync, &dev->flags)) { 2011 /* Note: we hold off compute operations while checks are 2012 * in flight, but we still prefer 'compute' over 'read' 2013 * hence we only read if (uptodate < * disks-1) 2014 */ 2015 set_bit(R5_LOCKED, &dev->flags); 2016 set_bit(R5_Wantread, &dev->flags); 2017 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2018 sh->ops.count++; 2019 s->locked++; 2020 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2021 s->syncing); 2022 } 2023 } 2024 2025 return ~0; 2026} 2027 2028static void handle_issuing_new_read_requests5(struct stripe_head *sh, 2029 struct stripe_head_state *s, int disks) 2030{ 2031 int i; 2032 2033 /* Clear completed compute operations. Parity recovery 2034 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled 2035 * later on in this routine 2036 */ 2037 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2038 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2039 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2040 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2041 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2042 } 2043 2044 /* look for blocks to read/compute, skip this if a compute 2045 * is already in flight, or if the stripe contents are in the 2046 * midst of changing due to a write 2047 */ 2048 if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2049 !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) && 2050 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2051 for (i = disks; i--; ) 2052 if (__handle_issuing_new_read_requests5( 2053 sh, s, i, disks) == 0) 2054 break; 2055 } 2056 set_bit(STRIPE_HANDLE, &sh->state); 2057} 2058 2059static void handle_issuing_new_read_requests6(struct stripe_head *sh, 2060 struct stripe_head_state *s, struct r6_state *r6s, 2061 int disks) 2062{ 2063 int i; 2064 for (i = disks; i--; ) { 2065 struct r5dev *dev = &sh->dev[i]; 2066 if (!test_bit(R5_LOCKED, &dev->flags) && 2067 !test_bit(R5_UPTODATE, &dev->flags) && 2068 (dev->toread || (dev->towrite && 2069 !test_bit(R5_OVERWRITE, &dev->flags)) || 2070 s->syncing || s->expanding || 2071 (s->failed >= 1 && 2072 (sh->dev[r6s->failed_num[0]].toread || 2073 s->to_write)) || 2074 (s->failed >= 2 && 2075 (sh->dev[r6s->failed_num[1]].toread || 2076 s->to_write)))) { 2077 /* we would like to get this block, possibly 2078 * by computing it, but we might not be able to 2079 */ 2080 if (s->uptodate == disks-1) { 2081 pr_debug("Computing stripe %llu block %d\n", 2082 (unsigned long long)sh->sector, i); 2083 compute_block_1(sh, i, 0); 2084 s->uptodate++; 2085 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2086 /* Computing 2-failure is *very* expensive; only 2087 * do it if failed >= 2 2088 */ 2089 int other; 2090 for (other = disks; other--; ) { 2091 if (other == i) 2092 continue; 2093 if (!test_bit(R5_UPTODATE, 2094 &sh->dev[other].flags)) 2095 break; 2096 
} 2097 BUG_ON(other < 0); 2098 pr_debug("Computing stripe %llu blocks %d,%d\n", 2099 (unsigned long long)sh->sector, 2100 i, other); 2101 compute_block_2(sh, i, other); 2102 s->uptodate += 2; 2103 } else if (test_bit(R5_Insync, &dev->flags)) { 2104 set_bit(R5_LOCKED, &dev->flags); 2105 set_bit(R5_Wantread, &dev->flags); 2106 s->locked++; 2107 pr_debug("Reading block %d (sync=%d)\n", 2108 i, s->syncing); 2109 } 2110 } 2111 } 2112 set_bit(STRIPE_HANDLE, &sh->state); 2113} 2114 2115 2116/* handle_completed_write_requests 2117 * any written block on an uptodate or failed drive can be returned. 2118 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2119 * never LOCKED, so we don't need to test 'failed' directly. 2120 */ 2121static void handle_completed_write_requests(raid5_conf_t *conf, 2122 struct stripe_head *sh, int disks, struct bio **return_bi) 2123{ 2124 int i; 2125 struct r5dev *dev; 2126 2127 for (i = disks; i--; ) 2128 if (sh->dev[i].written) { 2129 dev = &sh->dev[i]; 2130 if (!test_bit(R5_LOCKED, &dev->flags) && 2131 test_bit(R5_UPTODATE, &dev->flags)) { 2132 /* We can return any write requests */ 2133 struct bio *wbi, *wbi2; 2134 int bitmap_end = 0; 2135 pr_debug("Return write for disc %d\n", i); 2136 spin_lock_irq(&conf->device_lock); 2137 wbi = dev->written; 2138 dev->written = NULL; 2139 while (wbi && wbi->bi_sector < 2140 dev->sector + STRIPE_SECTORS) { 2141 wbi2 = r5_next_bio(wbi, dev->sector); 2142 if (--wbi->bi_phys_segments == 0) { 2143 md_write_end(conf->mddev); 2144 wbi->bi_next = *return_bi; 2145 *return_bi = wbi; 2146 } 2147 wbi = wbi2; 2148 } 2149 if (dev->towrite == NULL) 2150 bitmap_end = 1; 2151 spin_unlock_irq(&conf->device_lock); 2152 if (bitmap_end) 2153 bitmap_endwrite(conf->mddev->bitmap, 2154 sh->sector, 2155 STRIPE_SECTORS, 2156 !test_bit(STRIPE_DEGRADED, &sh->state), 2157 0); 2158 } 2159 } 2160 2161 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2162 if (atomic_dec_and_test(&conf->pending_full_writes)) 2163 md_wakeup_thread(conf->mddev->thread); 2164} 2165 2166static void handle_issuing_new_write_requests5(raid5_conf_t *conf, 2167 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2168{ 2169 int rmw = 0, rcw = 0, i; 2170 for (i = disks; i--; ) { 2171 /* would I have to read this buffer for read_modify_write */ 2172 struct r5dev *dev = &sh->dev[i]; 2173 if ((dev->towrite || i == sh->pd_idx) && 2174 !test_bit(R5_LOCKED, &dev->flags) && 2175 !(test_bit(R5_UPTODATE, &dev->flags) || 2176 test_bit(R5_Wantcompute, &dev->flags))) { 2177 if (test_bit(R5_Insync, &dev->flags)) 2178 rmw++; 2179 else 2180 rmw += 2*disks; /* cannot read it */ 2181 } 2182 /* Would I have to read this buffer for reconstruct_write */ 2183 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2184 !test_bit(R5_LOCKED, &dev->flags) && 2185 !(test_bit(R5_UPTODATE, &dev->flags) || 2186 test_bit(R5_Wantcompute, &dev->flags))) { 2187 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2188 else 2189 rcw += 2*disks; 2190 } 2191 } 2192 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2193 (unsigned long long)sh->sector, rmw, rcw); 2194 set_bit(STRIPE_HANDLE, &sh->state); 2195 if (rmw < rcw && rmw > 0) 2196 /* prefer read-modify-write, but need to get some data */ 2197 for (i = disks; i--; ) { 2198 struct r5dev *dev = &sh->dev[i]; 2199 if ((dev->towrite || i == sh->pd_idx) && 2200 !test_bit(R5_LOCKED, &dev->flags) && 2201 !(test_bit(R5_UPTODATE, &dev->flags) || 2202 test_bit(R5_Wantcompute, &dev->flags)) && 2203 test_bit(R5_Insync, &dev->flags)) { 2204 if ( 2205 
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2206 pr_debug("Read_old block " 2207 "%d for r-m-w\n", i); 2208 set_bit(R5_LOCKED, &dev->flags); 2209 set_bit(R5_Wantread, &dev->flags); 2210 if (!test_and_set_bit( 2211 STRIPE_OP_IO, &sh->ops.pending)) 2212 sh->ops.count++; 2213 s->locked++; 2214 } else { 2215 set_bit(STRIPE_DELAYED, &sh->state); 2216 set_bit(STRIPE_HANDLE, &sh->state); 2217 } 2218 } 2219 } 2220 if (rcw <= rmw && rcw > 0) 2221 /* want reconstruct write, but need to get some data */ 2222 for (i = disks; i--; ) { 2223 struct r5dev *dev = &sh->dev[i]; 2224 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2225 i != sh->pd_idx && 2226 !test_bit(R5_LOCKED, &dev->flags) && 2227 !(test_bit(R5_UPTODATE, &dev->flags) || 2228 test_bit(R5_Wantcompute, &dev->flags)) && 2229 test_bit(R5_Insync, &dev->flags)) { 2230 if ( 2231 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2232 pr_debug("Read_old block " 2233 "%d for Reconstruct\n", i); 2234 set_bit(R5_LOCKED, &dev->flags); 2235 set_bit(R5_Wantread, &dev->flags); 2236 if (!test_and_set_bit( 2237 STRIPE_OP_IO, &sh->ops.pending)) 2238 sh->ops.count++; 2239 s->locked++; 2240 } else { 2241 set_bit(STRIPE_DELAYED, &sh->state); 2242 set_bit(STRIPE_HANDLE, &sh->state); 2243 } 2244 } 2245 } 2246 /* now if nothing is locked, and if we have enough data, 2247 * we can start a write request 2248 */ 2249 /* since handle_stripe can be called at any time we need to handle the 2250 * case where a compute block operation has been submitted and then a 2251 * subsequent call wants to start a write request. raid5_run_ops only 2252 * handles the case where compute block and postxor are requested 2253 * simultaneously. If this is not the case then new writes need to be 2254 * held off until the compute completes. 2255 */ 2256 if ((s->req_compute || 2257 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) && 2258 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2259 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2260 s->locked += handle_write_operations5(sh, rcw == 0, 0); 2261} 2262 2263static void handle_issuing_new_write_requests6(raid5_conf_t *conf, 2264 struct stripe_head *sh, struct stripe_head_state *s, 2265 struct r6_state *r6s, int disks) 2266{ 2267 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2268 int qd_idx = r6s->qd_idx; 2269 for (i = disks; i--; ) { 2270 struct r5dev *dev = &sh->dev[i]; 2271 /* Would I have to read this buffer for reconstruct_write */ 2272 if (!test_bit(R5_OVERWRITE, &dev->flags) 2273 && i != pd_idx && i != qd_idx 2274 && (!test_bit(R5_LOCKED, &dev->flags) 2275 ) && 2276 !test_bit(R5_UPTODATE, &dev->flags)) { 2277 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2278 else { 2279 pr_debug("raid6: must_compute: " 2280 "disk %d flags=%#lx\n", i, dev->flags); 2281 must_compute++; 2282 } 2283 } 2284 } 2285 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2286 (unsigned long long)sh->sector, rcw, must_compute); 2287 set_bit(STRIPE_HANDLE, &sh->state); 2288 2289 if (rcw > 0) 2290 /* want reconstruct write, but need to get some data */ 2291 for (i = disks; i--; ) { 2292 struct r5dev *dev = &sh->dev[i]; 2293 if (!test_bit(R5_OVERWRITE, &dev->flags) 2294 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2295 && !test_bit(R5_LOCKED, &dev->flags) && 2296 !test_bit(R5_UPTODATE, &dev->flags) && 2297 test_bit(R5_Insync, &dev->flags)) { 2298 if ( 2299 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2300 pr_debug("Read_old stripe %llu " 2301 "block %d for Reconstruct\n", 2302 (unsigned long long)sh->sector, i); 2303 set_bit(R5_LOCKED, &dev->flags); 
2304 set_bit(R5_Wantread, &dev->flags); 2305 s->locked++; 2306 } else { 2307 pr_debug("Request delayed stripe %llu " 2308 "block %d for Reconstruct\n", 2309 (unsigned long long)sh->sector, i); 2310 set_bit(STRIPE_DELAYED, &sh->state); 2311 set_bit(STRIPE_HANDLE, &sh->state); 2312 } 2313 } 2314 } 2315 /* now if nothing is locked, and if we have enough data, we can start a 2316 * write request 2317 */ 2318 if (s->locked == 0 && rcw == 0 && 2319 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2320 if (must_compute > 0) { 2321 /* We have failed blocks and need to compute them */ 2322 switch (s->failed) { 2323 case 0: 2324 BUG(); 2325 case 1: 2326 compute_block_1(sh, r6s->failed_num[0], 0); 2327 break; 2328 case 2: 2329 compute_block_2(sh, r6s->failed_num[0], 2330 r6s->failed_num[1]); 2331 break; 2332 default: /* This request should have been failed? */ 2333 BUG(); 2334 } 2335 } 2336 2337 pr_debug("Computing parity for stripe %llu\n", 2338 (unsigned long long)sh->sector); 2339 compute_parity6(sh, RECONSTRUCT_WRITE); 2340 /* now every locked buffer is ready to be written */ 2341 for (i = disks; i--; ) 2342 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2343 pr_debug("Writing stripe %llu block %d\n", 2344 (unsigned long long)sh->sector, i); 2345 s->locked++; 2346 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2347 } 2348 if (s->locked == disks) 2349 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2350 atomic_inc(&conf->pending_full_writes); 2351 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2352 set_bit(STRIPE_INSYNC, &sh->state); 2353 2354 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2355 atomic_dec(&conf->preread_active_stripes); 2356 if (atomic_read(&conf->preread_active_stripes) < 2357 IO_THRESHOLD) 2358 md_wakeup_thread(conf->mddev->thread); 2359 } 2360 } 2361} 2362 2363static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2364 struct stripe_head_state *s, int disks) 2365{ 2366 int canceled_check = 0; 2367 2368 set_bit(STRIPE_HANDLE, &sh->state); 2369 2370 /* complete a check operation */ 2371 if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) { 2372 clear_bit(STRIPE_OP_CHECK, &sh->ops.ack); 2373 clear_bit(STRIPE_OP_CHECK, &sh->ops.pending); 2374 if (s->failed == 0) { 2375 if (sh->ops.zero_sum_result == 0) 2376 /* parity is correct (on disc, 2377 * not in buffer any more) 2378 */ 2379 set_bit(STRIPE_INSYNC, &sh->state); 2380 else { 2381 conf->mddev->resync_mismatches += 2382 STRIPE_SECTORS; 2383 if (test_bit( 2384 MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2385 /* don't try to repair!! 
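				 * (MD_RECOVERY_CHECK marks a read-only "check" pass:
				 * the mismatch is only counted in resync_mismatches
				 * above, while a "repair" pass falls through to the
				 * else branch and schedules the parity rewrite)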
*/ 2386 set_bit(STRIPE_INSYNC, &sh->state); 2387 else { 2388 set_bit(STRIPE_OP_COMPUTE_BLK, 2389 &sh->ops.pending); 2390 set_bit(STRIPE_OP_MOD_REPAIR_PD, 2391 &sh->ops.pending); 2392 set_bit(R5_Wantcompute, 2393 &sh->dev[sh->pd_idx].flags); 2394 sh->ops.target = sh->pd_idx; 2395 sh->ops.count++; 2396 s->uptodate++; 2397 } 2398 } 2399 } else 2400 canceled_check = 1; /* STRIPE_INSYNC is not set */ 2401 } 2402 2403 /* start a new check operation if there are no failures, the stripe is 2404 * not insync, and a repair is not in flight 2405 */ 2406 if (s->failed == 0 && 2407 !test_bit(STRIPE_INSYNC, &sh->state) && 2408 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2409 if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 2410 BUG_ON(s->uptodate != disks); 2411 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2412 sh->ops.count++; 2413 s->uptodate--; 2414 } 2415 } 2416 2417 /* check if we can clear a parity disk reconstruct */ 2418 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2419 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2420 2421 clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending); 2422 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2423 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2424 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2425 } 2426 2427 2428 /* Wait for check parity and compute block operations to complete 2429 * before write-back. If a failure occurred while the check operation 2430 * was in flight we need to cycle this stripe through handle_stripe 2431 * since the parity block may not be uptodate 2432 */ 2433 if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) && 2434 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) && 2435 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) { 2436 struct r5dev *dev; 2437 /* either failed parity check, or recovery is happening */ 2438 if (s->failed == 0) 2439 s->failed_num = sh->pd_idx; 2440 dev = &sh->dev[s->failed_num]; 2441 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2442 BUG_ON(s->uptodate != disks); 2443 2444 set_bit(R5_LOCKED, &dev->flags); 2445 set_bit(R5_Wantwrite, &dev->flags); 2446 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2447 sh->ops.count++; 2448 2449 clear_bit(STRIPE_DEGRADED, &sh->state); 2450 s->locked++; 2451 set_bit(STRIPE_INSYNC, &sh->state); 2452 } 2453} 2454 2455 2456static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2457 struct stripe_head_state *s, 2458 struct r6_state *r6s, struct page *tmp_page, 2459 int disks) 2460{ 2461 int update_p = 0, update_q = 0; 2462 struct r5dev *dev; 2463 int pd_idx = sh->pd_idx; 2464 int qd_idx = r6s->qd_idx; 2465 2466 set_bit(STRIPE_HANDLE, &sh->state); 2467 2468 BUG_ON(s->failed > 2); 2469 BUG_ON(s->uptodate < disks); 2470 /* Want to check and possibly repair P and Q. 2471 * However there could be one 'failed' device, in which 2472 * case we can only check one of them, possibly using the 2473 * other to generate missing data 2474 */ 2475 2476 /* If !tmp_page, we cannot do the calculations, 2477 * but as we have set STRIPE_HANDLE, we will soon be called 2478 * by stripe_handle with a tmp_page - just wait until then. 2479 */ 2480 if (tmp_page) { 2481 if (s->failed == r6s->q_failed) { 2482 /* The only possible failed device holds 'Q', so it 2483 * makes sense to check P (If anything else were failed, 2484 * we would have used P to recreate it). 
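	 *
	 * A rough sketch of the P check done below, assuming compute_block_1()
	 * with its 'nozero' flag set xors the recomputed parity on top of the
	 * stored P instead of replacing it:
	 *
	 *	compute_block_1(sh, pd_idx, 1);	    buffer = stored P ^ recomputed P
	 *	if (!page_is_zero(parity page))	    any set bit means a mismatch
	 *		recompute P for real and flag it for write-back (update_p)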
2485 */ 2486 compute_block_1(sh, pd_idx, 1); 2487 if (!page_is_zero(sh->dev[pd_idx].page)) { 2488 compute_block_1(sh, pd_idx, 0); 2489 update_p = 1; 2490 } 2491 } 2492 if (!r6s->q_failed && s->failed < 2) { 2493 /* q is not failed, and we didn't use it to generate 2494 * anything, so it makes sense to check it 2495 */ 2496 memcpy(page_address(tmp_page), 2497 page_address(sh->dev[qd_idx].page), 2498 STRIPE_SIZE); 2499 compute_parity6(sh, UPDATE_PARITY); 2500 if (memcmp(page_address(tmp_page), 2501 page_address(sh->dev[qd_idx].page), 2502 STRIPE_SIZE) != 0) { 2503 clear_bit(STRIPE_INSYNC, &sh->state); 2504 update_q = 1; 2505 } 2506 } 2507 if (update_p || update_q) { 2508 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2509 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2510 /* don't try to repair!! */ 2511 update_p = update_q = 0; 2512 } 2513 2514 /* now write out any block on a failed drive, 2515 * or P or Q if they need it 2516 */ 2517 2518 if (s->failed == 2) { 2519 dev = &sh->dev[r6s->failed_num[1]]; 2520 s->locked++; 2521 set_bit(R5_LOCKED, &dev->flags); 2522 set_bit(R5_Wantwrite, &dev->flags); 2523 } 2524 if (s->failed >= 1) { 2525 dev = &sh->dev[r6s->failed_num[0]]; 2526 s->locked++; 2527 set_bit(R5_LOCKED, &dev->flags); 2528 set_bit(R5_Wantwrite, &dev->flags); 2529 } 2530 2531 if (update_p) { 2532 dev = &sh->dev[pd_idx]; 2533 s->locked++; 2534 set_bit(R5_LOCKED, &dev->flags); 2535 set_bit(R5_Wantwrite, &dev->flags); 2536 } 2537 if (update_q) { 2538 dev = &sh->dev[qd_idx]; 2539 s->locked++; 2540 set_bit(R5_LOCKED, &dev->flags); 2541 set_bit(R5_Wantwrite, &dev->flags); 2542 } 2543 clear_bit(STRIPE_DEGRADED, &sh->state); 2544 2545 set_bit(STRIPE_INSYNC, &sh->state); 2546 } 2547} 2548 2549static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2550 struct r6_state *r6s) 2551{ 2552 int i; 2553 2554 /* We have read all the blocks in this stripe and now we need to 2555 * copy some of them into a target stripe for expand. 2556 */ 2557 struct dma_async_tx_descriptor *tx = NULL; 2558 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2559 for (i = 0; i < sh->disks; i++) 2560 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) { 2561 int dd_idx, pd_idx, j; 2562 struct stripe_head *sh2; 2563 2564 sector_t bn = compute_blocknr(sh, i); 2565 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 2566 conf->raid_disks - 2567 conf->max_degraded, &dd_idx, 2568 &pd_idx, conf); 2569 sh2 = get_active_stripe(conf, s, conf->raid_disks, 2570 pd_idx, 1); 2571 if (sh2 == NULL) 2572 /* so far only the early blocks of this stripe 2573 * have been requested. 
When later blocks 2574 * get requested, we will try again 2575 */ 2576 continue; 2577 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2578 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2579 /* must have already done this block */ 2580 release_stripe(sh2); 2581 continue; 2582 } 2583 2584 /* place all the copies on one channel */ 2585 tx = async_memcpy(sh2->dev[dd_idx].page, 2586 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2587 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2588 2589 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2590 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2591 for (j = 0; j < conf->raid_disks; j++) 2592 if (j != sh2->pd_idx && 2593 (!r6s || j != raid6_next_disk(sh2->pd_idx, 2594 sh2->disks)) && 2595 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2596 break; 2597 if (j == conf->raid_disks) { 2598 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2599 set_bit(STRIPE_HANDLE, &sh2->state); 2600 } 2601 release_stripe(sh2); 2602 2603 } 2604 /* done submitting copies, wait for them to complete */ 2605 if (tx) { 2606 async_tx_ack(tx); 2607 dma_wait_for_async_tx(tx); 2608 } 2609} 2610 2611 2612/* 2613 * handle_stripe - do things to a stripe. 2614 * 2615 * We lock the stripe and then examine the state of various bits 2616 * to see what needs to be done. 2617 * Possible results: 2618 * return some read request which now have data 2619 * return some write requests which are safely on disc 2620 * schedule a read on some buffers 2621 * schedule a write of some buffers 2622 * return confirmation of parity correctness 2623 * 2624 * buffers are taken off read_list or write_list, and bh_cache buffers 2625 * get BH_Lock set before the stripe lock is released. 2626 * 2627 */ 2628 2629static void handle_stripe5(struct stripe_head *sh) 2630{ 2631 raid5_conf_t *conf = sh->raid_conf; 2632 int disks = sh->disks, i; 2633 struct bio *return_bi = NULL; 2634 struct stripe_head_state s; 2635 struct r5dev *dev; 2636 unsigned long pending = 0; 2637 mdk_rdev_t *blocked_rdev = NULL; 2638 2639 memset(&s, 0, sizeof(s)); 2640 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2641 "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state, 2642 atomic_read(&sh->count), sh->pd_idx, 2643 sh->ops.pending, sh->ops.ack, sh->ops.complete); 2644 2645 spin_lock(&sh->lock); 2646 clear_bit(STRIPE_HANDLE, &sh->state); 2647 clear_bit(STRIPE_DELAYED, &sh->state); 2648 2649 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2650 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2651 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2652 /* Now to look around and see what can be done */ 2653 2654 /* clean-up completed biofill operations */ 2655 if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) { 2656 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending); 2657 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 2658 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 2659 } 2660 2661 rcu_read_lock(); 2662 for (i=disks; i--; ) { 2663 mdk_rdev_t *rdev; 2664 struct r5dev *dev = &sh->dev[i]; 2665 clear_bit(R5_Insync, &dev->flags); 2666 2667 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2668 "written %p\n", i, dev->flags, dev->toread, dev->read, 2669 dev->towrite, dev->written); 2670 2671 /* maybe we can request a biofill operation 2672 * 2673 * new wantfill requests are only permitted while 2674 * STRIPE_OP_BIOFILL is clear 2675 */ 2676 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2677 !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2678 set_bit(R5_Wantfill, &dev->flags); 2679 2680 /* now count some things 
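		 * (gathered below: locked, uptodate, compute, to_fill, to_read,
		 * to_write/non_overwrite and written counts, plus failed and
		 * failed_num; R5_Insync is set for devices whose rdev is present
		 * and In_sync)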
*/ 2681 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2682 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2683 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2684 2685 if (test_bit(R5_Wantfill, &dev->flags)) 2686 s.to_fill++; 2687 else if (dev->toread) 2688 s.to_read++; 2689 if (dev->towrite) { 2690 s.to_write++; 2691 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2692 s.non_overwrite++; 2693 } 2694 if (dev->written) 2695 s.written++; 2696 rdev = rcu_dereference(conf->disks[i].rdev); 2697 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2698 blocked_rdev = rdev; 2699 atomic_inc(&rdev->nr_pending); 2700 break; 2701 } 2702 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2703 /* The ReadError flag will just be confusing now */ 2704 clear_bit(R5_ReadError, &dev->flags); 2705 clear_bit(R5_ReWrite, &dev->flags); 2706 } 2707 if (!rdev || !test_bit(In_sync, &rdev->flags) 2708 || test_bit(R5_ReadError, &dev->flags)) { 2709 s.failed++; 2710 s.failed_num = i; 2711 } else 2712 set_bit(R5_Insync, &dev->flags); 2713 } 2714 rcu_read_unlock(); 2715 2716 if (unlikely(blocked_rdev)) { 2717 set_bit(STRIPE_HANDLE, &sh->state); 2718 goto unlock; 2719 } 2720 2721 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2722 sh->ops.count++; 2723 2724 pr_debug("locked=%d uptodate=%d to_read=%d" 2725 " to_write=%d failed=%d failed_num=%d\n", 2726 s.locked, s.uptodate, s.to_read, s.to_write, 2727 s.failed, s.failed_num); 2728 /* check if the array has lost two devices and, if so, some requests might 2729 * need to be failed 2730 */ 2731 if (s.failed > 1 && s.to_read+s.to_write+s.written) 2732 handle_requests_to_failed_array(conf, sh, &s, disks, 2733 &return_bi); 2734 if (s.failed > 1 && s.syncing) { 2735 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2736 clear_bit(STRIPE_SYNCING, &sh->state); 2737 s.syncing = 0; 2738 } 2739 2740 /* might be able to return some write requests if the parity block 2741 * is safe, or on a failed drive 2742 */ 2743 dev = &sh->dev[sh->pd_idx]; 2744 if ( s.written && 2745 ((test_bit(R5_Insync, &dev->flags) && 2746 !test_bit(R5_LOCKED, &dev->flags) && 2747 test_bit(R5_UPTODATE, &dev->flags)) || 2748 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2749 handle_completed_write_requests(conf, sh, disks, &return_bi); 2750 2751 /* Now we might consider reading some blocks, either to check/generate 2752 * parity, or to satisfy requests 2753 * or to load a block that is being partially written. 
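	 *
	 * In sketch form, the test just below is
	 *
	 *	if (to_read || non_overwrite ||
	 *	    (syncing && uptodate + compute < disks) ||
	 *	    expanding || a compute op is still pending)
	 *		handle_issuing_new_read_requests5();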
2754 */ 2755 if (s.to_read || s.non_overwrite || 2756 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding || 2757 test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2758 handle_issuing_new_read_requests5(sh, &s, disks); 2759 2760 /* Now we check to see if any write operations have recently 2761 * completed 2762 */ 2763 2764 /* leave prexor set until postxor is done, allows us to distinguish 2765 * a rmw from a rcw during biodrain 2766 */ 2767 if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) && 2768 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2769 2770 clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 2771 clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack); 2772 clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 2773 2774 for (i = disks; i--; ) 2775 clear_bit(R5_Wantprexor, &sh->dev[i].flags); 2776 } 2777 2778 /* if only POSTXOR is set then this is an 'expand' postxor */ 2779 if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) && 2780 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2781 2782 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 2783 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack); 2784 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 2785 2786 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2787 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2788 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2789 2790 /* All the 'written' buffers and the parity block are ready to 2791 * be written back to disk 2792 */ 2793 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2794 for (i = disks; i--; ) { 2795 dev = &sh->dev[i]; 2796 if (test_bit(R5_LOCKED, &dev->flags) && 2797 (i == sh->pd_idx || dev->written)) { 2798 pr_debug("Writing block %d\n", i); 2799 set_bit(R5_Wantwrite, &dev->flags); 2800 if (!test_and_set_bit( 2801 STRIPE_OP_IO, &sh->ops.pending)) 2802 sh->ops.count++; 2803 if (!test_bit(R5_Insync, &dev->flags) || 2804 (i == sh->pd_idx && s.failed == 0)) 2805 set_bit(STRIPE_INSYNC, &sh->state); 2806 } 2807 } 2808 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2809 atomic_dec(&conf->preread_active_stripes); 2810 if (atomic_read(&conf->preread_active_stripes) < 2811 IO_THRESHOLD) 2812 md_wakeup_thread(conf->mddev->thread); 2813 } 2814 } 2815 2816 /* Now to consider new write requests and what else, if anything 2817 * should be read. We do not handle new writes when: 2818 * 1/ A 'write' operation (copy+xor) is already in flight. 2819 * 2/ A 'check' operation is in flight, as it may clobber the parity 2820 * block. 2821 */ 2822 if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) && 2823 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 2824 handle_issuing_new_write_requests5(conf, sh, &s, disks); 2825 2826 /* maybe we need to check and possibly fix the parity for this stripe 2827 * Any reads will already have been scheduled, so we just see if enough 2828 * data is available. The parity check is held off while parity 2829 * dependent operations are in flight. 
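	 *
	 * In short: handle_parity_checks5() runs either because a resync wants
	 * it (syncing, nothing locked, no compute in flight, stripe not yet
	 * INSYNC) or because a check/repair operation is already pending and
	 * must be driven to completion.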
2830 */ 2831 if ((s.syncing && s.locked == 0 && 2832 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2833 !test_bit(STRIPE_INSYNC, &sh->state)) || 2834 test_bit(STRIPE_OP_CHECK, &sh->ops.pending) || 2835 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) 2836 handle_parity_checks5(conf, sh, &s, disks); 2837 2838 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2839 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2840 clear_bit(STRIPE_SYNCING, &sh->state); 2841 } 2842 2843 /* If the failed drive is just a ReadError, then we might need to progress 2844 * the repair/check process 2845 */ 2846 if (s.failed == 1 && !conf->mddev->ro && 2847 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2848 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2849 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2850 ) { 2851 dev = &sh->dev[s.failed_num]; 2852 if (!test_bit(R5_ReWrite, &dev->flags)) { 2853 set_bit(R5_Wantwrite, &dev->flags); 2854 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2855 sh->ops.count++; 2856 set_bit(R5_ReWrite, &dev->flags); 2857 set_bit(R5_LOCKED, &dev->flags); 2858 s.locked++; 2859 } else { 2860 /* let's read it back */ 2861 set_bit(R5_Wantread, &dev->flags); 2862 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2863 sh->ops.count++; 2864 set_bit(R5_LOCKED, &dev->flags); 2865 s.locked++; 2866 } 2867 } 2868 2869 /* Finish postxor operations initiated by the expansion 2870 * process 2871 */ 2872 if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) && 2873 !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) { 2874 2875 clear_bit(STRIPE_EXPANDING, &sh->state); 2876 2877 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2878 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2879 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2880 2881 for (i = conf->raid_disks; i--; ) { 2882 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2883 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2884 sh->ops.count++; 2885 } 2886 } 2887 2888 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2889 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2890 /* Need to write out all blocks after computing parity */ 2891 sh->disks = conf->raid_disks; 2892 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2893 conf->raid_disks); 2894 s.locked += handle_write_operations5(sh, 1, 1); 2895 } else if (s.expanded && 2896 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2897 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2898 atomic_dec(&conf->reshape_stripes); 2899 wake_up(&conf->wait_for_overlap); 2900 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2901 } 2902 2903 if (s.expanding && s.locked == 0 && 2904 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2905 handle_stripe_expansion(conf, sh, NULL); 2906 2907 if (sh->ops.count) 2908 pending = get_stripe_work(sh); 2909 2910 unlock: 2911 spin_unlock(&sh->lock); 2912 2913 /* wait for this device to become unblocked */ 2914 if (unlikely(blocked_rdev)) 2915 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2916 2917 if (pending) 2918 raid5_run_ops(sh, pending); 2919 2920 return_io(return_bi); 2921 2922} 2923 2924static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2925{ 2926 raid6_conf_t *conf = sh->raid_conf; 2927 int disks = sh->disks; 2928 struct bio *return_bi = NULL; 2929 int i, pd_idx = sh->pd_idx; 2930 struct stripe_head_state s; 2931 struct r6_state r6s; 2932 struct r5dev *dev, *pdev, *qdev; 2933 mdk_rdev_t *blocked_rdev = NULL; 2934 2935 r6s.qd_idx = raid6_next_disk(pd_idx, disks); 2936 
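	/* in this layout Q simply sits on the disk after P, wrapping at
	 * raid_disks (see raid6_next_disk())
	 */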
pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2937 "pd_idx=%d, qd_idx=%d\n", 2938 (unsigned long long)sh->sector, sh->state, 2939 atomic_read(&sh->count), pd_idx, r6s.qd_idx); 2940 memset(&s, 0, sizeof(s)); 2941 2942 spin_lock(&sh->lock); 2943 clear_bit(STRIPE_HANDLE, &sh->state); 2944 clear_bit(STRIPE_DELAYED, &sh->state); 2945 2946 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2947 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2948 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2949 /* Now to look around and see what can be done */ 2950 2951 rcu_read_lock(); 2952 for (i=disks; i--; ) { 2953 mdk_rdev_t *rdev; 2954 dev = &sh->dev[i]; 2955 clear_bit(R5_Insync, &dev->flags); 2956 2957 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 2958 i, dev->flags, dev->toread, dev->towrite, dev->written); 2959 /* maybe we can reply to a read */ 2960 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 2961 struct bio *rbi, *rbi2; 2962 pr_debug("Return read for disc %d\n", i); 2963 spin_lock_irq(&conf->device_lock); 2964 rbi = dev->toread; 2965 dev->toread = NULL; 2966 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2967 wake_up(&conf->wait_for_overlap); 2968 spin_unlock_irq(&conf->device_lock); 2969 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2970 copy_data(0, rbi, dev->page, dev->sector); 2971 rbi2 = r5_next_bio(rbi, dev->sector); 2972 spin_lock_irq(&conf->device_lock); 2973 if (--rbi->bi_phys_segments == 0) { 2974 rbi->bi_next = return_bi; 2975 return_bi = rbi; 2976 } 2977 spin_unlock_irq(&conf->device_lock); 2978 rbi = rbi2; 2979 } 2980 } 2981 2982 /* now count some things */ 2983 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2984 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2985 2986 2987 if (dev->toread) 2988 s.to_read++; 2989 if (dev->towrite) { 2990 s.to_write++; 2991 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2992 s.non_overwrite++; 2993 } 2994 if (dev->written) 2995 s.written++; 2996 rdev = rcu_dereference(conf->disks[i].rdev); 2997 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2998 blocked_rdev = rdev; 2999 atomic_inc(&rdev->nr_pending); 3000 break; 3001 } 3002 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3003 /* The ReadError flag will just be confusing now */ 3004 clear_bit(R5_ReadError, &dev->flags); 3005 clear_bit(R5_ReWrite, &dev->flags); 3006 } 3007 if (!rdev || !test_bit(In_sync, &rdev->flags) 3008 || test_bit(R5_ReadError, &dev->flags)) { 3009 if (s.failed < 2) 3010 r6s.failed_num[s.failed] = i; 3011 s.failed++; 3012 } else 3013 set_bit(R5_Insync, &dev->flags); 3014 } 3015 rcu_read_unlock(); 3016 3017 if (unlikely(blocked_rdev)) { 3018 set_bit(STRIPE_HANDLE, &sh->state); 3019 goto unlock; 3020 } 3021 pr_debug("locked=%d uptodate=%d to_read=%d" 3022 " to_write=%d failed=%d failed_num=%d,%d\n", 3023 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3024 r6s.failed_num[0], r6s.failed_num[1]); 3025 /* check if the array has lost >2 devices and, if so, some requests 3026 * might need to be failed 3027 */ 3028 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3029 handle_requests_to_failed_array(conf, sh, &s, disks, 3030 &return_bi); 3031 if (s.failed > 2 && s.syncing) { 3032 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3033 clear_bit(STRIPE_SYNCING, &sh->state); 3034 s.syncing = 0; 3035 } 3036 3037 /* 3038 * might be able to return some write requests if the parity blocks 3039 * are safe, or on a failed drive 3040 */ 3041 pdev = &sh->dev[pd_idx]; 3042 r6s.p_failed = (s.failed >= 1 && 
r6s.failed_num[0] == pd_idx) 3043 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3044 qdev = &sh->dev[r6s.qd_idx]; 3045 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx) 3046 || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx); 3047 3048 if ( s.written && 3049 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3050 && !test_bit(R5_LOCKED, &pdev->flags) 3051 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3052 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3053 && !test_bit(R5_LOCKED, &qdev->flags) 3054 && test_bit(R5_UPTODATE, &qdev->flags))))) 3055 handle_completed_write_requests(conf, sh, disks, &return_bi); 3056 3057 /* Now we might consider reading some blocks, either to check/generate 3058 * parity, or to satisfy requests 3059 * or to load a block that is being partially written. 3060 */ 3061 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3062 (s.syncing && (s.uptodate < disks)) || s.expanding) 3063 handle_issuing_new_read_requests6(sh, &s, &r6s, disks); 3064 3065 /* now to consider writing and what else, if anything should be read */ 3066 if (s.to_write) 3067 handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks); 3068 3069 /* maybe we need to check and possibly fix the parity for this stripe 3070 * Any reads will already have been scheduled, so we just see if enough 3071 * data is available 3072 */ 3073 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3074 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3075 3076 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3077 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3078 clear_bit(STRIPE_SYNCING, &sh->state); 3079 } 3080 3081 /* If the failed drives are just a ReadError, then we might need 3082 * to progress the repair/check process 3083 */ 3084 if (s.failed <= 2 && !conf->mddev->ro) 3085 for (i = 0; i < s.failed; i++) { 3086 dev = &sh->dev[r6s.failed_num[i]]; 3087 if (test_bit(R5_ReadError, &dev->flags) 3088 && !test_bit(R5_LOCKED, &dev->flags) 3089 && test_bit(R5_UPTODATE, &dev->flags) 3090 ) { 3091 if (!test_bit(R5_ReWrite, &dev->flags)) { 3092 set_bit(R5_Wantwrite, &dev->flags); 3093 set_bit(R5_ReWrite, &dev->flags); 3094 set_bit(R5_LOCKED, &dev->flags); 3095 } else { 3096 /* let's read it back */ 3097 set_bit(R5_Wantread, &dev->flags); 3098 set_bit(R5_LOCKED, &dev->flags); 3099 } 3100 } 3101 } 3102 3103 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3104 /* Need to write out all blocks after computing P&Q */ 3105 sh->disks = conf->raid_disks; 3106 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 3107 conf->raid_disks); 3108 compute_parity6(sh, RECONSTRUCT_WRITE); 3109 for (i = conf->raid_disks ; i-- ; ) { 3110 set_bit(R5_LOCKED, &sh->dev[i].flags); 3111 s.locked++; 3112 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3113 } 3114 clear_bit(STRIPE_EXPANDING, &sh->state); 3115 } else if (s.expanded) { 3116 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3117 atomic_dec(&conf->reshape_stripes); 3118 wake_up(&conf->wait_for_overlap); 3119 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3120 } 3121 3122 if (s.expanding && s.locked == 0 && 3123 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 3124 handle_stripe_expansion(conf, sh, &r6s); 3125 3126 unlock: 3127 spin_unlock(&sh->lock); 3128 3129 /* wait for this device to become unblocked */ 3130 if (unlikely(blocked_rdev)) 3131 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3132 3133 return_io(return_bi); 3134 3135 for (i=disks; i-- ;) { 3136 int rw; 3137 struct bio *bi; 
3138 mdk_rdev_t *rdev; 3139 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 3140 rw = WRITE; 3141 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 3142 rw = READ; 3143 else 3144 continue; 3145 3146 set_bit(STRIPE_IO_STARTED, &sh->state); 3147 3148 bi = &sh->dev[i].req; 3149 3150 bi->bi_rw = rw; 3151 if (rw == WRITE) 3152 bi->bi_end_io = raid5_end_write_request; 3153 else 3154 bi->bi_end_io = raid5_end_read_request; 3155 3156 rcu_read_lock(); 3157 rdev = rcu_dereference(conf->disks[i].rdev); 3158 if (rdev && test_bit(Faulty, &rdev->flags)) 3159 rdev = NULL; 3160 if (rdev) 3161 atomic_inc(&rdev->nr_pending); 3162 rcu_read_unlock(); 3163 3164 if (rdev) { 3165 if (s.syncing || s.expanding || s.expanded) 3166 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 3167 3168 bi->bi_bdev = rdev->bdev; 3169 pr_debug("for %llu schedule op %ld on disc %d\n", 3170 (unsigned long long)sh->sector, bi->bi_rw, i); 3171 atomic_inc(&sh->count); 3172 bi->bi_sector = sh->sector + rdev->data_offset; 3173 bi->bi_flags = 1 << BIO_UPTODATE; 3174 bi->bi_vcnt = 1; 3175 bi->bi_max_vecs = 1; 3176 bi->bi_idx = 0; 3177 bi->bi_io_vec = &sh->dev[i].vec; 3178 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 3179 bi->bi_io_vec[0].bv_offset = 0; 3180 bi->bi_size = STRIPE_SIZE; 3181 bi->bi_next = NULL; 3182 if (rw == WRITE && 3183 test_bit(R5_ReWrite, &sh->dev[i].flags)) 3184 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 3185 generic_make_request(bi); 3186 } else { 3187 if (rw == WRITE) 3188 set_bit(STRIPE_DEGRADED, &sh->state); 3189 pr_debug("skip op %ld on disc %d for sector %llu\n", 3190 bi->bi_rw, i, (unsigned long long)sh->sector); 3191 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3192 set_bit(STRIPE_HANDLE, &sh->state); 3193 } 3194 } 3195} 3196 3197static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3198{ 3199 if (sh->raid_conf->level == 6) 3200 handle_stripe6(sh, tmp_page); 3201 else 3202 handle_stripe5(sh); 3203} 3204 3205 3206 3207static void raid5_activate_delayed(raid5_conf_t *conf) 3208{ 3209 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3210 while (!list_empty(&conf->delayed_list)) { 3211 struct list_head *l = conf->delayed_list.next; 3212 struct stripe_head *sh; 3213 sh = list_entry(l, struct stripe_head, lru); 3214 list_del_init(l); 3215 clear_bit(STRIPE_DELAYED, &sh->state); 3216 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3217 atomic_inc(&conf->preread_active_stripes); 3218 list_add_tail(&sh->lru, &conf->hold_list); 3219 } 3220 } else 3221 blk_plug_device(conf->mddev->queue); 3222} 3223 3224static void activate_bit_delay(raid5_conf_t *conf) 3225{ 3226 /* device_lock is held */ 3227 struct list_head head; 3228 list_add(&head, &conf->bitmap_list); 3229 list_del_init(&conf->bitmap_list); 3230 while (!list_empty(&head)) { 3231 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3232 list_del_init(&sh->lru); 3233 atomic_inc(&sh->count); 3234 __release_stripe(conf, sh); 3235 } 3236} 3237 3238static void unplug_slaves(mddev_t *mddev) 3239{ 3240 raid5_conf_t *conf = mddev_to_conf(mddev); 3241 int i; 3242 3243 rcu_read_lock(); 3244 for (i=0; i<mddev->raid_disks; i++) { 3245 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3246 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3247 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3248 3249 atomic_inc(&rdev->nr_pending); 3250 rcu_read_unlock(); 3251 3252 blk_unplug(r_queue); 3253 3254 rdev_dec_pending(rdev, mddev); 3255 rcu_read_lock(); 3256 } 
3257 } 3258 rcu_read_unlock(); 3259} 3260 3261static void raid5_unplug_device(struct request_queue *q) 3262{ 3263 mddev_t *mddev = q->queuedata; 3264 raid5_conf_t *conf = mddev_to_conf(mddev); 3265 unsigned long flags; 3266 3267 spin_lock_irqsave(&conf->device_lock, flags); 3268 3269 if (blk_remove_plug(q)) { 3270 conf->seq_flush++; 3271 raid5_activate_delayed(conf); 3272 } 3273 md_wakeup_thread(mddev->thread); 3274 3275 spin_unlock_irqrestore(&conf->device_lock, flags); 3276 3277 unplug_slaves(mddev); 3278} 3279 3280static int raid5_congested(void *data, int bits) 3281{ 3282 mddev_t *mddev = data; 3283 raid5_conf_t *conf = mddev_to_conf(mddev); 3284 3285 /* No difference between reads and writes. Just check 3286 * how busy the stripe_cache is 3287 */ 3288 if (conf->inactive_blocked) 3289 return 1; 3290 if (conf->quiesce) 3291 return 1; 3292 if (list_empty_careful(&conf->inactive_list)) 3293 return 1; 3294 3295 return 0; 3296} 3297 3298/* We want read requests to align with chunks where possible, 3299 * but write requests don't need to. 3300 */ 3301static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 3302{ 3303 mddev_t *mddev = q->queuedata; 3304 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3305 int max; 3306 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3307 unsigned int bio_sectors = bio->bi_size >> 9; 3308 3309 if (bio_data_dir(bio) == WRITE) 3310 return biovec->bv_len; /* always allow writes to be mergeable */ 3311 3312 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3313 if (max < 0) max = 0; 3314 if (max <= biovec->bv_len && bio_sectors == 0) 3315 return biovec->bv_len; 3316 else 3317 return max; 3318} 3319 3320 3321static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3322{ 3323 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3324 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3325 unsigned int bio_sectors = bio->bi_size >> 9; 3326 3327 return chunk_sectors >= 3328 ((sector & (chunk_sectors - 1)) + bio_sectors); 3329} 3330 3331/* 3332 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3333 * later sampled by raid5d. 3334 */ 3335static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3336{ 3337 unsigned long flags; 3338 3339 spin_lock_irqsave(&conf->device_lock, flags); 3340 3341 bi->bi_next = conf->retry_read_aligned_list; 3342 conf->retry_read_aligned_list = bi; 3343 3344 spin_unlock_irqrestore(&conf->device_lock, flags); 3345 md_wakeup_thread(conf->mddev->thread); 3346} 3347 3348 3349static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3350{ 3351 struct bio *bi; 3352 3353 bi = conf->retry_read_aligned; 3354 if (bi) { 3355 conf->retry_read_aligned = NULL; 3356 return bi; 3357 } 3358 bi = conf->retry_read_aligned_list; 3359 if(bi) { 3360 conf->retry_read_aligned_list = bi->bi_next; 3361 bi->bi_next = NULL; 3362 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3363 bi->bi_hw_segments = 0; /* count of processed stripes */ 3364 } 3365 3366 return bi; 3367} 3368 3369 3370/* 3371 * The "raid5_align_endio" should check if the read succeeded and if it 3372 * did, call bio_endio on the original bio (having bio_put the new bio 3373 * first). 3374 * If the read failed.. 
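 * the original bio is queued with add_bio_to_retry() so that raid5d can
 * resubmit it through the normal stripe cache path (see
 * retry_aligned_read() below).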
3375 */ 3376static void raid5_align_endio(struct bio *bi, int error) 3377{ 3378 struct bio* raid_bi = bi->bi_private; 3379 mddev_t *mddev; 3380 raid5_conf_t *conf; 3381 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3382 mdk_rdev_t *rdev; 3383 3384 bio_put(bi); 3385 3386 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3387 conf = mddev_to_conf(mddev); 3388 rdev = (void*)raid_bi->bi_next; 3389 raid_bi->bi_next = NULL; 3390 3391 rdev_dec_pending(rdev, conf->mddev); 3392 3393 if (!error && uptodate) { 3394 bio_endio(raid_bi, 0); 3395 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3396 wake_up(&conf->wait_for_stripe); 3397 return; 3398 } 3399 3400 3401 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3402 3403 add_bio_to_retry(raid_bi, conf); 3404} 3405 3406static int bio_fits_rdev(struct bio *bi) 3407{ 3408 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3409 3410 if ((bi->bi_size>>9) > q->max_sectors) 3411 return 0; 3412 blk_recount_segments(q, bi); 3413 if (bi->bi_phys_segments > q->max_phys_segments || 3414 bi->bi_hw_segments > q->max_hw_segments) 3415 return 0; 3416 3417 if (q->merge_bvec_fn) 3418 /* it's too hard to apply the merge_bvec_fn at this stage, 3419 * just just give up 3420 */ 3421 return 0; 3422 3423 return 1; 3424} 3425 3426 3427static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3428{ 3429 mddev_t *mddev = q->queuedata; 3430 raid5_conf_t *conf = mddev_to_conf(mddev); 3431 const unsigned int raid_disks = conf->raid_disks; 3432 const unsigned int data_disks = raid_disks - conf->max_degraded; 3433 unsigned int dd_idx, pd_idx; 3434 struct bio* align_bi; 3435 mdk_rdev_t *rdev; 3436 3437 if (!in_chunk_boundary(mddev, raid_bio)) { 3438 pr_debug("chunk_aligned_read : non aligned\n"); 3439 return 0; 3440 } 3441 /* 3442 * use bio_clone to make a copy of the bio 3443 */ 3444 align_bi = bio_clone(raid_bio, GFP_NOIO); 3445 if (!align_bi) 3446 return 0; 3447 /* 3448 * set bi_end_io to a new function, and set bi_private to the 3449 * original bio. 3450 */ 3451 align_bi->bi_end_io = raid5_align_endio; 3452 align_bi->bi_private = raid_bio; 3453 /* 3454 * compute position 3455 */ 3456 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, 3457 raid_disks, 3458 data_disks, 3459 &dd_idx, 3460 &pd_idx, 3461 conf); 3462 3463 rcu_read_lock(); 3464 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3465 if (rdev && test_bit(In_sync, &rdev->flags)) { 3466 atomic_inc(&rdev->nr_pending); 3467 rcu_read_unlock(); 3468 raid_bio->bi_next = (void*)rdev; 3469 align_bi->bi_bdev = rdev->bdev; 3470 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3471 align_bi->bi_sector += rdev->data_offset; 3472 3473 if (!bio_fits_rdev(align_bi)) { 3474 /* too big in some way */ 3475 bio_put(align_bi); 3476 rdev_dec_pending(rdev, mddev); 3477 return 0; 3478 } 3479 3480 spin_lock_irq(&conf->device_lock); 3481 wait_event_lock_irq(conf->wait_for_stripe, 3482 conf->quiesce == 0, 3483 conf->device_lock, /* nothing */); 3484 atomic_inc(&conf->active_aligned_reads); 3485 spin_unlock_irq(&conf->device_lock); 3486 3487 generic_make_request(align_bi); 3488 return 1; 3489 } else { 3490 rcu_read_unlock(); 3491 bio_put(align_bi); 3492 return 0; 3493 } 3494} 3495 3496/* __get_priority_stripe - get the next stripe to process 3497 * 3498 * Full stripe writes are allowed to pass preread active stripes up until 3499 * the bypass_threshold is exceeded. 
In general the bypass_count 3500 * increments when the handle_list is handled before the hold_list; however, it 3501 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3502 * stripe with in flight i/o. The bypass_count will be reset when the 3503 * head of the hold_list has changed, i.e. the head was promoted to the 3504 * handle_list. 3505 */ 3506static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3507{ 3508 struct stripe_head *sh; 3509 3510 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3511 __func__, 3512 list_empty(&conf->handle_list) ? "empty" : "busy", 3513 list_empty(&conf->hold_list) ? "empty" : "busy", 3514 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3515 3516 if (!list_empty(&conf->handle_list)) { 3517 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3518 3519 if (list_empty(&conf->hold_list)) 3520 conf->bypass_count = 0; 3521 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3522 if (conf->hold_list.next == conf->last_hold) 3523 conf->bypass_count++; 3524 else { 3525 conf->last_hold = conf->hold_list.next; 3526 conf->bypass_count -= conf->bypass_threshold; 3527 if (conf->bypass_count < 0) 3528 conf->bypass_count = 0; 3529 } 3530 } 3531 } else if (!list_empty(&conf->hold_list) && 3532 ((conf->bypass_threshold && 3533 conf->bypass_count > conf->bypass_threshold) || 3534 atomic_read(&conf->pending_full_writes) == 0)) { 3535 sh = list_entry(conf->hold_list.next, 3536 typeof(*sh), lru); 3537 conf->bypass_count -= conf->bypass_threshold; 3538 if (conf->bypass_count < 0) 3539 conf->bypass_count = 0; 3540 } else 3541 return NULL; 3542 3543 list_del_init(&sh->lru); 3544 atomic_inc(&sh->count); 3545 BUG_ON(atomic_read(&sh->count) != 1); 3546 return sh; 3547} 3548 3549static int make_request(struct request_queue *q, struct bio * bi) 3550{ 3551 mddev_t *mddev = q->queuedata; 3552 raid5_conf_t *conf = mddev_to_conf(mddev); 3553 unsigned int dd_idx, pd_idx; 3554 sector_t new_sector; 3555 sector_t logical_sector, last_sector; 3556 struct stripe_head *sh; 3557 const int rw = bio_data_dir(bi); 3558 int remaining; 3559 3560 if (unlikely(bio_barrier(bi))) { 3561 bio_endio(bi, -EOPNOTSUPP); 3562 return 0; 3563 } 3564 3565 md_write_start(mddev, bi); 3566 3567 disk_stat_inc(mddev->gendisk, ios[rw]); 3568 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3569 3570 if (rw == READ && 3571 mddev->reshape_position == MaxSector && 3572 chunk_aligned_read(q,bi)) 3573 return 0; 3574 3575 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3576 last_sector = bi->bi_sector + (bi->bi_size>>9); 3577 bi->bi_next = NULL; 3578 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3579 3580 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3581 DEFINE_WAIT(w); 3582 int disks, data_disks; 3583 3584 retry: 3585 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3586 if (likely(conf->expand_progress == MaxSector)) 3587 disks = conf->raid_disks; 3588 else { 3589 /* spinlock is needed as expand_progress may be 3590 * 64bit on a 32bit platform, and so it might be 3591 * possible to see a half-updated value 3592 * Ofcourse expand_progress could change after 3593 * the lock is dropped, so once we get a reference 3594 * to the stripe that we think it is, we will have 3595 * to check again. 
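			 *
			 * Roughly: under device_lock, a sector at or beyond
			 * expand_progress still maps with previous_raid_disks,
			 * a sector below expand_lo already uses the new
			 * raid_disks, and a sector in the window between the
			 * two waits and retries; once a stripe has been
			 * obtained the mapping is re-checked under the lock
			 * and retried if it went stale.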
3596 */ 3597 spin_lock_irq(&conf->device_lock); 3598 disks = conf->raid_disks; 3599 if (logical_sector >= conf->expand_progress) 3600 disks = conf->previous_raid_disks; 3601 else { 3602 if (logical_sector >= conf->expand_lo) { 3603 spin_unlock_irq(&conf->device_lock); 3604 schedule(); 3605 goto retry; 3606 } 3607 } 3608 spin_unlock_irq(&conf->device_lock); 3609 } 3610 data_disks = disks - conf->max_degraded; 3611 3612 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 3613 &dd_idx, &pd_idx, conf); 3614 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3615 (unsigned long long)new_sector, 3616 (unsigned long long)logical_sector); 3617 3618 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 3619 if (sh) { 3620 if (unlikely(conf->expand_progress != MaxSector)) { 3621 /* expansion might have moved on while waiting for a 3622 * stripe, so we must do the range check again. 3623 * Expansion could still move past after this 3624 * test, but as we are holding a reference to 3625 * 'sh', we know that if that happens, 3626 * STRIPE_EXPANDING will get set and the expansion 3627 * won't proceed until we finish with the stripe. 3628 */ 3629 int must_retry = 0; 3630 spin_lock_irq(&conf->device_lock); 3631 if (logical_sector < conf->expand_progress && 3632 disks == conf->previous_raid_disks) 3633 /* mismatch, need to try again */ 3634 must_retry = 1; 3635 spin_unlock_irq(&conf->device_lock); 3636 if (must_retry) { 3637 release_stripe(sh); 3638 goto retry; 3639 } 3640 } 3641 /* FIXME what if we get a false positive because these 3642 * are being updated. 3643 */ 3644 if (logical_sector >= mddev->suspend_lo && 3645 logical_sector < mddev->suspend_hi) { 3646 release_stripe(sh); 3647 schedule(); 3648 goto retry; 3649 } 3650 3651 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3652 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3653 /* Stripe is busy expanding or 3654 * add failed due to overlap. Flush everything 3655 * and wait a while 3656 */ 3657 raid5_unplug_device(mddev->queue); 3658 release_stripe(sh); 3659 schedule(); 3660 goto retry; 3661 } 3662 finish_wait(&conf->wait_for_overlap, &w); 3663 set_bit(STRIPE_HANDLE, &sh->state); 3664 clear_bit(STRIPE_DELAYED, &sh->state); 3665 release_stripe(sh); 3666 } else { 3667 /* cannot get stripe for read-ahead, just give-up */ 3668 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3669 finish_wait(&conf->wait_for_overlap, &w); 3670 break; 3671 } 3672 3673 } 3674 spin_lock_irq(&conf->device_lock); 3675 remaining = --bi->bi_phys_segments; 3676 spin_unlock_irq(&conf->device_lock); 3677 if (remaining == 0) { 3678 3679 if ( rw == WRITE ) 3680 md_write_end(mddev); 3681 3682 bi->bi_end_io(bi, 3683 test_bit(BIO_UPTODATE, &bi->bi_flags) 3684 ? 0 : -EIO); 3685 } 3686 return 0; 3687} 3688 3689static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3690{ 3691 /* reshaping is quite different to recovery/resync so it is 3692 * handled quite separately ... here. 3693 * 3694 * On each call to sync_request, we gather one chunk worth of 3695 * destination stripes and flag them as expanding. 3696 * Then we find all the source stripes and request reads. 3697 * As the reads complete, handle_stripe will copy the data 3698 * into the destination stripe and release that stripe. 
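 *
 * Sketched out, each call below: push mddev->reshape_position to the
 * superblock when required (the exact rule sits at the writepos/safepos
 * calculation further down), mark one chunk worth of destination stripes
 * STRIPE_EXPANDING (zero-filling any block that lies beyond the end of
 * the old array), schedule reads of the matching source stripes, and
 * report chunk_size>>9 sectors of progress.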
3699 */ 3700 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3701 struct stripe_head *sh; 3702 int pd_idx; 3703 sector_t first_sector, last_sector; 3704 int raid_disks = conf->previous_raid_disks; 3705 int data_disks = raid_disks - conf->max_degraded; 3706 int new_data_disks = conf->raid_disks - conf->max_degraded; 3707 int i; 3708 int dd_idx; 3709 sector_t writepos, safepos, gap; 3710 3711 if (sector_nr == 0 && 3712 conf->expand_progress != 0) { 3713 /* restarting in the middle, skip the initial sectors */ 3714 sector_nr = conf->expand_progress; 3715 sector_div(sector_nr, new_data_disks); 3716 *skipped = 1; 3717 return sector_nr; 3718 } 3719 3720 /* we update the metadata when there is more than 3Meg 3721 * in the block range (that is rather arbitrary, should 3722 * probably be time based) or when the data about to be 3723 * copied would over-write the source of the data at 3724 * the front of the range. 3725 * i.e. one new_stripe forward from expand_progress new_maps 3726 * to after where expand_lo old_maps to 3727 */ 3728 writepos = conf->expand_progress + 3729 conf->chunk_size/512*(new_data_disks); 3730 sector_div(writepos, new_data_disks); 3731 safepos = conf->expand_lo; 3732 sector_div(safepos, data_disks); 3733 gap = conf->expand_progress - conf->expand_lo; 3734 3735 if (writepos >= safepos || 3736 gap > (new_data_disks)*3000*2 /*3Meg*/) { 3737 /* Cannot proceed until we've updated the superblock... */ 3738 wait_event(conf->wait_for_overlap, 3739 atomic_read(&conf->reshape_stripes)==0); 3740 mddev->reshape_position = conf->expand_progress; 3741 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3742 md_wakeup_thread(mddev->thread); 3743 wait_event(mddev->sb_wait, mddev->flags == 0 || 3744 kthread_should_stop()); 3745 spin_lock_irq(&conf->device_lock); 3746 conf->expand_lo = mddev->reshape_position; 3747 spin_unlock_irq(&conf->device_lock); 3748 wake_up(&conf->wait_for_overlap); 3749 } 3750 3751 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { 3752 int j; 3753 int skipped = 0; 3754 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); 3755 sh = get_active_stripe(conf, sector_nr+i, 3756 conf->raid_disks, pd_idx, 0); 3757 set_bit(STRIPE_EXPANDING, &sh->state); 3758 atomic_inc(&conf->reshape_stripes); 3759 /* If any of this stripe is beyond the end of the old 3760 * array, then we need to zero those blocks 3761 */ 3762 for (j=sh->disks; j--;) { 3763 sector_t s; 3764 if (j == sh->pd_idx) 3765 continue; 3766 if (conf->level == 6 && 3767 j == raid6_next_disk(sh->pd_idx, sh->disks)) 3768 continue; 3769 s = compute_blocknr(sh, j); 3770 if (s < (mddev->array_size<<1)) { 3771 skipped = 1; 3772 continue; 3773 } 3774 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3775 set_bit(R5_Expanded, &sh->dev[j].flags); 3776 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3777 } 3778 if (!skipped) { 3779 set_bit(STRIPE_EXPAND_READY, &sh->state); 3780 set_bit(STRIPE_HANDLE, &sh->state); 3781 } 3782 release_stripe(sh); 3783 } 3784 spin_lock_irq(&conf->device_lock); 3785 conf->expand_progress = (sector_nr + i) * new_data_disks; 3786 spin_unlock_irq(&conf->device_lock); 3787 /* Ok, those stripe are ready. We can start scheduling 3788 * reads on the source stripes. 3789 * The source stripes are determined by mapping the first and last 3790 * block on the destination stripes. 
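	 *
	 * Concretely, the arithmetic below takes the first and last array
	 * addresses covered by this chunk in the new layout,
	 * sector_nr*new_data_disks and
	 * (sector_nr + chunk_size/512)*new_data_disks - 1, and feeds both
	 * through raid5_compute_sector() with the old geometry
	 * (previous_raid_disks) to bound the source stripes that must be read.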
3791 */ 3792 first_sector = 3793 raid5_compute_sector(sector_nr*(new_data_disks), 3794 raid_disks, data_disks, 3795 &dd_idx, &pd_idx, conf); 3796 last_sector = 3797 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3798 *(new_data_disks) -1, 3799 raid_disks, data_disks, 3800 &dd_idx, &pd_idx, conf); 3801 if (last_sector >= (mddev->size<<1)) 3802 last_sector = (mddev->size<<1)-1; 3803 while (first_sector <= last_sector) { 3804 pd_idx = stripe_to_pdidx(first_sector, conf, 3805 conf->previous_raid_disks); 3806 sh = get_active_stripe(conf, first_sector, 3807 conf->previous_raid_disks, pd_idx, 0); 3808 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3809 set_bit(STRIPE_HANDLE, &sh->state); 3810 release_stripe(sh); 3811 first_sector += STRIPE_SECTORS; 3812 } 3813 /* If this takes us to the resync_max point where we have to pause, 3814 * then we need to write out the superblock. 3815 */ 3816 sector_nr += conf->chunk_size>>9; 3817 if (sector_nr >= mddev->resync_max) { 3818 /* Cannot proceed until we've updated the superblock... */ 3819 wait_event(conf->wait_for_overlap, 3820 atomic_read(&conf->reshape_stripes) == 0); 3821 mddev->reshape_position = conf->expand_progress; 3822 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3823 md_wakeup_thread(mddev->thread); 3824 wait_event(mddev->sb_wait, 3825 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 3826 || kthread_should_stop()); 3827 spin_lock_irq(&conf->device_lock); 3828 conf->expand_lo = mddev->reshape_position; 3829 spin_unlock_irq(&conf->device_lock); 3830 wake_up(&conf->wait_for_overlap); 3831 } 3832 return conf->chunk_size>>9; 3833} 3834 3835/* FIXME go_faster isn't used */ 3836static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3837{ 3838 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3839 struct stripe_head *sh; 3840 int pd_idx; 3841 int raid_disks = conf->raid_disks; 3842 sector_t max_sector = mddev->size << 1; 3843 int sync_blocks; 3844 int still_degraded = 0; 3845 int i; 3846 3847 if (sector_nr >= max_sector) { 3848 /* just being told to finish up .. nothing much to do */ 3849 unplug_slaves(mddev); 3850 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3851 end_reshape(conf); 3852 return 0; 3853 } 3854 3855 if (mddev->curr_resync < max_sector) /* aborted */ 3856 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3857 &sync_blocks, 1); 3858 else /* completed sync */ 3859 conf->fullsync = 0; 3860 bitmap_close_sync(mddev->bitmap); 3861 3862 return 0; 3863 } 3864 3865 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3866 return reshape_request(mddev, sector_nr, skipped); 3867 3868 /* No need to check resync_max as we never do more than one 3869 * stripe, and as resync_max will always be on a chunk boundary, 3870 * if the check in md_do_sync didn't fire, there is no chance 3871 * of overstepping resync_max here 3872 */ 3873 3874 /* if there are too many failed drives and we are trying 3875 * to resync, then assert that we are finished, because there is 3876 * nothing we can do.
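 *
 * (Added example) a RAID5 that is already missing one member, or a
 * RAID6 missing two, has no redundancy left to check, so a resync or
 * check request is simply reported as complete for the remaining
 * range: *skipped is set and the leftover sector count returned.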
3877 */ 3878 if (mddev->degraded >= conf->max_degraded && 3879 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3880 sector_t rv = (mddev->size << 1) - sector_nr; 3881 *skipped = 1; 3882 return rv; 3883 } 3884 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 3885 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 3886 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 3887 /* we can skip this block, and probably more */ 3888 sync_blocks /= STRIPE_SECTORS; 3889 *skipped = 1; 3890 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 3891 } 3892 3893 3894 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3895 3896 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); 3897 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); 3898 if (sh == NULL) { 3899 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 3900 /* make sure we don't swamp the stripe cache if someone else 3901 * is trying to get access 3902 */ 3903 schedule_timeout_uninterruptible(1); 3904 } 3905 /* Need to check if array will still be degraded after recovery/resync 3906 * We don't need to check the 'failed' flag as when that gets set, 3907 * recovery aborts. 3908 */ 3909 for (i=0; i<mddev->raid_disks; i++) 3910 if (conf->disks[i].rdev == NULL) 3911 still_degraded = 1; 3912 3913 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 3914 3915 spin_lock(&sh->lock); 3916 set_bit(STRIPE_SYNCING, &sh->state); 3917 clear_bit(STRIPE_INSYNC, &sh->state); 3918 spin_unlock(&sh->lock); 3919 3920 handle_stripe(sh, NULL); 3921 release_stripe(sh); 3922 3923 return STRIPE_SECTORS; 3924} 3925 3926static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 3927{ 3928 /* We may not be able to submit a whole bio at once as there 3929 * may not be enough stripe_heads available. 3930 * We cannot pre-allocate enough stripe_heads as we may need 3931 * more than exist in the cache (if we allow ever larger chunks). 3932 * So we do one stripe head at a time and record in 3933 * ->bi_hw_segments how many have been done. 3934 * 3935 * We *know* that this entire raid_bio is in one chunk, so 3936 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
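 *
 * Worked example (added; sizes are hypothetical and assume 4K pages,
 * i.e. STRIPE_SECTORS == 8): a 64K aligned read covers 16 stripes.
 * If only the first 10 stripe_heads can be obtained, bi_hw_segments
 * is set to 10 and the bio is parked on conf->retry_read_aligned so
 * that raid5d can pick it up later and resume at the 11th stripe.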
3937 */ 3938 struct stripe_head *sh; 3939 int dd_idx, pd_idx; 3940 sector_t sector, logical_sector, last_sector; 3941 int scnt = 0; 3942 int remaining; 3943 int handled = 0; 3944 3945 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3946 sector = raid5_compute_sector( logical_sector, 3947 conf->raid_disks, 3948 conf->raid_disks - conf->max_degraded, 3949 &dd_idx, 3950 &pd_idx, 3951 conf); 3952 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3953 3954 for (; logical_sector < last_sector; 3955 logical_sector += STRIPE_SECTORS, 3956 sector += STRIPE_SECTORS, 3957 scnt++) { 3958 3959 if (scnt < raid_bio->bi_hw_segments) 3960 /* already done this stripe */ 3961 continue; 3962 3963 sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); 3964 3965 if (!sh) { 3966 /* failed to get a stripe - must wait */ 3967 raid_bio->bi_hw_segments = scnt; 3968 conf->retry_read_aligned = raid_bio; 3969 return handled; 3970 } 3971 3972 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3973 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3974 release_stripe(sh); 3975 raid_bio->bi_hw_segments = scnt; 3976 conf->retry_read_aligned = raid_bio; 3977 return handled; 3978 } 3979 3980 handle_stripe(sh, NULL); 3981 release_stripe(sh); 3982 handled++; 3983 } 3984 spin_lock_irq(&conf->device_lock); 3985 remaining = --raid_bio->bi_phys_segments; 3986 spin_unlock_irq(&conf->device_lock); 3987 if (remaining == 0) { 3988 3989 raid_bio->bi_end_io(raid_bio, 3990 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3991 ? 0 : -EIO); 3992 } 3993 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3994 wake_up(&conf->wait_for_stripe); 3995 return handled; 3996} 3997 3998 3999 4000/* 4001 * This is our raid5 kernel thread. 4002 * 4003 * We scan the hash table for stripes which can be handled now. 4004 * During the scan, completed stripes are saved for us by the interrupt 4005 * handler, so that they will not have to wait for our next wakeup. 
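 *
 * (Added summary of the loop below) each pass first flushes any
 * pending bitmap batch, then retries aligned reads that previously
 * ran out of stripe_heads, then pulls stripes off the priority lists
 * via __get_priority_stripe() and runs handle_stripe() on them until
 * no work remains.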
4006 */ 4007static void raid5d(mddev_t *mddev) 4008{ 4009 struct stripe_head *sh; 4010 raid5_conf_t *conf = mddev_to_conf(mddev); 4011 int handled; 4012 4013 pr_debug("+++ raid5d active\n"); 4014 4015 md_check_recovery(mddev); 4016 4017 handled = 0; 4018 spin_lock_irq(&conf->device_lock); 4019 while (1) { 4020 struct bio *bio; 4021 4022 if (conf->seq_flush != conf->seq_write) { 4023 int seq = conf->seq_flush; 4024 spin_unlock_irq(&conf->device_lock); 4025 bitmap_unplug(mddev->bitmap); 4026 spin_lock_irq(&conf->device_lock); 4027 conf->seq_write = seq; 4028 activate_bit_delay(conf); 4029 } 4030 4031 while ((bio = remove_bio_from_retry(conf))) { 4032 int ok; 4033 spin_unlock_irq(&conf->device_lock); 4034 ok = retry_aligned_read(conf, bio); 4035 spin_lock_irq(&conf->device_lock); 4036 if (!ok) 4037 break; 4038 handled++; 4039 } 4040 4041 sh = __get_priority_stripe(conf); 4042 4043 if (!sh) { 4044 async_tx_issue_pending_all(); 4045 break; 4046 } 4047 spin_unlock_irq(&conf->device_lock); 4048 4049 handled++; 4050 handle_stripe(sh, conf->spare_page); 4051 release_stripe(sh); 4052 4053 spin_lock_irq(&conf->device_lock); 4054 } 4055 pr_debug("%d stripes handled\n", handled); 4056 4057 spin_unlock_irq(&conf->device_lock); 4058 4059 unplug_slaves(mddev); 4060 4061 pr_debug("--- raid5d inactive\n"); 4062} 4063 4064static ssize_t 4065raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4066{ 4067 raid5_conf_t *conf = mddev_to_conf(mddev); 4068 if (conf) 4069 return sprintf(page, "%d\n", conf->max_nr_stripes); 4070 else 4071 return 0; 4072} 4073 4074static ssize_t 4075raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4076{ 4077 raid5_conf_t *conf = mddev_to_conf(mddev); 4078 unsigned long new; 4079 if (len >= PAGE_SIZE) 4080 return -EINVAL; 4081 if (!conf) 4082 return -ENODEV; 4083 4084 if (strict_strtoul(page, 10, &new)) 4085 return -EINVAL; 4086 if (new <= 16 || new > 32768) 4087 return -EINVAL; 4088 while (new < conf->max_nr_stripes) { 4089 if (drop_one_stripe(conf)) 4090 conf->max_nr_stripes--; 4091 else 4092 break; 4093 } 4094 md_allow_write(mddev); 4095 while (new > conf->max_nr_stripes) { 4096 if (grow_one_stripe(conf)) 4097 conf->max_nr_stripes++; 4098 else break; 4099 } 4100 return len; 4101} 4102 4103static struct md_sysfs_entry 4104raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4105 raid5_show_stripe_cache_size, 4106 raid5_store_stripe_cache_size); 4107 4108static ssize_t 4109raid5_show_preread_threshold(mddev_t *mddev, char *page) 4110{ 4111 raid5_conf_t *conf = mddev_to_conf(mddev); 4112 if (conf) 4113 return sprintf(page, "%d\n", conf->bypass_threshold); 4114 else 4115 return 0; 4116} 4117 4118static ssize_t 4119raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4120{ 4121 raid5_conf_t *conf = mddev_to_conf(mddev); 4122 unsigned long new; 4123 if (len >= PAGE_SIZE) 4124 return -EINVAL; 4125 if (!conf) 4126 return -ENODEV; 4127 4128 if (strict_strtoul(page, 10, &new)) 4129 return -EINVAL; 4130 if (new > conf->max_nr_stripes) 4131 return -EINVAL; 4132 conf->bypass_threshold = new; 4133 return len; 4134} 4135 4136static struct md_sysfs_entry 4137raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4138 S_IRUGO | S_IWUSR, 4139 raid5_show_preread_threshold, 4140 raid5_store_preread_threshold); 4141 4142static ssize_t 4143stripe_cache_active_show(mddev_t *mddev, char *page) 4144{ 4145 raid5_conf_t *conf = mddev_to_conf(mddev); 4146 if (conf) 4147 return sprintf(page, "%d\n", 
atomic_read(&conf->active_stripes)); 4148 else 4149 return 0; 4150} 4151 4152static struct md_sysfs_entry 4153raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4154 4155static struct attribute *raid5_attrs[] = { 4156 &raid5_stripecache_size.attr, 4157 &raid5_stripecache_active.attr, 4158 &raid5_preread_bypass_threshold.attr, 4159 NULL, 4160}; 4161static struct attribute_group raid5_attrs_group = { 4162 .name = NULL, 4163 .attrs = raid5_attrs, 4164}; 4165 4166static int run(mddev_t *mddev) 4167{ 4168 raid5_conf_t *conf; 4169 int raid_disk, memory; 4170 mdk_rdev_t *rdev; 4171 struct disk_info *disk; 4172 struct list_head *tmp; 4173 int working_disks = 0; 4174 4175 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 4176 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4177 mdname(mddev), mddev->level); 4178 return -EIO; 4179 } 4180 4181 if (mddev->reshape_position != MaxSector) { 4182 /* Check that we can continue the reshape. 4183 * Currently only disks can change, it must 4184 * increase, and we must be past the point where 4185 * a stripe over-writes itself 4186 */ 4187 sector_t here_new, here_old; 4188 int old_disks; 4189 int max_degraded = (mddev->level == 5 ? 1 : 2); 4190 4191 if (mddev->new_level != mddev->level || 4192 mddev->new_layout != mddev->layout || 4193 mddev->new_chunk != mddev->chunk_size) { 4194 printk(KERN_ERR "raid5: %s: unsupported reshape " 4195 "required - aborting.\n", 4196 mdname(mddev)); 4197 return -EINVAL; 4198 } 4199 if (mddev->delta_disks <= 0) { 4200 printk(KERN_ERR "raid5: %s: unsupported reshape " 4201 "(reduce disks) required - aborting.\n", 4202 mdname(mddev)); 4203 return -EINVAL; 4204 } 4205 old_disks = mddev->raid_disks - mddev->delta_disks; 4206 /* reshape_position must be on a new-stripe boundary, and one 4207 * further up in new geometry must map after here in old 4208 * geometry. 
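 *
 * Worked example (added; numbers are hypothetical): growing a RAID5
 * from 4 to 5 drives with a 64K chunk (128 sectors), a full stripe is
 * 128*4 = 512 sectors in the new geometry and 128*3 = 384 in the old.
 * A reshape_position of 3072 divides evenly by 512 (here_new = 6) and
 * gives here_old = 8, so here_new < here_old and the reshape can
 * safely continue from where it left off.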
4209 */ 4210 here_new = mddev->reshape_position; 4211 if (sector_div(here_new, (mddev->chunk_size>>9)* 4212 (mddev->raid_disks - max_degraded))) { 4213 printk(KERN_ERR "raid5: reshape_position not " 4214 "on a stripe boundary\n"); 4215 return -EINVAL; 4216 } 4217 /* here_new is the stripe we will write to */ 4218 here_old = mddev->reshape_position; 4219 sector_div(here_old, (mddev->chunk_size>>9)* 4220 (old_disks-max_degraded)); 4221 /* here_old is the first stripe that we might need to read 4222 * from */ 4223 if (here_new >= here_old) { 4224 /* Reading from the same stripe as writing to - bad */ 4225 printk(KERN_ERR "raid5: reshape_position too early for " 4226 "auto-recovery - aborting.\n"); 4227 return -EINVAL; 4228 } 4229 printk(KERN_INFO "raid5: reshape will continue\n"); 4230 /* OK, we should be able to continue; */ 4231 } 4232 4233 4234 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 4235 if ((conf = mddev->private) == NULL) 4236 goto abort; 4237 if (mddev->reshape_position == MaxSector) { 4238 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 4239 } else { 4240 conf->raid_disks = mddev->raid_disks; 4241 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4242 } 4243 4244 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4245 GFP_KERNEL); 4246 if (!conf->disks) 4247 goto abort; 4248 4249 conf->mddev = mddev; 4250 4251 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4252 goto abort; 4253 4254 if (mddev->level == 6) { 4255 conf->spare_page = alloc_page(GFP_KERNEL); 4256 if (!conf->spare_page) 4257 goto abort; 4258 } 4259 spin_lock_init(&conf->device_lock); 4260 mddev->queue->queue_lock = &conf->device_lock; 4261 init_waitqueue_head(&conf->wait_for_stripe); 4262 init_waitqueue_head(&conf->wait_for_overlap); 4263 INIT_LIST_HEAD(&conf->handle_list); 4264 INIT_LIST_HEAD(&conf->hold_list); 4265 INIT_LIST_HEAD(&conf->delayed_list); 4266 INIT_LIST_HEAD(&conf->bitmap_list); 4267 INIT_LIST_HEAD(&conf->inactive_list); 4268 atomic_set(&conf->active_stripes, 0); 4269 atomic_set(&conf->preread_active_stripes, 0); 4270 atomic_set(&conf->active_aligned_reads, 0); 4271 conf->bypass_threshold = BYPASS_THRESHOLD; 4272 4273 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4274 4275 rdev_for_each(rdev, tmp, mddev) { 4276 raid_disk = rdev->raid_disk; 4277 if (raid_disk >= conf->raid_disks 4278 || raid_disk < 0) 4279 continue; 4280 disk = conf->disks + raid_disk; 4281 4282 disk->rdev = rdev; 4283 4284 if (test_bit(In_sync, &rdev->flags)) { 4285 char b[BDEVNAME_SIZE]; 4286 printk(KERN_INFO "raid5: device %s operational as raid" 4287 " disk %d\n", bdevname(rdev->bdev,b), 4288 raid_disk); 4289 working_disks++; 4290 } 4291 } 4292 4293 /* 4294 * 0 for a fully functional array, 1 or 2 for a degraded array. 
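 * (Added note: 2 is only possible for RAID6, where max_degraded is 2;
 * RAID4/5 can tolerate a single missing member.)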
4295 */ 4296 mddev->degraded = conf->raid_disks - working_disks; 4297 conf->mddev = mddev; 4298 conf->chunk_size = mddev->chunk_size; 4299 conf->level = mddev->level; 4300 if (conf->level == 6) 4301 conf->max_degraded = 2; 4302 else 4303 conf->max_degraded = 1; 4304 conf->algorithm = mddev->layout; 4305 conf->max_nr_stripes = NR_STRIPES; 4306 conf->expand_progress = mddev->reshape_position; 4307 4308 /* device size must be a multiple of chunk size */ 4309 mddev->size &= ~(mddev->chunk_size/1024 -1); 4310 mddev->resync_max_sectors = mddev->size << 1; 4311 4312 if (conf->level == 6 && conf->raid_disks < 4) { 4313 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4314 mdname(mddev), conf->raid_disks); 4315 goto abort; 4316 } 4317 if (!conf->chunk_size || conf->chunk_size % 4) { 4318 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4319 conf->chunk_size, mdname(mddev)); 4320 goto abort; 4321 } 4322 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 4323 printk(KERN_ERR 4324 "raid5: unsupported parity algorithm %d for %s\n", 4325 conf->algorithm, mdname(mddev)); 4326 goto abort; 4327 } 4328 if (mddev->degraded > conf->max_degraded) { 4329 printk(KERN_ERR "raid5: not enough operational devices for %s" 4330 " (%d/%d failed)\n", 4331 mdname(mddev), mddev->degraded, conf->raid_disks); 4332 goto abort; 4333 } 4334 4335 if (mddev->degraded > 0 && 4336 mddev->recovery_cp != MaxSector) { 4337 if (mddev->ok_start_degraded) 4338 printk(KERN_WARNING 4339 "raid5: starting dirty degraded array: %s" 4340 "- data corruption possible.\n", 4341 mdname(mddev)); 4342 else { 4343 printk(KERN_ERR 4344 "raid5: cannot start dirty degraded array for %s\n", 4345 mdname(mddev)); 4346 goto abort; 4347 } 4348 } 4349 4350 { 4351 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4352 if (!mddev->thread) { 4353 printk(KERN_ERR 4354 "raid5: couldn't allocate thread for %s\n", 4355 mdname(mddev)); 4356 goto abort; 4357 } 4358 } 4359 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4360 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4361 if (grow_stripes(conf, conf->max_nr_stripes)) { 4362 printk(KERN_ERR 4363 "raid5: couldn't allocate %dkB for buffers\n", memory); 4364 shrink_stripes(conf); 4365 md_unregister_thread(mddev->thread); 4366 goto abort; 4367 } else 4368 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4369 memory, mdname(mddev)); 4370 4371 if (mddev->degraded == 0) 4372 printk("raid5: raid level %d set %s active with %d out of %d" 4373 " devices, algorithm %d\n", conf->level, mdname(mddev), 4374 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4375 conf->algorithm); 4376 else 4377 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4378 " out of %d devices, algorithm %d\n", conf->level, 4379 mdname(mddev), mddev->raid_disks - mddev->degraded, 4380 mddev->raid_disks, conf->algorithm); 4381 4382 print_raid5_conf(conf); 4383 4384 if (conf->expand_progress != MaxSector) { 4385 printk("...ok start reshape thread\n"); 4386 conf->expand_lo = conf->expand_progress; 4387 atomic_set(&conf->reshape_stripes, 0); 4388 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4389 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4390 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4391 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4392 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4393 "%s_reshape"); 4394 } 4395 4396 /* read-ahead size must cover two whole stripes, which is 4397 * 2 * (datadisks) * chunksize where 'n' is the 
number of raid devices 4398 */ 4399 { 4400 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4401 int stripe = data_disks * 4402 (mddev->chunk_size / PAGE_SIZE); 4403 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4404 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4405 } 4406 4407 /* Ok, everything is just fine now */ 4408 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4409 printk(KERN_WARNING 4410 "raid5: failed to create sysfs attributes for %s\n", 4411 mdname(mddev)); 4412 4413 mddev->queue->unplug_fn = raid5_unplug_device; 4414 mddev->queue->backing_dev_info.congested_data = mddev; 4415 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4416 4417 mddev->array_size = mddev->size * (conf->previous_raid_disks - 4418 conf->max_degraded); 4419 4420 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4421 4422 return 0; 4423abort: 4424 if (conf) { 4425 print_raid5_conf(conf); 4426 safe_put_page(conf->spare_page); 4427 kfree(conf->disks); 4428 kfree(conf->stripe_hashtbl); 4429 kfree(conf); 4430 } 4431 mddev->private = NULL; 4432 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4433 return -EIO; 4434} 4435 4436 4437 4438static int stop(mddev_t *mddev) 4439{ 4440 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4441 4442 md_unregister_thread(mddev->thread); 4443 mddev->thread = NULL; 4444 shrink_stripes(conf); 4445 kfree(conf->stripe_hashtbl); 4446 mddev->queue->backing_dev_info.congested_fn = NULL; 4447 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4448 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4449 kfree(conf->disks); 4450 kfree(conf); 4451 mddev->private = NULL; 4452 return 0; 4453} 4454 4455#ifdef DEBUG 4456static void print_sh (struct seq_file *seq, struct stripe_head *sh) 4457{ 4458 int i; 4459 4460 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4461 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4462 seq_printf(seq, "sh %llu, count %d.\n", 4463 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4464 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4465 for (i = 0; i < sh->disks; i++) { 4466 seq_printf(seq, "(cache%d: %p %ld) ", 4467 i, sh->dev[i].page, sh->dev[i].flags); 4468 } 4469 seq_printf(seq, "\n"); 4470} 4471 4472static void printall (struct seq_file *seq, raid5_conf_t *conf) 4473{ 4474 struct stripe_head *sh; 4475 struct hlist_node *hn; 4476 int i; 4477 4478 spin_lock_irq(&conf->device_lock); 4479 for (i = 0; i < NR_HASH; i++) { 4480 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4481 if (sh->raid_conf != conf) 4482 continue; 4483 print_sh(seq, sh); 4484 } 4485 } 4486 spin_unlock_irq(&conf->device_lock); 4487} 4488#endif 4489 4490static void status (struct seq_file *seq, mddev_t *mddev) 4491{ 4492 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4493 int i; 4494 4495 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4496 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4497 for (i = 0; i < conf->raid_disks; i++) 4498 seq_printf (seq, "%s", 4499 conf->disks[i].rdev && 4500 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4501 seq_printf (seq, "]"); 4502#ifdef DEBUG 4503 seq_printf (seq, "\n"); 4504 printall(seq, conf); 4505#endif 4506} 4507 4508static void print_raid5_conf (raid5_conf_t *conf) 4509{ 4510 int i; 4511 struct disk_info *tmp; 4512 4513 printk("RAID5 conf printout:\n"); 4514 if (!conf) { 4515 printk("(conf==NULL)\n"); 4516 return; 4517 } 4518 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4519 conf->raid_disks - conf->mddev->degraded); 4520 4521 for (i = 0; i < conf->raid_disks; i++) { 4522 char b[BDEVNAME_SIZE]; 4523 tmp = conf->disks + i; 4524 if (tmp->rdev) 4525 printk(" disk %d, o:%d, dev:%s\n", 4526 i, !test_bit(Faulty, &tmp->rdev->flags), 4527 bdevname(tmp->rdev->bdev,b)); 4528 } 4529} 4530 4531static int raid5_spare_active(mddev_t *mddev) 4532{ 4533 int i; 4534 raid5_conf_t *conf = mddev->private; 4535 struct disk_info *tmp; 4536 4537 for (i = 0; i < conf->raid_disks; i++) { 4538 tmp = conf->disks + i; 4539 if (tmp->rdev 4540 && !test_bit(Faulty, &tmp->rdev->flags) 4541 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4542 unsigned long flags; 4543 spin_lock_irqsave(&conf->device_lock, flags); 4544 mddev->degraded--; 4545 spin_unlock_irqrestore(&conf->device_lock, flags); 4546 } 4547 } 4548 print_raid5_conf(conf); 4549 return 0; 4550} 4551 4552static int raid5_remove_disk(mddev_t *mddev, int number) 4553{ 4554 raid5_conf_t *conf = mddev->private; 4555 int err = 0; 4556 mdk_rdev_t *rdev; 4557 struct disk_info *p = conf->disks + number; 4558 4559 print_raid5_conf(conf); 4560 rdev = p->rdev; 4561 if (rdev) { 4562 if (test_bit(In_sync, &rdev->flags) || 4563 atomic_read(&rdev->nr_pending)) { 4564 err = -EBUSY; 4565 goto abort; 4566 } 4567 p->rdev = NULL; 4568 synchronize_rcu(); 4569 if (atomic_read(&rdev->nr_pending)) { 4570 /* lost the race, try later */ 4571 err = -EBUSY; 4572 p->rdev = rdev; 4573 } 4574 } 4575abort: 4576 4577 print_raid5_conf(conf); 4578 return err; 4579} 4580 4581static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4582{ 4583 raid5_conf_t *conf = mddev->private; 4584 int found = 0; 4585 int disk; 4586 struct disk_info *p; 4587 4588 if (mddev->degraded > conf->max_degraded) 4589 /* no point adding a device */ 4590 return 0; 4591 4592 /* 4593 * find the disk ... but prefer rdev->saved_raid_disk 4594 * if possible. 4595 */ 4596 if (rdev->saved_raid_disk >= 0 && 4597 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4598 disk = rdev->saved_raid_disk; 4599 else 4600 disk = 0; 4601 for ( ; disk < conf->raid_disks; disk++) 4602 if ((p=conf->disks + disk)->rdev == NULL) { 4603 clear_bit(In_sync, &rdev->flags); 4604 rdev->raid_disk = disk; 4605 found = 1; 4606 if (rdev->saved_raid_disk != disk) 4607 conf->fullsync = 1; 4608 rcu_assign_pointer(p->rdev, rdev); 4609 break; 4610 } 4611 print_raid5_conf(conf); 4612 return found; 4613} 4614 4615static int raid5_resize(mddev_t *mddev, sector_t sectors) 4616{ 4617 /* no resync is happening, and there is enough space 4618 * on all devices, so we can resize. 4619 * We need to make sure resync covers any new space. 4620 * If the array is shrinking we should possibly wait until 4621 * any io in the removed space completes, but it hardly seems 4622 * worth it. 
4623 */ 4624 raid5_conf_t *conf = mddev_to_conf(mddev); 4625 4626 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4627 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 4628 set_capacity(mddev->gendisk, mddev->array_size << 1); 4629 mddev->changed = 1; 4630 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4631 mddev->recovery_cp = mddev->size << 1; 4632 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4633 } 4634 mddev->size = sectors /2; 4635 mddev->resync_max_sectors = sectors; 4636 return 0; 4637} 4638 4639#ifdef CONFIG_MD_RAID5_RESHAPE 4640static int raid5_check_reshape(mddev_t *mddev) 4641{ 4642 raid5_conf_t *conf = mddev_to_conf(mddev); 4643 int err; 4644 4645 if (mddev->delta_disks < 0 || 4646 mddev->new_level != mddev->level) 4647 return -EINVAL; /* Cannot shrink array or change level yet */ 4648 if (mddev->delta_disks == 0) 4649 return 0; /* nothing to do */ 4650 4651 /* Can only proceed if there are plenty of stripe_heads. 4652 * We need a minimum of one full stripe, and for sensible progress 4653 * it is best to have about 4 times that. 4654 * If we require 4 times, then the default 256 4K stripe_heads will 4655 * allow for chunk sizes up to 256K, which is probably OK. 4656 * If the chunk size is greater, user-space should request more 4657 * stripe_heads first. 4658 */ 4659 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4660 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4661 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4662 (mddev->chunk_size / STRIPE_SIZE)*4); 4663 return -ENOSPC; 4664 } 4665 4666 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4667 if (err) 4668 return err; 4669 4670 if (mddev->degraded > conf->max_degraded) 4671 return -EINVAL; 4672 /* looks like we might be able to manage this */ 4673 return 0; 4674} 4675 4676static int raid5_start_reshape(mddev_t *mddev) 4677{ 4678 raid5_conf_t *conf = mddev_to_conf(mddev); 4679 mdk_rdev_t *rdev; 4680 struct list_head *rtmp; 4681 int spares = 0; 4682 int added_devices = 0; 4683 unsigned long flags; 4684 4685 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4686 return -EBUSY; 4687 4688 rdev_for_each(rdev, rtmp, mddev) 4689 if (rdev->raid_disk < 0 && 4690 !test_bit(Faulty, &rdev->flags)) 4691 spares++; 4692 4693 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4694 /* Not enough devices even to make a degraded array 4695 * of that size 4696 */ 4697 return -EINVAL; 4698 4699 atomic_set(&conf->reshape_stripes, 0); 4700 spin_lock_irq(&conf->device_lock); 4701 conf->previous_raid_disks = conf->raid_disks; 4702 conf->raid_disks += mddev->delta_disks; 4703 conf->expand_progress = 0; 4704 conf->expand_lo = 0; 4705 spin_unlock_irq(&conf->device_lock); 4706 4707 /* Add some new drives, as many as will fit. 4708 * We know there are enough to make the newly sized array work.
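 *
 * (Added example) the spares check above means that growing a RAID5
 * by two devices (delta_disks == 2, max_degraded == 1) needs at least
 * one usable spare; with exactly one, a single device is added here
 * and the reshaped array starts out degraded by one until a further
 * spare is hot-added and recovered.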
4709 */ 4710 rdev_for_each(rdev, rtmp, mddev) 4711 if (rdev->raid_disk < 0 && 4712 !test_bit(Faulty, &rdev->flags)) { 4713 if (raid5_add_disk(mddev, rdev)) { 4714 char nm[20]; 4715 set_bit(In_sync, &rdev->flags); 4716 added_devices++; 4717 rdev->recovery_offset = 0; 4718 sprintf(nm, "rd%d", rdev->raid_disk); 4719 if (sysfs_create_link(&mddev->kobj, 4720 &rdev->kobj, nm)) 4721 printk(KERN_WARNING 4722 "raid5: failed to create " 4723 " link %s for %s\n", 4724 nm, mdname(mddev)); 4725 } else 4726 break; 4727 } 4728 4729 spin_lock_irqsave(&conf->device_lock, flags); 4730 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 4731 spin_unlock_irqrestore(&conf->device_lock, flags); 4732 mddev->raid_disks = conf->raid_disks; 4733 mddev->reshape_position = 0; 4734 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4735 4736 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4737 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4738 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4739 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4740 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4741 "%s_reshape"); 4742 if (!mddev->sync_thread) { 4743 mddev->recovery = 0; 4744 spin_lock_irq(&conf->device_lock); 4745 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4746 conf->expand_progress = MaxSector; 4747 spin_unlock_irq(&conf->device_lock); 4748 return -EAGAIN; 4749 } 4750 md_wakeup_thread(mddev->sync_thread); 4751 md_new_event(mddev); 4752 return 0; 4753} 4754#endif 4755 4756static void end_reshape(raid5_conf_t *conf) 4757{ 4758 struct block_device *bdev; 4759 4760 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4761 conf->mddev->array_size = conf->mddev->size * 4762 (conf->raid_disks - conf->max_degraded); 4763 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4764 conf->mddev->changed = 1; 4765 4766 bdev = bdget_disk(conf->mddev->gendisk, 0); 4767 if (bdev) { 4768 mutex_lock(&bdev->bd_inode->i_mutex); 4769 i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10); 4770 mutex_unlock(&bdev->bd_inode->i_mutex); 4771 bdput(bdev); 4772 } 4773 spin_lock_irq(&conf->device_lock); 4774 conf->expand_progress = MaxSector; 4775 spin_unlock_irq(&conf->device_lock); 4776 conf->mddev->reshape_position = MaxSector; 4777 4778 /* read-ahead size must cover two whole stripes, which is 4779 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4780 */ 4781 { 4782 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4783 int stripe = data_disks * 4784 (conf->mddev->chunk_size / PAGE_SIZE); 4785 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4786 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4787 } 4788 } 4789} 4790 4791static void raid5_quiesce(mddev_t *mddev, int state) 4792{ 4793 raid5_conf_t *conf = mddev_to_conf(mddev); 4794 4795 switch(state) { 4796 case 2: /* resume for a suspend */ 4797 wake_up(&conf->wait_for_overlap); 4798 break; 4799 4800 case 1: /* stop all writes */ 4801 spin_lock_irq(&conf->device_lock); 4802 conf->quiesce = 1; 4803 wait_event_lock_irq(conf->wait_for_stripe, 4804 atomic_read(&conf->active_stripes) == 0 && 4805 atomic_read(&conf->active_aligned_reads) == 0, 4806 conf->device_lock, /* nothing */); 4807 spin_unlock_irq(&conf->device_lock); 4808 break; 4809 4810 case 0: /* re-enable writes */ 4811 spin_lock_irq(&conf->device_lock); 4812 conf->quiesce = 0; 4813 wake_up(&conf->wait_for_stripe); 4814 wake_up(&conf->wait_for_overlap); 4815 
spin_unlock_irq(&conf->device_lock); 4816 break; 4817 } 4818} 4819 4820static struct mdk_personality raid6_personality = 4821{ 4822 .name = "raid6", 4823 .level = 6, 4824 .owner = THIS_MODULE, 4825 .make_request = make_request, 4826 .run = run, 4827 .stop = stop, 4828 .status = status, 4829 .error_handler = error, 4830 .hot_add_disk = raid5_add_disk, 4831 .hot_remove_disk= raid5_remove_disk, 4832 .spare_active = raid5_spare_active, 4833 .sync_request = sync_request, 4834 .resize = raid5_resize, 4835#ifdef CONFIG_MD_RAID5_RESHAPE 4836 .check_reshape = raid5_check_reshape, 4837 .start_reshape = raid5_start_reshape, 4838#endif 4839 .quiesce = raid5_quiesce, 4840}; 4841static struct mdk_personality raid5_personality = 4842{ 4843 .name = "raid5", 4844 .level = 5, 4845 .owner = THIS_MODULE, 4846 .make_request = make_request, 4847 .run = run, 4848 .stop = stop, 4849 .status = status, 4850 .error_handler = error, 4851 .hot_add_disk = raid5_add_disk, 4852 .hot_remove_disk= raid5_remove_disk, 4853 .spare_active = raid5_spare_active, 4854 .sync_request = sync_request, 4855 .resize = raid5_resize, 4856#ifdef CONFIG_MD_RAID5_RESHAPE 4857 .check_reshape = raid5_check_reshape, 4858 .start_reshape = raid5_start_reshape, 4859#endif 4860 .quiesce = raid5_quiesce, 4861}; 4862 4863static struct mdk_personality raid4_personality = 4864{ 4865 .name = "raid4", 4866 .level = 4, 4867 .owner = THIS_MODULE, 4868 .make_request = make_request, 4869 .run = run, 4870 .stop = stop, 4871 .status = status, 4872 .error_handler = error, 4873 .hot_add_disk = raid5_add_disk, 4874 .hot_remove_disk= raid5_remove_disk, 4875 .spare_active = raid5_spare_active, 4876 .sync_request = sync_request, 4877 .resize = raid5_resize, 4878#ifdef CONFIG_MD_RAID5_RESHAPE 4879 .check_reshape = raid5_check_reshape, 4880 .start_reshape = raid5_start_reshape, 4881#endif 4882 .quiesce = raid5_quiesce, 4883}; 4884 4885static int __init raid5_init(void) 4886{ 4887 int e; 4888 4889 e = raid6_select_algo(); 4890 if ( e ) 4891 return e; 4892 register_md_personality(&raid6_personality); 4893 register_md_personality(&raid5_personality); 4894 register_md_personality(&raid4_personality); 4895 return 0; 4896} 4897 4898static void raid5_exit(void) 4899{ 4900 unregister_md_personality(&raid6_personality); 4901 unregister_md_personality(&raid5_personality); 4902 unregister_md_personality(&raid4_personality); 4903} 4904 4905module_init(raid5_init); 4906module_exit(raid5_exit); 4907MODULE_LICENSE("GPL"); 4908MODULE_ALIAS("md-personality-4"); /* RAID5 */ 4909MODULE_ALIAS("md-raid5"); 4910MODULE_ALIAS("md-raid4"); 4911MODULE_ALIAS("md-level-5"); 4912MODULE_ALIAS("md-level-4"); 4913MODULE_ALIAS("md-personality-8"); /* RAID6 */ 4914MODULE_ALIAS("md-raid6"); 4915MODULE_ALIAS("md-level-6"); 4916 4917/* This used to be two separate modules, they were: */ 4918MODULE_ALIAS("raid5"); 4919MODULE_ALIAS("raid6");