Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.16-rc6 2430 lines 68 kB view raw
1/* 2 * raid6main.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-6 management functions. This code is derived from raid5.c. 8 * Last merge from raid5.c bkcvs version 1.79 (kernel 2.6.1). 9 * 10 * Thanks to Penguin Computing for making the RAID-6 development possible 11 * by donating a test server! 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2, or (at your option) 16 * any later version. 17 * 18 * You should have received a copy of the GNU General Public License 19 * (for example /usr/src/linux/COPYING); if not, write to the Free 20 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 24#include <linux/config.h> 25#include <linux/module.h> 26#include <linux/slab.h> 27#include <linux/highmem.h> 28#include <linux/bitops.h> 29#include <asm/atomic.h> 30#include "raid6.h" 31 32#include <linux/raid/bitmap.h> 33 34/* 35 * Stripe cache 36 */ 37 38#define NR_STRIPES 256 39#define STRIPE_SIZE PAGE_SIZE 40#define STRIPE_SHIFT (PAGE_SHIFT - 9) 41#define STRIPE_SECTORS (STRIPE_SIZE>>9) 42#define IO_THRESHOLD 1 43#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 44#define HASH_MASK (NR_HASH - 1) 45 46#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 47 48/* bio's attached to a stripe+device for I/O are linked together in bi_sector 49 * order without overlap. There may be several bio's per stripe+device, and 50 * a bio could span several devices. 51 * When walking this list for a particular stripe+device, we must never proceed 52 * beyond a bio that extends past this device, as the next bio might no longer 53 * be valid. 54 * This macro is used to determine the 'next' bio in the list, given the sector 55 * of the current stripe+device 56 */ 57#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL) 58/* 59 * The following can be used to debug the driver 60 */ 61#define RAID6_DEBUG 0 /* Extremely verbose printk */ 62#define RAID6_PARANOIA 1 /* Check spinlocks */ 63#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */ 64#if RAID6_PARANOIA && defined(CONFIG_SMP) 65# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 66#else 67# define CHECK_DEVLOCK() 68#endif 69 70#define PRINTK(x...) ((void)(RAID6_DEBUG && printk(KERN_DEBUG x))) 71#if RAID6_DEBUG 72#undef inline 73#undef __inline__ 74#define inline 75#define __inline__ 76#endif 77 78#if !RAID6_USE_EMPTY_ZERO_PAGE 79/* In .bss so it's zeroed */ 80const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 81#endif 82 83static inline int raid6_next_disk(int disk, int raid_disks) 84{ 85 disk++; 86 return (disk < raid_disks) ? disk : 0; 87} 88 89static void print_raid6_conf (raid6_conf_t *conf); 90 91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) 92{ 93 if (atomic_dec_and_test(&sh->count)) { 94 if (!list_empty(&sh->lru)) 95 BUG(); 96 if (atomic_read(&conf->active_stripes)==0) 97 BUG(); 98 if (test_bit(STRIPE_HANDLE, &sh->state)) { 99 if (test_bit(STRIPE_DELAYED, &sh->state)) 100 list_add_tail(&sh->lru, &conf->delayed_list); 101 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 102 conf->seq_write == sh->bm_seq) 103 list_add_tail(&sh->lru, &conf->bitmap_list); 104 else { 105 clear_bit(STRIPE_BIT_DELAY, &sh->state); 106 list_add_tail(&sh->lru, &conf->handle_list); 107 } 108 md_wakeup_thread(conf->mddev->thread); 109 } else { 110 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 111 atomic_dec(&conf->preread_active_stripes); 112 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 113 md_wakeup_thread(conf->mddev->thread); 114 } 115 list_add_tail(&sh->lru, &conf->inactive_list); 116 atomic_dec(&conf->active_stripes); 117 if (!conf->inactive_blocked || 118 atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4)) 119 wake_up(&conf->wait_for_stripe); 120 } 121 } 122} 123static void release_stripe(struct stripe_head *sh) 124{ 125 raid6_conf_t *conf = sh->raid_conf; 126 unsigned long flags; 127 128 spin_lock_irqsave(&conf->device_lock, flags); 129 __release_stripe(conf, sh); 130 spin_unlock_irqrestore(&conf->device_lock, flags); 131} 132 133static inline void remove_hash(struct stripe_head *sh) 134{ 135 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 136 137 hlist_del_init(&sh->hash); 138} 139 140static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) 141{ 142 struct hlist_head *hp = stripe_hash(conf, sh->sector); 143 144 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 145 146 CHECK_DEVLOCK(); 147 hlist_add_head(&sh->hash, hp); 148} 149 150 151/* find an idle stripe, make sure it is unhashed, and return it. */ 152static struct stripe_head *get_free_stripe(raid6_conf_t *conf) 153{ 154 struct stripe_head *sh = NULL; 155 struct list_head *first; 156 157 CHECK_DEVLOCK(); 158 if (list_empty(&conf->inactive_list)) 159 goto out; 160 first = conf->inactive_list.next; 161 sh = list_entry(first, struct stripe_head, lru); 162 list_del_init(first); 163 remove_hash(sh); 164 atomic_inc(&conf->active_stripes); 165out: 166 return sh; 167} 168 169static void shrink_buffers(struct stripe_head *sh, int num) 170{ 171 struct page *p; 172 int i; 173 174 for (i=0; i<num ; i++) { 175 p = sh->dev[i].page; 176 if (!p) 177 continue; 178 sh->dev[i].page = NULL; 179 put_page(p); 180 } 181} 182 183static int grow_buffers(struct stripe_head *sh, int num) 184{ 185 int i; 186 187 for (i=0; i<num; i++) { 188 struct page *page; 189 190 if (!(page = alloc_page(GFP_KERNEL))) { 191 return 1; 192 } 193 sh->dev[i].page = page; 194 } 195 return 0; 196} 197 198static void raid6_build_block (struct stripe_head *sh, int i); 199 200static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx) 201{ 202 raid6_conf_t *conf = sh->raid_conf; 203 int disks = conf->raid_disks, i; 204 205 if (atomic_read(&sh->count) != 0) 206 BUG(); 207 if (test_bit(STRIPE_HANDLE, &sh->state)) 208 BUG(); 209 210 CHECK_DEVLOCK(); 211 PRINTK("init_stripe called, stripe %llu\n", 212 (unsigned long long)sh->sector); 213 214 remove_hash(sh); 215 216 sh->sector = sector; 217 sh->pd_idx = pd_idx; 218 sh->state = 0; 219 220 for (i=disks; i--; ) { 221 struct r5dev *dev = &sh->dev[i]; 222 223 if (dev->toread || dev->towrite || dev->written || 224 test_bit(R5_LOCKED, &dev->flags)) { 225 PRINTK("sector=%llx i=%d %p %p %p %d\n", 226 (unsigned long long)sh->sector, i, dev->toread, 227 dev->towrite, dev->written, 228 test_bit(R5_LOCKED, &dev->flags)); 229 BUG(); 230 } 231 dev->flags = 0; 232 raid6_build_block(sh, i); 233 } 234 insert_hash(conf, sh); 235} 236 237static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector) 238{ 239 struct stripe_head *sh; 240 struct hlist_node *hn; 241 242 CHECK_DEVLOCK(); 243 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 244 hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash) 245 if (sh->sector == sector) 246 return sh; 247 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 248 return NULL; 249} 250 251static void unplug_slaves(mddev_t *mddev); 252 253static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector, 254 int pd_idx, int noblock) 255{ 256 struct stripe_head *sh; 257 258 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector); 259 260 spin_lock_irq(&conf->device_lock); 261 262 do { 263 wait_event_lock_irq(conf->wait_for_stripe, 264 conf->quiesce == 0, 265 conf->device_lock, /* nothing */); 266 sh = __find_stripe(conf, sector); 267 if (!sh) { 268 if (!conf->inactive_blocked) 269 sh = get_free_stripe(conf); 270 if (noblock && sh == NULL) 271 break; 272 if (!sh) { 273 conf->inactive_blocked = 1; 274 wait_event_lock_irq(conf->wait_for_stripe, 275 !list_empty(&conf->inactive_list) && 276 (atomic_read(&conf->active_stripes) 277 < (conf->max_nr_stripes *3/4) 278 || !conf->inactive_blocked), 279 conf->device_lock, 280 unplug_slaves(conf->mddev); 281 ); 282 conf->inactive_blocked = 0; 283 } else 284 init_stripe(sh, sector, pd_idx); 285 } else { 286 if (atomic_read(&sh->count)) { 287 if (!list_empty(&sh->lru)) 288 BUG(); 289 } else { 290 if (!test_bit(STRIPE_HANDLE, &sh->state)) 291 atomic_inc(&conf->active_stripes); 292 if (list_empty(&sh->lru)) 293 BUG(); 294 list_del_init(&sh->lru); 295 } 296 } 297 } while (sh == NULL); 298 299 if (sh) 300 atomic_inc(&sh->count); 301 302 spin_unlock_irq(&conf->device_lock); 303 return sh; 304} 305 306static int grow_one_stripe(raid6_conf_t *conf) 307{ 308 struct stripe_head *sh; 309 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 310 if (!sh) 311 return 0; 312 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 313 sh->raid_conf = conf; 314 spin_lock_init(&sh->lock); 315 316 if (grow_buffers(sh, conf->raid_disks)) { 317 shrink_buffers(sh, conf->raid_disks); 318 kmem_cache_free(conf->slab_cache, sh); 319 return 0; 320 } 321 /* we just created an active stripe so... */ 322 atomic_set(&sh->count, 1); 323 atomic_inc(&conf->active_stripes); 324 INIT_LIST_HEAD(&sh->lru); 325 release_stripe(sh); 326 return 1; 327} 328 329static int grow_stripes(raid6_conf_t *conf, int num) 330{ 331 kmem_cache_t *sc; 332 int devs = conf->raid_disks; 333 334 sprintf(conf->cache_name, "raid6/%s", mdname(conf->mddev)); 335 336 sc = kmem_cache_create(conf->cache_name, 337 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 338 0, 0, NULL, NULL); 339 if (!sc) 340 return 1; 341 conf->slab_cache = sc; 342 while (num--) 343 if (!grow_one_stripe(conf)) 344 return 1; 345 return 0; 346} 347 348static int drop_one_stripe(raid6_conf_t *conf) 349{ 350 struct stripe_head *sh; 351 spin_lock_irq(&conf->device_lock); 352 sh = get_free_stripe(conf); 353 spin_unlock_irq(&conf->device_lock); 354 if (!sh) 355 return 0; 356 if (atomic_read(&sh->count)) 357 BUG(); 358 shrink_buffers(sh, conf->raid_disks); 359 kmem_cache_free(conf->slab_cache, sh); 360 atomic_dec(&conf->active_stripes); 361 return 1; 362} 363 364static void shrink_stripes(raid6_conf_t *conf) 365{ 366 while (drop_one_stripe(conf)) 367 ; 368 369 if (conf->slab_cache) 370 kmem_cache_destroy(conf->slab_cache); 371 conf->slab_cache = NULL; 372} 373 374static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done, 375 int error) 376{ 377 struct stripe_head *sh = bi->bi_private; 378 raid6_conf_t *conf = sh->raid_conf; 379 int disks = conf->raid_disks, i; 380 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 381 382 if (bi->bi_size) 383 return 1; 384 385 for (i=0 ; i<disks; i++) 386 if (bi == &sh->dev[i].req) 387 break; 388 389 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", 390 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 391 uptodate); 392 if (i == disks) { 393 BUG(); 394 return 0; 395 } 396 397 if (uptodate) { 398#if 0 399 struct bio *bio; 400 unsigned long flags; 401 spin_lock_irqsave(&conf->device_lock, flags); 402 /* we can return a buffer if we bypassed the cache or 403 * if the top buffer is not in highmem. If there are 404 * multiple buffers, leave the extra work to 405 * handle_stripe 406 */ 407 buffer = sh->bh_read[i]; 408 if (buffer && 409 (!PageHighMem(buffer->b_page) 410 || buffer->b_page == bh->b_page ) 411 ) { 412 sh->bh_read[i] = buffer->b_reqnext; 413 buffer->b_reqnext = NULL; 414 } else 415 buffer = NULL; 416 spin_unlock_irqrestore(&conf->device_lock, flags); 417 if (sh->bh_page[i]==bh->b_page) 418 set_buffer_uptodate(bh); 419 if (buffer) { 420 if (buffer->b_page != bh->b_page) 421 memcpy(buffer->b_data, bh->b_data, bh->b_size); 422 buffer->b_end_io(buffer, 1); 423 } 424#else 425 set_bit(R5_UPTODATE, &sh->dev[i].flags); 426#endif 427 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 428 printk(KERN_INFO "raid6: read error corrected!!\n"); 429 clear_bit(R5_ReadError, &sh->dev[i].flags); 430 clear_bit(R5_ReWrite, &sh->dev[i].flags); 431 } 432 if (atomic_read(&conf->disks[i].rdev->read_errors)) 433 atomic_set(&conf->disks[i].rdev->read_errors, 0); 434 } else { 435 int retry = 0; 436 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 437 atomic_inc(&conf->disks[i].rdev->read_errors); 438 if (conf->mddev->degraded) 439 printk(KERN_WARNING "raid6: read error not correctable.\n"); 440 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 441 /* Oh, no!!! */ 442 printk(KERN_WARNING "raid6: read error NOT corrected!!\n"); 443 else if (atomic_read(&conf->disks[i].rdev->read_errors) 444 > conf->max_nr_stripes) 445 printk(KERN_WARNING 446 "raid6: Too many read errors, failing device.\n"); 447 else 448 retry = 1; 449 if (retry) 450 set_bit(R5_ReadError, &sh->dev[i].flags); 451 else { 452 clear_bit(R5_ReadError, &sh->dev[i].flags); 453 clear_bit(R5_ReWrite, &sh->dev[i].flags); 454 md_error(conf->mddev, conf->disks[i].rdev); 455 } 456 } 457 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 458#if 0 459 /* must restore b_page before unlocking buffer... */ 460 if (sh->bh_page[i] != bh->b_page) { 461 bh->b_page = sh->bh_page[i]; 462 bh->b_data = page_address(bh->b_page); 463 clear_buffer_uptodate(bh); 464 } 465#endif 466 clear_bit(R5_LOCKED, &sh->dev[i].flags); 467 set_bit(STRIPE_HANDLE, &sh->state); 468 release_stripe(sh); 469 return 0; 470} 471 472static int raid6_end_write_request (struct bio *bi, unsigned int bytes_done, 473 int error) 474{ 475 struct stripe_head *sh = bi->bi_private; 476 raid6_conf_t *conf = sh->raid_conf; 477 int disks = conf->raid_disks, i; 478 unsigned long flags; 479 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 480 481 if (bi->bi_size) 482 return 1; 483 484 for (i=0 ; i<disks; i++) 485 if (bi == &sh->dev[i].req) 486 break; 487 488 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", 489 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 490 uptodate); 491 if (i == disks) { 492 BUG(); 493 return 0; 494 } 495 496 spin_lock_irqsave(&conf->device_lock, flags); 497 if (!uptodate) 498 md_error(conf->mddev, conf->disks[i].rdev); 499 500 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 501 502 clear_bit(R5_LOCKED, &sh->dev[i].flags); 503 set_bit(STRIPE_HANDLE, &sh->state); 504 __release_stripe(conf, sh); 505 spin_unlock_irqrestore(&conf->device_lock, flags); 506 return 0; 507} 508 509 510static sector_t compute_blocknr(struct stripe_head *sh, int i); 511 512static void raid6_build_block (struct stripe_head *sh, int i) 513{ 514 struct r5dev *dev = &sh->dev[i]; 515 int pd_idx = sh->pd_idx; 516 int qd_idx = raid6_next_disk(pd_idx, sh->raid_conf->raid_disks); 517 518 bio_init(&dev->req); 519 dev->req.bi_io_vec = &dev->vec; 520 dev->req.bi_vcnt++; 521 dev->req.bi_max_vecs++; 522 dev->vec.bv_page = dev->page; 523 dev->vec.bv_len = STRIPE_SIZE; 524 dev->vec.bv_offset = 0; 525 526 dev->req.bi_sector = sh->sector; 527 dev->req.bi_private = sh; 528 529 dev->flags = 0; 530 if (i != pd_idx && i != qd_idx) 531 dev->sector = compute_blocknr(sh, i); 532} 533 534static void error(mddev_t *mddev, mdk_rdev_t *rdev) 535{ 536 char b[BDEVNAME_SIZE]; 537 raid6_conf_t *conf = (raid6_conf_t *) mddev->private; 538 PRINTK("raid6: error called\n"); 539 540 if (!test_bit(Faulty, &rdev->flags)) { 541 mddev->sb_dirty = 1; 542 if (test_bit(In_sync, &rdev->flags)) { 543 conf->working_disks--; 544 mddev->degraded++; 545 conf->failed_disks++; 546 clear_bit(In_sync, &rdev->flags); 547 /* 548 * if recovery was running, make sure it aborts. 549 */ 550 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 551 } 552 set_bit(Faulty, &rdev->flags); 553 printk (KERN_ALERT 554 "raid6: Disk failure on %s, disabling device." 555 " Operation continuing on %d devices\n", 556 bdevname(rdev->bdev,b), conf->working_disks); 557 } 558} 559 560/* 561 * Input: a 'big' sector number, 562 * Output: index of the data and parity disk, and the sector # in them. 563 */ 564static sector_t raid6_compute_sector(sector_t r_sector, unsigned int raid_disks, 565 unsigned int data_disks, unsigned int * dd_idx, 566 unsigned int * pd_idx, raid6_conf_t *conf) 567{ 568 long stripe; 569 unsigned long chunk_number; 570 unsigned int chunk_offset; 571 sector_t new_sector; 572 int sectors_per_chunk = conf->chunk_size >> 9; 573 574 /* First compute the information on this sector */ 575 576 /* 577 * Compute the chunk number and the sector offset inside the chunk 578 */ 579 chunk_offset = sector_div(r_sector, sectors_per_chunk); 580 chunk_number = r_sector; 581 if ( r_sector != chunk_number ) { 582 printk(KERN_CRIT "raid6: ERROR: r_sector = %llu, chunk_number = %lu\n", 583 (unsigned long long)r_sector, (unsigned long)chunk_number); 584 BUG(); 585 } 586 587 /* 588 * Compute the stripe number 589 */ 590 stripe = chunk_number / data_disks; 591 592 /* 593 * Compute the data disk and parity disk indexes inside the stripe 594 */ 595 *dd_idx = chunk_number % data_disks; 596 597 /* 598 * Select the parity disk based on the user selected algorithm. 599 */ 600 601 /**** FIX THIS ****/ 602 switch (conf->algorithm) { 603 case ALGORITHM_LEFT_ASYMMETRIC: 604 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 605 if (*pd_idx == raid_disks-1) 606 (*dd_idx)++; /* Q D D D P */ 607 else if (*dd_idx >= *pd_idx) 608 (*dd_idx) += 2; /* D D P Q D */ 609 break; 610 case ALGORITHM_RIGHT_ASYMMETRIC: 611 *pd_idx = stripe % raid_disks; 612 if (*pd_idx == raid_disks-1) 613 (*dd_idx)++; /* Q D D D P */ 614 else if (*dd_idx >= *pd_idx) 615 (*dd_idx) += 2; /* D D P Q D */ 616 break; 617 case ALGORITHM_LEFT_SYMMETRIC: 618 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 619 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 620 break; 621 case ALGORITHM_RIGHT_SYMMETRIC: 622 *pd_idx = stripe % raid_disks; 623 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 624 break; 625 default: 626 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 627 conf->algorithm); 628 } 629 630 PRINTK("raid6: chunk_number = %lu, pd_idx = %u, dd_idx = %u\n", 631 chunk_number, *pd_idx, *dd_idx); 632 633 /* 634 * Finally, compute the new sector number 635 */ 636 new_sector = (sector_t) stripe * sectors_per_chunk + chunk_offset; 637 return new_sector; 638} 639 640 641static sector_t compute_blocknr(struct stripe_head *sh, int i) 642{ 643 raid6_conf_t *conf = sh->raid_conf; 644 int raid_disks = conf->raid_disks, data_disks = raid_disks - 2; 645 sector_t new_sector = sh->sector, check; 646 int sectors_per_chunk = conf->chunk_size >> 9; 647 sector_t stripe; 648 int chunk_offset; 649 int chunk_number, dummy1, dummy2, dd_idx = i; 650 sector_t r_sector; 651 int i0 = i; 652 653 chunk_offset = sector_div(new_sector, sectors_per_chunk); 654 stripe = new_sector; 655 if ( new_sector != stripe ) { 656 printk(KERN_CRIT "raid6: ERROR: new_sector = %llu, stripe = %lu\n", 657 (unsigned long long)new_sector, (unsigned long)stripe); 658 BUG(); 659 } 660 661 switch (conf->algorithm) { 662 case ALGORITHM_LEFT_ASYMMETRIC: 663 case ALGORITHM_RIGHT_ASYMMETRIC: 664 if (sh->pd_idx == raid_disks-1) 665 i--; /* Q D D D P */ 666 else if (i > sh->pd_idx) 667 i -= 2; /* D D P Q D */ 668 break; 669 case ALGORITHM_LEFT_SYMMETRIC: 670 case ALGORITHM_RIGHT_SYMMETRIC: 671 if (sh->pd_idx == raid_disks-1) 672 i--; /* Q D D D P */ 673 else { 674 /* D D P Q D */ 675 if (i < sh->pd_idx) 676 i += raid_disks; 677 i -= (sh->pd_idx + 2); 678 } 679 break; 680 default: 681 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 682 conf->algorithm); 683 } 684 685 PRINTK("raid6: compute_blocknr: pd_idx = %u, i0 = %u, i = %u\n", sh->pd_idx, i0, i); 686 687 chunk_number = stripe * data_disks + i; 688 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 689 690 check = raid6_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 691 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 692 printk(KERN_CRIT "raid6: compute_blocknr: map not correct\n"); 693 return 0; 694 } 695 return r_sector; 696} 697 698 699 700/* 701 * Copy data between a page in the stripe cache, and one or more bion 702 * The page could align with the middle of the bio, or there could be 703 * several bion, each with several bio_vecs, which cover part of the page 704 * Multiple bion are linked together on bi_next. There may be extras 705 * at the end of this list. We ignore them. 706 */ 707static void copy_data(int frombio, struct bio *bio, 708 struct page *page, 709 sector_t sector) 710{ 711 char *pa = page_address(page); 712 struct bio_vec *bvl; 713 int i; 714 int page_offset; 715 716 if (bio->bi_sector >= sector) 717 page_offset = (signed)(bio->bi_sector - sector) * 512; 718 else 719 page_offset = (signed)(sector - bio->bi_sector) * -512; 720 bio_for_each_segment(bvl, bio, i) { 721 int len = bio_iovec_idx(bio,i)->bv_len; 722 int clen; 723 int b_offset = 0; 724 725 if (page_offset < 0) { 726 b_offset = -page_offset; 727 page_offset += b_offset; 728 len -= b_offset; 729 } 730 731 if (len > 0 && page_offset + len > STRIPE_SIZE) 732 clen = STRIPE_SIZE - page_offset; 733 else clen = len; 734 735 if (clen > 0) { 736 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 737 if (frombio) 738 memcpy(pa+page_offset, ba+b_offset, clen); 739 else 740 memcpy(ba+b_offset, pa+page_offset, clen); 741 __bio_kunmap_atomic(ba, KM_USER0); 742 } 743 if (clen < len) /* hit end of page */ 744 break; 745 page_offset += len; 746 } 747} 748 749#define check_xor() do { \ 750 if (count == MAX_XOR_BLOCKS) { \ 751 xor_block(count, STRIPE_SIZE, ptr); \ 752 count = 1; \ 753 } \ 754 } while(0) 755 756/* Compute P and Q syndromes */ 757static void compute_parity(struct stripe_head *sh, int method) 758{ 759 raid6_conf_t *conf = sh->raid_conf; 760 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count; 761 struct bio *chosen; 762 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 763 void *ptrs[disks]; 764 765 qd_idx = raid6_next_disk(pd_idx, disks); 766 d0_idx = raid6_next_disk(qd_idx, disks); 767 768 PRINTK("compute_parity, stripe %llu, method %d\n", 769 (unsigned long long)sh->sector, method); 770 771 switch(method) { 772 case READ_MODIFY_WRITE: 773 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 774 case RECONSTRUCT_WRITE: 775 for (i= disks; i-- ;) 776 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 777 chosen = sh->dev[i].towrite; 778 sh->dev[i].towrite = NULL; 779 780 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 781 wake_up(&conf->wait_for_overlap); 782 783 if (sh->dev[i].written) BUG(); 784 sh->dev[i].written = chosen; 785 } 786 break; 787 case CHECK_PARITY: 788 BUG(); /* Not implemented yet */ 789 } 790 791 for (i = disks; i--;) 792 if (sh->dev[i].written) { 793 sector_t sector = sh->dev[i].sector; 794 struct bio *wbi = sh->dev[i].written; 795 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 796 copy_data(1, wbi, sh->dev[i].page, sector); 797 wbi = r5_next_bio(wbi, sector); 798 } 799 800 set_bit(R5_LOCKED, &sh->dev[i].flags); 801 set_bit(R5_UPTODATE, &sh->dev[i].flags); 802 } 803 804// switch(method) { 805// case RECONSTRUCT_WRITE: 806// case CHECK_PARITY: 807// case UPDATE_PARITY: 808 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 809 /* FIX: Is this ordering of drives even remotely optimal? */ 810 count = 0; 811 i = d0_idx; 812 do { 813 ptrs[count++] = page_address(sh->dev[i].page); 814 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 815 printk("block %d/%d not uptodate on parity calc\n", i,count); 816 i = raid6_next_disk(i, disks); 817 } while ( i != d0_idx ); 818// break; 819// } 820 821 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 822 823 switch(method) { 824 case RECONSTRUCT_WRITE: 825 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 826 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 827 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 828 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 829 break; 830 case UPDATE_PARITY: 831 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 832 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 833 break; 834 } 835} 836 837/* Compute one missing block */ 838static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 839{ 840 raid6_conf_t *conf = sh->raid_conf; 841 int i, count, disks = conf->raid_disks; 842 void *ptr[MAX_XOR_BLOCKS], *p; 843 int pd_idx = sh->pd_idx; 844 int qd_idx = raid6_next_disk(pd_idx, disks); 845 846 PRINTK("compute_block_1, stripe %llu, idx %d\n", 847 (unsigned long long)sh->sector, dd_idx); 848 849 if ( dd_idx == qd_idx ) { 850 /* We're actually computing the Q drive */ 851 compute_parity(sh, UPDATE_PARITY); 852 } else { 853 ptr[0] = page_address(sh->dev[dd_idx].page); 854 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE); 855 count = 1; 856 for (i = disks ; i--; ) { 857 if (i == dd_idx || i == qd_idx) 858 continue; 859 p = page_address(sh->dev[i].page); 860 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 861 ptr[count++] = p; 862 else 863 printk("compute_block() %d, stripe %llu, %d" 864 " not present\n", dd_idx, 865 (unsigned long long)sh->sector, i); 866 867 check_xor(); 868 } 869 if (count != 1) 870 xor_block(count, STRIPE_SIZE, ptr); 871 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 872 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 873 } 874} 875 876/* Compute two missing blocks */ 877static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 878{ 879 raid6_conf_t *conf = sh->raid_conf; 880 int i, count, disks = conf->raid_disks; 881 int pd_idx = sh->pd_idx; 882 int qd_idx = raid6_next_disk(pd_idx, disks); 883 int d0_idx = raid6_next_disk(qd_idx, disks); 884 int faila, failb; 885 886 /* faila and failb are disk numbers relative to d0_idx */ 887 /* pd_idx become disks-2 and qd_idx become disks-1 */ 888 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 889 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 890 891 BUG_ON(faila == failb); 892 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 893 894 PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 895 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 896 897 if ( failb == disks-1 ) { 898 /* Q disk is one of the missing disks */ 899 if ( faila == disks-2 ) { 900 /* Missing P+Q, just recompute */ 901 compute_parity(sh, UPDATE_PARITY); 902 return; 903 } else { 904 /* We're missing D+Q; recompute D from P */ 905 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 906 compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */ 907 return; 908 } 909 } 910 911 /* We're missing D+P or D+D; build pointer table */ 912 { 913 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 914 void *ptrs[disks]; 915 916 count = 0; 917 i = d0_idx; 918 do { 919 ptrs[count++] = page_address(sh->dev[i].page); 920 i = raid6_next_disk(i, disks); 921 if (i != dd_idx1 && i != dd_idx2 && 922 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 923 printk("compute_2 with missing block %d/%d\n", count, i); 924 } while ( i != d0_idx ); 925 926 if ( failb == disks-2 ) { 927 /* We're missing D+P. */ 928 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 929 } else { 930 /* We're missing D+D. */ 931 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 932 } 933 934 /* Both the above update both missing blocks */ 935 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 936 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 937 } 938} 939 940 941/* 942 * Each stripe/dev can have one or more bion attached. 943 * toread/towrite point to the first in a chain. 944 * The bi_next chain must be in order. 945 */ 946static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 947{ 948 struct bio **bip; 949 raid6_conf_t *conf = sh->raid_conf; 950 int firstwrite=0; 951 952 PRINTK("adding bh b#%llu to stripe s#%llu\n", 953 (unsigned long long)bi->bi_sector, 954 (unsigned long long)sh->sector); 955 956 957 spin_lock(&sh->lock); 958 spin_lock_irq(&conf->device_lock); 959 if (forwrite) { 960 bip = &sh->dev[dd_idx].towrite; 961 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 962 firstwrite = 1; 963 } else 964 bip = &sh->dev[dd_idx].toread; 965 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 966 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 967 goto overlap; 968 bip = &(*bip)->bi_next; 969 } 970 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 971 goto overlap; 972 973 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 974 BUG(); 975 if (*bip) 976 bi->bi_next = *bip; 977 *bip = bi; 978 bi->bi_phys_segments ++; 979 spin_unlock_irq(&conf->device_lock); 980 spin_unlock(&sh->lock); 981 982 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n", 983 (unsigned long long)bi->bi_sector, 984 (unsigned long long)sh->sector, dd_idx); 985 986 if (conf->mddev->bitmap && firstwrite) { 987 sh->bm_seq = conf->seq_write; 988 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 989 STRIPE_SECTORS, 0); 990 set_bit(STRIPE_BIT_DELAY, &sh->state); 991 } 992 993 if (forwrite) { 994 /* check if page is covered */ 995 sector_t sector = sh->dev[dd_idx].sector; 996 for (bi=sh->dev[dd_idx].towrite; 997 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 998 bi && bi->bi_sector <= sector; 999 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1000 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1001 sector = bi->bi_sector + (bi->bi_size>>9); 1002 } 1003 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1004 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1005 } 1006 return 1; 1007 1008 overlap: 1009 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1010 spin_unlock_irq(&conf->device_lock); 1011 spin_unlock(&sh->lock); 1012 return 0; 1013} 1014 1015 1016static int page_is_zero(struct page *p) 1017{ 1018 char *a = page_address(p); 1019 return ((*(u32*)a) == 0 && 1020 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1021} 1022/* 1023 * handle_stripe - do things to a stripe. 1024 * 1025 * We lock the stripe and then examine the state of various bits 1026 * to see what needs to be done. 1027 * Possible results: 1028 * return some read request which now have data 1029 * return some write requests which are safely on disc 1030 * schedule a read on some buffers 1031 * schedule a write of some buffers 1032 * return confirmation of parity correctness 1033 * 1034 * Parity calculations are done inside the stripe lock 1035 * buffers are taken off read_list or write_list, and bh_cache buffers 1036 * get BH_Lock set before the stripe lock is released. 1037 * 1038 */ 1039 1040static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 1041{ 1042 raid6_conf_t *conf = sh->raid_conf; 1043 int disks = conf->raid_disks; 1044 struct bio *return_bi= NULL; 1045 struct bio *bi; 1046 int i; 1047 int syncing; 1048 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0; 1049 int non_overwrite = 0; 1050 int failed_num[2] = {0, 0}; 1051 struct r5dev *dev, *pdev, *qdev; 1052 int pd_idx = sh->pd_idx; 1053 int qd_idx = raid6_next_disk(pd_idx, disks); 1054 int p_failed, q_failed; 1055 1056 PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n", 1057 (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count), 1058 pd_idx, qd_idx); 1059 1060 spin_lock(&sh->lock); 1061 clear_bit(STRIPE_HANDLE, &sh->state); 1062 clear_bit(STRIPE_DELAYED, &sh->state); 1063 1064 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1065 /* Now to look around and see what can be done */ 1066 1067 rcu_read_lock(); 1068 for (i=disks; i--; ) { 1069 mdk_rdev_t *rdev; 1070 dev = &sh->dev[i]; 1071 clear_bit(R5_Insync, &dev->flags); 1072 1073 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1074 i, dev->flags, dev->toread, dev->towrite, dev->written); 1075 /* maybe we can reply to a read */ 1076 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 1077 struct bio *rbi, *rbi2; 1078 PRINTK("Return read for disc %d\n", i); 1079 spin_lock_irq(&conf->device_lock); 1080 rbi = dev->toread; 1081 dev->toread = NULL; 1082 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1083 wake_up(&conf->wait_for_overlap); 1084 spin_unlock_irq(&conf->device_lock); 1085 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1086 copy_data(0, rbi, dev->page, dev->sector); 1087 rbi2 = r5_next_bio(rbi, dev->sector); 1088 spin_lock_irq(&conf->device_lock); 1089 if (--rbi->bi_phys_segments == 0) { 1090 rbi->bi_next = return_bi; 1091 return_bi = rbi; 1092 } 1093 spin_unlock_irq(&conf->device_lock); 1094 rbi = rbi2; 1095 } 1096 } 1097 1098 /* now count some things */ 1099 if (test_bit(R5_LOCKED, &dev->flags)) locked++; 1100 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++; 1101 1102 1103 if (dev->toread) to_read++; 1104 if (dev->towrite) { 1105 to_write++; 1106 if (!test_bit(R5_OVERWRITE, &dev->flags)) 1107 non_overwrite++; 1108 } 1109 if (dev->written) written++; 1110 rdev = rcu_dereference(conf->disks[i].rdev); 1111 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1112 /* The ReadError flag will just be confusing now */ 1113 clear_bit(R5_ReadError, &dev->flags); 1114 clear_bit(R5_ReWrite, &dev->flags); 1115 } 1116 if (!rdev || !test_bit(In_sync, &rdev->flags) 1117 || test_bit(R5_ReadError, &dev->flags)) { 1118 if ( failed < 2 ) 1119 failed_num[failed] = i; 1120 failed++; 1121 } else 1122 set_bit(R5_Insync, &dev->flags); 1123 } 1124 rcu_read_unlock(); 1125 PRINTK("locked=%d uptodate=%d to_read=%d" 1126 " to_write=%d failed=%d failed_num=%d,%d\n", 1127 locked, uptodate, to_read, to_write, failed, 1128 failed_num[0], failed_num[1]); 1129 /* check if the array has lost >2 devices and, if so, some requests might 1130 * need to be failed 1131 */ 1132 if (failed > 2 && to_read+to_write+written) { 1133 for (i=disks; i--; ) { 1134 int bitmap_end = 0; 1135 1136 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1137 mdk_rdev_t *rdev; 1138 rcu_read_lock(); 1139 rdev = rcu_dereference(conf->disks[i].rdev); 1140 if (rdev && test_bit(In_sync, &rdev->flags)) 1141 /* multiple read failures in one stripe */ 1142 md_error(conf->mddev, rdev); 1143 rcu_read_unlock(); 1144 } 1145 1146 spin_lock_irq(&conf->device_lock); 1147 /* fail all writes first */ 1148 bi = sh->dev[i].towrite; 1149 sh->dev[i].towrite = NULL; 1150 if (bi) { to_write--; bitmap_end = 1; } 1151 1152 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1153 wake_up(&conf->wait_for_overlap); 1154 1155 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1156 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1157 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1158 if (--bi->bi_phys_segments == 0) { 1159 md_write_end(conf->mddev); 1160 bi->bi_next = return_bi; 1161 return_bi = bi; 1162 } 1163 bi = nextbi; 1164 } 1165 /* and fail all 'written' */ 1166 bi = sh->dev[i].written; 1167 sh->dev[i].written = NULL; 1168 if (bi) bitmap_end = 1; 1169 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { 1170 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1171 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1172 if (--bi->bi_phys_segments == 0) { 1173 md_write_end(conf->mddev); 1174 bi->bi_next = return_bi; 1175 return_bi = bi; 1176 } 1177 bi = bi2; 1178 } 1179 1180 /* fail any reads if this device is non-operational */ 1181 if (!test_bit(R5_Insync, &sh->dev[i].flags) || 1182 test_bit(R5_ReadError, &sh->dev[i].flags)) { 1183 bi = sh->dev[i].toread; 1184 sh->dev[i].toread = NULL; 1185 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1186 wake_up(&conf->wait_for_overlap); 1187 if (bi) to_read--; 1188 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){ 1189 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1190 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1191 if (--bi->bi_phys_segments == 0) { 1192 bi->bi_next = return_bi; 1193 return_bi = bi; 1194 } 1195 bi = nextbi; 1196 } 1197 } 1198 spin_unlock_irq(&conf->device_lock); 1199 if (bitmap_end) 1200 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1201 STRIPE_SECTORS, 0, 0); 1202 } 1203 } 1204 if (failed > 2 && syncing) { 1205 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 1206 clear_bit(STRIPE_SYNCING, &sh->state); 1207 syncing = 0; 1208 } 1209 1210 /* 1211 * might be able to return some write requests if the parity blocks 1212 * are safe, or on a failed drive 1213 */ 1214 pdev = &sh->dev[pd_idx]; 1215 p_failed = (failed >= 1 && failed_num[0] == pd_idx) 1216 || (failed >= 2 && failed_num[1] == pd_idx); 1217 qdev = &sh->dev[qd_idx]; 1218 q_failed = (failed >= 1 && failed_num[0] == qd_idx) 1219 || (failed >= 2 && failed_num[1] == qd_idx); 1220 1221 if ( written && 1222 ( p_failed || ((test_bit(R5_Insync, &pdev->flags) 1223 && !test_bit(R5_LOCKED, &pdev->flags) 1224 && test_bit(R5_UPTODATE, &pdev->flags))) ) && 1225 ( q_failed || ((test_bit(R5_Insync, &qdev->flags) 1226 && !test_bit(R5_LOCKED, &qdev->flags) 1227 && test_bit(R5_UPTODATE, &qdev->flags))) ) ) { 1228 /* any written block on an uptodate or failed drive can be 1229 * returned. Note that if we 'wrote' to a failed drive, 1230 * it will be UPTODATE, but never LOCKED, so we don't need 1231 * to test 'failed' directly. 1232 */ 1233 for (i=disks; i--; ) 1234 if (sh->dev[i].written) { 1235 dev = &sh->dev[i]; 1236 if (!test_bit(R5_LOCKED, &dev->flags) && 1237 test_bit(R5_UPTODATE, &dev->flags) ) { 1238 /* We can return any write requests */ 1239 int bitmap_end = 0; 1240 struct bio *wbi, *wbi2; 1241 PRINTK("Return write for stripe %llu disc %d\n", 1242 (unsigned long long)sh->sector, i); 1243 spin_lock_irq(&conf->device_lock); 1244 wbi = dev->written; 1245 dev->written = NULL; 1246 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1247 wbi2 = r5_next_bio(wbi, dev->sector); 1248 if (--wbi->bi_phys_segments == 0) { 1249 md_write_end(conf->mddev); 1250 wbi->bi_next = return_bi; 1251 return_bi = wbi; 1252 } 1253 wbi = wbi2; 1254 } 1255 if (dev->towrite == NULL) 1256 bitmap_end = 1; 1257 spin_unlock_irq(&conf->device_lock); 1258 if (bitmap_end) 1259 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1260 STRIPE_SECTORS, 1261 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 1262 } 1263 } 1264 } 1265 1266 /* Now we might consider reading some blocks, either to check/generate 1267 * parity, or to satisfy requests 1268 * or to load a block that is being partially written. 1269 */ 1270 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) { 1271 for (i=disks; i--;) { 1272 dev = &sh->dev[i]; 1273 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1274 (dev->toread || 1275 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1276 syncing || 1277 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) || 1278 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write)) 1279 ) 1280 ) { 1281 /* we would like to get this block, possibly 1282 * by computing it, but we might not be able to 1283 */ 1284 if (uptodate == disks-1) { 1285 PRINTK("Computing stripe %llu block %d\n", 1286 (unsigned long long)sh->sector, i); 1287 compute_block_1(sh, i, 0); 1288 uptodate++; 1289 } else if ( uptodate == disks-2 && failed >= 2 ) { 1290 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ 1291 int other; 1292 for (other=disks; other--;) { 1293 if ( other == i ) 1294 continue; 1295 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) ) 1296 break; 1297 } 1298 BUG_ON(other < 0); 1299 PRINTK("Computing stripe %llu blocks %d,%d\n", 1300 (unsigned long long)sh->sector, i, other); 1301 compute_block_2(sh, i, other); 1302 uptodate += 2; 1303 } else if (test_bit(R5_Insync, &dev->flags)) { 1304 set_bit(R5_LOCKED, &dev->flags); 1305 set_bit(R5_Wantread, &dev->flags); 1306#if 0 1307 /* if I am just reading this block and we don't have 1308 a failed drive, or any pending writes then sidestep the cache */ 1309 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 1310 ! syncing && !failed && !to_write) { 1311 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page; 1312 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data; 1313 } 1314#endif 1315 locked++; 1316 PRINTK("Reading block %d (sync=%d)\n", 1317 i, syncing); 1318 } 1319 } 1320 } 1321 set_bit(STRIPE_HANDLE, &sh->state); 1322 } 1323 1324 /* now to consider writing and what else, if anything should be read */ 1325 if (to_write) { 1326 int rcw=0, must_compute=0; 1327 for (i=disks ; i--;) { 1328 dev = &sh->dev[i]; 1329 /* Would I have to read this buffer for reconstruct_write */ 1330 if (!test_bit(R5_OVERWRITE, &dev->flags) 1331 && i != pd_idx && i != qd_idx 1332 && (!test_bit(R5_LOCKED, &dev->flags) 1333#if 0 1334 || sh->bh_page[i] != bh->b_page 1335#endif 1336 ) && 1337 !test_bit(R5_UPTODATE, &dev->flags)) { 1338 if (test_bit(R5_Insync, &dev->flags)) rcw++; 1339 else { 1340 PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags); 1341 must_compute++; 1342 } 1343 } 1344 } 1345 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n", 1346 (unsigned long long)sh->sector, rcw, must_compute); 1347 set_bit(STRIPE_HANDLE, &sh->state); 1348 1349 if (rcw > 0) 1350 /* want reconstruct write, but need to get some data */ 1351 for (i=disks; i--;) { 1352 dev = &sh->dev[i]; 1353 if (!test_bit(R5_OVERWRITE, &dev->flags) 1354 && !(failed == 0 && (i == pd_idx || i == qd_idx)) 1355 && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1356 test_bit(R5_Insync, &dev->flags)) { 1357 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1358 { 1359 PRINTK("Read_old stripe %llu block %d for Reconstruct\n", 1360 (unsigned long long)sh->sector, i); 1361 set_bit(R5_LOCKED, &dev->flags); 1362 set_bit(R5_Wantread, &dev->flags); 1363 locked++; 1364 } else { 1365 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n", 1366 (unsigned long long)sh->sector, i); 1367 set_bit(STRIPE_DELAYED, &sh->state); 1368 set_bit(STRIPE_HANDLE, &sh->state); 1369 } 1370 } 1371 } 1372 /* now if nothing is locked, and if we have enough data, we can start a write request */ 1373 if (locked == 0 && rcw == 0 && 1374 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 1375 if ( must_compute > 0 ) { 1376 /* We have failed blocks and need to compute them */ 1377 switch ( failed ) { 1378 case 0: BUG(); 1379 case 1: compute_block_1(sh, failed_num[0], 0); break; 1380 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; 1381 default: BUG(); /* This request should have been failed? */ 1382 } 1383 } 1384 1385 PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector); 1386 compute_parity(sh, RECONSTRUCT_WRITE); 1387 /* now every locked buffer is ready to be written */ 1388 for (i=disks; i--;) 1389 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 1390 PRINTK("Writing stripe %llu block %d\n", 1391 (unsigned long long)sh->sector, i); 1392 locked++; 1393 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1394 } 1395 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 1396 set_bit(STRIPE_INSYNC, &sh->state); 1397 1398 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 1399 atomic_dec(&conf->preread_active_stripes); 1400 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 1401 md_wakeup_thread(conf->mddev->thread); 1402 } 1403 } 1404 } 1405 1406 /* maybe we need to check and possibly fix the parity for this stripe 1407 * Any reads will already have been scheduled, so we just see if enough data 1408 * is available 1409 */ 1410 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) { 1411 int update_p = 0, update_q = 0; 1412 struct r5dev *dev; 1413 1414 set_bit(STRIPE_HANDLE, &sh->state); 1415 1416 BUG_ON(failed>2); 1417 BUG_ON(uptodate < disks); 1418 /* Want to check and possibly repair P and Q. 1419 * However there could be one 'failed' device, in which 1420 * case we can only check one of them, possibly using the 1421 * other to generate missing data 1422 */ 1423 1424 /* If !tmp_page, we cannot do the calculations, 1425 * but as we have set STRIPE_HANDLE, we will soon be called 1426 * by stripe_handle with a tmp_page - just wait until then. 1427 */ 1428 if (tmp_page) { 1429 if (failed == q_failed) { 1430 /* The only possible failed device holds 'Q', so it makes 1431 * sense to check P (If anything else were failed, we would 1432 * have used P to recreate it). 1433 */ 1434 compute_block_1(sh, pd_idx, 1); 1435 if (!page_is_zero(sh->dev[pd_idx].page)) { 1436 compute_block_1(sh,pd_idx,0); 1437 update_p = 1; 1438 } 1439 } 1440 if (!q_failed && failed < 2) { 1441 /* q is not failed, and we didn't use it to generate 1442 * anything, so it makes sense to check it 1443 */ 1444 memcpy(page_address(tmp_page), 1445 page_address(sh->dev[qd_idx].page), 1446 STRIPE_SIZE); 1447 compute_parity(sh, UPDATE_PARITY); 1448 if (memcmp(page_address(tmp_page), 1449 page_address(sh->dev[qd_idx].page), 1450 STRIPE_SIZE)!= 0) { 1451 clear_bit(STRIPE_INSYNC, &sh->state); 1452 update_q = 1; 1453 } 1454 } 1455 if (update_p || update_q) { 1456 conf->mddev->resync_mismatches += STRIPE_SECTORS; 1457 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 1458 /* don't try to repair!! */ 1459 update_p = update_q = 0; 1460 } 1461 1462 /* now write out any block on a failed drive, 1463 * or P or Q if they need it 1464 */ 1465 1466 if (failed == 2) { 1467 dev = &sh->dev[failed_num[1]]; 1468 locked++; 1469 set_bit(R5_LOCKED, &dev->flags); 1470 set_bit(R5_Wantwrite, &dev->flags); 1471 } 1472 if (failed >= 1) { 1473 dev = &sh->dev[failed_num[0]]; 1474 locked++; 1475 set_bit(R5_LOCKED, &dev->flags); 1476 set_bit(R5_Wantwrite, &dev->flags); 1477 } 1478 1479 if (update_p) { 1480 dev = &sh->dev[pd_idx]; 1481 locked ++; 1482 set_bit(R5_LOCKED, &dev->flags); 1483 set_bit(R5_Wantwrite, &dev->flags); 1484 } 1485 if (update_q) { 1486 dev = &sh->dev[qd_idx]; 1487 locked++; 1488 set_bit(R5_LOCKED, &dev->flags); 1489 set_bit(R5_Wantwrite, &dev->flags); 1490 } 1491 clear_bit(STRIPE_DEGRADED, &sh->state); 1492 1493 set_bit(STRIPE_INSYNC, &sh->state); 1494 } 1495 } 1496 1497 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1498 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 1499 clear_bit(STRIPE_SYNCING, &sh->state); 1500 } 1501 1502 /* If the failed drives are just a ReadError, then we might need 1503 * to progress the repair/check process 1504 */ 1505 if (failed <= 2 && ! conf->mddev->ro) 1506 for (i=0; i<failed;i++) { 1507 dev = &sh->dev[failed_num[i]]; 1508 if (test_bit(R5_ReadError, &dev->flags) 1509 && !test_bit(R5_LOCKED, &dev->flags) 1510 && test_bit(R5_UPTODATE, &dev->flags) 1511 ) { 1512 if (!test_bit(R5_ReWrite, &dev->flags)) { 1513 set_bit(R5_Wantwrite, &dev->flags); 1514 set_bit(R5_ReWrite, &dev->flags); 1515 set_bit(R5_LOCKED, &dev->flags); 1516 } else { 1517 /* let's read it back */ 1518 set_bit(R5_Wantread, &dev->flags); 1519 set_bit(R5_LOCKED, &dev->flags); 1520 } 1521 } 1522 } 1523 spin_unlock(&sh->lock); 1524 1525 while ((bi=return_bi)) { 1526 int bytes = bi->bi_size; 1527 1528 return_bi = bi->bi_next; 1529 bi->bi_next = NULL; 1530 bi->bi_size = 0; 1531 bi->bi_end_io(bi, bytes, 0); 1532 } 1533 for (i=disks; i-- ;) { 1534 int rw; 1535 struct bio *bi; 1536 mdk_rdev_t *rdev; 1537 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 1538 rw = 1; 1539 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 1540 rw = 0; 1541 else 1542 continue; 1543 1544 bi = &sh->dev[i].req; 1545 1546 bi->bi_rw = rw; 1547 if (rw) 1548 bi->bi_end_io = raid6_end_write_request; 1549 else 1550 bi->bi_end_io = raid6_end_read_request; 1551 1552 rcu_read_lock(); 1553 rdev = rcu_dereference(conf->disks[i].rdev); 1554 if (rdev && test_bit(Faulty, &rdev->flags)) 1555 rdev = NULL; 1556 if (rdev) 1557 atomic_inc(&rdev->nr_pending); 1558 rcu_read_unlock(); 1559 1560 if (rdev) { 1561 if (syncing) 1562 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1563 1564 bi->bi_bdev = rdev->bdev; 1565 PRINTK("for %llu schedule op %ld on disc %d\n", 1566 (unsigned long long)sh->sector, bi->bi_rw, i); 1567 atomic_inc(&sh->count); 1568 bi->bi_sector = sh->sector + rdev->data_offset; 1569 bi->bi_flags = 1 << BIO_UPTODATE; 1570 bi->bi_vcnt = 1; 1571 bi->bi_max_vecs = 1; 1572 bi->bi_idx = 0; 1573 bi->bi_io_vec = &sh->dev[i].vec; 1574 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1575 bi->bi_io_vec[0].bv_offset = 0; 1576 bi->bi_size = STRIPE_SIZE; 1577 bi->bi_next = NULL; 1578 if (rw == WRITE && 1579 test_bit(R5_ReWrite, &sh->dev[i].flags)) 1580 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1581 generic_make_request(bi); 1582 } else { 1583 if (rw == 1) 1584 set_bit(STRIPE_DEGRADED, &sh->state); 1585 PRINTK("skip op %ld on disc %d for sector %llu\n", 1586 bi->bi_rw, i, (unsigned long long)sh->sector); 1587 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1588 set_bit(STRIPE_HANDLE, &sh->state); 1589 } 1590 } 1591} 1592 1593static void raid6_activate_delayed(raid6_conf_t *conf) 1594{ 1595 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 1596 while (!list_empty(&conf->delayed_list)) { 1597 struct list_head *l = conf->delayed_list.next; 1598 struct stripe_head *sh; 1599 sh = list_entry(l, struct stripe_head, lru); 1600 list_del_init(l); 1601 clear_bit(STRIPE_DELAYED, &sh->state); 1602 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 1603 atomic_inc(&conf->preread_active_stripes); 1604 list_add_tail(&sh->lru, &conf->handle_list); 1605 } 1606 } 1607} 1608 1609static void activate_bit_delay(raid6_conf_t *conf) 1610{ 1611 /* device_lock is held */ 1612 struct list_head head; 1613 list_add(&head, &conf->bitmap_list); 1614 list_del_init(&conf->bitmap_list); 1615 while (!list_empty(&head)) { 1616 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 1617 list_del_init(&sh->lru); 1618 atomic_inc(&sh->count); 1619 __release_stripe(conf, sh); 1620 } 1621} 1622 1623static void unplug_slaves(mddev_t *mddev) 1624{ 1625 raid6_conf_t *conf = mddev_to_conf(mddev); 1626 int i; 1627 1628 rcu_read_lock(); 1629 for (i=0; i<mddev->raid_disks; i++) { 1630 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 1631 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 1632 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 1633 1634 atomic_inc(&rdev->nr_pending); 1635 rcu_read_unlock(); 1636 1637 if (r_queue->unplug_fn) 1638 r_queue->unplug_fn(r_queue); 1639 1640 rdev_dec_pending(rdev, mddev); 1641 rcu_read_lock(); 1642 } 1643 } 1644 rcu_read_unlock(); 1645} 1646 1647static void raid6_unplug_device(request_queue_t *q) 1648{ 1649 mddev_t *mddev = q->queuedata; 1650 raid6_conf_t *conf = mddev_to_conf(mddev); 1651 unsigned long flags; 1652 1653 spin_lock_irqsave(&conf->device_lock, flags); 1654 1655 if (blk_remove_plug(q)) { 1656 conf->seq_flush++; 1657 raid6_activate_delayed(conf); 1658 } 1659 md_wakeup_thread(mddev->thread); 1660 1661 spin_unlock_irqrestore(&conf->device_lock, flags); 1662 1663 unplug_slaves(mddev); 1664} 1665 1666static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk, 1667 sector_t *error_sector) 1668{ 1669 mddev_t *mddev = q->queuedata; 1670 raid6_conf_t *conf = mddev_to_conf(mddev); 1671 int i, ret = 0; 1672 1673 rcu_read_lock(); 1674 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 1675 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 1676 if (rdev && !test_bit(Faulty, &rdev->flags)) { 1677 struct block_device *bdev = rdev->bdev; 1678 request_queue_t *r_queue = bdev_get_queue(bdev); 1679 1680 if (!r_queue->issue_flush_fn) 1681 ret = -EOPNOTSUPP; 1682 else { 1683 atomic_inc(&rdev->nr_pending); 1684 rcu_read_unlock(); 1685 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 1686 error_sector); 1687 rdev_dec_pending(rdev, mddev); 1688 rcu_read_lock(); 1689 } 1690 } 1691 } 1692 rcu_read_unlock(); 1693 return ret; 1694} 1695 1696static inline void raid6_plug_device(raid6_conf_t *conf) 1697{ 1698 spin_lock_irq(&conf->device_lock); 1699 blk_plug_device(conf->mddev->queue); 1700 spin_unlock_irq(&conf->device_lock); 1701} 1702 1703static int make_request (request_queue_t *q, struct bio * bi) 1704{ 1705 mddev_t *mddev = q->queuedata; 1706 raid6_conf_t *conf = mddev_to_conf(mddev); 1707 const unsigned int raid_disks = conf->raid_disks; 1708 const unsigned int data_disks = raid_disks - 2; 1709 unsigned int dd_idx, pd_idx; 1710 sector_t new_sector; 1711 sector_t logical_sector, last_sector; 1712 struct stripe_head *sh; 1713 const int rw = bio_data_dir(bi); 1714 1715 if (unlikely(bio_barrier(bi))) { 1716 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 1717 return 0; 1718 } 1719 1720 md_write_start(mddev, bi); 1721 1722 disk_stat_inc(mddev->gendisk, ios[rw]); 1723 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 1724 1725 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 1726 last_sector = bi->bi_sector + (bi->bi_size>>9); 1727 1728 bi->bi_next = NULL; 1729 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 1730 1731 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 1732 DEFINE_WAIT(w); 1733 1734 new_sector = raid6_compute_sector(logical_sector, 1735 raid_disks, data_disks, &dd_idx, &pd_idx, conf); 1736 1737 PRINTK("raid6: make_request, sector %llu logical %llu\n", 1738 (unsigned long long)new_sector, 1739 (unsigned long long)logical_sector); 1740 1741 retry: 1742 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 1743 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK)); 1744 if (sh) { 1745 if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 1746 /* Add failed due to overlap. Flush everything 1747 * and wait a while 1748 */ 1749 raid6_unplug_device(mddev->queue); 1750 release_stripe(sh); 1751 schedule(); 1752 goto retry; 1753 } 1754 finish_wait(&conf->wait_for_overlap, &w); 1755 raid6_plug_device(conf); 1756 handle_stripe(sh, NULL); 1757 release_stripe(sh); 1758 } else { 1759 /* cannot get stripe for read-ahead, just give-up */ 1760 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1761 finish_wait(&conf->wait_for_overlap, &w); 1762 break; 1763 } 1764 1765 } 1766 spin_lock_irq(&conf->device_lock); 1767 if (--bi->bi_phys_segments == 0) { 1768 int bytes = bi->bi_size; 1769 1770 if (rw == WRITE ) 1771 md_write_end(mddev); 1772 bi->bi_size = 0; 1773 bi->bi_end_io(bi, bytes, 0); 1774 } 1775 spin_unlock_irq(&conf->device_lock); 1776 return 0; 1777} 1778 1779/* FIXME go_faster isn't used */ 1780static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1781{ 1782 raid6_conf_t *conf = (raid6_conf_t *) mddev->private; 1783 struct stripe_head *sh; 1784 int sectors_per_chunk = conf->chunk_size >> 9; 1785 sector_t x; 1786 unsigned long stripe; 1787 int chunk_offset; 1788 int dd_idx, pd_idx; 1789 sector_t first_sector; 1790 int raid_disks = conf->raid_disks; 1791 int data_disks = raid_disks - 2; 1792 sector_t max_sector = mddev->size << 1; 1793 int sync_blocks; 1794 int still_degraded = 0; 1795 int i; 1796 1797 if (sector_nr >= max_sector) { 1798 /* just being told to finish up .. nothing much to do */ 1799 unplug_slaves(mddev); 1800 1801 if (mddev->curr_resync < max_sector) /* aborted */ 1802 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 1803 &sync_blocks, 1); 1804 else /* completed sync */ 1805 conf->fullsync = 0; 1806 bitmap_close_sync(mddev->bitmap); 1807 1808 return 0; 1809 } 1810 /* if there are 2 or more failed drives and we are trying 1811 * to resync, then assert that we are finished, because there is 1812 * nothing we can do. 1813 */ 1814 if (mddev->degraded >= 2 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 1815 sector_t rv = (mddev->size << 1) - sector_nr; 1816 *skipped = 1; 1817 return rv; 1818 } 1819 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1820 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 1821 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 1822 /* we can skip this block, and probably more */ 1823 sync_blocks /= STRIPE_SECTORS; 1824 *skipped = 1; 1825 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 1826 } 1827 1828 x = sector_nr; 1829 chunk_offset = sector_div(x, sectors_per_chunk); 1830 stripe = x; 1831 BUG_ON(x != stripe); 1832 1833 first_sector = raid6_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk 1834 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf); 1835 sh = get_active_stripe(conf, sector_nr, pd_idx, 1); 1836 if (sh == NULL) { 1837 sh = get_active_stripe(conf, sector_nr, pd_idx, 0); 1838 /* make sure we don't swamp the stripe cache if someone else 1839 * is trying to get access 1840 */ 1841 schedule_timeout_uninterruptible(1); 1842 } 1843 /* Need to check if array will still be degraded after recovery/resync 1844 * We don't need to check the 'failed' flag as when that gets set, 1845 * recovery aborts. 1846 */ 1847 for (i=0; i<mddev->raid_disks; i++) 1848 if (conf->disks[i].rdev == NULL) 1849 still_degraded = 1; 1850 1851 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 1852 1853 spin_lock(&sh->lock); 1854 set_bit(STRIPE_SYNCING, &sh->state); 1855 clear_bit(STRIPE_INSYNC, &sh->state); 1856 spin_unlock(&sh->lock); 1857 1858 handle_stripe(sh, NULL); 1859 release_stripe(sh); 1860 1861 return STRIPE_SECTORS; 1862} 1863 1864/* 1865 * This is our raid6 kernel thread. 1866 * 1867 * We scan the hash table for stripes which can be handled now. 1868 * During the scan, completed stripes are saved for us by the interrupt 1869 * handler, so that they will not have to wait for our next wakeup. 1870 */ 1871static void raid6d (mddev_t *mddev) 1872{ 1873 struct stripe_head *sh; 1874 raid6_conf_t *conf = mddev_to_conf(mddev); 1875 int handled; 1876 1877 PRINTK("+++ raid6d active\n"); 1878 1879 md_check_recovery(mddev); 1880 1881 handled = 0; 1882 spin_lock_irq(&conf->device_lock); 1883 while (1) { 1884 struct list_head *first; 1885 1886 if (conf->seq_flush - conf->seq_write > 0) { 1887 int seq = conf->seq_flush; 1888 spin_unlock_irq(&conf->device_lock); 1889 bitmap_unplug(mddev->bitmap); 1890 spin_lock_irq(&conf->device_lock); 1891 conf->seq_write = seq; 1892 activate_bit_delay(conf); 1893 } 1894 1895 if (list_empty(&conf->handle_list) && 1896 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 1897 !blk_queue_plugged(mddev->queue) && 1898 !list_empty(&conf->delayed_list)) 1899 raid6_activate_delayed(conf); 1900 1901 if (list_empty(&conf->handle_list)) 1902 break; 1903 1904 first = conf->handle_list.next; 1905 sh = list_entry(first, struct stripe_head, lru); 1906 1907 list_del_init(first); 1908 atomic_inc(&sh->count); 1909 if (atomic_read(&sh->count)!= 1) 1910 BUG(); 1911 spin_unlock_irq(&conf->device_lock); 1912 1913 handled++; 1914 handle_stripe(sh, conf->spare_page); 1915 release_stripe(sh); 1916 1917 spin_lock_irq(&conf->device_lock); 1918 } 1919 PRINTK("%d stripes handled\n", handled); 1920 1921 spin_unlock_irq(&conf->device_lock); 1922 1923 unplug_slaves(mddev); 1924 1925 PRINTK("--- raid6d inactive\n"); 1926} 1927 1928static ssize_t 1929raid6_show_stripe_cache_size(mddev_t *mddev, char *page) 1930{ 1931 raid6_conf_t *conf = mddev_to_conf(mddev); 1932 if (conf) 1933 return sprintf(page, "%d\n", conf->max_nr_stripes); 1934 else 1935 return 0; 1936} 1937 1938static ssize_t 1939raid6_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 1940{ 1941 raid6_conf_t *conf = mddev_to_conf(mddev); 1942 char *end; 1943 int new; 1944 if (len >= PAGE_SIZE) 1945 return -EINVAL; 1946 if (!conf) 1947 return -ENODEV; 1948 1949 new = simple_strtoul(page, &end, 10); 1950 if (!*page || (*end && *end != '\n') ) 1951 return -EINVAL; 1952 if (new <= 16 || new > 32768) 1953 return -EINVAL; 1954 while (new < conf->max_nr_stripes) { 1955 if (drop_one_stripe(conf)) 1956 conf->max_nr_stripes--; 1957 else 1958 break; 1959 } 1960 while (new > conf->max_nr_stripes) { 1961 if (grow_one_stripe(conf)) 1962 conf->max_nr_stripes++; 1963 else break; 1964 } 1965 return len; 1966} 1967 1968static struct md_sysfs_entry 1969raid6_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 1970 raid6_show_stripe_cache_size, 1971 raid6_store_stripe_cache_size); 1972 1973static ssize_t 1974stripe_cache_active_show(mddev_t *mddev, char *page) 1975{ 1976 raid6_conf_t *conf = mddev_to_conf(mddev); 1977 if (conf) 1978 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 1979 else 1980 return 0; 1981} 1982 1983static struct md_sysfs_entry 1984raid6_stripecache_active = __ATTR_RO(stripe_cache_active); 1985 1986static struct attribute *raid6_attrs[] = { 1987 &raid6_stripecache_size.attr, 1988 &raid6_stripecache_active.attr, 1989 NULL, 1990}; 1991static struct attribute_group raid6_attrs_group = { 1992 .name = NULL, 1993 .attrs = raid6_attrs, 1994}; 1995 1996static int run(mddev_t *mddev) 1997{ 1998 raid6_conf_t *conf; 1999 int raid_disk, memory; 2000 mdk_rdev_t *rdev; 2001 struct disk_info *disk; 2002 struct list_head *tmp; 2003 2004 if (mddev->level != 6) { 2005 PRINTK("raid6: %s: raid level not set to 6 (%d)\n", mdname(mddev), mddev->level); 2006 return -EIO; 2007 } 2008 2009 mddev->private = kzalloc(sizeof (raid6_conf_t) 2010 + mddev->raid_disks * sizeof(struct disk_info), 2011 GFP_KERNEL); 2012 if ((conf = mddev->private) == NULL) 2013 goto abort; 2014 conf->mddev = mddev; 2015 2016 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 2017 goto abort; 2018 2019 conf->spare_page = alloc_page(GFP_KERNEL); 2020 if (!conf->spare_page) 2021 goto abort; 2022 2023 spin_lock_init(&conf->device_lock); 2024 init_waitqueue_head(&conf->wait_for_stripe); 2025 init_waitqueue_head(&conf->wait_for_overlap); 2026 INIT_LIST_HEAD(&conf->handle_list); 2027 INIT_LIST_HEAD(&conf->delayed_list); 2028 INIT_LIST_HEAD(&conf->bitmap_list); 2029 INIT_LIST_HEAD(&conf->inactive_list); 2030 atomic_set(&conf->active_stripes, 0); 2031 atomic_set(&conf->preread_active_stripes, 0); 2032 2033 PRINTK("raid6: run(%s) called.\n", mdname(mddev)); 2034 2035 ITERATE_RDEV(mddev,rdev,tmp) { 2036 raid_disk = rdev->raid_disk; 2037 if (raid_disk >= mddev->raid_disks 2038 || raid_disk < 0) 2039 continue; 2040 disk = conf->disks + raid_disk; 2041 2042 disk->rdev = rdev; 2043 2044 if (test_bit(In_sync, &rdev->flags)) { 2045 char b[BDEVNAME_SIZE]; 2046 printk(KERN_INFO "raid6: device %s operational as raid" 2047 " disk %d\n", bdevname(rdev->bdev,b), 2048 raid_disk); 2049 conf->working_disks++; 2050 } 2051 } 2052 2053 conf->raid_disks = mddev->raid_disks; 2054 2055 /* 2056 * 0 for a fully functional array, 1 or 2 for a degraded array. 2057 */ 2058 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; 2059 conf->mddev = mddev; 2060 conf->chunk_size = mddev->chunk_size; 2061 conf->level = mddev->level; 2062 conf->algorithm = mddev->layout; 2063 conf->max_nr_stripes = NR_STRIPES; 2064 2065 /* device size must be a multiple of chunk size */ 2066 mddev->size &= ~(mddev->chunk_size/1024 -1); 2067 mddev->resync_max_sectors = mddev->size << 1; 2068 2069 if (conf->raid_disks < 4) { 2070 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 2071 mdname(mddev), conf->raid_disks); 2072 goto abort; 2073 } 2074 if (!conf->chunk_size || conf->chunk_size % 4) { 2075 printk(KERN_ERR "raid6: invalid chunk size %d for %s\n", 2076 conf->chunk_size, mdname(mddev)); 2077 goto abort; 2078 } 2079 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 2080 printk(KERN_ERR 2081 "raid6: unsupported parity algorithm %d for %s\n", 2082 conf->algorithm, mdname(mddev)); 2083 goto abort; 2084 } 2085 if (mddev->degraded > 2) { 2086 printk(KERN_ERR "raid6: not enough operational devices for %s" 2087 " (%d/%d failed)\n", 2088 mdname(mddev), conf->failed_disks, conf->raid_disks); 2089 goto abort; 2090 } 2091 2092 if (mddev->degraded > 0 && 2093 mddev->recovery_cp != MaxSector) { 2094 if (mddev->ok_start_degraded) 2095 printk(KERN_WARNING "raid6: starting dirty degraded array:%s" 2096 "- data corruption possible.\n", 2097 mdname(mddev)); 2098 else { 2099 printk(KERN_ERR "raid6: cannot start dirty degraded array" 2100 " for %s\n", mdname(mddev)); 2101 goto abort; 2102 } 2103 } 2104 2105 { 2106 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6"); 2107 if (!mddev->thread) { 2108 printk(KERN_ERR 2109 "raid6: couldn't allocate thread for %s\n", 2110 mdname(mddev)); 2111 goto abort; 2112 } 2113 } 2114 2115 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 2116 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 2117 if (grow_stripes(conf, conf->max_nr_stripes)) { 2118 printk(KERN_ERR 2119 "raid6: couldn't allocate %dkB for buffers\n", memory); 2120 shrink_stripes(conf); 2121 md_unregister_thread(mddev->thread); 2122 goto abort; 2123 } else 2124 printk(KERN_INFO "raid6: allocated %dkB for %s\n", 2125 memory, mdname(mddev)); 2126 2127 if (mddev->degraded == 0) 2128 printk(KERN_INFO "raid6: raid level %d set %s active with %d out of %d" 2129 " devices, algorithm %d\n", conf->level, mdname(mddev), 2130 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 2131 conf->algorithm); 2132 else 2133 printk(KERN_ALERT "raid6: raid level %d set %s active with %d" 2134 " out of %d devices, algorithm %d\n", conf->level, 2135 mdname(mddev), mddev->raid_disks - mddev->degraded, 2136 mddev->raid_disks, conf->algorithm); 2137 2138 print_raid6_conf(conf); 2139 2140 /* read-ahead size must cover two whole stripes, which is 2141 * 2 * (n-2) * chunksize where 'n' is the number of raid devices 2142 */ 2143 { 2144 int stripe = (mddev->raid_disks-2) * mddev->chunk_size 2145 / PAGE_SIZE; 2146 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 2147 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 2148 } 2149 2150 /* Ok, everything is just fine now */ 2151 mddev->array_size = mddev->size * (mddev->raid_disks - 2); 2152 2153 mddev->queue->unplug_fn = raid6_unplug_device; 2154 mddev->queue->issue_flush_fn = raid6_issue_flush; 2155 return 0; 2156abort: 2157 if (conf) { 2158 print_raid6_conf(conf); 2159 safe_put_page(conf->spare_page); 2160 kfree(conf->stripe_hashtbl); 2161 kfree(conf); 2162 } 2163 mddev->private = NULL; 2164 printk(KERN_ALERT "raid6: failed to run raid set %s\n", mdname(mddev)); 2165 return -EIO; 2166} 2167 2168 2169 2170static int stop (mddev_t *mddev) 2171{ 2172 raid6_conf_t *conf = (raid6_conf_t *) mddev->private; 2173 2174 md_unregister_thread(mddev->thread); 2175 mddev->thread = NULL; 2176 shrink_stripes(conf); 2177 kfree(conf->stripe_hashtbl); 2178 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2179 sysfs_remove_group(&mddev->kobj, &raid6_attrs_group); 2180 kfree(conf); 2181 mddev->private = NULL; 2182 return 0; 2183} 2184 2185#if RAID6_DUMPSTATE 2186static void print_sh (struct seq_file *seq, struct stripe_head *sh) 2187{ 2188 int i; 2189 2190 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 2191 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 2192 seq_printf(seq, "sh %llu, count %d.\n", 2193 (unsigned long long)sh->sector, atomic_read(&sh->count)); 2194 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 2195 for (i = 0; i < sh->raid_conf->raid_disks; i++) { 2196 seq_printf(seq, "(cache%d: %p %ld) ", 2197 i, sh->dev[i].page, sh->dev[i].flags); 2198 } 2199 seq_printf(seq, "\n"); 2200} 2201 2202static void printall (struct seq_file *seq, raid6_conf_t *conf) 2203{ 2204 struct stripe_head *sh; 2205 struct hlist_node *hn; 2206 int i; 2207 2208 spin_lock_irq(&conf->device_lock); 2209 for (i = 0; i < NR_HASH; i++) { 2210 sh = conf->stripe_hashtbl[i]; 2211 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 2212 if (sh->raid_conf != conf) 2213 continue; 2214 print_sh(seq, sh); 2215 } 2216 } 2217 spin_unlock_irq(&conf->device_lock); 2218} 2219#endif 2220 2221static void status (struct seq_file *seq, mddev_t *mddev) 2222{ 2223 raid6_conf_t *conf = (raid6_conf_t *) mddev->private; 2224 int i; 2225 2226 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 2227 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks); 2228 for (i = 0; i < conf->raid_disks; i++) 2229 seq_printf (seq, "%s", 2230 conf->disks[i].rdev && 2231 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 2232 seq_printf (seq, "]"); 2233#if RAID6_DUMPSTATE 2234 seq_printf (seq, "\n"); 2235 printall(seq, conf); 2236#endif 2237} 2238 2239static void print_raid6_conf (raid6_conf_t *conf) 2240{ 2241 int i; 2242 struct disk_info *tmp; 2243 2244 printk("RAID6 conf printout:\n"); 2245 if (!conf) { 2246 printk("(conf==NULL)\n"); 2247 return; 2248 } 2249 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks, 2250 conf->working_disks, conf->failed_disks); 2251 2252 for (i = 0; i < conf->raid_disks; i++) { 2253 char b[BDEVNAME_SIZE]; 2254 tmp = conf->disks + i; 2255 if (tmp->rdev) 2256 printk(" disk %d, o:%d, dev:%s\n", 2257 i, !test_bit(Faulty, &tmp->rdev->flags), 2258 bdevname(tmp->rdev->bdev,b)); 2259 } 2260} 2261 2262static int raid6_spare_active(mddev_t *mddev) 2263{ 2264 int i; 2265 raid6_conf_t *conf = mddev->private; 2266 struct disk_info *tmp; 2267 2268 for (i = 0; i < conf->raid_disks; i++) { 2269 tmp = conf->disks + i; 2270 if (tmp->rdev 2271 && !test_bit(Faulty, &tmp->rdev->flags) 2272 && !test_bit(In_sync, &tmp->rdev->flags)) { 2273 mddev->degraded--; 2274 conf->failed_disks--; 2275 conf->working_disks++; 2276 set_bit(In_sync, &tmp->rdev->flags); 2277 } 2278 } 2279 print_raid6_conf(conf); 2280 return 0; 2281} 2282 2283static int raid6_remove_disk(mddev_t *mddev, int number) 2284{ 2285 raid6_conf_t *conf = mddev->private; 2286 int err = 0; 2287 mdk_rdev_t *rdev; 2288 struct disk_info *p = conf->disks + number; 2289 2290 print_raid6_conf(conf); 2291 rdev = p->rdev; 2292 if (rdev) { 2293 if (test_bit(In_sync, &rdev->flags) || 2294 atomic_read(&rdev->nr_pending)) { 2295 err = -EBUSY; 2296 goto abort; 2297 } 2298 p->rdev = NULL; 2299 synchronize_rcu(); 2300 if (atomic_read(&rdev->nr_pending)) { 2301 /* lost the race, try later */ 2302 err = -EBUSY; 2303 p->rdev = rdev; 2304 } 2305 } 2306 2307abort: 2308 2309 print_raid6_conf(conf); 2310 return err; 2311} 2312 2313static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 2314{ 2315 raid6_conf_t *conf = mddev->private; 2316 int found = 0; 2317 int disk; 2318 struct disk_info *p; 2319 2320 if (mddev->degraded > 2) 2321 /* no point adding a device */ 2322 return 0; 2323 /* 2324 * find the disk ... but prefer rdev->saved_raid_disk 2325 * if possible. 2326 */ 2327 if (rdev->saved_raid_disk >= 0 && 2328 conf->disks[rdev->saved_raid_disk].rdev == NULL) 2329 disk = rdev->saved_raid_disk; 2330 else 2331 disk = 0; 2332 for ( ; disk < mddev->raid_disks; disk++) 2333 if ((p=conf->disks + disk)->rdev == NULL) { 2334 clear_bit(In_sync, &rdev->flags); 2335 rdev->raid_disk = disk; 2336 found = 1; 2337 if (rdev->saved_raid_disk != disk) 2338 conf->fullsync = 1; 2339 rcu_assign_pointer(p->rdev, rdev); 2340 break; 2341 } 2342 print_raid6_conf(conf); 2343 return found; 2344} 2345 2346static int raid6_resize(mddev_t *mddev, sector_t sectors) 2347{ 2348 /* no resync is happening, and there is enough space 2349 * on all devices, so we can resize. 2350 * We need to make sure resync covers any new space. 2351 * If the array is shrinking we should possibly wait until 2352 * any io in the removed space completes, but it hardly seems 2353 * worth it. 2354 */ 2355 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 2356 mddev->array_size = (sectors * (mddev->raid_disks-2))>>1; 2357 set_capacity(mddev->gendisk, mddev->array_size << 1); 2358 mddev->changed = 1; 2359 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 2360 mddev->recovery_cp = mddev->size << 1; 2361 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2362 } 2363 mddev->size = sectors /2; 2364 mddev->resync_max_sectors = sectors; 2365 return 0; 2366} 2367 2368static void raid6_quiesce(mddev_t *mddev, int state) 2369{ 2370 raid6_conf_t *conf = mddev_to_conf(mddev); 2371 2372 switch(state) { 2373 case 1: /* stop all writes */ 2374 spin_lock_irq(&conf->device_lock); 2375 conf->quiesce = 1; 2376 wait_event_lock_irq(conf->wait_for_stripe, 2377 atomic_read(&conf->active_stripes) == 0, 2378 conf->device_lock, /* nothing */); 2379 spin_unlock_irq(&conf->device_lock); 2380 break; 2381 2382 case 0: /* re-enable writes */ 2383 spin_lock_irq(&conf->device_lock); 2384 conf->quiesce = 0; 2385 wake_up(&conf->wait_for_stripe); 2386 spin_unlock_irq(&conf->device_lock); 2387 break; 2388 } 2389} 2390 2391static struct mdk_personality raid6_personality = 2392{ 2393 .name = "raid6", 2394 .level = 6, 2395 .owner = THIS_MODULE, 2396 .make_request = make_request, 2397 .run = run, 2398 .stop = stop, 2399 .status = status, 2400 .error_handler = error, 2401 .hot_add_disk = raid6_add_disk, 2402 .hot_remove_disk= raid6_remove_disk, 2403 .spare_active = raid6_spare_active, 2404 .sync_request = sync_request, 2405 .resize = raid6_resize, 2406 .quiesce = raid6_quiesce, 2407}; 2408 2409static int __init raid6_init(void) 2410{ 2411 int e; 2412 2413 e = raid6_select_algo(); 2414 if ( e ) 2415 return e; 2416 2417 return register_md_personality(&raid6_personality); 2418} 2419 2420static void raid6_exit (void) 2421{ 2422 unregister_md_personality(&raid6_personality); 2423} 2424 2425module_init(raid6_init); 2426module_exit(raid6_exit); 2427MODULE_LICENSE("GPL"); 2428MODULE_ALIAS("md-personality-8"); /* RAID6 */ 2429MODULE_ALIAS("md-raid6"); 2430MODULE_ALIAS("md-level-6");