Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
raid5.c at v2.6.15-rc5 (2232 lines, 61 kB)
/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define HASH_PAGES		1
#define HASH_PAGES_ORDER	0
#define NR_HASH			(HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
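
/*
 * (Illustrative note, not part of the original source.)  With the common
 * PAGE_SIZE of 4096 and 8-byte pointers, NR_HASH is 4096/8 = 512 buckets
 * and HASH_MASK is 0x1ff.  PAGE_SHIFT is 12, so STRIPE_SHIFT is 3 and
 * stripe_hash() indexes bucket ((sect >> 3) & 0x1ff), meaning consecutive
 * stripes (8 sectors apart) land in consecutive buckets.
 */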
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
#if RAID5_DEBUG
#define inline
#define __inline__
#endif

static void print_raid5_conf (raid5_conf_t *conf);

static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		if (!list_empty(&sh->lru))
			BUG();
		if (atomic_read(&conf->active_stripes)==0)
			BUG();
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 conf->seq_write == sh->bm_seq)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			list_add_tail(&sh->lru, &conf->inactive_list);
			atomic_dec(&conf->active_stripes);
			if (!conf->inactive_blocked ||
			    atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
				wake_up(&conf->wait_for_stripe);
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	if (sh->hash_pprev) {
		if (sh->hash_next)
			sh->hash_next->hash_pprev = sh->hash_pprev;
		*sh->hash_pprev = sh->hash_next;
		sh->hash_pprev = NULL;
	}
}

static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct stripe_head **shp = &stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	if ((sh->hash_next = *shp) != NULL)
		(*shp)->hash_pprev = &sh->hash_next;
	*shp = sh;
	sh->hash_pprev = shp;
}
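
/*
 * (Illustrative note, not part of the original source.)  The hash chain
 * is an open-coded version of what hlist_head/hlist_node provide:
 * hash_pprev points at whatever pointer currently points at this stripe,
 * either the bucket slot or the previous node's hash_next.  remove_hash()
 * can therefore unlink in O(1) with "*sh->hash_pprev = sh->hash_next"
 * and never needs to know which bucket the stripe lives in.
 */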

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		page_cache_release(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block (struct stripe_head *sh, int i);

static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;

	if (atomic_read(&sh->count) != 0)
		BUG();
	if (test_bit(STRIPE_HANDLE, &sh->state))
		BUG();

	CHECK_DEVLOCK();
	PRINTK("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;

	for (i=disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk("sector=%llx i=%d %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
	struct stripe_head *sh;

	CHECK_DEVLOCK();
	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
	for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
		if (sh->sector == sector)
			return sh;
	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);

static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    unplug_slaves(conf->mddev);
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx);
		} else {
			if (atomic_read(&sh->count)) {
				if (!list_empty(&sh->lru))
					BUG();
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
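
/*
 * (Illustrative note, not part of the original source.)  Typical
 * stripe-cache lifecycle as the functions above implement it:
 *
 *	sh = get_active_stripe(conf, sector, pd_idx, 0); // find or recycle, count++
 *	...attach bios, handle_stripe(sh)...
 *	release_stripe(sh); // count--, requeue on handle/delayed/inactive list
 *
 * A stripe with count == 0 always sits on exactly one lru list; a stripe
 * with count > 0 is on none, which is what the BUG() checks enforce.
 */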
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));

	sc = kmem_cache_create(conf->cache_name,
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	while (num--) {
		if (!grow_one_stripe(conf))
			return 1;
	}
	return 0;
}

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	if (atomic_read(&sh->count))
		BUG();
	shrink_buffers(sh, conf->raid_disks);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
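
/*
 * (Illustrative note, not part of the original source.)  The object size
 * sizeof(struct stripe_head) + (devs-1)*sizeof(struct r5dev) is the
 * classic "struct hack": stripe_head declares a one-element dev[] array
 * at its tail, so a 4-disk array allocates room for three extra r5dev
 * slots, giving dev[0..3] in one contiguous slab object.
 */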
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				  int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	if (uptodate) {
#if 0
		struct bio *bio;
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		/* we can return a buffer if we bypassed the cache or
		 * if the top buffer is not in highmem.  If there are
		 * multiple buffers, leave the extra work to
		 * handle_stripe
		 */
		buffer = sh->bh_read[i];
		if (buffer &&
		    (!PageHighMem(buffer->b_page)
		     || buffer->b_page == bh->b_page )
			) {
			sh->bh_read[i] = buffer->b_reqnext;
			buffer->b_reqnext = NULL;
		} else
			buffer = NULL;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (sh->bh_page[i]==bh->b_page)
			set_buffer_uptodate(bh);
		if (buffer) {
			if (buffer->b_page != bh->b_page)
				memcpy(buffer->b_data, bh->b_data, bh->b_size);
			buffer->b_end_io(buffer, 1);
		}
#else
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			printk("R5: read error corrected!!\n");
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		int retry = 0;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&conf->disks[i].rdev->read_errors);
		if (conf->mddev->degraded)
			printk("R5: read error not correctable.\n");
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk("R5: read error NOT corrected!!\n");
		else if (atomic_read(&conf->disks[i].rdev->read_errors)
			 > conf->max_nr_stripes)
			printk("raid5: Too many read errors, failing device.\n");
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, conf->disks[i].rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
	/* must restore b_page before unlocking buffer... */
	if (sh->bh_page[i] != bh->b_page) {
		bh->b_page = sh->bh_page[i];
		bh->b_data = page_address(bh->b_page);
		clear_buffer_uptodate(bh);
	}
#endif
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
	return 0;
}
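
/*
 * (Illustrative note, not part of the original source.)  In the 2.6.x
 * bio model a bi_end_io handler can be invoked for partial completions:
 * bytes_done says how much just finished and bi->bi_size counts what is
 * still outstanding.  Returning 1 while bi->bi_size is non-zero, as the
 * handlers here do, means "call me again when the rest completes"; the
 * real work runs only on the final call, when bi->bi_size has reached 0.
 */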
static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	if (i != sh->pd_idx)
		dev->sector = compute_blocknr(sh, i);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	PRINTK("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk (KERN_ALERT
			"raid5: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
			unsigned int data_disks, unsigned int * dd_idx,
			unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	if (conf->level == 4)
		*pd_idx = data_disks;
	else switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk("raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
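
/*
 * (Worked example, not part of the original source.)  Assume 4 raid_disks
 * (so data_disks = 3), a 64KiB chunk (sectors_per_chunk = 128) and
 * ALGORITHM_LEFT_SYMMETRIC, and map r_sector = 1000:
 *
 *	chunk_offset = 1000 % 128 = 104,  chunk_number = 1000 / 128 = 7
 *	stripe = 7 / 3 = 2,               dd_idx = 7 % 3 = 1
 *	pd_idx = 3 - (2 % 4) = 1,         dd_idx = (1 + 1 + 1) % 4 = 3
 *	new_sector = 2 * 128 + 104 = 360
 *
 * i.e. logical sector 1000 lives at sector 360 of disk 3, and parity for
 * that stripe lives on disk 1.
 */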

static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);


	switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk("raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk("compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}



/*
 * Copy data between a page in the stripe cache, and a bio.
 * There are no alignment or size guarantees between the page or the
 * bio except that there is some overlap.
 * All iovecs in the bio must be considered.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}

#define check_xor()	do {					\
			   if (count == MAX_XOR_BLOCKS) {	\
				xor_block(count, STRIPE_SIZE, ptr); \
				count = 1;			\
			   }					\
			} while(0)


static void compute_block(struct stripe_head *sh, int dd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	void *ptr[MAX_XOR_BLOCKS], *p;

	PRINTK("compute_block, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	ptr[0] = page_address(sh->dev[dd_idx].page);
	memset(ptr[0], 0, STRIPE_SIZE);
	count = 1;
	for (i = disks ; i--; ) {
		if (i == dd_idx)
			continue;
		p = page_address(sh->dev[i].page);
		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
			ptr[count++] = p;
		else
			printk("compute_block() %d, stripe %llu, %d"
				" not present\n", dd_idx,
				(unsigned long long)sh->sector, i);

		check_xor();
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
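
/*
 * (Illustrative note, not part of the original source.)  compute_block()
 * relies on parity being plain XOR: P = D0 ^ D1 ^ ... ^ Dn-1, so any
 * single missing block is the XOR of all the others.  E.g. with three
 * data blocks, P = D0 ^ D1 ^ D2, and a lost D1 is rebuilt as
 * D1 = P ^ D0 ^ D2.  The zeroed target page is simply XOR's identity
 * element, which lets the loop treat every source block uniformly.
 */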
static void compute_parity(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
	void *ptr[MAX_XOR_BLOCKS];
	struct bio *chosen;

	PRINTK("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	count = 1;
	ptr[0] = page_address(sh->dev[pd_idx].page);
	switch(method) {
	case READ_MODIFY_WRITE:
		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
			BUG();
		for (i=disks ; i-- ;) {
			if (i==pd_idx)
				continue;
			if (sh->dev[i].towrite &&
			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
				ptr[count++] = page_address(sh->dev[i].page);
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
				check_xor();
			}
		}
		break;
	case RECONSTRUCT_WRITE:
		memset(ptr[0], 0, STRIPE_SIZE);
		for (i= disks; i-- ;)
			if (i!=pd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		break;
	}
	if (count>1) {
		xor_block(count, STRIPE_SIZE, ptr);
		count = 1;
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	switch(method) {
	case RECONSTRUCT_WRITE:
	case CHECK_PARITY:
		for (i=disks; i--;)
			if (i != pd_idx) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
		break;
	case READ_MODIFY_WRITE:
		for (i = disks; i--;)
			if (sh->dev[i].written) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);

	if (method != CHECK_PARITY) {
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	} else
		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}
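
/*
 * (Illustrative note, not part of the original source.)  The two write
 * strategies above differ only in which blocks feed the XOR:
 *
 *	READ_MODIFY_WRITE:  newP = oldP ^ oldD ^ newD  (for each written D)
 *	RECONSTRUCT_WRITE:  newP = XOR of all data blocks after the write
 *
 * Updating one block on a 5-disk set costs two pre-reads (old data plus
 * old parity) with r-m-w versus three (the untouched data blocks) with
 * reconstruct-write; handle_stripe() counts both and picks the cheaper.
 */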
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
		BUG();
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments ++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		sh->bm_seq = conf->seq_write;
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
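
/*
 * (Illustrative note, not part of the original source.)  The coverage
 * scan above walks the sorted towrite chain and advances 'sector' across
 * each contiguous bio.  With 4KiB pages STRIPE_SECTORS is 8, so e.g. two
 * queued 2KiB writes at offsets 0 and 2KiB of the stripe unit together
 * cover sectors 0..7 and set R5_OVERWRITE, telling handle_stripe() that
 * the old contents of this page never need reading.
 */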


/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read request which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num=0;
	struct r5dev *dev;

	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count),
		sh->pd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	/* Now to look around and see what can be done */

	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);
		clear_bit(R5_Syncio, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;


		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
			failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	PRINTK("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		locked, uptodate, to_read, to_write, failed, failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev = conf->disks[i].rdev;
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 1 && syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( written &&
	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
		test_bit(R5_UPTODATE, &dev->flags))
	       || (failed == 1 && failed_num == sh->pd_idx))
		) {
		/* any written block on an uptodate or failed drive can be returned.
		 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
		 * never LOCKED, so we don't need to test 'failed' directly.
		 */
1111 */ 1112 for (i=disks; i--; ) 1113 if (sh->dev[i].written) { 1114 dev = &sh->dev[i]; 1115 if (!test_bit(R5_LOCKED, &dev->flags) && 1116 test_bit(R5_UPTODATE, &dev->flags) ) { 1117 /* We can return any write requests */ 1118 struct bio *wbi, *wbi2; 1119 int bitmap_end = 0; 1120 PRINTK("Return write for disc %d\n", i); 1121 spin_lock_irq(&conf->device_lock); 1122 wbi = dev->written; 1123 dev->written = NULL; 1124 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { 1125 wbi2 = r5_next_bio(wbi, dev->sector); 1126 if (--wbi->bi_phys_segments == 0) { 1127 md_write_end(conf->mddev); 1128 wbi->bi_next = return_bi; 1129 return_bi = wbi; 1130 } 1131 wbi = wbi2; 1132 } 1133 if (dev->towrite == NULL) 1134 bitmap_end = 1; 1135 spin_unlock_irq(&conf->device_lock); 1136 if (bitmap_end) 1137 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1138 STRIPE_SECTORS, 1139 !test_bit(STRIPE_DEGRADED, &sh->state), 0); 1140 } 1141 } 1142 } 1143 1144 /* Now we might consider reading some blocks, either to check/generate 1145 * parity, or to satisfy requests 1146 * or to load a block that is being partially written. 1147 */ 1148 if (to_read || non_overwrite || (syncing && (uptodate < disks))) { 1149 for (i=disks; i--;) { 1150 dev = &sh->dev[i]; 1151 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) && 1152 (dev->toread || 1153 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1154 syncing || 1155 (failed && (sh->dev[failed_num].toread || 1156 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags)))) 1157 ) 1158 ) { 1159 /* we would like to get this block, possibly 1160 * by computing it, but we might not be able to 1161 */ 1162 if (uptodate == disks-1) { 1163 PRINTK("Computing block %d\n", i); 1164 compute_block(sh, i); 1165 uptodate++; 1166 } else if (test_bit(R5_Insync, &dev->flags)) { 1167 set_bit(R5_LOCKED, &dev->flags); 1168 set_bit(R5_Wantread, &dev->flags); 1169#if 0 1170 /* if I am just reading this block and we don't have 1171 a failed drive, or any pending writes then sidestep the cache */ 1172 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext && 1173 ! 
						sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
						sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
					}
#endif
					locked++;
					PRINTK("Reading block %d (sync=%d)\n",
						i, syncing);
					if (syncing)
						md_sync_acct(conf->disks[i].rdev->bdev,
							     STRIPE_SECTORS);
				}
			}
		}
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	/* now to consider writing and what else, if anything should be read */
	if (to_write) {
		int rmw=0, rcw=0;
		for (i=disks ; i--;) {
			/* would I have to read this buffer for read_modify_write */
			dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i]!=bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)
/*				    && !(!mddev->insync && i == sh->pd_idx) */
					)
					rmw++;
				else rmw += 2*disks;  /* cannot read it */
			}
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else rcw += 2*disks;
			}
		}
		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
			(unsigned long long)sh->sector, rmw, rcw);
		set_bit(STRIPE_HANDLE, &sh->state);
		if (rmw < rcw && rmw > 0)
			/* prefer read-modify-write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if ((dev->towrite || i == sh->pd_idx) &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for r-m-w\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		if (rcw <= rmw && rcw > 0)
			/* want reconstruct write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for Reconstruct\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		/* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			PRINTK("Computing parity...\n");
			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
			/* now every locked buffer is ready to be written */
			for (i=disks; i--;)
				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
					PRINTK("Writing block %d\n", i);
					locked++;
					set_bit(R5_Wantwrite, &sh->dev[i].flags);
					if (!test_bit(R5_Insync, &sh->dev[i].flags)
					    || (i==sh->pd_idx && failed == 0))
						set_bit(STRIPE_INSYNC, &sh->state);
				}
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough data
	 * is available
	 */
	if (syncing && locked == 0 &&
	    !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
		set_bit(STRIPE_HANDLE, &sh->state);
		if (failed == 0) {
			char *pagea;
			if (uptodate != disks)
				BUG();
			compute_parity(sh, CHECK_PARITY);
			uptodate--;
			pagea = page_address(sh->dev[sh->pd_idx].page);
			if ((*(u32*)pagea) == 0 &&
			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
				/* parity is correct (on disc, not in buffer any more) */
				set_bit(STRIPE_INSYNC, &sh->state);
			} else {
				conf->mddev->resync_mismatches += STRIPE_SECTORS;
				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
					/* don't try to repair!! */
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
			if (failed==0)
				failed_num = sh->pd_idx;
			/* should be able to compute the missing block and write it to spare */
			if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
				if (uptodate+1 != disks)
					BUG();
				compute_block(sh, failed_num);
				uptodate++;
			}
			if (uptodate != disks)
				BUG();
			dev = &sh->dev[failed_num];
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
			clear_bit(STRIPE_DEGRADED, &sh->state);
			locked++;
			set_bit(STRIPE_INSYNC, &sh->state);
			set_bit(R5_Syncio, &dev->flags);
		}
	}
	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
		) {
		dev = &sh->dev[failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
		}
	}

	spin_unlock(&sh->lock);

	while ((bi=return_bi)) {
		int bytes = bi->bi_size;

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	for (i=disks; i-- ;) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = 1;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = 0;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (test_bit(R5_Syncio, &sh->dev[i].flags))
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			PRINTK("for %llu schedule op %ld on disc %d\n",
				(unsigned long long)sh->sector, bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			generic_make_request(bi);
		} else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
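
/*
 * (Illustrative note, not part of the original source.)  The dispatch
 * loop above never allocates a bio: each r5dev carries one embedded
 * request bio (dev->req) with a single bio_vec pointing at the stripe
 * page, set up by raid5_build_block().  Before each submission the loop
 * re-arms the per-request fields (sector, size, flags, end_io), so one
 * stripe can be read and written many times without touching the bio
 * allocator.
 */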
static inline void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
	}
}

static inline void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}

static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid5_unplug_device(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}

static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline void raid5_plug_device(raid5_conf_t *conf)
{
	spin_lock_irq(&conf->device_lock);
	blk_plug_device(conf->mddev->queue);
	spin_unlock_irq(&conf->device_lock);
}

static int make_request (request_queue_t *q, struct bio * bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	const unsigned int raid_disks = conf->raid_disks;
	const unsigned int data_disks = raid_disks - 1;
	unsigned int dd_idx, pd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);

		new_sector = raid5_compute_sector(logical_sector,
						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);

		PRINTK("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

	retry:
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			raid5_plug_device(conf);
			handle_stripe(sh);
			release_stripe(sh);

		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	spin_lock_irq(&conf->device_lock);
	if (--bi->bi_phys_segments == 0) {
		int bytes = bi->bi_size;

		if ( bio_data_dir(bi) == WRITE )
			md_write_end(mddev);
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	spin_unlock_irq(&conf->device_lock);
	return 0;
}
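
/*
 * (Illustrative note, not part of the original source.)  bi_phys_segments
 * is re-used here as a reference count on the incoming bio: it starts at
 * 1 and add_stripe_bio() adds one per stripe unit the bio touches.  A
 * 16KiB write on a 4KiB-page array spans four stripe units, so the count
 * peaks at 5; the four handled stripes plus the final decrement at the
 * end of make_request() bring it back to 0, at which point bi_end_io runs.
 */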
/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x;
	unsigned long stripe;
	int chunk_offset;
	int dd_idx, pd_idx;
	sector_t first_sector;
	int raid_disks = conf->raid_disks;
	int data_disks = raid_disks-1;
	sector_t max_sector = mddev->size << 1;
	int sync_blocks;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}
	/* if there is 1 or more failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	x = sector_nr;
	chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	BUG_ON(x != stripe);

	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
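
/*
 * (Illustrative note, not part of the original source.)  Resync advances
 * in STRIPE_SECTORS units, i.e. one stripe-cache page (8 sectors, 4KiB,
 * with 4KiB pages) per call: the stripe is marked STRIPE_SYNCING and
 * handle_stripe() does the actual read/check/repair work.  The bitmap
 * path lets whole in-sync regions be skipped in multiples of
 * STRIPE_SECTORS instead of being read back page by page.
 */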
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	PRINTK("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct list_head *first;

		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		if (list_empty(&conf->handle_list) &&
		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
		    !blk_queue_plugged(mddev->queue) &&
		    !list_empty(&conf->delayed_list))
			raid5_activate_delayed(conf);

		if (list_empty(&conf->handle_list))
			break;

		first = conf->handle_list.next;
		sh = list_entry(first, struct stripe_head, lru);

		list_del_init(first);
		atomic_inc(&sh->count);
		if (atomic_read(&sh->count)!= 1)
			BUG();
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	PRINTK("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	unplug_slaves(mddev);

	PRINTK("--- raid5d inactive\n");
}

static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	char *end;
	int new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	new = simple_strtoul(page, &end, 10);
	if (!*page || (*end && *end != '\n') )
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
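
/*
 * (Illustrative note, not part of the original source.)  This attribute
 * is exposed through md's sysfs tree, so on a typical system the cache
 * can be resized at runtime with something like
 *
 *	echo 512 > /sys/block/md0/md/stripe_cache_size
 *
 * (md0 is just an example device).  Values must be above 16 and at most
 * 32768; growing allocates stripes immediately, while shrinking frees
 * only stripes that are currently inactive.
 */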

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;

	if (mddev->level != 5 && mddev->level != 4) {
		printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
		return -EIO;
	}

	mddev->private = kmalloc (sizeof (raid5_conf_t)
				  + mddev->raid_disks * sizeof(struct disk_info),
				  GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
		goto abort;
	memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);

	PRINTK("raid5: run(%s) called.\n", mdname(mddev));

	ITERATE_RDEV(mddev,rdev,tmp) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= mddev->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
				" disk %d\n", bdevname(rdev->bdev,b),
				raid_disk);
			conf->working_disks++;
		}
	}

	conf->raid_disks = mddev->raid_disks;
	/*
	 * 0 for a fully functional array, 1 for a degraded array.
	 */
	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 -1);
	mddev->resync_max_sectors = mddev->size << 1;

	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
			conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
			"raid5: unsupported parity algorithm %d for %s\n",
			conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > 1) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
			" (%d/%d failed)\n",
			mdname(mddev), conf->failed_disks, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded == 1 &&
	    mddev->recovery_cp != MaxSector) {
		printk(KERN_ERR
			"raid5: cannot start dirty degraded array for %s\n",
			mdname(mddev));
		goto abort;
	}

	{
		mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
		if (!mddev->thread) {
			printk(KERN_ERR
				"raid5: couldn't allocate thread for %s\n",
				mdname(mddev));
			goto abort;
		}
	}
	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
			"raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
			memory, mdname(mddev));
%s\n", 1930 memory, mdname(mddev)); 1931 1932 if (mddev->degraded == 0) 1933 printk("raid5: raid level %d set %s active with %d out of %d" 1934 " devices, algorithm %d\n", conf->level, mdname(mddev), 1935 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 1936 conf->algorithm); 1937 else 1938 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 1939 " out of %d devices, algorithm %d\n", conf->level, 1940 mdname(mddev), mddev->raid_disks - mddev->degraded, 1941 mddev->raid_disks, conf->algorithm); 1942 1943 print_raid5_conf(conf); 1944 1945 /* read-ahead size must cover two whole stripes, which is 1946 * 2 * (n-1) * chunksize where 'n' is the number of raid devices 1947 */ 1948 { 1949 int stripe = (mddev->raid_disks-1) * mddev->chunk_size 1950 / PAGE_CACHE_SIZE; 1951 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 1952 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 1953 } 1954 1955 /* Ok, everything is just fine now */ 1956 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 1957 1958 if (mddev->bitmap) 1959 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; 1960 1961 mddev->queue->unplug_fn = raid5_unplug_device; 1962 mddev->queue->issue_flush_fn = raid5_issue_flush; 1963 1964 mddev->array_size = mddev->size * (mddev->raid_disks - 1); 1965 return 0; 1966abort: 1967 if (conf) { 1968 print_raid5_conf(conf); 1969 if (conf->stripe_hashtbl) 1970 free_pages((unsigned long) conf->stripe_hashtbl, 1971 HASH_PAGES_ORDER); 1972 kfree(conf); 1973 } 1974 mddev->private = NULL; 1975 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 1976 return -EIO; 1977} 1978 1979 1980 1981static int stop(mddev_t *mddev) 1982{ 1983 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1984 1985 md_unregister_thread(mddev->thread); 1986 mddev->thread = NULL; 1987 shrink_stripes(conf); 1988 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); 1989 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 1990 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 1991 kfree(conf); 1992 mddev->private = NULL; 1993 return 0; 1994} 1995 1996#if RAID5_DEBUG 1997static void print_sh (struct stripe_head *sh) 1998{ 1999 int i; 2000 2001 printk("sh %llu, pd_idx %d, state %ld.\n", 2002 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 2003 printk("sh %llu, count %d.\n", 2004 (unsigned long long)sh->sector, atomic_read(&sh->count)); 2005 printk("sh %llu, ", (unsigned long long)sh->sector); 2006 for (i = 0; i < sh->raid_conf->raid_disks; i++) { 2007 printk("(cache%d: %p %ld) ", 2008 i, sh->dev[i].page, sh->dev[i].flags); 2009 } 2010 printk("\n"); 2011} 2012 2013static void printall (raid5_conf_t *conf) 2014{ 2015 struct stripe_head *sh; 2016 int i; 2017 2018 spin_lock_irq(&conf->device_lock); 2019 for (i = 0; i < NR_HASH; i++) { 2020 sh = conf->stripe_hashtbl[i]; 2021 for (; sh; sh = sh->hash_next) { 2022 if (sh->raid_conf != conf) 2023 continue; 2024 print_sh(sh); 2025 } 2026 } 2027 spin_unlock_irq(&conf->device_lock); 2028} 2029#endif 2030 2031static void status (struct seq_file *seq, mddev_t *mddev) 2032{ 2033 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 2034 int i; 2035 2036 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 2037 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks); 2038 for (i = 0; i < conf->raid_disks; i++) 2039 seq_printf (seq, "%s", 2040 conf->disks[i].rdev && 2041 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 2042 seq_printf (seq, "]"); 2043#if RAID5_DEBUG 2044#define D(x) \ 2045 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x)) 2046 printall(conf); 2047#endif 2048} 2049 2050static void print_raid5_conf (raid5_conf_t *conf) 2051{ 2052 int i; 2053 struct disk_info *tmp; 2054 2055 printk("RAID5 conf printout:\n"); 2056 if (!conf) { 2057 printk("(conf==NULL)\n"); 2058 return; 2059 } 2060 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks, 2061 conf->working_disks, conf->failed_disks); 2062 2063 for (i = 0; i < conf->raid_disks; i++) { 2064 char b[BDEVNAME_SIZE]; 2065 tmp = conf->disks + i; 2066 if (tmp->rdev) 2067 printk(" disk %d, o:%d, dev:%s\n", 2068 i, !test_bit(Faulty, &tmp->rdev->flags), 2069 bdevname(tmp->rdev->bdev,b)); 2070 } 2071} 2072 2073static int raid5_spare_active(mddev_t *mddev) 2074{ 2075 int i; 2076 raid5_conf_t *conf = mddev->private; 2077 struct disk_info *tmp; 2078 2079 for (i = 0; i < conf->raid_disks; i++) { 2080 tmp = conf->disks + i; 2081 if (tmp->rdev 2082 && !test_bit(Faulty, &tmp->rdev->flags) 2083 && !test_bit(In_sync, &tmp->rdev->flags)) { 2084 mddev->degraded--; 2085 conf->failed_disks--; 2086 conf->working_disks++; 2087 set_bit(In_sync, &tmp->rdev->flags); 2088 } 2089 } 2090 print_raid5_conf(conf); 2091 return 0; 2092} 2093 2094static int raid5_remove_disk(mddev_t *mddev, int number) 2095{ 2096 raid5_conf_t *conf = mddev->private; 2097 int err = 0; 2098 mdk_rdev_t *rdev; 2099 struct disk_info *p = conf->disks + number; 2100 2101 print_raid5_conf(conf); 2102 rdev = p->rdev; 2103 if (rdev) { 2104 if (test_bit(In_sync, &rdev->flags) || 2105 atomic_read(&rdev->nr_pending)) { 2106 err = -EBUSY; 2107 goto abort; 2108 } 2109 p->rdev = NULL; 2110 synchronize_rcu(); 2111 if (atomic_read(&rdev->nr_pending)) { 2112 /* lost the race, try later */ 2113 err = -EBUSY; 2114 p->rdev = rdev; 2115 } 2116 } 2117abort: 2118 2119 print_raid5_conf(conf); 2120 return err; 2121} 2122 2123static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 2124{ 2125 raid5_conf_t *conf = mddev->private; 2126 int found = 0; 2127 int disk; 2128 struct disk_info *p; 2129 2130 if (mddev->degraded > 1) 2131 /* no point adding a device */ 2132 return 0; 2133 2134 /* 2135 * find the disk ... 2136 */ 2137 for (disk=0; disk < mddev->raid_disks; disk++) 2138 if ((p=conf->disks + disk)->rdev == NULL) { 2139 clear_bit(In_sync, &rdev->flags); 2140 rdev->raid_disk = disk; 2141 found = 1; 2142 if (rdev->saved_raid_disk != disk) 2143 conf->fullsync = 1; 2144 rcu_assign_pointer(p->rdev, rdev); 2145 break; 2146 } 2147 print_raid5_conf(conf); 2148 return found; 2149} 2150 2151static int raid5_resize(mddev_t *mddev, sector_t sectors) 2152{ 2153 /* no resync is happening, and there is enough space 2154 * on all devices, so we can resize. 2155 * We need to make sure resync covers any new space. 2156 * If the array is shrinking we should possibly wait until 2157 * any io in the removed space completes, but it hardly seems 2158 * worth it. 
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
        /* no resync is happening, and there is enough space
         * on all devices, so we can resize.
         * We need to make sure resync covers any new space.
         * If the array is shrinking we should possibly wait until
         * any io in the removed space completes, but it hardly seems
         * worth it.
         */
        sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
        mddev->array_size = (sectors * (mddev->raid_disks - 1)) >> 1;
        set_capacity(mddev->gendisk, mddev->array_size << 1);
        mddev->changed = 1;
        if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
                mddev->recovery_cp = mddev->size << 1;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
        mddev->size = sectors / 2;
        mddev->resync_max_sectors = sectors;
        return 0;
}

static void raid5_quiesce(mddev_t *mddev, int state)
{
        raid5_conf_t *conf = mddev_to_conf(mddev);

        switch (state) {
        case 1: /* stop all writes */
                spin_lock_irq(&conf->device_lock);
                conf->quiesce = 1;
                wait_event_lock_irq(conf->wait_for_stripe,
                                    atomic_read(&conf->active_stripes) == 0,
                                    conf->device_lock, /* nothing */);
                spin_unlock_irq(&conf->device_lock);
                break;

        case 0: /* re-enable writes */
                spin_lock_irq(&conf->device_lock);
                conf->quiesce = 0;
                wake_up(&conf->wait_for_stripe);
                spin_unlock_irq(&conf->device_lock);
                break;
        }
        if (mddev->thread) {
                if (mddev->bitmap)
                        mddev->thread->timeout =
                                mddev->bitmap->daemon_sleep * HZ;
                else
                        mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
                md_wakeup_thread(mddev->thread);
        }
}

static mdk_personality_t raid5_personality =
{
        .name            = "raid5",
        .owner           = THIS_MODULE,
        .make_request    = make_request,
        .run             = run,
        .stop            = stop,
        .status          = status,
        .error_handler   = error,
        .hot_add_disk    = raid5_add_disk,
        .hot_remove_disk = raid5_remove_disk,
        .spare_active    = raid5_spare_active,
        .sync_request    = sync_request,
        .resize          = raid5_resize,
        .quiesce         = raid5_quiesce,
};

static int __init raid5_init(void)
{
        return register_md_personality(RAID5, &raid5_personality);
}

static void raid5_exit(void)
{
        unregister_md_personality(RAID5);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
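/*
 * The personality registers as MD personality number 4 (see the alias
 * above), so once the module is loaded an array using this driver can be
 * assembled from userspace, e.g. (illustrative mdadm invocation, not part
 * of the kernel source):
 *
 *      mdadm --create /dev/md0 --level=5 --raid-devices=4 /dev/sd[bcde]1
 */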