Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'md/4.4' of git://neil.brown.name/md

Pull md updates from Neil Brown:
"Two major components to this update.

1) The clustered-raid1 support from SUSE is nearly complete. There
are a few outstanding issues being worked on. Maybe half a dozen
patches will bring this to a usable state.

2) The first stage of journalled-raid5 support from Facebook makes an
appearance. With a journal device configured (typically NVRAM or
SSD), the "RAID5 write hole" should be closed - a crash during
degraded operations cannot result in data corruption.

The next stage will be to use the journal as a write-behind cache
so that latency can be reduced and in some cases throughput
increased by performing more full-stripe writes."
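
The mechanism behind the closed write hole is write-ahead logging: data and parity are committed to the journal device and flushed before the member disks are touched, so a crash can only lose writes that were never acknowledged, and recovery replays the log. A toy sketch of that ordering in plain C; every name below is invented for illustration and is not the kernel's raid5-cache API:

    #include <stdio.h>

    /* Toy model of the ordering that closes the RAID5 write hole. All names
     * here are invented; the kernel's raid5-cache is far more involved. */
    struct stripe { const char *data; const char *parity; };

    static void journal_append(const struct stripe *s)
    {
        printf("journal: append %s + %s\n", s->data, s->parity);
    }

    static void journal_flush(void)
    {
        printf("journal: flush (log entry now durable)\n");
    }

    static void raid_write_stripe(const struct stripe *s)
    {
        printf("array:   write %s + %s to member disks\n", s->data, s->parity);
    }

    int main(void)
    {
        struct stripe s = { "D0,D1", "P" };

        journal_append(&s);     /* 1. log data and parity together       */
        journal_flush();        /* 2. make the log entry durable         */
        raid_write_stripe(&s);  /* 3. only now touch the data/parity
                                 *    disks; a crash before this step is
                                 *    repaired by replaying the journal  */
        return 0;
    }

On the userspace side, later mdadm releases expose the feature as a --write-journal device given at array-creation time.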

* tag 'md/4.4' of git://neil.brown.name/md: (66 commits)
MD: when RAID journal is missing/faulty, block RESTART_ARRAY_RW
MD: set journal disk ->raid_disk
MD: kick out journal disk if it's not fresh
raid5-cache: start raid5 readonly if journal is missing
MD: add new bit to indicate raid array with journal
raid5-cache: IO error handling
raid5: journal disk can't be removed
raid5-cache: add trim support for log
MD: fix info output for journal disk
raid5-cache: use bio chaining
raid5-cache: small log->seq cleanup
raid5-cache: new helper: r5_reserve_log_entry
raid5-cache: inline r5l_alloc_io_unit into r5l_new_meta
raid5-cache: take rdev->data_offset into account early on
raid5-cache: refactor bio allocation
raid5-cache: clean up r5l_get_meta
raid5-cache: simplify state machine when caches flushes are not needed
raid5-cache: factor out a helper to run all stripes for an I/O unit
raid5-cache: rename flushed_ios to finished_ios
raid5-cache: free I/O units earlier
...

+1991 -312
+1 -1
drivers/md/Makefile
···
 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
 dm-era-y           += dm-era-target.o
 md-mod-y           += md.o bitmap.o
-raid456-y          += raid5.o
+raid456-y          += raid5.o raid5-cache.o

 # Note: link order is important. All raid personalities
 # and must come before md.o, as they each initialise
+6 -8
drivers/md/bitmap.c
···
     daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
     write_behind = le32_to_cpu(sb->write_behind);
     sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-    /* XXX: This is a hack to ensure that we don't use clustering
-     * in case:
-     *  - dm-raid is in use and
-     *  - the nodes written in bitmap_sb is erroneous.
+    /* Setup nodes/clustername only if bitmap version is
+     * cluster-compatible
      */
-    if (!bitmap->mddev->sync_super) {
+    if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
         nodes = le32_to_cpu(sb->nodes);
         strlcpy(bitmap->mddev->bitmap_info.cluster_name,
                 sb->cluster_name, 64);
···
     if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
         reason = "bad magic";
     else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
-             le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
+             le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
         reason = "unrecognized superblock version";
     else if (chunksize < 512)
         reason = "bitmap chunksize too small";
···
 }
 EXPORT_SYMBOL(bitmap_close_sync);

-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 {
     sector_t s = 0;
     sector_t blocks;
···
         bitmap->last_end_sync = jiffies;
         return;
     }
-    if (time_before(jiffies, (bitmap->last_end_sync
+    if (!force && time_before(jiffies, (bitmap->last_end_sync
                               + bitmap->mddev->bitmap_info.daemon_sleep)))
         return;
     wait_event(bitmap->mddev->recovery_wait,
+3 -1
drivers/md/bitmap.h
···
 #define BITMAP_MAJOR_LO 3
 /* version 4 insists the bitmap is in little-endian order
  * with version 3, it is host-endian which is non-portable
+ * Version 5 is currently set only for clustered devices
  */
 #define BITMAP_MAJOR_HI 4
+#define BITMAP_MAJOR_CLUSTERED 5
 #define BITMAP_MAJOR_HOSTENDIAN 3

 /*
···
 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
 void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);

 void bitmap_unplug(struct bitmap *bitmap);
 void bitmap_daemon_work(struct mddev *mddev);
+130 -100
drivers/md/md-cluster.c
···
     struct completion completion; /* completion for synchronized locking */
     void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
     struct mddev *mddev; /* pointing back to mddev. */
+    int mode;
 };

 struct suspend_info {
···
     dlm_lockspace_t *lockspace;
     int slot_number;
     struct completion completion;
-    struct mutex sb_mutex;
     struct dlm_lock_resource *bitmap_lockres;
+    struct dlm_lock_resource *resync_lockres;
     struct list_head suspend_list;
     spinlock_t suspend_lock;
     struct md_thread *recovery_thread;
···
 };

 struct cluster_msg {
-    int type;
-    int slot;
+    __le32 type;
+    __le32 slot;
     /* TODO: Unionize this for smaller footprint */
-    sector_t low;
-    sector_t high;
+    __le64 low;
+    __le64 high;
     char uuid[16];
-    int raid_slot;
+    __le32 raid_slot;
 };

 static void sync_ast(void *arg)
 {
     struct dlm_lock_resource *res;

-    res = (struct dlm_lock_resource *) arg;
+    res = arg;
     complete(&res->completion);
 }
···
     if (ret)
         return ret;
     wait_for_completion(&res->completion);
+    if (res->lksb.sb_status == 0)
+        res->mode = mode;
     return res->lksb.sb_status;
 }
···
     init_completion(&res->completion);
     res->ls = cinfo->lockspace;
     res->mddev = mddev;
+    res->mode = DLM_LOCK_IV;
     namelen = strlen(name);
     res->name = kzalloc(namelen + 1, GFP_KERNEL);
     if (!res->name) {
···
     kfree(res);
 }

-static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
-        sector_t lo, sector_t hi)
+static void add_resync_info(struct dlm_lock_resource *lockres,
+                            sector_t lo, sector_t hi)
 {
     struct resync_info *ri;
···
     dlm_lock_sync(lockres, DLM_LOCK_CR);
     memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
     hi = le64_to_cpu(ri.hi);
-    if (ri.hi > 0) {
+    if (hi > 0) {
         s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
         if (!s)
             goto out;
···
  */
 static void ack_bast(void *arg, int mode)
 {
-    struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg;
+    struct dlm_lock_resource *res = arg;
     struct md_cluster_info *cinfo = res->mddev->cluster_info;

     if (mode == DLM_LOCK_EX)
···
     list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
         if (slot == s->slot) {
-            pr_info("%s:%d Deleting suspend_info: %d\n",
-                __func__, __LINE__, slot);
             list_del(&s->list);
             kfree(s);
             break;
         }
 }

-static void remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+static void remove_suspend_info(struct mddev *mddev, int slot)
 {
+    struct md_cluster_info *cinfo = mddev->cluster_info;
     spin_lock_irq(&cinfo->suspend_lock);
     __remove_suspend_info(cinfo, slot);
     spin_unlock_irq(&cinfo->suspend_lock);
+    mddev->pers->quiesce(mddev, 2);
 }


-static void process_suspend_info(struct md_cluster_info *cinfo,
+static void process_suspend_info(struct mddev *mddev,
         int slot, sector_t lo, sector_t hi)
 {
+    struct md_cluster_info *cinfo = mddev->cluster_info;
     struct suspend_info *s;

     if (!hi) {
-        remove_suspend_info(cinfo, slot);
+        remove_suspend_info(mddev, slot);
+        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+        md_wakeup_thread(mddev->thread);
         return;
     }
     s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
···
     s->slot = slot;
     s->lo = lo;
     s->hi = hi;
+    mddev->pers->quiesce(mddev, 1);
+    mddev->pers->quiesce(mddev, 0);
     spin_lock_irq(&cinfo->suspend_lock);
     /* Remove existing entry (if exists) before adding */
     __remove_suspend_info(cinfo, slot);
     list_add(&s->list, &cinfo->suspend_list);
     spin_unlock_irq(&cinfo->suspend_lock);
+    mddev->pers->quiesce(mddev, 2);
 }

 static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
···
     len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
     sprintf(disk_uuid + len, "%pU", cmsg->uuid);
-    snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
+    snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));
     pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
     init_completion(&cinfo->newdisk_completion);
     set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
···
 static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
-
-    md_reload_sb(mddev);
+    md_reload_sb(mddev, le32_to_cpu(msg->raid_slot));
     dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
 }

 static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
 {
-    struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+    struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
+                                               le32_to_cpu(msg->raid_slot));

     if (rdev)
         md_kick_rdev_from_array(rdev);
     else
-        pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot);
+        pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
+                __func__, __LINE__, le32_to_cpu(msg->raid_slot));
 }

 static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
 {
-    struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+    struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
+                                               le32_to_cpu(msg->raid_slot));

     if (rdev && test_bit(Faulty, &rdev->flags))
         clear_bit(Faulty, &rdev->flags);
     else
-        pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot);
+        pr_warn("%s: %d Could not find disk(%d) which is faulty",
+                __func__, __LINE__, le32_to_cpu(msg->raid_slot));
 }

 static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 {
-    switch (msg->type) {
+    if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
+        "node %d received it's own msg\n", le32_to_cpu(msg->slot)))
+        return;
+    switch (le32_to_cpu(msg->type)) {
     case METADATA_UPDATED:
-        pr_info("%s: %d Received message: METADATA_UPDATE from %d\n",
-            __func__, __LINE__, msg->slot);
         process_metadata_update(mddev, msg);
         break;
     case RESYNCING:
-        pr_info("%s: %d Received message: RESYNCING from %d\n",
-            __func__, __LINE__, msg->slot);
-        process_suspend_info(mddev->cluster_info, msg->slot,
-                msg->low, msg->high);
+        process_suspend_info(mddev, le32_to_cpu(msg->slot),
+                             le64_to_cpu(msg->low),
+                             le64_to_cpu(msg->high));
         break;
     case NEWDISK:
-        pr_info("%s: %d Received message: NEWDISK from %d\n",
-            __func__, __LINE__, msg->slot);
         process_add_new_disk(mddev, msg);
         break;
     case REMOVE:
-        pr_info("%s: %d Received REMOVE from %d\n",
-            __func__, __LINE__, msg->slot);
         process_remove_disk(mddev, msg);
         break;
     case RE_ADD:
-        pr_info("%s: %d Received RE_ADD from %d\n",
-            __func__, __LINE__, msg->slot);
         process_readd_disk(mddev, msg);
         break;
     case BITMAP_NEEDS_SYNC:
-        pr_info("%s: %d Received BITMAP_NEEDS_SYNC from %d\n",
-            __func__, __LINE__, msg->slot);
-        __recover_slot(mddev, msg->slot);
+        __recover_slot(mddev, le32_to_cpu(msg->slot));
         break;
     default:
         pr_warn("%s:%d Received unknown message from %d\n",
···
 /* lock_comm()
  * Takes the lock on the TOKEN lock resource so no other
  * node can communicate while the operation is underway.
+ * If called again, and the TOKEN lock is alread in EX mode
+ * return success. However, care must be taken that unlock_comm()
+ * is called only once.
  */
 static int lock_comm(struct md_cluster_info *cinfo)
 {
     int error;
+
+    if (cinfo->token_lockres->mode == DLM_LOCK_EX)
+        return 0;

     error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
     if (error)
···
 static void unlock_comm(struct md_cluster_info *cinfo)
 {
+    WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
     dlm_unlock_sync(cinfo->token_lockres);
 }
···
     init_completion(&cinfo->completion);
     set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);

-    mutex_init(&cinfo->sb_mutex);
     mddev->cluster_info = cinfo;

     memset(str, 0, 64);
···
         goto err;
     }

+    cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
+    if (!cinfo->resync_lockres)
+        goto err;
+
     ret = gather_all_resync_info(mddev, nodes);
     if (ret)
         goto err;
···
     lockres_free(cinfo->token_lockres);
     lockres_free(cinfo->ack_lockres);
     lockres_free(cinfo->no_new_dev_lockres);
+    lockres_free(cinfo->resync_lockres);
     lockres_free(cinfo->bitmap_lockres);
     if (cinfo->lockspace)
         dlm_release_lockspace(cinfo->lockspace, 2);
···
     return ret;
 }

+static void resync_bitmap(struct mddev *mddev)
+{
+    struct md_cluster_info *cinfo = mddev->cluster_info;
+    struct cluster_msg cmsg = {0};
+    int err;
+
+    cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
+    err = sendmsg(cinfo, &cmsg);
+    if (err)
+        pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
+               __func__, __LINE__, err);
+}
+
 static int leave(struct mddev *mddev)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;

     if (!cinfo)
         return 0;
+
+    /* BITMAP_NEEDS_SYNC message should be sent when node
+     * is leaving the cluster with dirty bitmap, also we
+     * can only deliver it when dlm connection is available */
+    if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
+        resync_bitmap(mddev);
+
     md_unregister_thread(&cinfo->recovery_thread);
     md_unregister_thread(&cinfo->recv_thread);
     lockres_free(cinfo->message_lockres);
···
     return cinfo->slot_number - 1;
 }

-static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
-{
-    struct md_cluster_info *cinfo = mddev->cluster_info;
-
-    add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
-    /* Re-acquire the lock to refresh LVB */
-    dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
-}
-
 static int metadata_update_start(struct mddev *mddev)
 {
     return lock_comm(mddev->cluster_info);
···
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
     struct cluster_msg cmsg;
-    int ret;
+    struct md_rdev *rdev;
+    int ret = 0;
+    int raid_slot = -1;

     memset(&cmsg, 0, sizeof(cmsg));
     cmsg.type = cpu_to_le32(METADATA_UPDATED);
-    ret = __sendmsg(cinfo, &cmsg);
+    /* Pick up a good active device number to send.
+     */
+    rdev_for_each(rdev, mddev)
+        if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
+            raid_slot = rdev->desc_nr;
+            break;
+        }
+    if (raid_slot >= 0) {
+        cmsg.raid_slot = cpu_to_le32(raid_slot);
+        ret = __sendmsg(cinfo, &cmsg);
+    } else
+        pr_warn("md-cluster: No good device id found to send\n");
     unlock_comm(cinfo);
     return ret;
 }

-static int metadata_update_cancel(struct mddev *mddev)
+static void metadata_update_cancel(struct mddev *mddev)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
-
-    return dlm_unlock_sync(cinfo->token_lockres);
+    unlock_comm(cinfo);
 }

-static int resync_send(struct mddev *mddev, enum msg_type type,
-        sector_t lo, sector_t hi)
+static int resync_start(struct mddev *mddev)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
-    struct cluster_msg cmsg;
-    int slot = cinfo->slot_number - 1;
+    cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
+    return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
+}

-    pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__,
-            (unsigned long long)lo,
-            (unsigned long long)hi);
-    resync_info_update(mddev, lo, hi);
-    cmsg.type = cpu_to_le32(type);
-    cmsg.slot = cpu_to_le32(slot);
+static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+    struct md_cluster_info *cinfo = mddev->cluster_info;
+    struct cluster_msg cmsg = {0};
+
+    add_resync_info(cinfo->bitmap_lockres, lo, hi);
+    /* Re-acquire the lock to refresh LVB */
+    dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
+    cmsg.type = cpu_to_le32(RESYNCING);
     cmsg.low = cpu_to_le64(lo);
     cmsg.high = cpu_to_le64(hi);
+
     return sendmsg(cinfo, &cmsg);
 }

-static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
-{
-    pr_info("%s:%d\n", __func__, __LINE__);
-    return resync_send(mddev, RESYNCING, lo, hi);
-}
-
-static void resync_finish(struct mddev *mddev)
+static int resync_finish(struct mddev *mddev)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
-    struct cluster_msg cmsg;
-    int slot = cinfo->slot_number - 1;
-
-    pr_info("%s:%d\n", __func__, __LINE__);
-    resync_send(mddev, RESYNCING, 0, 0);
-    if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
-        cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
-        cmsg.slot = cpu_to_le32(slot);
-        sendmsg(cinfo, &cmsg);
-    }
+    cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE;
+    dlm_unlock_sync(cinfo->resync_lockres);
+    return resync_info_update(mddev, 0, 0);
 }

 static int area_resyncing(struct mddev *mddev, int direction,
···
     return ret;
 }

-static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+/* add_new_disk() - initiates a disk add
+ * However, if this fails before writing md_update_sb(),
+ * add_new_disk_cancel() must be called to release token lock
+ */
+static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
     struct md_cluster_info *cinfo = mddev->cluster_info;
     struct cluster_msg cmsg;
···
     memset(&cmsg, 0, sizeof(cmsg));
     cmsg.type = cpu_to_le32(NEWDISK);
     memcpy(cmsg.uuid, uuid, 16);
-    cmsg.raid_slot = rdev->desc_nr;
+    cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
     lock_comm(cinfo);
     ret = __sendmsg(cinfo, &cmsg);
     if (ret)
···
     /* Some node does not "see" the device */
     if (ret == -EAGAIN)
         ret = -ENOENT;
+    if (ret)
+        unlock_comm(cinfo);
     else
         dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
     return ret;
 }

-static int add_new_disk_finish(struct mddev *mddev)
+static void add_new_disk_cancel(struct mddev *mddev)
 {
-    struct cluster_msg cmsg;
     struct md_cluster_info *cinfo = mddev->cluster_info;
-    int ret;
-    /* Write sb and inform others */
-    md_update_sb(mddev, 1);
-    cmsg.type = METADATA_UPDATED;
-    ret = __sendmsg(cinfo, &cmsg);
     unlock_comm(cinfo);
-    return ret;
 }

 static int new_disk_ack(struct mddev *mddev, bool ack)
···
 static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
-    struct cluster_msg cmsg;
+    struct cluster_msg cmsg = {0};
     struct md_cluster_info *cinfo = mddev->cluster_info;
-    cmsg.type = REMOVE;
-    cmsg.raid_slot = rdev->desc_nr;
+    cmsg.type = cpu_to_le32(REMOVE);
+    cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
     return __sendmsg(cinfo, &cmsg);
 }
···
 {
     int sn, err;
     sector_t lo, hi;
-    struct cluster_msg cmsg;
+    struct cluster_msg cmsg = {0};
     struct mddev *mddev = rdev->mddev;
     struct md_cluster_info *cinfo = mddev->cluster_info;

-    cmsg.type = RE_ADD;
-    cmsg.raid_slot = rdev->desc_nr;
+    cmsg.type = cpu_to_le32(RE_ADD);
+    cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
     err = sendmsg(cinfo, &cmsg);
     if (err)
         goto out;
···
     .join = join,
     .leave = leave,
     .slot_number = slot_number,
-    .resync_info_update = resync_info_update,
     .resync_start = resync_start,
     .resync_finish = resync_finish,
+    .resync_info_update = resync_info_update,
     .metadata_update_start = metadata_update_start,
     .metadata_update_finish = metadata_update_finish,
     .metadata_update_cancel = metadata_update_cancel,
     .area_resyncing = area_resyncing,
-    .add_new_disk_start = add_new_disk_start,
-    .add_new_disk_finish = add_new_disk_finish,
+    .add_new_disk = add_new_disk,
+    .add_new_disk_cancel = add_new_disk_cancel,
     .new_disk_ack = new_disk_ack,
     .remove_disk = remove_disk,
     .gather_bitmaps = gather_bitmaps,
···
 module_init(cluster_init);
 module_exit(cluster_exit);
+MODULE_AUTHOR("SUSE");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Clustering support for MD");
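
A recurring change in this file is making struct cluster_msg fixed-endian on the wire: the fields become __le32/__le64 and every producer and consumer goes through cpu_to_le32()/le32_to_cpu() and friends, since nodes in the cluster need not share a byte order. A self-contained userspace sketch of the same idea; put_le32()/get_le32() are toy stand-ins for the kernel helpers, not replacements:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel's cpu_to_le32()/le32_to_cpu(): encode and
     * decode a 32-bit value as little-endian bytes, independent of host order. */
    static void put_le32(uint8_t *p, uint32_t v)
    {
        p[0] = (uint8_t)v;
        p[1] = (uint8_t)(v >> 8);
        p[2] = (uint8_t)(v >> 16);
        p[3] = (uint8_t)(v >> 24);
    }

    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        uint8_t wire[4];

        put_le32(wire, 0xfffe);  /* sender: fixed byte layout on the wire  */
        printf("decoded: 0x%x\n", /* receiver: same value on any host      */
               get_le32(wire));
        return 0;
    }

The struct layout itself is unchanged; only the interpretation of the bytes is pinned down, which is why every access site gained a conversion.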
+6 -6
drivers/md/md-cluster.h
···
     int (*join)(struct mddev *mddev, int nodes);
     int (*leave)(struct mddev *mddev);
     int (*slot_number)(struct mddev *mddev);
-    void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
-    int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
-    void (*resync_finish)(struct mddev *mddev);
+    int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
     int (*metadata_update_start)(struct mddev *mddev);
     int (*metadata_update_finish)(struct mddev *mddev);
-    int (*metadata_update_cancel)(struct mddev *mddev);
+    void (*metadata_update_cancel)(struct mddev *mddev);
+    int (*resync_start)(struct mddev *mddev);
+    int (*resync_finish)(struct mddev *mddev);
     int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
-    int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
-    int (*add_new_disk_finish)(struct mddev *mddev);
+    int (*add_new_disk)(struct mddev *mddev, struct md_rdev *rdev);
+    void (*add_new_disk_cancel)(struct mddev *mddev);
     int (*new_disk_ack)(struct mddev *mddev, bool ack);
     int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
     int (*gather_bitmaps)(struct md_rdev *rdev);
+366 -131
drivers/md/md.c
···
             ++ev1;
         if (rdev->desc_nr >= 0 &&
             rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
-            le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+            (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+             le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
             if (ev1 < mddev->events)
                 return -EINVAL;
     } else if (mddev->bitmap) {
···
         int role;
         if (rdev->desc_nr < 0 ||
             rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
-            role = 0xffff;
+            role = MD_DISK_ROLE_SPARE;
             rdev->desc_nr = -1;
         } else
             role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
         switch(role) {
-        case 0xffff: /* spare */
+        case MD_DISK_ROLE_SPARE: /* spare */
             break;
-        case 0xfffe: /* faulty */
+        case MD_DISK_ROLE_FAULTY: /* faulty */
             set_bit(Faulty, &rdev->flags);
+            break;
+        case MD_DISK_ROLE_JOURNAL: /* journal device */
+            if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
+                /* journal device without journal feature */
+                printk(KERN_WARNING
+                  "md: journal device provided without journal feature, ignoring the device\n");
+                return -EINVAL;
+            }
+            set_bit(Journal, &rdev->flags);
+            rdev->journal_tail = le64_to_cpu(sb->journal_tail);
+            if (mddev->recovery_cp == MaxSector)
+                set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+            rdev->raid_disk = mddev->raid_disks;
             break;
         default:
             rdev->saved_raid_disk = role;
···
             set_bit(WriteMostly, &rdev->flags);
         if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
             set_bit(Replacement, &rdev->flags);
+        if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
+            set_bit(MD_HAS_JOURNAL, &mddev->flags);
     } else /* MULTIPATH are always insync */
         set_bit(In_sync, &rdev->flags);
···
     sb->events = cpu_to_le64(mddev->events);
     if (mddev->in_sync)
         sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
+    else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
+        sb->resync_offset = cpu_to_le64(MaxSector);
     else
         sb->resync_offset = cpu_to_le64(0);
···
         sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
     }

-    if (rdev->raid_disk >= 0 &&
+    if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
         !test_bit(In_sync, &rdev->flags)) {
         sb->feature_map |=
             cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
···
         sb->feature_map |=
             cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
     }
+    /* Note: recovery_offset and journal_tail share space */
+    if (test_bit(Journal, &rdev->flags))
+        sb->journal_tail = cpu_to_le64(rdev->journal_tail);
     if (test_bit(Replacement, &rdev->flags))
         sb->feature_map |=
             cpu_to_le32(MD_FEATURE_REPLACEMENT);
···
                     - rdev->data_offset));
         }
     }
+
+    if (mddev_is_clustered(mddev))
+        sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);

     if (rdev->badblocks.count == 0)
         /* Nothing to do for bad blocks*/ ;
···
     max_dev = le32_to_cpu(sb->max_dev);

     for (i=0; i<max_dev;i++)
-        sb->dev_roles[i] = cpu_to_le16(0xfffe);
+        sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
+
+    if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
+        sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);

     rdev_for_each(rdev2, mddev) {
         i = rdev2->desc_nr;
         if (test_bit(Faulty, &rdev2->flags))
-            sb->dev_roles[i] = cpu_to_le16(0xfffe);
+            sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
         else if (test_bit(In_sync, &rdev2->flags))
             sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
+        else if (test_bit(Journal, &rdev2->flags))
+            sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
         else if (rdev2->raid_disk >= 0)
             sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
         else
-            sb->dev_roles[i] = cpu_to_le16(0xffff);
+            sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
     }

     sb->sb_csum = calc_sb_1_csum(sb);
···
     struct md_rdev *rdev, *rdev2;

     rcu_read_lock();
-    rdev_for_each_rcu(rdev, mddev1)
-        rdev_for_each_rcu(rdev2, mddev2)
+    rdev_for_each_rcu(rdev, mddev1) {
+        if (test_bit(Faulty, &rdev->flags) ||
+            test_bit(Journal, &rdev->flags) ||
+            rdev->raid_disk == -1)
+            continue;
+        rdev_for_each_rcu(rdev2, mddev2) {
+            if (test_bit(Faulty, &rdev2->flags) ||
+                test_bit(Journal, &rdev2->flags) ||
+                rdev2->raid_disk == -1)
+                continue;
             if (rdev->bdev->bd_contains ==
                 rdev2->bdev->bd_contains) {
                 rcu_read_unlock();
                 return 1;
             }
+        }
+    }
     rcu_read_unlock();
     return 0;
 }
···
     }
 }

+static bool does_sb_need_changing(struct mddev *mddev)
+{
+    struct md_rdev *rdev;
+    struct mdp_superblock_1 *sb;
+    int role;
+
+    /* Find a good rdev */
+    rdev_for_each(rdev, mddev)
+        if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
+            break;
+
+    /* No good device found. */
+    if (!rdev)
+        return false;
+
+    sb = page_address(rdev->sb_page);
+    /* Check if a device has become faulty or a spare become active */
+    rdev_for_each(rdev, mddev) {
+        role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+        /* Device activated? */
+        if (role == 0xffff && rdev->raid_disk >=0 &&
+            !test_bit(Faulty, &rdev->flags))
+            return true;
+        /* Device turned faulty? */
+        if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
+            return true;
+    }
+
+    /* Check if any mddev parameters have changed */
+    if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
+        (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
+        (mddev->layout != le64_to_cpu(sb->layout)) ||
+        (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
+        (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
+        return true;
+
+    return false;
+}
+
 void md_update_sb(struct mddev *mddev, int force_change)
 {
     struct md_rdev *rdev;
     int sync_req;
     int nospares = 0;
     int any_badblocks_changed = 0;
+    int ret = -1;

     if (mddev->ro) {
         if (force_change)
             set_bit(MD_CHANGE_DEVS, &mddev->flags);
         return;
     }
+
+    if (mddev_is_clustered(mddev)) {
+        if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+            force_change = 1;
+        ret = md_cluster_ops->metadata_update_start(mddev);
+        /* Has someone else has updated the sb */
+        if (!does_sb_need_changing(mddev)) {
+            if (ret == 0)
+                md_cluster_ops->metadata_update_cancel(mddev);
+            clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+            return;
+        }
+    }
 repeat:
     /* First make sure individual recovery_offsets are correct */
     rdev_for_each(rdev, mddev) {
         if (rdev->raid_disk >= 0 &&
             mddev->delta_disks >= 0 &&
+            !test_bit(Journal, &rdev->flags) &&
             !test_bit(In_sync, &rdev->flags) &&
             mddev->curr_resync_completed > rdev->recovery_offset)
                 rdev->recovery_offset = mddev->curr_resync_completed;
···
             clear_bit(BlockedBadBlocks, &rdev->flags);
             wake_up(&rdev->blocked_wait);
     }
+
+    if (mddev_is_clustered(mddev) && ret == 0)
+        md_cluster_ops->metadata_update_finish(mddev);
 }
 EXPORT_SYMBOL(md_update_sb);
···
         len += sprintf(page+len, "%sin_sync",sep);
         sep = ",";
     }
+    if (test_bit(Journal, &flags)) {
+        len += sprintf(page+len, "%sjournal",sep);
+        sep = ",";
+    }
     if (test_bit(WriteMostly, &flags)) {
         len += sprintf(page+len, "%swrite_mostly",sep);
         sep = ",";
···
         sep = ",";
     }
     if (!test_bit(Faulty, &flags) &&
+        !test_bit(Journal, &flags) &&
         !test_bit(In_sync, &flags)) {
         len += sprintf(page+len, "%sspare", sep);
         sep = ",";
···
             err = -EBUSY;
         else {
             struct mddev *mddev = rdev->mddev;
-            if (mddev_is_clustered(mddev))
-                md_cluster_ops->remove_disk(mddev, rdev);
-            md_kick_rdev_from_array(rdev);
-            if (mddev_is_clustered(mddev))
-                md_cluster_ops->metadata_update_start(mddev);
-            if (mddev->pers)
-                md_update_sb(mddev, 1);
-            md_new_event(mddev);
-            if (mddev_is_clustered(mddev))
-                md_cluster_ops->metadata_update_finish(mddev);
             err = 0;
+            if (mddev_is_clustered(mddev))
+                err = md_cluster_ops->remove_disk(mddev, rdev);
+
+            if (err == 0) {
+                md_kick_rdev_from_array(rdev);
+                if (mddev->pers)
+                    md_update_sb(mddev, 1);
+                md_new_event(mddev);
+            }
         }
     } else if (cmd_match(buf, "writemostly")) {
         set_bit(WriteMostly, &rdev->flags);
···
     } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
         set_bit(In_sync, &rdev->flags);
         err = 0;
-    } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
+    } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
+               !test_bit(Journal, &rdev->flags)) {
         if (rdev->mddev->pers == NULL) {
             clear_bit(In_sync, &rdev->flags);
             rdev->saved_raid_disk = rdev->raid_disk;
···
          * check if recovery is needed.
          */
         if (rdev->raid_disk >= 0 &&
+            !test_bit(Journal, &rdev->flags) &&
             !test_bit(Replacement, &rdev->flags))
             set_bit(WantReplacement, &rdev->flags);
         set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
···
 static ssize_t
 slot_show(struct md_rdev *rdev, char *page)
 {
-    if (rdev->raid_disk < 0)
+    if (test_bit(Journal, &rdev->flags))
+        return sprintf(page, "journal\n");
+    else if (rdev->raid_disk < 0)
         return sprintf(page, "none\n");
     else
         return sprintf(page, "%d\n", rdev->raid_disk);
···
     int slot;
     int err;

+    if (test_bit(Journal, &rdev->flags))
+        return -EBUSY;
     if (strncmp(buf, "none", 4)==0)
         slot = -1;
     else {
···
         rdev->saved_raid_disk = -1;
         clear_bit(In_sync, &rdev->flags);
         clear_bit(Bitmap_sync, &rdev->flags);
-        err = rdev->mddev->pers->
-            hot_add_disk(rdev->mddev, rdev);
-        if (err) {
-            rdev->raid_disk = -1;
-            return err;
-        } else
-            sysfs_notify_dirent_safe(rdev->sysfs_state);
-        if (sysfs_link_rdev(rdev->mddev, rdev))
-            /* failure here is OK */;
+        remove_and_add_spares(rdev->mddev, rdev);
+        if (rdev->raid_disk == -1)
+            return -EBUSY;
         /* don't wakeup anyone, leave that to userspace. */
     } else {
         if (slot >= rdev->mddev->raid_disks &&
···
     sector_t oldsectors = rdev->sectors;
     sector_t sectors;

+    if (test_bit(Journal, &rdev->flags))
+        return -EBUSY;
     if (strict_blocks_to_sectors(buf, &sectors) < 0)
         return -EINVAL;
     if (rdev->data_offset != rdev->new_data_offset)
···
             md_kick_rdev_from_array(rdev);
             continue;
         }
-        /* No device should have a Candidate flag
-         * when reading devices
-         */
-        if (test_bit(Candidate, &rdev->flags)) {
-            pr_info("md: kicking Cluster Candidate %s from array!\n",
-                bdevname(rdev->bdev, b));
-            md_kick_rdev_from_array(rdev);
-        }
     }
     if (mddev->level == LEVEL_MULTIPATH) {
         rdev->desc_nr = i++;
         rdev->raid_disk = rdev->desc_nr;
         set_bit(In_sync, &rdev->flags);
-    } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
+    } else if (rdev->raid_disk >=
+               (mddev->raid_disks - min(0, mddev->delta_disks)) &&
+               !test_bit(Journal, &rdev->flags)) {
         rdev->raid_disk = -1;
         clear_bit(In_sync, &rdev->flags);
     }
···
 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
 {
     unsigned long msec;
+
+    if (mddev_is_clustered(mddev)) {
+        pr_info("md: Safemode is disabled for clustered mode\n");
+        return -EINVAL;
+    }

     if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
         return -EINVAL;
···
         break;
     case clean:
         if (mddev->pers) {
-            restart_array(mddev);
+            err = restart_array(mddev);
+            if (err)
+                break;
             spin_lock(&mddev->lock);
             if (atomic_read(&mddev->writes_pending) == 0) {
                 if (mddev->in_sync == 0) {
···
         break;
     case active:
         if (mddev->pers) {
-            restart_array(mddev);
+            err = restart_array(mddev);
+            if (err)
+                break;
             clear_bit(MD_CHANGE_PENDING, &mddev->flags);
             wake_up(&mddev->sb_wait);
             err = 0;
···
     if (err)
         return err;
     if (mddev->pers) {
-        if (mddev_is_clustered(mddev))
-            md_cluster_ops->metadata_update_start(mddev);
         err = update_size(mddev, sectors);
         md_update_sb(mddev, 1);
-        if (mddev_is_clustered(mddev))
-            md_cluster_ops->metadata_update_finish(mddev);
     } else {
         if (mddev->dev_sectors == 0 ||
             mddev->dev_sectors > sectors)
···
     atomic_set(&mddev->max_corr_read_errors,
                MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
     mddev->safemode = 0;
-    mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
+    if (mddev_is_clustered(mddev))
+        mddev->safemode_delay = 0;
+    else
+        mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
     mddev->in_sync = 1;
     smp_wmb();
     spin_lock(&mddev->lock);
···
         goto out;
     }

+    if (mddev_is_clustered(mddev))
+        md_allow_write(mddev);
+
     md_wakeup_thread(mddev->thread);
     md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
···
         return -EINVAL;
     if (!mddev->ro)
         return -EBUSY;
+    if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+        struct md_rdev *rdev;
+        bool has_journal = false;
+
+        rcu_read_lock();
+        rdev_for_each_rcu(rdev, mddev) {
+            if (test_bit(Journal, &rdev->flags) &&
+                !test_bit(Faulty, &rdev->flags)) {
+                has_journal = true;
+                break;
+            }
+        }
+        rcu_read_unlock();
+
+        /* Don't restart rw with journal missing/faulty */
+        if (!has_journal)
+            return -EINVAL;
+    }
+
     mddev->safemode = 0;
     mddev->ro = 0;
     set_disk_ro(disk, 0);
···
 static void __md_stop_writes(struct mddev *mddev)
 {
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_start(mddev);
     set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
     flush_workqueue(md_misc_wq);
     if (mddev->sync_thread) {
···
     md_super_wait(mddev);

     if (mddev->ro == 0 &&
-        (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+        ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
+         (mddev->flags & MD_UPDATE_SB_FLAGS))) {
         /* mark array as shutdown cleanly */
-        mddev->in_sync = 1;
+        if (!mddev_is_clustered(mddev))
+            mddev->in_sync = 1;
         md_update_sb(mddev, 1);
     }
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_finish(mddev);
 }

 void md_stop_writes(struct mddev *mddev)
···
             info.state |= (1<<MD_DISK_ACTIVE);
             info.state |= (1<<MD_DISK_SYNC);
         }
+        if (test_bit(Journal, &rdev->flags))
+            info.state |= (1<<MD_DISK_JOURNAL);
         if (test_bit(WriteMostly, &rdev->flags))
             info.state |= (1<<MD_DISK_WRITEMOSTLY);
     } else {
···
         else
             clear_bit(WriteMostly, &rdev->flags);

+        if (info->state & (1<<MD_DISK_JOURNAL))
+            set_bit(Journal, &rdev->flags);
         /*
          * check whether the device shows up in other nodes
          */
         if (mddev_is_clustered(mddev)) {
-            if (info->state & (1 << MD_DISK_CANDIDATE)) {
-                /* Through --cluster-confirm */
+            if (info->state & (1 << MD_DISK_CANDIDATE))
                 set_bit(Candidate, &rdev->flags);
-                err = md_cluster_ops->new_disk_ack(mddev, true);
-                if (err) {
-                    export_rdev(rdev);
-                    return err;
-                }
-            } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
+            else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
                 /* --add initiated by this node */
-                err = md_cluster_ops->add_new_disk_start(mddev, rdev);
+                err = md_cluster_ops->add_new_disk(mddev, rdev);
                 if (err) {
-                    md_cluster_ops->add_new_disk_finish(mddev);
                     export_rdev(rdev);
                     return err;
                 }
···
         rdev->raid_disk = -1;
         err = bind_rdev_to_array(rdev, mddev);
+
         if (err)
             export_rdev(rdev);
-        else
+
+        if (mddev_is_clustered(mddev)) {
+            if (info->state & (1 << MD_DISK_CANDIDATE))
+                md_cluster_ops->new_disk_ack(mddev, (err == 0));
+            else {
+                if (err)
+                    md_cluster_ops->add_new_disk_cancel(mddev);
+                else
+                    err = add_bound_rdev(rdev);
+            }
+
+        } else if (!err)
             err = add_bound_rdev(rdev);
-        if (mddev_is_clustered(mddev) &&
-            (info->state & (1 << MD_DISK_CLUSTER_ADD)))
-            md_cluster_ops->add_new_disk_finish(mddev);
+
         return err;
     }
···
 {
     char b[BDEVNAME_SIZE];
     struct md_rdev *rdev;
+    int ret = -1;

     rdev = find_rdev(mddev, dev);
     if (!rdev)
         return -ENXIO;

     if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_start(mddev);
+        ret = md_cluster_ops->metadata_update_start(mddev);
+
+    if (rdev->raid_disk < 0)
+        goto kick_rdev;

     clear_bit(Blocked, &rdev->flags);
     remove_and_add_spares(mddev, rdev);
···
     if (rdev->raid_disk >= 0)
         goto busy;

-    if (mddev_is_clustered(mddev))
+kick_rdev:
+    if (mddev_is_clustered(mddev) && ret == 0)
         md_cluster_ops->remove_disk(mddev, rdev);

     md_kick_rdev_from_array(rdev);
     md_update_sb(mddev, 1);
     md_new_event(mddev);

-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_finish(mddev);
-
     return 0;
 busy:
-    if (mddev_is_clustered(mddev))
+    if (mddev_is_clustered(mddev) && ret == 0)
         md_cluster_ops->metadata_update_cancel(mddev);
+
     printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
            bdevname(rdev->bdev,b), mdname(mddev));
     return -EBUSY;
···
         goto abort_export;
     }

-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_start(mddev);
     clear_bit(In_sync, &rdev->flags);
     rdev->desc_nr = -1;
     rdev->saved_raid_disk = -1;
     err = bind_rdev_to_array(rdev, mddev);
     if (err)
-        goto abort_clustered;
+        goto abort_export;

     /*
      * The rest should better be atomic, we can have disk failures
···
     rdev->raid_disk = -1;

     md_update_sb(mddev, 1);
-
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_finish(mddev);
     /*
      * Kick recovery, maybe this spare has to be added to the
      * array immediately.
···
     md_new_event(mddev);
     return 0;

-abort_clustered:
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_cancel(mddev);
 abort_export:
     export_rdev(rdev);
     return err;
···
             return rv;
         }
     }
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_start(mddev);
     if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
         rv = update_size(mddev, (sector_t)info->size * 2);
···
         }
     }
     md_update_sb(mddev, 1);
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_finish(mddev);
     return rv;
 err:
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_cancel(mddev);
     return rv;
 }
···
                 bdevname(rdev->bdev,b), rdev->desc_nr);
             if (test_bit(WriteMostly, &rdev->flags))
                 seq_printf(seq, "(W)");
+            if (test_bit(Journal, &rdev->flags))
+                seq_printf(seq, "(J)");
             if (test_bit(Faulty, &rdev->flags)) {
                 seq_printf(seq, "(F)");
                 continue;
···
             mddev->safemode == 0)
             mddev->safemode = 1;
         spin_unlock(&mddev->lock);
-        if (mddev_is_clustered(mddev))
-            md_cluster_ops->metadata_update_start(mddev);
         md_update_sb(mddev, 0);
-        if (mddev_is_clustered(mddev))
-            md_cluster_ops->metadata_update_finish(mddev);
         sysfs_notify_dirent_safe(mddev->sysfs_state);
     } else
         spin_unlock(&mddev->lock);
···
     struct md_rdev *rdev;
     char *desc, *action = NULL;
     struct blk_plug plug;
+    bool cluster_resync_finished = false;

     /* just incase thread restarts... */
     if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
···
     rcu_read_lock();
     rdev_for_each_rcu(rdev, mddev)
         if (rdev->raid_disk >= 0 &&
+            !test_bit(Journal, &rdev->flags) &&
             !test_bit(Faulty, &rdev->flags) &&
             !test_bit(In_sync, &rdev->flags) &&
             rdev->recovery_offset < j)
···
     sysfs_notify(&mddev->kobj, NULL, "sync_completed");
     md_new_event(mddev);
     update_time = jiffies;
-
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->resync_start(mddev, j, max_sectors);

     blk_start_plug(&plug);
     while (j < max_sectors) {
···
             j = max_sectors;
         if (j > 2)
             mddev->curr_resync = j;
-        if (mddev_is_clustered(mddev))
-            md_cluster_ops->resync_info_update(mddev, j, max_sectors);
         mddev->curr_mark_cnt = io_sectors;
         if (last_check == 0)
             /* this is the earliest that rebuild will be
···
             mddev->curr_resync_completed = mddev->curr_resync;
             sysfs_notify(&mddev->kobj, NULL, "sync_completed");
     }
-    /* tell personality that we are finished */
+    /* tell personality and other nodes that we are finished */
+    if (mddev_is_clustered(mddev)) {
+        md_cluster_ops->resync_finish(mddev);
+        cluster_resync_finished = true;
+    }
     mddev->pers->sync_request(mddev, max_sectors, &skipped);

     if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
···
             rdev_for_each_rcu(rdev, mddev)
                 if (rdev->raid_disk >= 0 &&
                     mddev->delta_disks >= 0 &&
+                    !test_bit(Journal, &rdev->flags) &&
                     !test_bit(Faulty, &rdev->flags) &&
                     !test_bit(In_sync, &rdev->flags) &&
                     rdev->recovery_offset < mddev->curr_resync)
···
         }
     }
  skip:
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->resync_finish(mddev);
-
     set_bit(MD_CHANGE_DEVS, &mddev->flags);
+
+    if (mddev_is_clustered(mddev) &&
+        test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+        !cluster_resync_finished)
+        md_cluster_ops->resync_finish(mddev);

     spin_lock(&mddev->lock);
     if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
···
             rdev->raid_disk >= 0 &&
             !test_bit(Blocked, &rdev->flags) &&
             (test_bit(Faulty, &rdev->flags) ||
-             ! test_bit(In_sync, &rdev->flags)) &&
+             (!test_bit(In_sync, &rdev->flags) &&
+              !test_bit(Journal, &rdev->flags))) &&
             atomic_read(&rdev->nr_pending)==0) {
             if (mddev->pers->hot_remove_disk(
                     mddev, rdev) == 0) {
···
     if (removed && mddev->kobj.sd)
         sysfs_notify(&mddev->kobj, NULL, "degraded");

-    if (this)
+    if (this && removed)
         goto no_add;

     rdev_for_each(rdev, mddev) {
+        if (this && this != rdev)
+            continue;
+        if (test_bit(Candidate, &rdev->flags))
+            continue;
         if (rdev->raid_disk >= 0 &&
             !test_bit(In_sync, &rdev->flags) &&
+            !test_bit(Journal, &rdev->flags) &&
             !test_bit(Faulty, &rdev->flags))
             spares++;
         if (rdev->raid_disk >= 0)
             continue;
         if (test_bit(Faulty, &rdev->flags))
             continue;
+        if (test_bit(Journal, &rdev->flags))
+            continue;
         if (mddev->ro &&
             ! (rdev->saved_raid_disk >= 0 &&
···
 static void md_start_sync(struct work_struct *ws)
 {
     struct mddev *mddev = container_of(ws, struct mddev, del_work);
+    int ret = 0;
+
+    if (mddev_is_clustered(mddev)) {
+        ret = md_cluster_ops->resync_start(mddev);
+        if (ret) {
+            mddev->sync_thread = NULL;
+            goto out;
+        }
+    }

     mddev->sync_thread = md_register_thread(md_do_sync,
                                             mddev,
                                             "resync");
+out:
     if (!mddev->sync_thread) {
-        printk(KERN_ERR "%s: could not start resync"
-               " thread...\n",
-               mdname(mddev));
+        if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
+            printk(KERN_ERR "%s: could not start resync"
+                   " thread...\n",
+                   mdname(mddev));
         /* leave the spares where they are, it shouldn't hurt */
         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
···
             sysfs_notify_dirent_safe(mddev->sysfs_state);
         }

-        if (mddev->flags & MD_UPDATE_SB_FLAGS) {
-            if (mddev_is_clustered(mddev))
-                md_cluster_ops->metadata_update_start(mddev);
+        if (mddev->flags & MD_UPDATE_SB_FLAGS)
             md_update_sb(mddev, 0);
-            if (mddev_is_clustered(mddev))
-                md_cluster_ops->metadata_update_finish(mddev);
-        }

         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
             !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
···
             set_bit(MD_CHANGE_DEVS, &mddev->flags);
         }
     }
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_start(mddev);
     if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
         mddev->pers->finish_reshape)
         mddev->pers->finish_reshape(mddev);
···
             rdev->saved_raid_disk = -1;

     md_update_sb(mddev, 1);
-    if (mddev_is_clustered(mddev))
-        md_cluster_ops->metadata_update_finish(mddev);
     clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
     clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
     clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
···
     return ret;
 }

-void md_reload_sb(struct mddev *mddev)
+static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 {
-    struct md_rdev *rdev, *tmp;
+    struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+    struct md_rdev *rdev2;
+    int role, ret;
+    char b[BDEVNAME_SIZE];

-    rdev_for_each_safe(rdev, tmp, mddev) {
-        rdev->sb_loaded = 0;
-        ClearPageUptodate(rdev->sb_page);
-    }
-    mddev->raid_disks = 0;
-    analyze_sbs(mddev);
-    rdev_for_each_safe(rdev, tmp, mddev) {
-        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
-        /* since we don't write to faulty devices, we figure out if the
-         * disk is faulty by comparing events
-         */
-        if (mddev->events > sb->events)
-            set_bit(Faulty, &rdev->flags);
+    /* Check for change of roles in the active devices */
+    rdev_for_each(rdev2, mddev) {
+        if (test_bit(Faulty, &rdev2->flags))
+            continue;
+
+        /* Check if the roles changed */
+        role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
+
+        if (test_bit(Candidate, &rdev2->flags)) {
+            if (role == 0xfffe) {
+                pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
+                md_kick_rdev_from_array(rdev2);
+                continue;
+            }
+            else
+                clear_bit(Candidate, &rdev2->flags);
+        }
+
+        if (role != rdev2->raid_disk) {
+            /* got activated */
+            if (rdev2->raid_disk == -1 && role != 0xffff) {
+                rdev2->saved_raid_disk = role;
+                ret = remove_and_add_spares(mddev, rdev2);
+                pr_info("Activated spare: %s\n",
+                        bdevname(rdev2->bdev,b));
+                continue;
+            }
+            /* device faulty
+             * We just want to do the minimum to mark the disk
+             * as faulty. The recovery is performed by the
+             * one who initiated the error.
+             */
+            if ((role == 0xfffe) || (role == 0xfffd)) {
+                md_error(mddev, rdev2);
+                clear_bit(Blocked, &rdev2->flags);
+            }
+        }
     }

+    if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
+        update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+
+    /* Finally set the event to be up to date */
+    mddev->events = le64_to_cpu(sb->events);
+}
+
+static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
+{
+    int err;
+    struct page *swapout = rdev->sb_page;
+    struct mdp_superblock_1 *sb;
+
+    /* Store the sb page of the rdev in the swapout temporary
+     * variable in case we err in the future
+     */
+    rdev->sb_page = NULL;
+    alloc_disk_sb(rdev);
+    ClearPageUptodate(rdev->sb_page);
+    rdev->sb_loaded = 0;
+    err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
+
+    if (err < 0) {
+        pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
+                __func__, __LINE__, rdev->desc_nr, err);
+        put_page(rdev->sb_page);
+        rdev->sb_page = swapout;
+        rdev->sb_loaded = 1;
+        return err;
+    }
+
+    sb = page_address(rdev->sb_page);
+    /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
+     * is not set
+     */
+
+    if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
+        rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
+
+    /* The other node finished recovery, call spare_active to set
+     * device In_sync and mddev->degraded
+     */
+    if (rdev->recovery_offset == MaxSector &&
+        !test_bit(In_sync, &rdev->flags) &&
+        mddev->pers->spare_active(mddev))
+        sysfs_notify(&mddev->kobj, NULL, "degraded");
+
+    put_page(swapout);
+    return 0;
+}
+
+void md_reload_sb(struct mddev *mddev, int nr)
+{
+    struct md_rdev *rdev;
+    int err;
+
+    /* Find the rdev */
+    rdev_for_each_rcu(rdev, mddev) {
+        if (rdev->desc_nr == nr)
+            break;
+    }
+
+    if (!rdev || rdev->desc_nr != nr) {
+        pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
+        return;
+    }
+
+    err = read_rdev(mddev, rdev);
+    if (err < 0)
+        return;
+
+    check_sb_changes(mddev, rdev);
+
+    /* Read all rdev's to update recovery_offset */
+    rdev_for_each_rcu(rdev, mddev)
+        read_rdev(mddev, rdev);
 }
 EXPORT_SYMBOL(md_reload_sb);
+15 -2
drivers/md/md.h
···
                      * array and could again if we did a partial
                      * resync from the bitmap
                      */
-    sector_t recovery_offset;/* If this device has been partially
+    union {
+        sector_t recovery_offset;/* If this device has been partially
                      * recovered, this is where we were
                      * up to.
                      */
+        sector_t journal_tail;  /* If this device is a journal device,
+                     * this is the journal tail (journal
+                     * recovery start point)
+                     */
+    };

     atomic_t nr_pending; /* number of pending requests.
                           * only maintained for arrays that
···
                  * This device is seen locally but not
                  * by the whole cluster
                  */
+    Journal,     /* This device is used as journal for
+                  * raid-5/6.
+                  * Usually, this device should be faster
+                  * than other devices in the array
+                  */
 };

 #define BB_LEN_MASK (0x00000000000001FFULL)
···
 #define MD_STILL_CLOSED 4   /* If set, then array has not been opened since
                              * md_ioctl checked on it.
                              */
+#define MD_JOURNAL_CLEAN 5  /* A raid with journal is already clean */
+#define MD_HAS_JOURNAL 6    /* The raid array has journal feature set */

     int suspended;
     atomic_t active_io;
···
                    struct mddev *mddev);

 extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
-extern void md_reload_sb(struct mddev *mddev);
+extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
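
The recovery_offset/journal_tail union above is safe because the two meanings are never needed at once: a journal device is never partially recovered, so the same superblock field (and the same in-memory word) can carry the journal tail instead, discriminated by the Journal flag. A toy C11 sketch of the pattern, with invented names:

    #include <stdio.h>

    /* Toy model of the md.h union: one word is either a recovery offset or a
     * journal tail, discriminated by a "journal" flag. Names are illustrative,
     * standing in for test_bit(Journal, &rdev->flags) in the kernel. */
    struct toy_rdev {
        int is_journal;
        union {
            unsigned long long recovery_offset;
            unsigned long long journal_tail;
        };
    };

    int main(void)
    {
        struct toy_rdev r = { .is_journal = 1, .journal_tail = 4096 };

        if (r.is_journal)  /* always check the flag before reading */
            printf("journal tail: %llu\n", r.journal_tail);
        else
            printf("recovered up to: %llu\n", r.recovery_offset);
        return 0;
    }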
+37 -4
drivers/md/raid1.c
··· 90 90 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 91 91 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) 92 92 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) 93 + #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) 94 + #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) 93 95 #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) 94 96 95 97 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) ··· 1592 1590 if (rdev->raid_disk >= 0) 1593 1591 first = last = rdev->raid_disk; 1594 1592 1593 + /* 1594 + * find the disk ... but prefer rdev->saved_raid_disk 1595 + * if possible. 1596 + */ 1597 + if (rdev->saved_raid_disk >= 0 && 1598 + rdev->saved_raid_disk >= first && 1599 + conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1600 + first = last = rdev->saved_raid_disk; 1601 + 1595 1602 for (mirror = first; mirror <= last; mirror++) { 1596 1603 p = conf->mirrors+mirror; 1597 1604 if (!p->rdev) { ··· 2506 2495 2507 2496 bitmap_close_sync(mddev->bitmap); 2508 2497 close_sync(conf); 2498 + 2499 + if (mddev_is_clustered(mddev)) { 2500 + conf->cluster_sync_low = 0; 2501 + conf->cluster_sync_high = 0; 2502 + } 2509 2503 return 0; 2510 2504 } 2511 2505 ··· 2531 2515 return sync_blocks; 2532 2516 } 2533 2517 2534 - bitmap_cond_end_sync(mddev->bitmap, sector_nr); 2518 + /* we are incrementing sector_nr below. To be safe, we check against 2519 + * sector_nr + two times RESYNC_SECTORS 2520 + */ 2521 + 2522 + bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2523 + mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2535 2524 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 2536 2525 2537 2526 raise_barrier(conf, sector_nr); ··· 2726 2705 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 2727 2706 bio_full: 2728 2707 r1_bio->sectors = nr_sectors; 2708 + 2709 + if (mddev_is_clustered(mddev) && 2710 + conf->cluster_sync_high < sector_nr + nr_sectors) { 2711 + conf->cluster_sync_low = mddev->curr_resync_completed; 2712 + conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; 2713 + /* Send resync message */ 2714 + md_cluster_ops->resync_info_update(mddev, 2715 + conf->cluster_sync_low, 2716 + conf->cluster_sync_high); 2717 + } 2729 2718 2730 2719 /* For a user-requested sync, we read all readable devices and do a 2731 2720 * compare ··· 3051 3020 return -EINVAL; 3052 3021 } 3053 3022 3054 - err = md_allow_write(mddev); 3055 - if (err) 3056 - return err; 3023 + if (!mddev_is_clustered(mddev)) { 3024 + err = md_allow_write(mddev); 3025 + if (err) 3026 + return err; 3027 + } 3057 3028 3058 3029 raid_disks = mddev->raid_disks + mddev->delta_disks; 3059 3030
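For scale: assuming raid1.c's usual RESYNC_BLOCK_SIZE of 64 KiB and RESYNC_DEPTH of 32 (their values are not shown in this hunk, so treat them as assumptions), RESYNC_WINDOW comes to 2 MiB and CLUSTER_RESYNC_WINDOW to 16 times that, 32 MiB, i.e. 65536 sectors between resync_info_update() messages. A small check that can be compiled directly:

/* Worked numbers for the cluster resync window. RESYNC_BLOCK_SIZE and
 * RESYNC_DEPTH are assumed values (64 KiB, 32) since the hunk above does
 * not show them; the printed sizes are illustrative, not authoritative.
 */
#include <stdio.h>

#define RESYNC_BLOCK_SIZE (64 * 1024)			/* assumed */
#define RESYNC_DEPTH 32					/* assumed */
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

int main(void)
{
	printf("RESYNC_WINDOW         = %d bytes (%d sectors)\n",
	       RESYNC_WINDOW, RESYNC_WINDOW_SECTORS);
	printf("CLUSTER_RESYNC_WINDOW = %d bytes (%d sectors)\n",
	       CLUSTER_RESYNC_WINDOW, CLUSTER_RESYNC_WINDOW_SECTORS);
	return 0;
}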
+7
drivers/md/raid1.h
··· 111 111 * the new thread here until we fully activate the array. 112 112 */ 113 113 struct md_thread *thread; 114 + 115 + /* Keep track of cluster resync window to send to other 116 + * nodes. 117 + */ 118 + sector_t cluster_sync_low; 119 + sector_t cluster_sync_high; 120 + 114 121 }; 115 122 116 123 /*
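cluster_sync_low/cluster_sync_high publish a half-open window [low, high) that this node is currently resyncing, so peer nodes know which region is in flight. A hypothetical overlap test against such a window; the real cluster-md gating logic is more involved than this sketch:

/* Hypothetical check of a request [sector, sector + nr_sectors) against a
 * published resync window [lo, hi). Plain interval arithmetic only; this
 * is not the kernel's gating code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool overlaps_resync_window(sector_t lo, sector_t hi,
				   sector_t sector, sector_t nr_sectors)
{
	if (lo == hi)			/* window unset or empty */
		return false;
	return sector < hi && sector + nr_sectors > lo;
}

int main(void)
{
	/* window covering sectors [0, 65536) */
	printf("%d\n", overlaps_resync_window(0, 65536, 70000, 8));	/* 0 */
	printf("%d\n", overlaps_resync_window(0, 65536, 65530, 8));	/* 1 */
	return 0;
}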
+1 -1
drivers/md/raid10.c
··· 3149 3149 /* resync. Schedule a read for every block at this virt offset */ 3150 3150 int count = 0; 3151 3151 3152 - bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3152 + bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0); 3153 3153 3154 3154 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3155 3155 &sync_blocks, mddev->degraded) &&
+1191
drivers/md/raid5-cache.c
··· 1 + /*
2 + * Copyright (C) 2015 Shaohua Li <shli@fb.com>
3 + *
4 + * This program is free software; you can redistribute it and/or modify it
5 + * under the terms and conditions of the GNU General Public License,
6 + * version 2, as published by the Free Software Foundation.
7 + *
8 + * This program is distributed in the hope it will be useful, but WITHOUT
9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 + * more details.
12 + *
13 + */
14 + #include <linux/kernel.h>
15 + #include <linux/wait.h>
16 + #include <linux/blkdev.h>
17 + #include <linux/slab.h>
18 + #include <linux/raid/md_p.h>
19 + #include <linux/crc32c.h>
20 + #include <linux/random.h>
21 + #include "md.h"
22 + #include "raid5.h"
23 +
24 + /*
25 + * metadata/data are stored on disk in 4k units (blocks) regardless of the
26 + * underlying hardware sector size. This only works with PAGE_SIZE == 4096
27 + */
28 + #define BLOCK_SECTORS (8)
29 +
30 + /*
31 + * reclaim runs whenever 1/4 of the disk size or 10G of reclaimable space has
32 + * built up, whichever is smaller. This prevents recovery from scanning a very long log
33 + */
34 + #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
35 + #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
36 +
37 + struct r5l_log {
38 + struct md_rdev *rdev;
39 +
40 + u32 uuid_checksum;
41 +
42 + sector_t device_size; /* log device size, rounded to
43 + * BLOCK_SECTORS */
44 + sector_t max_free_space; /* reclaim runs if free space reaches
45 + * this size */
46 +
47 + sector_t last_checkpoint; /* log tail, where the recovery scan
48 + * starts from */
49 + u64 last_cp_seq; /* log tail sequence */
50 +
51 + sector_t log_start; /* log head, where new data is appended */
52 + u64 seq; /* log head sequence */
53 +
54 + sector_t next_checkpoint;
55 + u64 next_cp_seq;
56 +
57 + struct mutex io_mutex;
58 + struct r5l_io_unit *current_io; /* current io_unit accepting new data */
59 +
60 + spinlock_t io_list_lock;
61 + struct list_head running_ios; /* io_units which are still running,
62 + * and have not yet been completely
63 + * written to the log */
64 + struct list_head io_end_ios; /* io_units which have been completely
65 + * written to the log but not yet written
66 + * to the RAID */
67 + struct list_head flushing_ios; /* io_units which are waiting for log
68 + * cache flush */
69 + struct list_head finished_ios; /* io_units that have settled in the log disk */
70 + struct bio flush_bio;
71 +
72 + struct kmem_cache *io_kc;
73 +
74 + struct md_thread *reclaim_thread;
75 + unsigned long reclaim_target; /* amount of space that needs to be
76 + * reclaimed. If it's 0, reclaim spaces
77 + * used by io_units which are already in
78 + * IO_UNIT_STRIPE_END state (i.e. reclaim
79 + * doesn't wait for a specific io_unit
80 + * to switch to IO_UNIT_STRIPE_END
81 + * state) */
82 + wait_queue_head_t iounit_wait;
83 +
84 + struct list_head no_space_stripes; /* pending stripes, log has no space */
85 + spinlock_t no_space_stripes_lock;
86 +
87 + bool need_cache_flush;
88 + bool in_teardown;
89 + };
90 +
91 + /*
92 + * an IO range starts from a meta data block and ends at the next meta data
93 + * block. The io unit's meta data block tracks the data/parity that follow it. An
io 94 + * unit is written to log disk with normal write, as we always flush log disk 95 + * first and then start move data to raid disks, there is no requirement to 96 + * write io unit with FLUSH/FUA 97 + */ 98 + struct r5l_io_unit { 99 + struct r5l_log *log; 100 + 101 + struct page *meta_page; /* store meta block */ 102 + int meta_offset; /* current offset in meta_page */ 103 + 104 + struct bio *current_bio;/* current_bio accepting new data */ 105 + 106 + atomic_t pending_stripe;/* how many stripes not flushed to raid */ 107 + u64 seq; /* seq number of the metablock */ 108 + sector_t log_start; /* where the io_unit starts */ 109 + sector_t log_end; /* where the io_unit ends */ 110 + struct list_head log_sibling; /* log->running_ios */ 111 + struct list_head stripe_list; /* stripes added to the io_unit */ 112 + 113 + int state; 114 + bool need_split_bio; 115 + }; 116 + 117 + /* r5l_io_unit state */ 118 + enum r5l_io_unit_state { 119 + IO_UNIT_RUNNING = 0, /* accepting new IO */ 120 + IO_UNIT_IO_START = 1, /* io_unit bio start writing to log, 121 + * don't accepting new bio */ 122 + IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */ 123 + IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */ 124 + }; 125 + 126 + static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) 127 + { 128 + start += inc; 129 + if (start >= log->device_size) 130 + start = start - log->device_size; 131 + return start; 132 + } 133 + 134 + static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, 135 + sector_t end) 136 + { 137 + if (end >= start) 138 + return end - start; 139 + else 140 + return end + log->device_size - start; 141 + } 142 + 143 + static bool r5l_has_free_space(struct r5l_log *log, sector_t size) 144 + { 145 + sector_t used_size; 146 + 147 + used_size = r5l_ring_distance(log, log->last_checkpoint, 148 + log->log_start); 149 + 150 + return log->device_size > used_size + size; 151 + } 152 + 153 + static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io) 154 + { 155 + __free_page(io->meta_page); 156 + kmem_cache_free(log->io_kc, io); 157 + } 158 + 159 + static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to, 160 + enum r5l_io_unit_state state) 161 + { 162 + struct r5l_io_unit *io; 163 + 164 + while (!list_empty(from)) { 165 + io = list_first_entry(from, struct r5l_io_unit, log_sibling); 166 + /* don't change list order */ 167 + if (io->state >= state) 168 + list_move_tail(&io->log_sibling, to); 169 + else 170 + break; 171 + } 172 + } 173 + 174 + static void __r5l_set_io_unit_state(struct r5l_io_unit *io, 175 + enum r5l_io_unit_state state) 176 + { 177 + if (WARN_ON(io->state >= state)) 178 + return; 179 + io->state = state; 180 + } 181 + 182 + static void r5l_io_run_stripes(struct r5l_io_unit *io) 183 + { 184 + struct stripe_head *sh, *next; 185 + 186 + list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { 187 + list_del_init(&sh->log_list); 188 + set_bit(STRIPE_HANDLE, &sh->state); 189 + raid5_release_stripe(sh); 190 + } 191 + } 192 + 193 + static void r5l_log_run_stripes(struct r5l_log *log) 194 + { 195 + struct r5l_io_unit *io, *next; 196 + 197 + assert_spin_locked(&log->io_list_lock); 198 + 199 + list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { 200 + /* don't change list order */ 201 + if (io->state < IO_UNIT_IO_END) 202 + break; 203 + 204 + list_move_tail(&io->log_sibling, &log->finished_ios); 205 + r5l_io_run_stripes(io); 206 + } 207 + } 208 + 209 + static void 
r5l_log_endio(struct bio *bio) 210 + { 211 + struct r5l_io_unit *io = bio->bi_private; 212 + struct r5l_log *log = io->log; 213 + unsigned long flags; 214 + 215 + if (bio->bi_error) 216 + md_error(log->rdev->mddev, log->rdev); 217 + 218 + bio_put(bio); 219 + 220 + spin_lock_irqsave(&log->io_list_lock, flags); 221 + __r5l_set_io_unit_state(io, IO_UNIT_IO_END); 222 + if (log->need_cache_flush) 223 + r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, 224 + IO_UNIT_IO_END); 225 + else 226 + r5l_log_run_stripes(log); 227 + spin_unlock_irqrestore(&log->io_list_lock, flags); 228 + 229 + if (log->need_cache_flush) 230 + md_wakeup_thread(log->rdev->mddev->thread); 231 + } 232 + 233 + static void r5l_submit_current_io(struct r5l_log *log) 234 + { 235 + struct r5l_io_unit *io = log->current_io; 236 + struct r5l_meta_block *block; 237 + unsigned long flags; 238 + u32 crc; 239 + 240 + if (!io) 241 + return; 242 + 243 + block = page_address(io->meta_page); 244 + block->meta_size = cpu_to_le32(io->meta_offset); 245 + crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); 246 + block->checksum = cpu_to_le32(crc); 247 + 248 + log->current_io = NULL; 249 + spin_lock_irqsave(&log->io_list_lock, flags); 250 + __r5l_set_io_unit_state(io, IO_UNIT_IO_START); 251 + spin_unlock_irqrestore(&log->io_list_lock, flags); 252 + 253 + submit_bio(WRITE, io->current_bio); 254 + } 255 + 256 + static struct bio *r5l_bio_alloc(struct r5l_log *log) 257 + { 258 + struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); 259 + 260 + bio->bi_rw = WRITE; 261 + bio->bi_bdev = log->rdev->bdev; 262 + bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; 263 + 264 + return bio; 265 + } 266 + 267 + static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) 268 + { 269 + log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); 270 + 271 + /* 272 + * If we filled up the log device start from the beginning again, 273 + * which will require a new bio. 274 + * 275 + * Note: for this to work properly the log size needs to me a multiple 276 + * of BLOCK_SECTORS. 
277 + */ 278 + if (log->log_start == 0) 279 + io->need_split_bio = true; 280 + 281 + io->log_end = log->log_start; 282 + } 283 + 284 + static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) 285 + { 286 + struct r5l_io_unit *io; 287 + struct r5l_meta_block *block; 288 + 289 + /* We can't handle memory allocate failure so far */ 290 + io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL); 291 + io->log = log; 292 + INIT_LIST_HEAD(&io->log_sibling); 293 + INIT_LIST_HEAD(&io->stripe_list); 294 + io->state = IO_UNIT_RUNNING; 295 + 296 + io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO); 297 + block = page_address(io->meta_page); 298 + block->magic = cpu_to_le32(R5LOG_MAGIC); 299 + block->version = R5LOG_VERSION; 300 + block->seq = cpu_to_le64(log->seq); 301 + block->position = cpu_to_le64(log->log_start); 302 + 303 + io->log_start = log->log_start; 304 + io->meta_offset = sizeof(struct r5l_meta_block); 305 + io->seq = log->seq++; 306 + 307 + io->current_bio = r5l_bio_alloc(log); 308 + io->current_bio->bi_end_io = r5l_log_endio; 309 + io->current_bio->bi_private = io; 310 + bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); 311 + 312 + r5_reserve_log_entry(log, io); 313 + 314 + spin_lock_irq(&log->io_list_lock); 315 + list_add_tail(&io->log_sibling, &log->running_ios); 316 + spin_unlock_irq(&log->io_list_lock); 317 + 318 + return io; 319 + } 320 + 321 + static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) 322 + { 323 + if (log->current_io && 324 + log->current_io->meta_offset + payload_size > PAGE_SIZE) 325 + r5l_submit_current_io(log); 326 + 327 + if (!log->current_io) 328 + log->current_io = r5l_new_meta(log); 329 + return 0; 330 + } 331 + 332 + static void r5l_append_payload_meta(struct r5l_log *log, u16 type, 333 + sector_t location, 334 + u32 checksum1, u32 checksum2, 335 + bool checksum2_valid) 336 + { 337 + struct r5l_io_unit *io = log->current_io; 338 + struct r5l_payload_data_parity *payload; 339 + 340 + payload = page_address(io->meta_page) + io->meta_offset; 341 + payload->header.type = cpu_to_le16(type); 342 + payload->header.flags = cpu_to_le16(0); 343 + payload->size = cpu_to_le32((1 + !!checksum2_valid) << 344 + (PAGE_SHIFT - 9)); 345 + payload->location = cpu_to_le64(location); 346 + payload->checksum[0] = cpu_to_le32(checksum1); 347 + if (checksum2_valid) 348 + payload->checksum[1] = cpu_to_le32(checksum2); 349 + 350 + io->meta_offset += sizeof(struct r5l_payload_data_parity) + 351 + sizeof(__le32) * (1 + !!checksum2_valid); 352 + } 353 + 354 + static void r5l_append_payload_page(struct r5l_log *log, struct page *page) 355 + { 356 + struct r5l_io_unit *io = log->current_io; 357 + 358 + if (io->need_split_bio) { 359 + struct bio *prev = io->current_bio; 360 + 361 + io->current_bio = r5l_bio_alloc(log); 362 + bio_chain(io->current_bio, prev); 363 + 364 + submit_bio(WRITE, prev); 365 + } 366 + 367 + if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) 368 + BUG(); 369 + 370 + r5_reserve_log_entry(log, io); 371 + } 372 + 373 + static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, 374 + int data_pages, int parity_pages) 375 + { 376 + int i; 377 + int meta_size; 378 + struct r5l_io_unit *io; 379 + 380 + meta_size = 381 + ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) 382 + * data_pages) + 383 + sizeof(struct r5l_payload_data_parity) + 384 + sizeof(__le32) * parity_pages; 385 + 386 + r5l_get_meta(log, meta_size); 387 + io = log->current_io; 388 + 389 + for (i = 0; i < sh->disks; i++) { 390 + if 
(!test_bit(R5_Wantwrite, &sh->dev[i].flags)) 391 + continue; 392 + if (i == sh->pd_idx || i == sh->qd_idx) 393 + continue; 394 + r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, 395 + raid5_compute_blocknr(sh, i, 0), 396 + sh->dev[i].log_checksum, 0, false); 397 + r5l_append_payload_page(log, sh->dev[i].page); 398 + } 399 + 400 + if (sh->qd_idx >= 0) { 401 + r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, 402 + sh->sector, sh->dev[sh->pd_idx].log_checksum, 403 + sh->dev[sh->qd_idx].log_checksum, true); 404 + r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); 405 + r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); 406 + } else { 407 + r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, 408 + sh->sector, sh->dev[sh->pd_idx].log_checksum, 409 + 0, false); 410 + r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); 411 + } 412 + 413 + list_add_tail(&sh->log_list, &io->stripe_list); 414 + atomic_inc(&io->pending_stripe); 415 + sh->log_io = io; 416 + } 417 + 418 + static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); 419 + /* 420 + * running in raid5d, where reclaim could wait for raid5d too (when it flushes 421 + * data from log to raid disks), so we shouldn't wait for reclaim here 422 + */ 423 + int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) 424 + { 425 + int write_disks = 0; 426 + int data_pages, parity_pages; 427 + int meta_size; 428 + int reserve; 429 + int i; 430 + 431 + if (!log) 432 + return -EAGAIN; 433 + /* Don't support stripe batch */ 434 + if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || 435 + test_bit(STRIPE_SYNCING, &sh->state)) { 436 + /* the stripe is written to log, we start writing it to raid */ 437 + clear_bit(STRIPE_LOG_TRAPPED, &sh->state); 438 + return -EAGAIN; 439 + } 440 + 441 + for (i = 0; i < sh->disks; i++) { 442 + void *addr; 443 + 444 + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) 445 + continue; 446 + write_disks++; 447 + /* checksum is already calculated in last run */ 448 + if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 449 + continue; 450 + addr = kmap_atomic(sh->dev[i].page); 451 + sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, 452 + addr, PAGE_SIZE); 453 + kunmap_atomic(addr); 454 + } 455 + parity_pages = 1 + !!(sh->qd_idx >= 0); 456 + data_pages = write_disks - parity_pages; 457 + 458 + meta_size = 459 + ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) 460 + * data_pages) + 461 + sizeof(struct r5l_payload_data_parity) + 462 + sizeof(__le32) * parity_pages; 463 + /* Doesn't work with very big raid array */ 464 + if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE) 465 + return -EINVAL; 466 + 467 + set_bit(STRIPE_LOG_TRAPPED, &sh->state); 468 + /* 469 + * The stripe must enter state machine again to finish the write, so 470 + * don't delay. 
471 + */ 472 + clear_bit(STRIPE_DELAYED, &sh->state); 473 + atomic_inc(&sh->count); 474 + 475 + mutex_lock(&log->io_mutex); 476 + /* meta + data */ 477 + reserve = (1 + write_disks) << (PAGE_SHIFT - 9); 478 + if (r5l_has_free_space(log, reserve)) 479 + r5l_log_stripe(log, sh, data_pages, parity_pages); 480 + else { 481 + spin_lock(&log->no_space_stripes_lock); 482 + list_add_tail(&sh->log_list, &log->no_space_stripes); 483 + spin_unlock(&log->no_space_stripes_lock); 484 + 485 + r5l_wake_reclaim(log, reserve); 486 + } 487 + mutex_unlock(&log->io_mutex); 488 + 489 + return 0; 490 + } 491 + 492 + void r5l_write_stripe_run(struct r5l_log *log) 493 + { 494 + if (!log) 495 + return; 496 + mutex_lock(&log->io_mutex); 497 + r5l_submit_current_io(log); 498 + mutex_unlock(&log->io_mutex); 499 + } 500 + 501 + int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) 502 + { 503 + if (!log) 504 + return -ENODEV; 505 + /* 506 + * we flush log disk cache first, then write stripe data to raid disks. 507 + * So if bio is finished, the log disk cache is flushed already. The 508 + * recovery guarantees we can recovery the bio from log disk, so we 509 + * don't need to flush again 510 + */ 511 + if (bio->bi_iter.bi_size == 0) { 512 + bio_endio(bio); 513 + return 0; 514 + } 515 + bio->bi_rw &= ~REQ_FLUSH; 516 + return -EAGAIN; 517 + } 518 + 519 + /* This will run after log space is reclaimed */ 520 + static void r5l_run_no_space_stripes(struct r5l_log *log) 521 + { 522 + struct stripe_head *sh; 523 + 524 + spin_lock(&log->no_space_stripes_lock); 525 + while (!list_empty(&log->no_space_stripes)) { 526 + sh = list_first_entry(&log->no_space_stripes, 527 + struct stripe_head, log_list); 528 + list_del_init(&sh->log_list); 529 + set_bit(STRIPE_HANDLE, &sh->state); 530 + raid5_release_stripe(sh); 531 + } 532 + spin_unlock(&log->no_space_stripes_lock); 533 + } 534 + 535 + static sector_t r5l_reclaimable_space(struct r5l_log *log) 536 + { 537 + return r5l_ring_distance(log, log->last_checkpoint, 538 + log->next_checkpoint); 539 + } 540 + 541 + static bool r5l_complete_finished_ios(struct r5l_log *log) 542 + { 543 + struct r5l_io_unit *io, *next; 544 + bool found = false; 545 + 546 + assert_spin_locked(&log->io_list_lock); 547 + 548 + list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { 549 + /* don't change list order */ 550 + if (io->state < IO_UNIT_STRIPE_END) 551 + break; 552 + 553 + log->next_checkpoint = io->log_start; 554 + log->next_cp_seq = io->seq; 555 + 556 + list_del(&io->log_sibling); 557 + r5l_free_io_unit(log, io); 558 + 559 + found = true; 560 + } 561 + 562 + return found; 563 + } 564 + 565 + static void __r5l_stripe_write_finished(struct r5l_io_unit *io) 566 + { 567 + struct r5l_log *log = io->log; 568 + unsigned long flags; 569 + 570 + spin_lock_irqsave(&log->io_list_lock, flags); 571 + __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); 572 + 573 + if (!r5l_complete_finished_ios(log)) { 574 + spin_unlock_irqrestore(&log->io_list_lock, flags); 575 + return; 576 + } 577 + 578 + if (r5l_reclaimable_space(log) > log->max_free_space) 579 + r5l_wake_reclaim(log, 0); 580 + 581 + spin_unlock_irqrestore(&log->io_list_lock, flags); 582 + wake_up(&log->iounit_wait); 583 + } 584 + 585 + void r5l_stripe_write_finished(struct stripe_head *sh) 586 + { 587 + struct r5l_io_unit *io; 588 + 589 + io = sh->log_io; 590 + sh->log_io = NULL; 591 + 592 + if (io && atomic_dec_and_test(&io->pending_stripe)) 593 + __r5l_stripe_write_finished(io); 594 + } 595 + 596 + static void 
r5l_log_flush_endio(struct bio *bio)
597 + {
598 + struct r5l_log *log = container_of(bio, struct r5l_log,
599 + flush_bio);
600 + unsigned long flags;
601 + struct r5l_io_unit *io;
602 +
603 + if (bio->bi_error)
604 + md_error(log->rdev->mddev, log->rdev);
605 +
606 + spin_lock_irqsave(&log->io_list_lock, flags);
607 + list_for_each_entry(io, &log->flushing_ios, log_sibling)
608 + r5l_io_run_stripes(io);
609 + list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
610 + spin_unlock_irqrestore(&log->io_list_lock, flags);
611 + }
612 +
613 + /*
614 + * Starting to dispatch IO to raid.
615 + * The log is a sequence of io_units, each headed by a meta block. There is
616 + * one situation we want to avoid: a broken meta block in the middle of the
617 + * log prevents recovery from finding any meta block after it. So whenever a
618 + * meta block must persist in the log, every meta block before it must persist too. A case is:
619 + *
620 + * stripe data/parity is in the log and we start writing the stripe to the
621 + * raid disks. The data/parity must be persistent in the log before the write to the raid disks happens.
622 + *
623 + * The solution is that we strictly maintain io_unit list order. In this case,
624 + * we only write the stripes of an io_unit to the raid disks once it and all
625 + * io_units before it have their data/parity safely in the log.
626 + */
627 + void r5l_flush_stripe_to_raid(struct r5l_log *log)
628 + {
629 + bool do_flush;
630 +
631 + if (!log || !log->need_cache_flush)
632 + return;
633 +
634 + spin_lock_irq(&log->io_list_lock);
635 + /* flush bio is running */
636 + if (!list_empty(&log->flushing_ios)) {
637 + spin_unlock_irq(&log->io_list_lock);
638 + return;
639 + }
640 + list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
641 + do_flush = !list_empty(&log->flushing_ios);
642 + spin_unlock_irq(&log->io_list_lock);
643 +
644 + if (!do_flush)
645 + return;
646 + bio_reset(&log->flush_bio);
647 + log->flush_bio.bi_bdev = log->rdev->bdev;
648 + log->flush_bio.bi_end_io = r5l_log_flush_endio;
649 + submit_bio(WRITE_FLUSH, &log->flush_bio);
650 + }
651 +
652 + static void r5l_write_super(struct r5l_log *log, sector_t cp);
653 + static void r5l_write_super_and_discard_space(struct r5l_log *log,
654 + sector_t end)
655 + {
656 + struct block_device *bdev = log->rdev->bdev;
657 + struct mddev *mddev;
658 +
659 + r5l_write_super(log, end);
660 +
661 + if (!blk_queue_discard(bdev_get_queue(bdev)))
662 + return;
663 +
664 + mddev = log->rdev->mddev;
665 + /*
666 + * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
667 + * waits for this thread to finish. This thread waits for
668 + * MD_CHANGE_PENDING to clear, which is supposed to be done in
669 + * md_check_recovery(). md_check_recovery() tries to get
670 + * reconfig_mutex. Since r5l_quiesce already holds the mutex,
671 + * md_check_recovery() fails, so PENDING never gets cleared. The
672 + * in_teardown check works around this issue.
673 + */
674 + if (!log->in_teardown) {
675 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
676 + set_bit(MD_CHANGE_PENDING, &mddev->flags);
677 + md_wakeup_thread(mddev->thread);
678 + wait_event(mddev->sb_wait,
679 + !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
680 + log->in_teardown);
681 + /*
682 + * r5l_quiesce could run after the in_teardown check and take the
683 + * mutex first. The superblock might get updated twice.
684 + */ 685 + if (log->in_teardown) 686 + md_update_sb(mddev, 1); 687 + } else { 688 + WARN_ON(!mddev_is_locked(mddev)); 689 + md_update_sb(mddev, 1); 690 + } 691 + 692 + /* discard IO error really doesn't matter, ignore it */ 693 + if (log->last_checkpoint < end) { 694 + blkdev_issue_discard(bdev, 695 + log->last_checkpoint + log->rdev->data_offset, 696 + end - log->last_checkpoint, GFP_NOIO, 0); 697 + } else { 698 + blkdev_issue_discard(bdev, 699 + log->last_checkpoint + log->rdev->data_offset, 700 + log->device_size - log->last_checkpoint, 701 + GFP_NOIO, 0); 702 + blkdev_issue_discard(bdev, log->rdev->data_offset, end, 703 + GFP_NOIO, 0); 704 + } 705 + } 706 + 707 + 708 + static void r5l_do_reclaim(struct r5l_log *log) 709 + { 710 + sector_t reclaim_target = xchg(&log->reclaim_target, 0); 711 + sector_t reclaimable; 712 + sector_t next_checkpoint; 713 + u64 next_cp_seq; 714 + 715 + spin_lock_irq(&log->io_list_lock); 716 + /* 717 + * move proper io_unit to reclaim list. We should not change the order. 718 + * reclaimable/unreclaimable io_unit can be mixed in the list, we 719 + * shouldn't reuse space of an unreclaimable io_unit 720 + */ 721 + while (1) { 722 + reclaimable = r5l_reclaimable_space(log); 723 + if (reclaimable >= reclaim_target || 724 + (list_empty(&log->running_ios) && 725 + list_empty(&log->io_end_ios) && 726 + list_empty(&log->flushing_ios) && 727 + list_empty(&log->finished_ios))) 728 + break; 729 + 730 + md_wakeup_thread(log->rdev->mddev->thread); 731 + wait_event_lock_irq(log->iounit_wait, 732 + r5l_reclaimable_space(log) > reclaimable, 733 + log->io_list_lock); 734 + } 735 + 736 + next_checkpoint = log->next_checkpoint; 737 + next_cp_seq = log->next_cp_seq; 738 + spin_unlock_irq(&log->io_list_lock); 739 + 740 + BUG_ON(reclaimable < 0); 741 + if (reclaimable == 0) 742 + return; 743 + 744 + /* 745 + * write_super will flush cache of each raid disk. 
We must write super 746 + * here, because the log area might be reused soon and we don't want to 747 + * confuse recovery 748 + */ 749 + r5l_write_super_and_discard_space(log, next_checkpoint); 750 + 751 + mutex_lock(&log->io_mutex); 752 + log->last_checkpoint = next_checkpoint; 753 + log->last_cp_seq = next_cp_seq; 754 + mutex_unlock(&log->io_mutex); 755 + 756 + r5l_run_no_space_stripes(log); 757 + } 758 + 759 + static void r5l_reclaim_thread(struct md_thread *thread) 760 + { 761 + struct mddev *mddev = thread->mddev; 762 + struct r5conf *conf = mddev->private; 763 + struct r5l_log *log = conf->log; 764 + 765 + if (!log) 766 + return; 767 + r5l_do_reclaim(log); 768 + } 769 + 770 + static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) 771 + { 772 + unsigned long target; 773 + unsigned long new = (unsigned long)space; /* overflow in theory */ 774 + 775 + do { 776 + target = log->reclaim_target; 777 + if (new < target) 778 + return; 779 + } while (cmpxchg(&log->reclaim_target, target, new) != target); 780 + md_wakeup_thread(log->reclaim_thread); 781 + } 782 + 783 + void r5l_quiesce(struct r5l_log *log, int state) 784 + { 785 + struct mddev *mddev; 786 + if (!log || state == 2) 787 + return; 788 + if (state == 0) { 789 + log->in_teardown = 0; 790 + log->reclaim_thread = md_register_thread(r5l_reclaim_thread, 791 + log->rdev->mddev, "reclaim"); 792 + } else if (state == 1) { 793 + /* 794 + * at this point all stripes are finished, so io_unit is at 795 + * least in STRIPE_END state 796 + */ 797 + log->in_teardown = 1; 798 + /* make sure r5l_write_super_and_discard_space exits */ 799 + mddev = log->rdev->mddev; 800 + wake_up(&mddev->sb_wait); 801 + r5l_wake_reclaim(log, -1L); 802 + md_unregister_thread(&log->reclaim_thread); 803 + r5l_do_reclaim(log); 804 + } 805 + } 806 + 807 + bool r5l_log_disk_error(struct r5conf *conf) 808 + { 809 + /* don't allow write if journal disk is missing */ 810 + if (!conf->log) 811 + return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 812 + return test_bit(Faulty, &conf->log->rdev->flags); 813 + } 814 + 815 + struct r5l_recovery_ctx { 816 + struct page *meta_page; /* current meta */ 817 + sector_t meta_total_blocks; /* total size of current meta and data */ 818 + sector_t pos; /* recovery position */ 819 + u64 seq; /* recovery position seq */ 820 + }; 821 + 822 + static int r5l_read_meta_block(struct r5l_log *log, 823 + struct r5l_recovery_ctx *ctx) 824 + { 825 + struct page *page = ctx->meta_page; 826 + struct r5l_meta_block *mb; 827 + u32 crc, stored_crc; 828 + 829 + if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) 830 + return -EIO; 831 + 832 + mb = page_address(page); 833 + stored_crc = le32_to_cpu(mb->checksum); 834 + mb->checksum = 0; 835 + 836 + if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || 837 + le64_to_cpu(mb->seq) != ctx->seq || 838 + mb->version != R5LOG_VERSION || 839 + le64_to_cpu(mb->position) != ctx->pos) 840 + return -EINVAL; 841 + 842 + crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); 843 + if (stored_crc != crc) 844 + return -EINVAL; 845 + 846 + if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) 847 + return -EINVAL; 848 + 849 + ctx->meta_total_blocks = BLOCK_SECTORS; 850 + 851 + return 0; 852 + } 853 + 854 + static int r5l_recovery_flush_one_stripe(struct r5l_log *log, 855 + struct r5l_recovery_ctx *ctx, 856 + sector_t stripe_sect, 857 + int *offset, sector_t *log_offset) 858 + { 859 + struct r5conf *conf = log->rdev->mddev->private; 860 + struct stripe_head *sh; 861 + struct r5l_payload_data_parity *payload; 862 + 
int disk_index; 863 + 864 + sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); 865 + while (1) { 866 + payload = page_address(ctx->meta_page) + *offset; 867 + 868 + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { 869 + raid5_compute_sector(conf, 870 + le64_to_cpu(payload->location), 0, 871 + &disk_index, sh); 872 + 873 + sync_page_io(log->rdev, *log_offset, PAGE_SIZE, 874 + sh->dev[disk_index].page, READ, false); 875 + sh->dev[disk_index].log_checksum = 876 + le32_to_cpu(payload->checksum[0]); 877 + set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); 878 + ctx->meta_total_blocks += BLOCK_SECTORS; 879 + } else { 880 + disk_index = sh->pd_idx; 881 + sync_page_io(log->rdev, *log_offset, PAGE_SIZE, 882 + sh->dev[disk_index].page, READ, false); 883 + sh->dev[disk_index].log_checksum = 884 + le32_to_cpu(payload->checksum[0]); 885 + set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); 886 + 887 + if (sh->qd_idx >= 0) { 888 + disk_index = sh->qd_idx; 889 + sync_page_io(log->rdev, 890 + r5l_ring_add(log, *log_offset, BLOCK_SECTORS), 891 + PAGE_SIZE, sh->dev[disk_index].page, 892 + READ, false); 893 + sh->dev[disk_index].log_checksum = 894 + le32_to_cpu(payload->checksum[1]); 895 + set_bit(R5_Wantwrite, 896 + &sh->dev[disk_index].flags); 897 + } 898 + ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; 899 + } 900 + 901 + *log_offset = r5l_ring_add(log, *log_offset, 902 + le32_to_cpu(payload->size)); 903 + *offset += sizeof(struct r5l_payload_data_parity) + 904 + sizeof(__le32) * 905 + (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); 906 + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) 907 + break; 908 + } 909 + 910 + for (disk_index = 0; disk_index < sh->disks; disk_index++) { 911 + void *addr; 912 + u32 checksum; 913 + 914 + if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) 915 + continue; 916 + addr = kmap_atomic(sh->dev[disk_index].page); 917 + checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); 918 + kunmap_atomic(addr); 919 + if (checksum != sh->dev[disk_index].log_checksum) 920 + goto error; 921 + } 922 + 923 + for (disk_index = 0; disk_index < sh->disks; disk_index++) { 924 + struct md_rdev *rdev, *rrdev; 925 + 926 + if (!test_and_clear_bit(R5_Wantwrite, 927 + &sh->dev[disk_index].flags)) 928 + continue; 929 + 930 + /* in case device is broken */ 931 + rdev = rcu_dereference(conf->disks[disk_index].rdev); 932 + if (rdev) 933 + sync_page_io(rdev, stripe_sect, PAGE_SIZE, 934 + sh->dev[disk_index].page, WRITE, false); 935 + rrdev = rcu_dereference(conf->disks[disk_index].replacement); 936 + if (rrdev) 937 + sync_page_io(rrdev, stripe_sect, PAGE_SIZE, 938 + sh->dev[disk_index].page, WRITE, false); 939 + } 940 + raid5_release_stripe(sh); 941 + return 0; 942 + 943 + error: 944 + for (disk_index = 0; disk_index < sh->disks; disk_index++) 945 + sh->dev[disk_index].flags = 0; 946 + raid5_release_stripe(sh); 947 + return -EINVAL; 948 + } 949 + 950 + static int r5l_recovery_flush_one_meta(struct r5l_log *log, 951 + struct r5l_recovery_ctx *ctx) 952 + { 953 + struct r5conf *conf = log->rdev->mddev->private; 954 + struct r5l_payload_data_parity *payload; 955 + struct r5l_meta_block *mb; 956 + int offset; 957 + sector_t log_offset; 958 + sector_t stripe_sector; 959 + 960 + mb = page_address(ctx->meta_page); 961 + offset = sizeof(struct r5l_meta_block); 962 + log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 963 + 964 + while (offset < le32_to_cpu(mb->meta_size)) { 965 + int dd; 966 + 967 + payload = (void *)mb + offset; 968 + stripe_sector 
= raid5_compute_sector(conf,
969 + le64_to_cpu(payload->location), 0, &dd, NULL);
970 + if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
971 + &offset, &log_offset))
972 + return -EINVAL;
973 + }
974 + return 0;
975 + }
976 +
977 + /* copy data/parity from log to raid disks */
978 + static void r5l_recovery_flush_log(struct r5l_log *log,
979 + struct r5l_recovery_ctx *ctx)
980 + {
981 + while (1) {
982 + if (r5l_read_meta_block(log, ctx))
983 + return;
984 + if (r5l_recovery_flush_one_meta(log, ctx))
985 + return;
986 + ctx->seq++;
987 + ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
988 + }
989 + }
990 +
991 + static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
992 + u64 seq)
993 + {
994 + struct page *page;
995 + struct r5l_meta_block *mb;
996 + u32 crc;
997 +
998 + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
999 + if (!page)
1000 + return -ENOMEM;
1001 + mb = page_address(page);
1002 + mb->magic = cpu_to_le32(R5LOG_MAGIC);
1003 + mb->version = R5LOG_VERSION;
1004 + mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1005 + mb->seq = cpu_to_le64(seq);
1006 + mb->position = cpu_to_le64(pos);
1007 + crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1008 + mb->checksum = cpu_to_le32(crc);
1009 +
1010 + if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
1011 + __free_page(page);
1012 + return -EIO;
1013 + }
1014 + __free_page(page);
1015 + return 0;
1016 + }
1017 +
1018 + static int r5l_recovery_log(struct r5l_log *log)
1019 + {
1020 + struct r5l_recovery_ctx ctx;
1021 +
1022 + ctx.pos = log->last_checkpoint;
1023 + ctx.seq = log->last_cp_seq;
1024 + ctx.meta_page = alloc_page(GFP_KERNEL);
1025 + if (!ctx.meta_page)
1026 + return -ENOMEM;
1027 +
1028 + r5l_recovery_flush_log(log, &ctx);
1029 + __free_page(ctx.meta_page);
1030 +
1031 + /*
1032 + * we did a recovery. Now ctx.pos points to an invalid meta block, and the
1033 + * new log will start there. But we can't let the superblock point to the
1034 + * last valid meta block. The log might look like:
1035 + * | meta 1| meta 2| meta 3|
1036 + * meta 1 is valid, meta 2 is invalid and meta 3 could still be valid. If
1037 + * the superblock points to meta 1 and we write a new valid meta 2n, then
1038 + * if a crash happens again, the new recovery starts from meta 1. Since
1039 + * meta 2n is valid now, recovery will think meta 3 is valid too, which is wrong.
1040 + * The solution is to create a new meta at meta 2's position with its seq ==
1041 + * meta 1's seq + 10 and let the superblock point to meta2.
The same recovery will 1042 + * not think meta 3 is a valid meta, because its seq doesn't match 1043 + */ 1044 + if (ctx.seq > log->last_cp_seq + 1) { 1045 + int ret; 1046 + 1047 + ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); 1048 + if (ret) 1049 + return ret; 1050 + log->seq = ctx.seq + 11; 1051 + log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); 1052 + r5l_write_super(log, ctx.pos); 1053 + } else { 1054 + log->log_start = ctx.pos; 1055 + log->seq = ctx.seq; 1056 + } 1057 + return 0; 1058 + } 1059 + 1060 + static void r5l_write_super(struct r5l_log *log, sector_t cp) 1061 + { 1062 + struct mddev *mddev = log->rdev->mddev; 1063 + 1064 + log->rdev->journal_tail = cp; 1065 + set_bit(MD_CHANGE_DEVS, &mddev->flags); 1066 + } 1067 + 1068 + static int r5l_load_log(struct r5l_log *log) 1069 + { 1070 + struct md_rdev *rdev = log->rdev; 1071 + struct page *page; 1072 + struct r5l_meta_block *mb; 1073 + sector_t cp = log->rdev->journal_tail; 1074 + u32 stored_crc, expected_crc; 1075 + bool create_super = false; 1076 + int ret; 1077 + 1078 + /* Make sure it's valid */ 1079 + if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) 1080 + cp = 0; 1081 + page = alloc_page(GFP_KERNEL); 1082 + if (!page) 1083 + return -ENOMEM; 1084 + 1085 + if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) { 1086 + ret = -EIO; 1087 + goto ioerr; 1088 + } 1089 + mb = page_address(page); 1090 + 1091 + if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || 1092 + mb->version != R5LOG_VERSION) { 1093 + create_super = true; 1094 + goto create; 1095 + } 1096 + stored_crc = le32_to_cpu(mb->checksum); 1097 + mb->checksum = 0; 1098 + expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); 1099 + if (stored_crc != expected_crc) { 1100 + create_super = true; 1101 + goto create; 1102 + } 1103 + if (le64_to_cpu(mb->position) != cp) { 1104 + create_super = true; 1105 + goto create; 1106 + } 1107 + create: 1108 + if (create_super) { 1109 + log->last_cp_seq = prandom_u32(); 1110 + cp = 0; 1111 + /* 1112 + * Make sure super points to correct address. Log might have 1113 + * data very soon. 
If super hasn't correct log tail address, 1114 + * recovery can't find the log 1115 + */ 1116 + r5l_write_super(log, cp); 1117 + } else 1118 + log->last_cp_seq = le64_to_cpu(mb->seq); 1119 + 1120 + log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); 1121 + log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; 1122 + if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) 1123 + log->max_free_space = RECLAIM_MAX_FREE_SPACE; 1124 + log->last_checkpoint = cp; 1125 + 1126 + __free_page(page); 1127 + 1128 + return r5l_recovery_log(log); 1129 + ioerr: 1130 + __free_page(page); 1131 + return ret; 1132 + } 1133 + 1134 + int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) 1135 + { 1136 + struct r5l_log *log; 1137 + 1138 + if (PAGE_SIZE != 4096) 1139 + return -EINVAL; 1140 + log = kzalloc(sizeof(*log), GFP_KERNEL); 1141 + if (!log) 1142 + return -ENOMEM; 1143 + log->rdev = rdev; 1144 + 1145 + log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0); 1146 + 1147 + log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, 1148 + sizeof(rdev->mddev->uuid)); 1149 + 1150 + mutex_init(&log->io_mutex); 1151 + 1152 + spin_lock_init(&log->io_list_lock); 1153 + INIT_LIST_HEAD(&log->running_ios); 1154 + INIT_LIST_HEAD(&log->io_end_ios); 1155 + INIT_LIST_HEAD(&log->flushing_ios); 1156 + INIT_LIST_HEAD(&log->finished_ios); 1157 + bio_init(&log->flush_bio); 1158 + 1159 + log->io_kc = KMEM_CACHE(r5l_io_unit, 0); 1160 + if (!log->io_kc) 1161 + goto io_kc; 1162 + 1163 + log->reclaim_thread = md_register_thread(r5l_reclaim_thread, 1164 + log->rdev->mddev, "reclaim"); 1165 + if (!log->reclaim_thread) 1166 + goto reclaim_thread; 1167 + init_waitqueue_head(&log->iounit_wait); 1168 + 1169 + INIT_LIST_HEAD(&log->no_space_stripes); 1170 + spin_lock_init(&log->no_space_stripes_lock); 1171 + 1172 + if (r5l_load_log(log)) 1173 + goto error; 1174 + 1175 + conf->log = log; 1176 + return 0; 1177 + error: 1178 + md_unregister_thread(&log->reclaim_thread); 1179 + reclaim_thread: 1180 + kmem_cache_destroy(log->io_kc); 1181 + io_kc: 1182 + kfree(log); 1183 + return -EINVAL; 1184 + } 1185 + 1186 + void r5l_exit_log(struct r5l_log *log) 1187 + { 1188 + md_unregister_thread(&log->reclaim_thread); 1189 + kmem_cache_destroy(log->io_kc); 1190 + kfree(log); 1191 + }
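Two details of raid5-cache.c above deserve worked examples. First, the log is a ring of BLOCK_SECTORS-aligned blocks: r5l_ring_add() and r5l_ring_distance() do the wrap-around arithmetic without division, and the strict '>' in r5l_has_free_space() keeps the head from ever catching the tail exactly. A standalone model with a self-check; types are simplified and the device size is a toy value:

/* Standalone model of the log's ring arithmetic (r5l_ring_add,
 * r5l_ring_distance, r5l_has_free_space), lifted from the hunk above with
 * simplified types and a tiny toy device_size.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
static const sector_t device_size = 64;	/* toy log size, in sectors */

static sector_t ring_add(sector_t start, sector_t inc)
{
	start += inc;
	if (start >= device_size)
		start -= device_size;	/* single wrap; inc < device_size */
	return start;
}

static sector_t ring_distance(sector_t start, sector_t end)
{
	return end >= start ? end - start : end + device_size - start;
}

static bool has_free_space(sector_t tail, sector_t head, sector_t size)
{
	/* strict '>' leaves at least one sector of slack, so the head
	 * never catches the tail exactly */
	return device_size > ring_distance(tail, head) + size;
}

int main(void)
{
	assert(ring_add(60, 8) == 4);		/* wraps past the end */
	assert(ring_distance(60, 4) == 8);	/* distance across the wrap */
	assert(has_free_space(0, 8, 40));
	assert(!has_free_space(0, 8, 56));	/* would fill the ring */
	printf("ring model ok\n");
	return 0;
}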
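Second, the sequence-number bump in r5l_recovery_log(): writing the empty meta block with seq + 10 guarantees that stale meta blocks left over from the previous log generation fail the seq comparison in r5l_read_meta_block(), so a later recovery stops at the right place. A toy model of that scan, with illustrative numbers only:

/* Toy model of why recovery bumps the sequence number: a meta block is only
 * accepted when its seq matches what the scanner expects next, so a stale
 * "meta 3" from an older log generation is rejected once the new empty
 * block is written with seq += 10. Values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct meta { uint64_t seq; int valid_crc; };

static int scan(const struct meta *log, int n, uint64_t expect_seq)
{
	int replayed = 0;

	for (int i = 0; i < n; i++) {
		if (!log[i].valid_crc || log[i].seq != expect_seq)
			break;		/* recovery stops at first mismatch */
		replayed++;
		expect_seq++;
	}
	return replayed;
}

int main(void)
{
	/* meta1 valid, meta2 rewritten with seq+10, stale meta3 on disk */
	struct meta log[] = {
		{ .seq = 100, .valid_crc = 1 },	/* meta 1 */
		{ .seq = 110, .valid_crc = 1 },	/* new empty meta, seq jump */
		{ .seq = 102, .valid_crc = 1 },	/* stale meta 3, old run */
	};

	/* a recovery starting at meta2 expects seq 110, then 111:
	 * the stale meta 3 (seq 102) is rejected */
	printf("replayed from meta2: %d block(s)\n", scan(log + 1, 2, 110));
	return 0;
}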
+132 -57
drivers/md/raid5.c
··· 353 353 struct list_head *list = &temp_inactive_list[size - 1]; 354 354 355 355 /* 356 - * We don't hold any lock here yet, get_active_stripe() might 356 + * We don't hold any lock here yet, raid5_get_active_stripe() might 357 357 * remove stripes from the list 358 358 */ 359 359 if (!list_empty_careful(list)) { ··· 413 413 return count; 414 414 } 415 415 416 - static void release_stripe(struct stripe_head *sh) 416 + void raid5_release_stripe(struct stripe_head *sh) 417 417 { 418 418 struct r5conf *conf = sh->raid_conf; 419 419 unsigned long flags; ··· 658 658 return 0; 659 659 } 660 660 661 - static struct stripe_head * 662 - get_active_stripe(struct r5conf *conf, sector_t sector, 663 - int previous, int noblock, int noquiesce) 661 + struct stripe_head * 662 + raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 663 + int previous, int noblock, int noquiesce) 664 664 { 665 665 struct stripe_head *sh; 666 666 int hash = stripe_hash_locks_hash(sector); ··· 755 755 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 756 756 static bool stripe_can_batch(struct stripe_head *sh) 757 757 { 758 + struct r5conf *conf = sh->raid_conf; 759 + 760 + if (conf->log) 761 + return false; 758 762 return test_bit(STRIPE_BATCH_READY, &sh->state) && 759 763 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 760 764 is_full_stripe_write(sh); ··· 862 858 unlock_out: 863 859 unlock_two_stripes(head, sh); 864 860 out: 865 - release_stripe(head); 861 + raid5_release_stripe(head); 866 862 } 867 863 868 864 /* Determine if 'data_offset' or 'new_data_offset' should be used ··· 899 895 900 896 might_sleep(); 901 897 898 + if (r5l_write_stripe(conf->log, sh) == 0) 899 + return; 902 900 for (i = disks; i--; ) { 903 901 int rw; 904 902 int replace_only = 0; ··· 1214 1208 return_io(&return_bi); 1215 1209 1216 1210 set_bit(STRIPE_HANDLE, &sh->state); 1217 - release_stripe(sh); 1211 + raid5_release_stripe(sh); 1218 1212 } 1219 1213 1220 1214 static void ops_run_biofill(struct stripe_head *sh) ··· 1277 1271 if (sh->check_state == check_state_compute_run) 1278 1272 sh->check_state = check_state_compute_result; 1279 1273 set_bit(STRIPE_HANDLE, &sh->state); 1280 - release_stripe(sh); 1274 + raid5_release_stripe(sh); 1281 1275 } 1282 1276 1283 1277 /* return a pointer to the address conversion region of the scribble buffer */ ··· 1703 1697 } 1704 1698 1705 1699 set_bit(STRIPE_HANDLE, &sh->state); 1706 - release_stripe(sh); 1700 + raid5_release_stripe(sh); 1707 1701 } 1708 1702 1709 1703 static void ··· 1861 1855 1862 1856 sh->check_state = check_state_check_result; 1863 1857 set_bit(STRIPE_HANDLE, &sh->state); 1864 - release_stripe(sh); 1858 + raid5_release_stripe(sh); 1865 1859 } 1866 1860 1867 1861 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) ··· 2023 2017 /* we just created an active stripe so... 
*/ 2024 2018 atomic_inc(&conf->active_stripes); 2025 2019 2026 - release_stripe(sh); 2020 + raid5_release_stripe(sh); 2027 2021 conf->max_nr_stripes++; 2028 2022 return 1; 2029 2023 } ··· 2242 2236 if (!p) 2243 2237 err = -ENOMEM; 2244 2238 } 2245 - release_stripe(nsh); 2239 + raid5_release_stripe(nsh); 2246 2240 } 2247 2241 /* critical section pass, GFP_NOIO no longer needed */ 2248 2242 ··· 2400 2394 rdev_dec_pending(rdev, conf->mddev); 2401 2395 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2402 2396 set_bit(STRIPE_HANDLE, &sh->state); 2403 - release_stripe(sh); 2397 + raid5_release_stripe(sh); 2404 2398 } 2405 2399 2406 2400 static void raid5_end_write_request(struct bio *bi) ··· 2474 2468 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2475 2469 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2476 2470 set_bit(STRIPE_HANDLE, &sh->state); 2477 - release_stripe(sh); 2471 + raid5_release_stripe(sh); 2478 2472 2479 2473 if (sh->batch_head && sh != sh->batch_head) 2480 - release_stripe(sh->batch_head); 2474 + raid5_release_stripe(sh->batch_head); 2481 2475 } 2482 - 2483 - static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2484 2476 2485 2477 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2486 2478 { ··· 2495 2491 dev->rreq.bi_private = sh; 2496 2492 2497 2493 dev->flags = 0; 2498 - dev->sector = compute_blocknr(sh, i, previous); 2494 + dev->sector = raid5_compute_blocknr(sh, i, previous); 2499 2495 } 2500 2496 2501 2497 static void error(struct mddev *mddev, struct md_rdev *rdev) ··· 2528 2524 * Input: a 'big' sector number, 2529 2525 * Output: index of the data and parity disk, and the sector # in them. 2530 2526 */ 2531 - static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2532 - int previous, int *dd_idx, 2533 - struct stripe_head *sh) 2527 + sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2528 + int previous, int *dd_idx, 2529 + struct stripe_head *sh) 2534 2530 { 2535 2531 sector_t stripe, stripe2; 2536 2532 sector_t chunk_number; ··· 2730 2726 return new_sector; 2731 2727 } 2732 2728 2733 - static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 2729 + sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) 2734 2730 { 2735 2731 struct r5conf *conf = sh->raid_conf; 2736 2732 int raid_disks = sh->disks; ··· 3102 3098 if (bi) 3103 3099 bitmap_end = 1; 3104 3100 3101 + r5l_stripe_write_finished(sh); 3102 + 3105 3103 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3106 3104 wake_up(&conf->wait_for_overlap); 3107 3105 ··· 3147 3141 * the data has not reached the cache yet. 
3148 3142 */ 3149 3143 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3144 + s->failed > conf->max_degraded && 3150 3145 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3151 3146 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3152 3147 spin_lock_irq(&sh->stripe_lock); ··· 3504 3497 WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); 3505 3498 WARN_ON(dev->page != dev->orig_page); 3506 3499 } 3500 + 3501 + r5l_stripe_write_finished(sh); 3502 + 3507 3503 if (!discard_pending && 3508 3504 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3509 3505 int hash; ··· 3949 3939 struct stripe_head *sh2; 3950 3940 struct async_submit_ctl submit; 3951 3941 3952 - sector_t bn = compute_blocknr(sh, i, 1); 3942 + sector_t bn = raid5_compute_blocknr(sh, i, 1); 3953 3943 sector_t s = raid5_compute_sector(conf, bn, 0, 3954 3944 &dd_idx, NULL); 3955 - sh2 = get_active_stripe(conf, s, 0, 1, 1); 3945 + sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); 3956 3946 if (sh2 == NULL) 3957 3947 /* so far only the early blocks of this stripe 3958 3948 * have been requested. When later blocks ··· 3962 3952 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 3963 3953 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 3964 3954 /* must have already done this block */ 3965 - release_stripe(sh2); 3955 + raid5_release_stripe(sh2); 3966 3956 continue; 3967 3957 } 3968 3958 ··· 3983 3973 set_bit(STRIPE_EXPAND_READY, &sh2->state); 3984 3974 set_bit(STRIPE_HANDLE, &sh2->state); 3985 3975 } 3986 - release_stripe(sh2); 3976 + raid5_release_stripe(sh2); 3987 3977 3988 3978 } 3989 3979 /* done submitting copies, wait for them to complete */ ··· 4018 4008 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4019 4009 s->failed_num[0] = -1; 4020 4010 s->failed_num[1] = -1; 4011 + s->log_failed = r5l_log_disk_error(conf); 4021 4012 4022 4013 /* Now to look around and see what can be done */ 4023 4014 rcu_read_lock(); ··· 4270 4259 if (handle_flags == 0 || 4271 4260 sh->state & handle_flags) 4272 4261 set_bit(STRIPE_HANDLE, &sh->state); 4273 - release_stripe(sh); 4262 + raid5_release_stripe(sh); 4274 4263 } 4275 4264 spin_lock_irq(&head_sh->stripe_lock); 4276 4265 head_sh->batch_head = NULL; ··· 4331 4320 4332 4321 analyse_stripe(sh, &s); 4333 4322 4323 + if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 4324 + goto finish; 4325 + 4334 4326 if (s.handle_bad_blocks) { 4335 4327 set_bit(STRIPE_HANDLE, &sh->state); 4336 4328 goto finish; ··· 4362 4348 /* check if the array has lost more than max_degraded devices and, 4363 4349 * if so, some requests might need to be failed. 4364 4350 */ 4365 - if (s.failed > conf->max_degraded) { 4351 + if (s.failed > conf->max_degraded || s.log_failed) { 4366 4352 sh->check_state = 0; 4367 4353 sh->reconstruct_state = 0; 4368 4354 break_stripe_batch_list(sh, 0); ··· 4520 4506 /* Finish reconstruct operations initiated by the expansion process */ 4521 4507 if (sh->reconstruct_state == reconstruct_state_result) { 4522 4508 struct stripe_head *sh_src 4523 - = get_active_stripe(conf, sh->sector, 1, 1, 1); 4509 + = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); 4524 4510 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 4525 4511 /* sh cannot be written until sh_src has been read. 
4526 4512 * so arrange for sh to be delayed a little ··· 4530 4516 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4531 4517 &sh_src->state)) 4532 4518 atomic_inc(&conf->preread_active_stripes); 4533 - release_stripe(sh_src); 4519 + raid5_release_stripe(sh_src); 4534 4520 goto finish; 4535 4521 } 4536 4522 if (sh_src) 4537 - release_stripe(sh_src); 4523 + raid5_release_stripe(sh_src); 4538 4524 4539 4525 sh->reconstruct_state = reconstruct_state_idle; 4540 4526 clear_bit(STRIPE_EXPANDING, &sh->state); ··· 5026 5012 struct raid5_plug_cb *cb; 5027 5013 5028 5014 if (!blk_cb) { 5029 - release_stripe(sh); 5015 + raid5_release_stripe(sh); 5030 5016 return; 5031 5017 } 5032 5018 ··· 5042 5028 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5043 5029 list_add_tail(&sh->lru, &cb->list); 5044 5030 else 5045 - release_stripe(sh); 5031 + raid5_release_stripe(sh); 5046 5032 } 5047 5033 5048 5034 static void make_discard_request(struct mddev *mddev, struct bio *bi) ··· 5077 5063 DEFINE_WAIT(w); 5078 5064 int d; 5079 5065 again: 5080 - sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 5066 + sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); 5081 5067 prepare_to_wait(&conf->wait_for_overlap, &w, 5082 5068 TASK_UNINTERRUPTIBLE); 5083 5069 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5084 5070 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5085 - release_stripe(sh); 5071 + raid5_release_stripe(sh); 5086 5072 schedule(); 5087 5073 goto again; 5088 5074 } ··· 5094 5080 if (sh->dev[d].towrite || sh->dev[d].toread) { 5095 5081 set_bit(R5_Overlap, &sh->dev[d].flags); 5096 5082 spin_unlock_irq(&sh->stripe_lock); 5097 - release_stripe(sh); 5083 + raid5_release_stripe(sh); 5098 5084 schedule(); 5099 5085 goto again; 5100 5086 } ··· 5150 5136 bool do_prepare; 5151 5137 5152 5138 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 5153 - md_flush_request(mddev, bi); 5154 - return; 5139 + int ret = r5l_handle_flush_request(conf->log, bi); 5140 + 5141 + if (ret == 0) 5142 + return; 5143 + if (ret == -ENODEV) { 5144 + md_flush_request(mddev, bi); 5145 + return; 5146 + } 5147 + /* ret == -EAGAIN, fallback */ 5155 5148 } 5156 5149 5157 5150 md_write_start(mddev, bi); ··· 5231 5210 (unsigned long long)new_sector, 5232 5211 (unsigned long long)logical_sector); 5233 5212 5234 - sh = get_active_stripe(conf, new_sector, previous, 5213 + sh = raid5_get_active_stripe(conf, new_sector, previous, 5235 5214 (bi->bi_rw&RWA_MASK), 0); 5236 5215 if (sh) { 5237 5216 if (unlikely(previous)) { ··· 5252 5231 must_retry = 1; 5253 5232 spin_unlock_irq(&conf->device_lock); 5254 5233 if (must_retry) { 5255 - release_stripe(sh); 5234 + raid5_release_stripe(sh); 5256 5235 schedule(); 5257 5236 do_prepare = true; 5258 5237 goto retry; ··· 5262 5241 /* Might have got the wrong stripe_head 5263 5242 * by accident 5264 5243 */ 5265 - release_stripe(sh); 5244 + raid5_release_stripe(sh); 5266 5245 goto retry; 5267 5246 } 5268 5247 5269 5248 if (rw == WRITE && 5270 5249 logical_sector >= mddev->suspend_lo && 5271 5250 logical_sector < mddev->suspend_hi) { 5272 - release_stripe(sh); 5251 + raid5_release_stripe(sh); 5273 5252 /* As the suspend_* range is controlled by 5274 5253 * userspace, we want an interruptible 5275 5254 * wait. 
··· 5292 5271 * and wait a while 5293 5272 */ 5294 5273 md_wakeup_thread(mddev->thread); 5295 - release_stripe(sh); 5274 + raid5_release_stripe(sh); 5296 5275 schedule(); 5297 5276 do_prepare = true; 5298 5277 goto retry; ··· 5479 5458 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 5480 5459 int j; 5481 5460 int skipped_disk = 0; 5482 - sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5461 + sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5483 5462 set_bit(STRIPE_EXPANDING, &sh->state); 5484 5463 atomic_inc(&conf->reshape_stripes); 5485 5464 /* If any of this stripe is beyond the end of the old ··· 5492 5471 if (conf->level == 6 && 5493 5472 j == sh->qd_idx) 5494 5473 continue; 5495 - s = compute_blocknr(sh, j, 0); 5474 + s = raid5_compute_blocknr(sh, j, 0); 5496 5475 if (s < raid5_size(mddev, 0, 0)) { 5497 5476 skipped_disk = 1; 5498 5477 continue; ··· 5528 5507 if (last_sector >= mddev->dev_sectors) 5529 5508 last_sector = mddev->dev_sectors - 1; 5530 5509 while (first_sector <= last_sector) { 5531 - sh = get_active_stripe(conf, first_sector, 1, 0, 1); 5510 + sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); 5532 5511 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 5533 5512 set_bit(STRIPE_HANDLE, &sh->state); 5534 - release_stripe(sh); 5513 + raid5_release_stripe(sh); 5535 5514 first_sector += STRIPE_SECTORS; 5536 5515 } 5537 5516 /* Now that the sources are clearly marked, we can release ··· 5540 5519 while (!list_empty(&stripes)) { 5541 5520 sh = list_entry(stripes.next, struct stripe_head, lru); 5542 5521 list_del_init(&sh->lru); 5543 - release_stripe(sh); 5522 + raid5_release_stripe(sh); 5544 5523 } 5545 5524 /* If this takes us to the resync_max point where we have to pause, 5546 5525 * then we need to write out the superblock. 
··· 5636 5615 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5637 5616 }
5638 5617 
5639 - bitmap_cond_end_sync(mddev->bitmap, sector_nr);
5618 + bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
5640 5619 
5641 - sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
5620 + sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
5642 5621 if (sh == NULL) {
5643 - sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
5622 + sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
5644 5623 /* make sure we don't swamp the stripe cache if someone else
5645 5624 * is trying to get access
5646 5625 */
··· 5664 5643 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5665 5644 set_bit(STRIPE_HANDLE, &sh->state);
5666 5645 
5667 - release_stripe(sh);
5646 + raid5_release_stripe(sh);
5668 5647 
5669 5648 return STRIPE_SECTORS;
5670 5649 }
··· 5703 5682 /* already done this stripe */
5704 5683 continue;
5705 5684 
5706 - sh = get_active_stripe(conf, sector, 0, 1, 1);
5685 + sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
5707 5686 
5708 5687 if (!sh) {
5709 5688 /* failed to get a stripe - must wait */
··· 5713 5692 }
5714 5693 
5715 5694 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
5716 - release_stripe(sh);
5695 + raid5_release_stripe(sh);
5717 5696 raid5_set_bi_processed_stripes(raid_bio, scnt);
5718 5697 conf->retry_read_aligned = raid_bio;
5719 5698 return handled;
··· 5721 5700 
5722 5701 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
5723 5702 handle_stripe(sh);
5724 - release_stripe(sh);
5703 + raid5_release_stripe(sh);
5725 5704 handled++;
5726 5705 }
5727 5706 remaining = raid5_dec_bi_active_stripes(raid_bio);
··· 5751 5730 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5752 5731 if (!list_empty(temp_inactive_list + i))
5753 5732 break;
5754 - if (i == NR_STRIPE_HASH_LOCKS)
5733 + if (i == NR_STRIPE_HASH_LOCKS) {
5734 + spin_unlock_irq(&conf->device_lock);
5735 + r5l_flush_stripe_to_raid(conf->log);
5736 + spin_lock_irq(&conf->device_lock);
5755 5737 return batch_size;
5738 + }
5756 5739 release_inactive = true;
5757 5740 }
5758 5741 spin_unlock_irq(&conf->device_lock);
··· 5764 5739 release_inactive_stripe_list(conf, temp_inactive_list,
5765 5740 NR_STRIPE_HASH_LOCKS);
5766 5741 
5742 + r5l_flush_stripe_to_raid(conf->log);
5767 5743 if (release_inactive) {
5768 5744 spin_lock_irq(&conf->device_lock);
5769 5745 return 0;
··· 5772 5746 
5773 5747 for (i = 0; i < batch_size; i++)
5774 5748 handle_stripe(batch[i]);
5749 + r5l_write_stripe_run(conf->log);
5775 5750 
5776 5751 cond_resched();
5777 5752 
··· 5905 5878 set_bit(R5_DID_ALLOC, &conf->cache_state);
5906 5879 mutex_unlock(&conf->cache_size_mutex);
5907 5880 }
5881 + 
5882 + r5l_flush_stripe_to_raid(conf->log);
5908 5883 
5909 5884 async_tx_issue_pending_all();
5910 5885 blk_finish_plug(&plug);
··· 6345 6316 
6346 6317 static void free_conf(struct r5conf *conf)
6347 6318 {
6319 + if (conf->log)
6320 + r5l_exit_log(conf->log);
6348 6321 if (conf->shrinker.seeks)
6349 6322 unregister_shrinker(&conf->shrinker);
6323 + 
6350 6324 free_thread_groups(conf);
6351 6325 shrink_stripes(conf);
6352 6326 raid5_free_percpu(conf);
··· 6562 6530 rdev_for_each(rdev, mddev) {
6563 6531 raid_disk = rdev->raid_disk;
6564 6532 if (raid_disk >= max_disks
6565 - || raid_disk < 0)
6533 + || raid_disk < 0 || test_bit(Journal, &rdev->flags))
6566 6534 continue;
6567 6535 disk = conf->disks + raid_disk;
6568 6536 
··· 6682 6650 int working_disks = 0;
6683 6651 int dirty_parity_disks = 0;
6684 6652 struct md_rdev *rdev;
6653 + struct md_rdev *journal_dev = NULL;
6685 6654 sector_t reshape_offset = 0;
6686 6655 int i;
6687 6656 long long min_offset_diff = 0;
··· 6695 6662 
6696 6663 rdev_for_each(rdev, mddev) {
6697 6664 long long diff;
6665 + 
6666 + if (test_bit(Journal, &rdev->flags)) {
6667 + journal_dev = rdev;
6668 + continue;
6669 + }
6698 6670 if (rdev->raid_disk < 0)
6699 6671 continue;
6700 6672 diff = (rdev->new_data_offset - rdev->data_offset);
··· 6732 6694 int max_degraded = (mddev->level == 6 ? 2 : 1);
6733 6695 int chunk_sectors;
6734 6696 int new_data_disks;
6697 + 
6698 + if (journal_dev) {
6699 + printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
6700 + mdname(mddev));
6701 + return -EINVAL;
6702 + }
6735 6703 
6736 6704 if (mddev->new_level != mddev->level) {
6737 6705 printk(KERN_ERR "md/raid:%s: unsupported reshape "
··· 6813 6769 
6814 6770 if (IS_ERR(conf))
6815 6771 return PTR_ERR(conf);
6772 + 
6773 + if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
6774 + printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
6775 + mdname(mddev));
6776 + mddev->ro = 1;
6777 + set_disk_ro(mddev->gendisk, 1);
6778 + }
6816 6779 
6817 6780 conf->min_offset_diff = min_offset_diff;
6818 6781 mddev->thread = conf->thread;
··· 7024 6973 mddev->queue);
7025 6974 }
7026 6975 
6976 + if (journal_dev) {
6977 + char b[BDEVNAME_SIZE];
6978 + 
6979 + printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
6980 + mdname(mddev), bdevname(journal_dev->bdev, b));
6981 + r5l_init_log(conf, journal_dev);
6982 + }
6983 + 
7027 6984 return 0;
7028 6985 abort:
7029 6986 md_unregister_thread(&mddev->thread);
··· 7141 7082 struct disk_info *p = conf->disks + number;
7142 7083 
7143 7084 print_raid5_conf(conf);
7085 + if (test_bit(Journal, &rdev->flags)) {
7086 + /*
7087 + * journal disk is not removable, but we need give a chance to
7088 + * update superblock of other disks. Otherwise journal disk
7089 + * will be considered as 'fresh'
7090 + */
7091 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
7092 + return -EINVAL;
7093 + }
7144 7094 if (rdev == p->rdev)
7145 7095 rdevp = &p->rdev;
7146 7096 else if (rdev == p->replacement)
··· 7212 7144 int first = 0;
7213 7145 int last = conf->raid_disks - 1;
7214 7146 
7147 + if (test_bit(Journal, &rdev->flags))
7148 + return -EINVAL;
7215 7149 if (mddev->recovery_disabled == conf->recovery_disabled)
7216 7150 return -EBUSY;
7217 7151 
··· 7275 7205 sector_t newsize;
7276 7206 struct r5conf *conf = mddev->private;
7277 7207 
7208 + if (conf->log)
7209 + return -EINVAL;
7278 7210 sectors &= ~((sector_t)conf->chunk_sectors - 1);
7279 7211 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
7280 7212 if (mddev->external_size &&
··· 7328 7256 {
7329 7257 struct r5conf *conf = mddev->private;
7330 7258 
7259 + if (conf->log)
7260 + return -EINVAL;
7331 7261 if (mddev->delta_disks == 0 &&
7332 7262 mddev->new_layout == mddev->layout &&
7333 7263 mddev->new_chunk_sectors == mddev->chunk_sectors)
··· 7606 7532 unlock_all_device_hash_locks_irq(conf);
7607 7533 break;
7608 7534 }
7535 + r5l_quiesce(conf->log, state);
7609 7536 }
7610 7537 
7611 7538 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
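Taken together, the raid5.c hooks enforce a strict ordering: dirty stripes are routed through the log, r5l_write_stripe_run() kicks the queued log I/O after each batch of handle_stripe() calls, and r5l_flush_stripe_to_raid() only lets stripes reach the member disks once their log writes have completed. That ordering is what closes the write hole described in the pull message. The following toy userspace model (all types and function names here are hypothetical, not kernel code) illustrates why a crash between the data and parity writes can no longer tear a stripe:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Toy model: a stripe becomes durable in the journal before any
 * in-place write starts, so recovery can always replay it whole. */
struct stripe { char data[16]; char parity[16]; };

static struct stripe log_copy;   /* journal device (NVRAM/SSD) */
static bool log_valid;
static struct stripe raid_copy;  /* member disks */

static void journal_write(const struct stripe *sh)
{
	log_copy = *sh;          /* sequential log write + flush */
	log_valid = true;        /* only now may in-place writes start */
}

static void raid_write(const struct stripe *sh, bool crash_midway)
{
	memcpy(raid_copy.data, sh->data, sizeof(sh->data));
	if (crash_midway)
		return;          /* parity update lost: the classic write hole */
	memcpy(raid_copy.parity, sh->parity, sizeof(sh->parity));
}

static void recover(void)
{
	if (log_valid)           /* replay the logged stripe, data + parity */
		raid_copy = log_copy;
}

int main(void)
{
	struct stripe sh = { "new-data", "new-parity" };

	journal_write(&sh);      /* cf. r5l_write_stripe() + r5l_write_stripe_run() */
	raid_write(&sh, true);   /* crash between data and parity writes */
	recover();               /* journal replay on next array assembly */
	printf("data=%s parity=%s\n", raid_copy.data, raid_copy.parity);
	return 0;
}

The same ordering explains the restrictions added above: a journal device cannot be hot-removed, reshape and resize are refused while a log is configured, and an array flagged MD_HAS_JOURNAL whose journal is missing is forced read-only rather than risking replay of a stale log.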
+24
drivers/md/raid5.h
··· 223 223 struct stripe_head *batch_head; /* protected by stripe lock */
224 224 spinlock_t batch_lock; /* only header's lock is useful */
225 225 struct list_head batch_list; /* protected by head's batch lock*/
226 + 
227 + struct r5l_io_unit *log_io;
228 + struct list_head log_list;
226 229 /**
227 230 * struct stripe_operations
228 231 * @target - STRIPE_OP_COMPUTE_BLK target
··· 247 244 struct bio *toread, *read, *towrite, *written;
248 245 sector_t sector; /* sector of this page */
249 246 unsigned long flags;
247 + u32 log_checksum;
250 248 } dev[1]; /* allocated with extra space depending of RAID geometry */
251 249 };
252 250 
··· 272 268 struct bio_list return_bi;
273 269 struct md_rdev *blocked_rdev;
274 270 int handle_bad_blocks;
271 + int log_failed;
275 272 };
276 273 
277 274 /* Flags for struct r5dev.flags */
··· 345 340 STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
346 341 * to batch yet.
347 342 */
343 + STRIPE_LOG_TRAPPED, /* trapped into log */
348 344 };
349 345 
350 346 #define STRIPE_EXPAND_SYNC_FLAGS \
··· 549 543 struct r5worker_group *worker_groups;
550 544 int group_cnt;
551 545 int worker_cnt_per_group;
546 + struct r5l_log *log;
552 547 };
553 548 
554 549 
··· 616 609 
617 610 extern void md_raid5_kick_device(struct r5conf *conf);
618 611 extern int raid5_set_cache_size(struct mddev *mddev, int size);
612 + extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
613 + extern void raid5_release_stripe(struct stripe_head *sh);
614 + extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
615 + int previous, int *dd_idx,
616 + struct stripe_head *sh);
617 + extern struct stripe_head *
618 + raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
619 + int previous, int noblock, int noquiesce);
620 + extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
621 + extern void r5l_exit_log(struct r5l_log *log);
622 + extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
623 + extern void r5l_write_stripe_run(struct r5l_log *log);
624 + extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
625 + extern void r5l_stripe_write_finished(struct stripe_head *sh);
626 + extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
627 + extern void r5l_quiesce(struct r5l_log *log, int state);
628 + extern bool r5l_log_disk_error(struct r5conf *conf);
619 629 #endif
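The new stripe_head fields follow the kernel's intrusive-list idiom: log_list links a stripe onto the r5l_io_unit it was logged under, while log_io points back at that unit, so no separate node allocations are needed on the I/O path. A minimal userspace rendition of the idiom (toy types, not the kernel's list.h) is sketched below:

#include <stdio.h>
#include <stddef.h>

/* Toy intrusive list, mimicking how sh->log_list chains stripes onto
 * an r5l_io_unit. container_of() recovers the stripe from the node. */
struct list_node { struct list_node *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_stripe {
	unsigned long sector;
	struct list_node log_list;   /* like stripe_head.log_list */
};

int main(void)
{
	struct toy_stripe a = { 8, { NULL } }, b = { 16, { NULL } };
	struct list_node *head = &a.log_list;

	a.log_list.next = &b.log_list;   /* queue both stripes to one io_unit */

	for (struct list_node *n = head; n; n = n->next) {
		struct toy_stripe *sh = container_of(n, struct toy_stripe, log_list);
		printf("stripe at sector %lu\n", sh->sector);
	}
	return 0;
}

The per-device log_checksum and the per-batch log_failed flag round this out: each 4K page gets its own checksum when logged, and a failed log device is surfaced to handle_stripe() so the array can degrade gracefully instead of silently dropping the journal guarantee.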
+72 -1
include/uapi/linux/raid/md_p.h
··· 89 89 * read requests will only be sent here in
90 90 * dire need
91 91 */
92 + #define MD_DISK_JOURNAL 18 /* disk is used as the write journal in RAID-5/6 */
93 + 
94 + #define MD_DISK_ROLE_SPARE 0xffff
95 + #define MD_DISK_ROLE_FAULTY 0xfffe
96 + #define MD_DISK_ROLE_JOURNAL 0xfffd
97 + #define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */
92 98 
93 99 typedef struct mdp_device_descriptor_s {
94 100 __u32 number; /* 0 Device number in the entire set */
··· 258 252 __le64 data_offset; /* sector start of data, often 0 */
259 253 __le64 data_size; /* sectors in this device that can be used for data */
260 254 __le64 super_offset; /* sector start of this superblock */
261 - __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
255 + union {
256 + __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
257 + __le64 journal_tail;/* journal tail of journal device (from data_offset) */
258 + };
262 259 __le32 dev_number; /* permanent identifier of this device - not role in raid */
263 260 __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
264 261 __u8 device_uuid[16]; /* user-space setable, ignored by kernel */
··· 311 302 #define MD_FEATURE_RECOVERY_BITMAP 128 /* recovery that is happening
312 303 * is guided by bitmap.
313 304 */
305 + #define MD_FEATURE_CLUSTERED 256 /* clustered MD */
306 + #define MD_FEATURE_JOURNAL 512 /* support write cache */
314 307 #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
315 308 |MD_FEATURE_RECOVERY_OFFSET \
316 309 |MD_FEATURE_RESHAPE_ACTIVE \
··· 321 310 |MD_FEATURE_RESHAPE_BACKWARDS \
322 311 |MD_FEATURE_NEW_OFFSET \
323 312 |MD_FEATURE_RECOVERY_BITMAP \
313 + |MD_FEATURE_CLUSTERED \
314 + |MD_FEATURE_JOURNAL \
324 315 )
325 316 
317 + struct r5l_payload_header {
318 + __le16 type;
319 + __le16 flags;
320 + } __attribute__ ((__packed__));
321 + 
322 + enum r5l_payload_type {
323 + R5LOG_PAYLOAD_DATA = 0,
324 + R5LOG_PAYLOAD_PARITY = 1,
325 + R5LOG_PAYLOAD_FLUSH = 2,
326 + };
327 + 
328 + struct r5l_payload_data_parity {
329 + struct r5l_payload_header header;
330 + __le32 size; /* sector. data/parity size. each 4k
331 + * has a checksum */
332 + __le64 location; /* sector. For data, it's raid sector. For
333 + * parity, it's stripe sector */
334 + __le32 checksum[];
335 + } __attribute__ ((__packed__));
336 + 
337 + enum r5l_payload_data_parity_flag {
338 + R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */
339 + /*
340 + * RESHAPED/RESHAPING is only set when there is reshape activity. Note,
341 + * both data/parity of a stripe should have the same flag set
342 + *
343 + * RESHAPED: reshape is running, and this stripe finished reshape
344 + * RESHAPING: reshape is running, and this stripe isn't reshaped
345 + */
346 + R5LOG_PAYLOAD_FLAG_RESHAPED = 2,
347 + R5LOG_PAYLOAD_FLAG_RESHAPING = 3,
348 + };
349 + 
350 + struct r5l_payload_flush {
351 + struct r5l_payload_header header;
352 + __le32 size; /* flush_stripes size, bytes */
353 + __le64 flush_stripes[];
354 + } __attribute__ ((__packed__));
355 + 
356 + enum r5l_payload_flush_flag {
357 + R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */
358 + };
359 + 
360 + struct r5l_meta_block {
361 + __le32 magic;
362 + __le32 checksum;
363 + __u8 version;
364 + __u8 __zero_pading_1;
365 + __le16 __zero_pading_2;
366 + __le32 meta_size; /* whole size of the block */
367 + 
368 + __le64 seq;
369 + __le64 position; /* sector, start from rdev->data_offset, current position */
370 + struct r5l_payload_header payloads[];
371 + } __attribute__ ((__packed__));
372 + 
373 + #define R5LOG_VERSION 0x1
374 + #define R5LOG_MAGIC 0x6433c509
326 375 #endif
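Because md_p.h is a UAPI header, the on-disk journal format above is visible to userspace: a journal device is marked with the 0xfffd role in the superblock's role table, and the log itself is a sequence of little-endian r5l_meta_block headers, each followed by the data/parity pages its payload entries describe. The sketch below shows a userspace sanity check of one such header, using only fields defined in the diff; the checksum verification (which the kernel implementation does with crc32c seeded from the array UUID) is omitted for brevity, and the fabricated block in main() is illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

#define R5LOG_MAGIC   0x6433c509
#define R5LOG_VERSION 0x1

/* Mirror of struct r5l_meta_block, using fixed-width host types. */
struct r5l_meta_block_le {
	uint32_t magic;
	uint32_t checksum;
	uint8_t  version;
	uint8_t  pad1;
	uint16_t pad2;
	uint32_t meta_size;
	uint64_t seq;
	uint64_t position;
} __attribute__((__packed__));

/* Sanity-check one meta block read from the journal device. */
static int check_meta_block(const void *buf)
{
	struct r5l_meta_block_le mb;

	memcpy(&mb, buf, sizeof(mb));
	if (le32toh(mb.magic) != R5LOG_MAGIC)
		return -1;                       /* not a log block */
	if (mb.version != R5LOG_VERSION)
		return -1;                       /* unknown layout */
	printf("seq=%llu position=%llu meta_size=%u\n",
	       (unsigned long long)le64toh(mb.seq),
	       (unsigned long long)le64toh(mb.position),
	       le32toh(mb.meta_size));
	return 0;
}

int main(void)
{
	/* Fabricated little-endian block, for demonstration only. */
	struct r5l_meta_block_le mb = {
		.magic = htole32(R5LOG_MAGIC),
		.version = R5LOG_VERSION,
		.meta_size = htole32(sizeof(mb)),
		.seq = htole64(1),
		.position = htole64(0),
	};
	return check_meta_block(&mb) ? 1 : 0;
}

The monotonically increasing seq plus the self-describing position field are what let recovery scan the log after a crash: walk forward from the recorded tail, accept blocks whose magic, sequence, and checksum line up, and replay their payloads onto the member disks.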