Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
md: support bitmaps on RAID10 arrays larger than 2 terabytes
md: update sync_completed and reshape_position even more often.
md: improve usefulness and accuracy of sysfs file md/sync_completed.
md: allow setting newly added device to 'in_sync' via sysfs.
md: tiny md.h cleanups

+50 -24
+4 -3
drivers/md/bitmap.c
··· 1479 1479 s += blocks; 1480 1480 } 1481 1481 bitmap->last_end_sync = jiffies; 1482 + sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); 1482 1483 } 1483 1484 1484 1485 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) ··· 1590 1589 int bitmap_create(mddev_t *mddev) 1591 1590 { 1592 1591 struct bitmap *bitmap; 1593 - unsigned long blocks = mddev->resync_max_sectors; 1592 + sector_t blocks = mddev->resync_max_sectors; 1594 1593 unsigned long chunks; 1595 1594 unsigned long pages; 1596 1595 struct file *file = mddev->bitmap_file; ··· 1632 1631 bitmap->chunkshift = ffz(~bitmap->chunksize); 1633 1632 1634 1633 /* now that chunksize and chunkshift are set, we can use these macros */ 1635 - chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) / 1636 - CHUNK_BLOCK_RATIO(bitmap); 1634 + chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >> 1635 + CHUNK_BLOCK_SHIFT(bitmap); 1637 1636 pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO; 1638 1637 1639 1638 BUG_ON(!pages);
+28 -13
drivers/md/md.c
··· 2017 2017 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2018 2018 spin_unlock_irq(&mddev->write_lock); 2019 2019 wake_up(&mddev->sb_wait); 2020 + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2021 + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2020 2022 2021 2023 } 2022 2024 ··· 2088 2086 * -writemostly - clears write_mostly 2089 2087 * blocked - sets the Blocked flag 2090 2088 * -blocked - clears the Blocked flag 2089 + * insync - sets Insync providing device isn't active 2091 2090 */ 2092 2091 int err = -EINVAL; 2093 2092 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { ··· 2120 2117 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2121 2118 md_wakeup_thread(rdev->mddev->thread); 2122 2119 2120 + err = 0; 2121 + } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2122 + set_bit(In_sync, &rdev->flags); 2123 2123 err = 0; 2124 2124 } 2125 2125 if (!err && rdev->sysfs_state) ··· 2196 2190 } else if (rdev->mddev->pers) { 2197 2191 mdk_rdev_t *rdev2; 2198 2192 /* Activating a spare .. or possibly reactivating 2199 - * if we every get bitmaps working here. 2193 + * if we ever get bitmaps working here. 
2200 2194 */ 2201 2195 2202 2196 if (rdev->raid_disk != -1) ··· 3488 3482 { 3489 3483 unsigned long max_sectors, resync; 3490 3484 3485 + if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3486 + return sprintf(page, "none\n"); 3487 + 3491 3488 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3492 3489 max_sectors = mddev->resync_max_sectors; 3493 3490 else 3494 3491 max_sectors = mddev->dev_sectors; 3495 3492 3496 - resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 3493 + resync = mddev->curr_resync_completed; 3497 3494 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3498 3495 } 3499 3496 ··· 6343 6334 sector_t sectors; 6344 6335 6345 6336 skipped = 0; 6346 - if (j >= mddev->resync_max) { 6347 - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6348 - wait_event(mddev->recovery_wait, 6349 - mddev->resync_max > j 6350 - || kthread_should_stop()); 6351 - } 6352 - if (kthread_should_stop()) 6353 - goto interrupted; 6354 6337 6355 - if (mddev->curr_resync > mddev->curr_resync_completed && 6356 - (mddev->curr_resync - mddev->curr_resync_completed) 6357 - > (max_sectors >> 4)) { 6338 + if ((mddev->curr_resync > mddev->curr_resync_completed && 6339 + (mddev->curr_resync - mddev->curr_resync_completed) 6340 + > (max_sectors >> 4)) || 6341 + (j - mddev->curr_resync_completed)*2 6342 + >= mddev->resync_max - mddev->curr_resync_completed 6343 + ) { 6358 6344 /* time to update curr_resync_completed */ 6359 6345 blk_unplug(mddev->queue); 6360 6346 wait_event(mddev->recovery_wait, ··· 6357 6353 mddev->curr_resync_completed = 6358 6354 mddev->curr_resync; 6359 6355 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6356 + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6360 6357 } 6358 + 6359 + if (j >= mddev->resync_max) 6360 + wait_event(mddev->recovery_wait, 6361 + mddev->resync_max > j 6362 + || kthread_should_stop()); 6363 + 6364 + if (kthread_should_stop()) 6365 + goto interrupted; 6366 + 6361 6367 sectors = mddev->pers->sync_request(mddev, j, 
&skipped, 6362 6368 currspeed < speed_min(mddev)); 6363 6369 if (sectors == 0) { ··· 6475 6461 6476 6462 skip: 6477 6463 mddev->curr_resync = 0; 6464 + mddev->curr_resync_completed = 0; 6478 6465 mddev->resync_min = 0; 6479 6466 mddev->resync_max = MaxSector; 6480 6467 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+12 -7
drivers/md/md.h
··· 12 12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 13 13 */ 14 14 15 - #ifndef _MD_K_H 16 - #define _MD_K_H 15 + #ifndef _MD_MD_H 16 + #define _MD_MD_H 17 17 18 - #ifdef CONFIG_BLOCK 18 + #include <linux/blkdev.h> 19 + #include <linux/kobject.h> 20 + #include <linux/list.h> 21 + #include <linux/mm.h> 22 + #include <linux/mutex.h> 23 + #include <linux/timer.h> 24 + #include <linux/wait.h> 25 + #include <linux/workqueue.h> 19 26 20 27 #define MaxSector (~(sector_t)0) 21 28 ··· 415 408 if (p) put_page(p); 416 409 } 417 410 418 - #endif /* CONFIG_BLOCK */ 419 - #endif 420 - 421 - 422 411 extern int register_md_personality(struct mdk_personality *p); 423 412 extern int unregister_md_personality(struct mdk_personality *p); 424 413 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), ··· 437 434 extern int md_allow_write(mddev_t *mddev); 438 435 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); 439 436 extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); 437 + 438 + #endif /* _MD_MD_H */
+6 -1
drivers/md/raid5.c
··· 3845 3845 wait_event(conf->wait_for_overlap, 3846 3846 atomic_read(&conf->reshape_stripes)==0); 3847 3847 mddev->reshape_position = conf->reshape_progress; 3848 + mddev->curr_resync_completed = mddev->curr_resync; 3848 3849 conf->reshape_checkpoint = jiffies; 3849 3850 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3850 3851 md_wakeup_thread(mddev->thread); ··· 3855 3854 conf->reshape_safe = mddev->reshape_position; 3856 3855 spin_unlock_irq(&conf->device_lock); 3857 3856 wake_up(&conf->wait_for_overlap); 3857 + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3858 3858 } 3859 3859 3860 3860 if (mddev->delta_disks < 0) { ··· 3940 3938 * then we need to write out the superblock. 3941 3939 */ 3942 3940 sector_nr += reshape_sectors; 3943 - if (sector_nr >= mddev->resync_max) { 3941 + if ((sector_nr - mddev->curr_resync_completed) * 2 3942 + >= mddev->resync_max - mddev->curr_resync_completed) { 3944 3943 /* Cannot proceed until we've updated the superblock... */ 3945 3944 wait_event(conf->wait_for_overlap, 3946 3945 atomic_read(&conf->reshape_stripes) == 0); 3947 3946 mddev->reshape_position = conf->reshape_progress; 3947 + mddev->curr_resync_completed = mddev->curr_resync; 3948 3948 conf->reshape_checkpoint = jiffies; 3949 3949 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3950 3950 md_wakeup_thread(mddev->thread); ··· 3957 3953 conf->reshape_safe = mddev->reshape_position; 3958 3954 spin_unlock_irq(&conf->device_lock); 3959 3955 wake_up(&conf->wait_for_overlap); 3956 + sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3960 3957 } 3961 3958 return reshape_sectors; 3962 3959 }