Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md: add possibility to change data-offset for devices.

When reshaping we can avoid costly intermediate backup by
changing the 'start' address of the array on the device
(if there is enough room).

So as a first step, allow such a change to be requested
through sysfs, and recorded in v1.x metadata.

(As we didn't previously check that all 'pad' fields were zero,
we need a new FEATURE flag for this.
We (belatedly) check that all remaining 'pad' fields are
zero to avoid a repeat of this.)

The new data offset must be requested separately for each device.
This allows each to have a different change in the data offset.
This is not likely to be used often but as data_offset can be
set per-device, new_data_offset should be too.

This patch also removes the 'acknowledged' arg to rdev_set_badblocks as
it is never used and never will be. At the same time we add a new
arg ('in_new') which is currently always zero but will be used more
soon.

When a reshape finishes we will need to update the data_offset
and rdev->sectors. So provide an exported function to do that.

Signed-off-by: NeilBrown <neilb@suse.de>

NeilBrown c6563a8c 2c810cdd

+222 -34
+196 -21
drivers/md/md.c
··· 1035 1035 struct super_type { 1036 1036 char *name; 1037 1037 struct module *owner; 1038 - int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, 1038 + int (*load_super)(struct md_rdev *rdev, 1039 + struct md_rdev *refdev, 1039 1040 int minor_version); 1040 - int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev); 1041 - void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); 1041 + int (*validate_super)(struct mddev *mddev, 1042 + struct md_rdev *rdev); 1043 + void (*sync_super)(struct mddev *mddev, 1044 + struct md_rdev *rdev); 1042 1045 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1043 1046 sector_t num_sectors); 1047 + int (*allow_new_offset)(struct md_rdev *rdev, 1048 + unsigned long long new_offset); 1044 1049 }; 1045 1050 1046 1051 /* ··· 1117 1112 1118 1113 rdev->preferred_minor = sb->md_minor; 1119 1114 rdev->data_offset = 0; 1115 + rdev->new_data_offset = 0; 1120 1116 rdev->sb_size = MD_SB_BYTES; 1121 1117 rdev->badblocks.shift = -1; 1122 1118 ··· 1444 1438 return num_sectors; 1445 1439 } 1446 1440 1441 + static int 1442 + super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1443 + { 1444 + /* non-zero offset changes not possible with v0.90 */ 1445 + return new_offset == 0; 1446 + } 1447 1447 1448 1448 /* 1449 1449 * version 1 superblock ··· 1485 1473 struct mdp_superblock_1 *sb; 1486 1474 int ret; 1487 1475 sector_t sb_start; 1476 + sector_t sectors; 1488 1477 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1489 1478 int bmask; 1490 1479 ··· 1540 1527 bdevname(rdev->bdev,b)); 1541 1528 return -EINVAL; 1542 1529 } 1530 + if (sb->pad0 || 1531 + sb->pad3[0] || 1532 + memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1533 + /* Some padding is non-zero, might be a new feature */ 1534 + return -EINVAL; 1543 1535 1544 1536 rdev->preferred_minor = 0xffff; 1545 1537 rdev->data_offset = le64_to_cpu(sb->data_offset); 1538 + rdev->new_data_offset = rdev->data_offset; 1539 + if 
((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1540 + (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1541 + rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1546 1542 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1547 1543 1548 1544 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; ··· 1561 1539 1562 1540 if (minor_version 1563 1541 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1542 + return -EINVAL; 1543 + if (minor_version 1544 + && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1564 1545 return -EINVAL; 1565 1546 1566 1547 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) ··· 1636 1611 else 1637 1612 ret = 0; 1638 1613 } 1639 - if (minor_version) 1640 - rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 1641 - le64_to_cpu(sb->data_offset); 1642 - else 1643 - rdev->sectors = rdev->sb_start; 1644 - if (rdev->sectors < le64_to_cpu(sb->data_size)) 1614 + if (minor_version) { 1615 + sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); 1616 + sectors -= rdev->data_offset; 1617 + } else 1618 + sectors = rdev->sb_start; 1619 + if (sectors < le64_to_cpu(sb->data_size)) 1645 1620 return -EINVAL; 1646 1621 rdev->sectors = le64_to_cpu(sb->data_size); 1647 - if (le64_to_cpu(sb->size) > rdev->sectors) 1648 - return -EINVAL; 1649 1622 return ret; 1650 1623 } 1651 1624 ··· 1768 1745 sb->feature_map = 0; 1769 1746 sb->pad0 = 0; 1770 1747 sb->recovery_offset = cpu_to_le64(0); 1771 - memset(sb->pad1, 0, sizeof(sb->pad1)); 1772 1748 memset(sb->pad3, 0, sizeof(sb->pad3)); 1773 1749 1774 1750 sb->utime = cpu_to_le64((__u64)mddev->utime); ··· 1789 1767 sb->devflags |= WriteMostly1; 1790 1768 else 1791 1769 sb->devflags &= ~WriteMostly1; 1770 + sb->data_offset = cpu_to_le64(rdev->data_offset); 1771 + sb->data_size = cpu_to_le64(rdev->sectors); 1792 1772 1793 1773 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1794 1774 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); ··· 
1819 1795 mddev->reshape_backwards) 1820 1796 sb->feature_map 1821 1797 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 1798 + if (rdev->new_data_offset != rdev->data_offset) { 1799 + sb->feature_map 1800 + |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 1801 + sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 1802 + - rdev->data_offset)); 1803 + } 1822 1804 } 1823 1805 1824 1806 if (rdev->badblocks.count == 0) ··· 1901 1871 sector_t max_sectors; 1902 1872 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1903 1873 return 0; /* component must fit device */ 1874 + if (rdev->data_offset != rdev->new_data_offset) 1875 + return 0; /* too confusing */ 1904 1876 if (rdev->sb_start < rdev->data_offset) { 1905 1877 /* minor versions 1 and 2; superblock before data */ 1906 1878 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; ··· 1930 1898 rdev->sb_page); 1931 1899 md_super_wait(rdev->mddev); 1932 1900 return num_sectors; 1901 + 1902 + } 1903 + 1904 + static int 1905 + super_1_allow_new_offset(struct md_rdev *rdev, 1906 + unsigned long long new_offset) 1907 + { 1908 + /* All necessary checks on new >= old have been done */ 1909 + struct bitmap *bitmap; 1910 + if (new_offset >= rdev->data_offset) 1911 + return 1; 1912 + 1913 + /* with 1.0 metadata, there is no metadata to tread on 1914 + * so we can always move back */ 1915 + if (rdev->mddev->minor_version == 0) 1916 + return 1; 1917 + 1918 + /* otherwise we must be sure not to step on 1919 + * any metadata, so stay: 1920 + * 36K beyond start of superblock 1921 + * beyond end of badblocks 1922 + * beyond write-intent bitmap 1923 + */ 1924 + if (rdev->sb_start + (32+4)*2 > new_offset) 1925 + return 0; 1926 + bitmap = rdev->mddev->bitmap; 1927 + if (bitmap && !rdev->mddev->bitmap_info.file && 1928 + rdev->sb_start + rdev->mddev->bitmap_info.offset + 1929 + bitmap->file_pages * (PAGE_SIZE>>9) > new_offset) 1930 + return 0; 1931 + if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 1932 + return 0; 1933 + 
1934 + return 1; 1933 1935 } 1934 1936 1935 1937 static struct super_type super_types[] = { ··· 1974 1908 .validate_super = super_90_validate, 1975 1909 .sync_super = super_90_sync, 1976 1910 .rdev_size_change = super_90_rdev_size_change, 1911 + .allow_new_offset = super_90_allow_new_offset, 1977 1912 }, 1978 1913 [1] = { 1979 1914 .name = "md-1", ··· 1983 1916 .validate_super = super_1_validate, 1984 1917 .sync_super = super_1_sync, 1985 1918 .rdev_size_change = super_1_rdev_size_change, 1919 + .allow_new_offset = super_1_allow_new_offset, 1986 1920 }, 1987 1921 }; 1988 1922 ··· 2891 2823 static ssize_t 2892 2824 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 2893 2825 { 2894 - char *e; 2895 - unsigned long long offset = simple_strtoull(buf, &e, 10); 2896 - if (e==buf || (*e && *e != '\n')) 2826 + unsigned long long offset; 2827 + if (strict_strtoull(buf, 10, &offset) < 0) 2897 2828 return -EINVAL; 2898 2829 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2899 2830 return -EBUSY; ··· 2906 2839 2907 2840 static struct rdev_sysfs_entry rdev_offset = 2908 2841 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2842 + 2843 + static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 2844 + { 2845 + return sprintf(page, "%llu\n", 2846 + (unsigned long long)rdev->new_data_offset); 2847 + } 2848 + 2849 + static ssize_t new_offset_store(struct md_rdev *rdev, 2850 + const char *buf, size_t len) 2851 + { 2852 + unsigned long long new_offset; 2853 + struct mddev *mddev = rdev->mddev; 2854 + 2855 + if (strict_strtoull(buf, 10, &new_offset) < 0) 2856 + return -EINVAL; 2857 + 2858 + if (mddev->sync_thread) 2859 + return -EBUSY; 2860 + if (new_offset == rdev->data_offset) 2861 + /* reset is always permitted */ 2862 + ; 2863 + else if (new_offset > rdev->data_offset) { 2864 + /* must not push array size beyond rdev_sectors */ 2865 + if (new_offset - rdev->data_offset 2866 + + mddev->dev_sectors > rdev->sectors) 2867 + return -E2BIG; 2868 + } 2869 + 
/* Metadata worries about other space details. */ 2870 + 2871 + /* decreasing the offset is inconsistent with a backwards 2872 + * reshape. 2873 + */ 2874 + if (new_offset < rdev->data_offset && 2875 + mddev->reshape_backwards) 2876 + return -EINVAL; 2877 + /* Increasing offset is inconsistent with forwards 2878 + * reshape. reshape_direction should be set to 2879 + * 'backwards' first. 2880 + */ 2881 + if (new_offset > rdev->data_offset && 2882 + !mddev->reshape_backwards) 2883 + return -EINVAL; 2884 + 2885 + if (mddev->pers && mddev->persistent && 2886 + !super_types[mddev->major_version] 2887 + .allow_new_offset(rdev, new_offset)) 2888 + return -E2BIG; 2889 + rdev->new_data_offset = new_offset; 2890 + if (new_offset > rdev->data_offset) 2891 + mddev->reshape_backwards = 1; 2892 + else if (new_offset < rdev->data_offset) 2893 + mddev->reshape_backwards = 0; 2894 + 2895 + return len; 2896 + } 2897 + static struct rdev_sysfs_entry rdev_new_offset = 2898 + __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 2909 2899 2910 2900 static ssize_t 2911 2901 rdev_size_show(struct md_rdev *rdev, char *page) ··· 3008 2884 3009 2885 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3010 2886 return -EINVAL; 2887 + if (rdev->data_offset != rdev->new_data_offset) 2888 + return -EINVAL; /* too confusing */ 3011 2889 if (my_mddev->pers && rdev->raid_disk >= 0) { 3012 2890 if (my_mddev->persistent) { 3013 2891 sectors = super_types[my_mddev->major_version]. 
··· 3146 3020 &rdev_errors.attr, 3147 3021 &rdev_slot.attr, 3148 3022 &rdev_offset.attr, 3023 + &rdev_new_offset.attr, 3149 3024 &rdev_size.attr, 3150 3025 &rdev_recovery_start.attr, 3151 3026 &rdev_bad_blocks.attr, ··· 3221 3094 rdev->raid_disk = -1; 3222 3095 rdev->flags = 0; 3223 3096 rdev->data_offset = 0; 3097 + rdev->new_data_offset = 0; 3224 3098 rdev->sb_events = 0; 3225 3099 rdev->last_read_error.tv_sec = 0; 3226 3100 rdev->last_read_error.tv_nsec = 0; ··· 3726 3598 if (mddev->pers) 3727 3599 rv = update_raid_disks(mddev, n); 3728 3600 else if (mddev->reshape_position != MaxSector) { 3601 + struct md_rdev *rdev; 3729 3602 int olddisks = mddev->raid_disks - mddev->delta_disks; 3603 + 3604 + rdev_for_each(rdev, mddev) { 3605 + if (olddisks < n && 3606 + rdev->data_offset < rdev->new_data_offset) 3607 + return -EINVAL; 3608 + if (olddisks > n && 3609 + rdev->data_offset > rdev->new_data_offset) 3610 + return -EINVAL; 3611 + } 3730 3612 mddev->delta_disks = n - olddisks; 3731 3613 mddev->raid_disks = n; 3732 3614 mddev->reshape_backwards = (mddev->delta_disks < 0); ··· 4583 4445 static ssize_t 4584 4446 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4585 4447 { 4448 + struct md_rdev *rdev; 4586 4449 char *e; 4587 4450 unsigned long long new = simple_strtoull(buf, &e, 10); 4588 4451 if (mddev->pers) ··· 4596 4457 mddev->new_level = mddev->level; 4597 4458 mddev->new_layout = mddev->layout; 4598 4459 mddev->new_chunk_sectors = mddev->chunk_sectors; 4460 + rdev_for_each(rdev, mddev) 4461 + rdev->new_data_offset = rdev->data_offset; 4599 4462 return len; 4600 4463 } 4601 4464 ··· 6142 6001 static int update_raid_disks(struct mddev *mddev, int raid_disks) 6143 6002 { 6144 6003 int rv; 6004 + struct md_rdev *rdev; 6145 6005 /* change the number of raid disks */ 6146 6006 if (mddev->pers->check_reshape == NULL) 6147 6007 return -EINVAL; ··· 6151 6009 return -EINVAL; 6152 6010 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 
6153 6011 return -EBUSY; 6012 + 6013 + rdev_for_each(rdev, mddev) { 6014 + if (mddev->raid_disks < raid_disks && 6015 + rdev->data_offset < rdev->new_data_offset) 6016 + return -EINVAL; 6017 + if (mddev->raid_disks > raid_disks && 6018 + rdev->data_offset > rdev->new_data_offset) 6019 + return -EINVAL; 6020 + } 6021 + 6154 6022 mddev->delta_disks = raid_disks - mddev->raid_disks; 6155 6023 if (mddev->delta_disks < 0) 6156 6024 mddev->reshape_backwards = 1; ··· 7861 7709 } 7862 7710 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7863 7711 7712 + void md_finish_reshape(struct mddev *mddev) 7713 + { 7714 + /* called be personality module when reshape completes. */ 7715 + struct md_rdev *rdev; 7716 + 7717 + rdev_for_each(rdev, mddev) { 7718 + if (rdev->data_offset > rdev->new_data_offset) 7719 + rdev->sectors += rdev->data_offset - rdev->new_data_offset; 7720 + else 7721 + rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 7722 + rdev->data_offset = rdev->new_data_offset; 7723 + } 7724 + } 7725 + EXPORT_SYMBOL(md_finish_reshape); 7864 7726 7865 7727 /* Bad block management. 
7866 7728 * We can record which blocks on each device are 'bad' and so just ··· 8123 7957 } 8124 7958 8125 7959 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 8126 - int acknowledged) 7960 + int is_new) 8127 7961 { 8128 - int rv = md_set_badblocks(&rdev->badblocks, 8129 - s + rdev->data_offset, sectors, acknowledged); 7962 + int rv; 7963 + if (is_new) 7964 + s += rdev->new_data_offset; 7965 + else 7966 + s += rdev->data_offset; 7967 + rv = md_set_badblocks(&rdev->badblocks, 7968 + s, sectors, 0); 8130 7969 if (rv) { 8131 7970 /* Make sure they get written out promptly */ 8132 7971 sysfs_notify_dirent_safe(rdev->sysfs_state); ··· 8237 8066 return rv; 8238 8067 } 8239 8068 8240 - int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors) 8069 + int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 8070 + int is_new) 8241 8071 { 8072 + if (is_new) 8073 + s += rdev->new_data_offset; 8074 + else 8075 + s += rdev->data_offset; 8242 8076 return md_clear_badblocks(&rdev->badblocks, 8243 - s + rdev->data_offset, 8244 - sectors); 8077 + s, sectors); 8245 8078 } 8246 8079 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 8247 8080
+5 -2
drivers/md/md.h
··· 55 55 int sb_loaded; 56 56 __u64 sb_events; 57 57 sector_t data_offset; /* start of data in array */ 58 + sector_t new_data_offset;/* only relevant while reshaping */ 58 59 sector_t sb_start; /* offset of the super block (in 512byte sectors) */ 59 60 int sb_size; /* bytes in the superblock */ 60 61 int preferred_minor; /* autorun support */ ··· 194 193 return 0; 195 194 } 196 195 extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 197 - int acknowledged); 198 - extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors); 196 + int is_new); 197 + extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 198 + int is_new); 199 199 extern void md_ack_all_badblocks(struct badblocks *bb); 200 200 201 201 struct mddev { ··· 594 592 extern void md_write_end(struct mddev *mddev); 595 593 extern void md_done_sync(struct mddev *mddev, int blocks, int ok); 596 594 extern void md_error(struct mddev *mddev, struct md_rdev *rdev); 595 + extern void md_finish_reshape(struct mddev *mddev); 597 596 598 597 extern int mddev_congested(struct mddev *mddev, int bits); 599 598 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
+2 -2
drivers/md/raid1.c
··· 2024 2024 continue; 2025 2025 if (test_bit(BIO_UPTODATE, &bio->bi_flags) && 2026 2026 test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2027 - rdev_clear_badblocks(rdev, r1_bio->sector, s); 2027 + rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 2028 2028 } 2029 2029 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 2030 2030 test_bit(R1BIO_WriteError, &r1_bio->state)) { ··· 2044 2044 struct md_rdev *rdev = conf->mirrors[m].rdev; 2045 2045 rdev_clear_badblocks(rdev, 2046 2046 r1_bio->sector, 2047 - r1_bio->sectors); 2047 + r1_bio->sectors, 0); 2048 2048 rdev_dec_pending(rdev, conf->mddev); 2049 2049 } else if (r1_bio->bios[m] != NULL) { 2050 2050 /* This drive got a write error. We need to
+4 -4
drivers/md/raid10.c
··· 2480 2480 rdev_clear_badblocks( 2481 2481 rdev, 2482 2482 r10_bio->devs[m].addr, 2483 - r10_bio->sectors); 2483 + r10_bio->sectors, 0); 2484 2484 } else { 2485 2485 if (!rdev_set_badblocks( 2486 2486 rdev, ··· 2496 2496 rdev_clear_badblocks( 2497 2497 rdev, 2498 2498 r10_bio->devs[m].addr, 2499 - r10_bio->sectors); 2499 + r10_bio->sectors, 0); 2500 2500 } else { 2501 2501 if (!rdev_set_badblocks( 2502 2502 rdev, ··· 2515 2515 rdev_clear_badblocks( 2516 2516 rdev, 2517 2517 r10_bio->devs[m].addr, 2518 - r10_bio->sectors); 2518 + r10_bio->sectors, 0); 2519 2519 rdev_dec_pending(rdev, conf->mddev); 2520 2520 } else if (bio != NULL && 2521 2521 !test_bit(BIO_UPTODATE, &bio->bi_flags)) { ··· 2532 2532 rdev_clear_badblocks( 2533 2533 rdev, 2534 2534 r10_bio->devs[m].addr, 2535 - r10_bio->sectors); 2535 + r10_bio->sectors, 0); 2536 2536 rdev_dec_pending(rdev, conf->mddev); 2537 2537 } 2538 2538 }
+7 -3
drivers/md/raid5.c
··· 3561 3561 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 3562 3562 rdev = conf->disks[i].rdev; 3563 3563 rdev_clear_badblocks(rdev, sh->sector, 3564 - STRIPE_SECTORS); 3564 + STRIPE_SECTORS, 0); 3565 3565 rdev_dec_pending(rdev, conf->mddev); 3566 3566 } 3567 3567 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { ··· 3570 3570 /* rdev have been moved down */ 3571 3571 rdev = conf->disks[i].rdev; 3572 3572 rdev_clear_badblocks(rdev, sh->sector, 3573 - STRIPE_SECTORS); 3573 + STRIPE_SECTORS, 0); 3574 3574 rdev_dec_pending(rdev, conf->mddev); 3575 3575 } 3576 3576 } ··· 5505 5505 if (!check_stripe_cache(mddev)) 5506 5506 return -ENOSPC; 5507 5507 5508 - rdev_for_each(rdev, mddev) 5508 + rdev_for_each(rdev, mddev) { 5509 + /* Don't support changing data_offset yet */ 5510 + if (rdev->new_data_offset != rdev->data_offset) 5511 + return -EINVAL; 5509 5512 if (!test_bit(In_sync, &rdev->flags) 5510 5513 && !test_bit(Faulty, &rdev->flags)) 5511 5514 spares++; 5515 + } 5512 5516 5513 5517 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5514 5518 /* Not enough devices even to make a degraded array
+8 -2
include/linux/raid/md_p.h
··· 233 233 __le32 delta_disks; /* change in number of raid_disks */ 234 234 __le32 new_layout; /* new layout */ 235 235 __le32 new_chunk; /* new chunk size (512byte sectors) */ 236 - __u8 pad1[128-124]; /* set to 0 when written */ 236 + __le32 new_offset; /* signed number to add to data_offset in new 237 + * layout. 0 == no-change. This can be 238 + * different on each device in the array. 239 + */ 237 240 238 241 /* constant this-device information - 64 bytes */ 239 242 __le64 data_offset; /* sector start of data, often 0 */ ··· 288 285 * of devices, but is going 289 286 * backwards anyway. 290 287 */ 288 + #define MD_FEATURE_NEW_OFFSET 64 /* new_offset must be honoured */ 291 289 #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ 292 290 |MD_FEATURE_RECOVERY_OFFSET \ 293 291 |MD_FEATURE_RESHAPE_ACTIVE \ 294 292 |MD_FEATURE_BAD_BLOCKS \ 295 293 |MD_FEATURE_REPLACEMENT \ 296 - |MD_FEATURE_RESHAPE_BACKWARDS) 294 + |MD_FEATURE_RESHAPE_BACKWARDS \ 295 + |MD_FEATURE_NEW_OFFSET \ 296 + ) 297 297 298 298 #endif