Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md: (52 commits)
md: Protect access to mddev->disks list using RCU
md: only count actual openers as access which prevent a 'stop'
md: linear: Make array_size sector-based and rename it to array_sectors.
md: Make mddev->array_size sector-based.
md: Make super_type->rdev_size_change() take sector-based sizes.
md: Fix check for overlapping devices.
md: Tidy up rdev_size_store a bit:
md: Remove some unused macros.
md: Turn rdev->sb_offset into a sector-based quantity.
md: Make calc_dev_sboffset() return a sector count.
md: Replace calc_dev_size() by calc_num_sectors().
md: Make update_size() take the number of sectors.
md: Better control of when do_md_stop is allowed to stop the array.
md: get_disk_info(): Don't convert between signed and unsigned and back.
md: Simplify restart_array().
md: alloc_disk_sb(): Return proper error value.
md: Simplify sb_equal().
md: Simplify uuid_equal().
md: sb_equal(): Fix misleading printk.
md: Fix a typo in the comment to cmd_match().
...

+843 -791
+29 -1
Documentation/md.txt
···
      writing the word for the desired state, however some states
      cannot be explicitly set, and some transitions are not allowed.
 
+     Select/poll works on this file.  All changes except between
+     active_idle and active (which can be frequent and are not
+     very interesting) are notified.  active->active_idle is
+     reported if the metadata is externally managed.
+
      clear
          No devices, no size, no level
          Writing is equivalent to STOP_ARRAY ioctl
···
      writemostly - device will only be subject to read
          requests if there are no other options.
          This applies only to raid1 arrays.
+     blocked - device has failed, metadata is "external",
+         and the failure hasn't been acknowledged yet.
+         Writes that would write to this device if
+         it were not faulty are blocked.
      spare - device is working, but not a full member.
          This includes spares that are in the process
          of being recovered to
···
      Writing "remove" removes the device from the array.
      Writing "writemostly" sets the writemostly flag.
      Writing "-writemostly" clears the writemostly flag.
+     Writing "blocked" sets the "blocked" flag.
+     Writing "-blocked" clears the "blocked" flag and allows writes
+         to complete.
+
+     This file responds to select/poll.  Any change to 'faulty'
+     or 'blocked' causes an event.
 
  errors
      An approximate count of read errors that have been detected on
···
      for storage of data.  This will normally be the same as the
      component_size.  This can be written while assembling an
      array.  If a value less than the current component_size is
-     written, component_size will be reduced to this value.
+     written, it will be rejected.
 
 
  An active md device will also contain an entry for each active device
···
      corresponding operation if it was stopped with 'idle'.
      'check' and 'repair' will start the appropriate process
      providing the current state is 'idle'.
+
+     This file responds to select/poll.  Any important change in the value
+     triggers a poll event.  Sometimes the value will briefly be
+     "recover" if a recovery seems to be needed, but cannot be
+     achieved.  In that case, the transition to "recover" isn't
+     notified, but the transition away is.
+
+  degraded
+     This contains a count of the number of devices by which the
+     array is degraded.  So an optimal array will show '0'.  A
+     single failed/missing drive will show '1', etc.
+     This file responds to select/poll, any increase or decrease
+     in the count of missing devices will trigger an event.
 
  mismatch_count
      When performing 'check' and 'repair', and possibly when
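The select/poll behaviour documented above is consumed with the usual sysfs pattern: read the attribute once to arm the notification, poll for POLLPRI|POLLERR, then seek back to the start and re-read. A minimal userspace sketch, assuming an array at md0 and with error handling elided:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		int fd = open("/sys/block/md0/md/array_state", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		read(fd, buf, sizeof(buf));	/* initial read arms the notification */
		for (;;) {
			poll(&pfd, 1, -1);	/* blocks until sysfs_notify() fires */
			lseek(fd, 0, SEEK_SET);	/* rewind and re-read the new value */
			ssize_t n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				printf("array_state: %s", buf);
			}
		}
	}

The same loop works for the per-device state file and for sync_action/degraded described below; only the path changes.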
+39 -15
drivers/md/bitmap.c
···
 	    || test_bit(Faulty, &rdev->flags))
 		continue;
 
-	target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);
+	target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
 	if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
 		page->index = index;
···
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	mddev_t *mddev = bitmap->mddev;
 
-	rdev_for_each(rdev, tmp, mddev)
+	rcu_read_lock();
+	rdev_for_each_rcu(rdev, mddev)
 		if (test_bit(In_sync, &rdev->flags)
 		    && !test_bit(Faulty, &rdev->flags)) {
 			int size = PAGE_SIZE;
···
 			    + (long)(page->index * (PAGE_SIZE/512))
 			    + size/512 > 0)
 				/* bitmap runs in to metadata */
-				return -EINVAL;
+				goto bad_alignment;
 			if (rdev->data_offset + mddev->size*2
-			    > rdev->sb_offset*2 + bitmap->offset)
+			    > rdev->sb_start + bitmap->offset)
 				/* data runs in to bitmap */
-				return -EINVAL;
-		} else if (rdev->sb_offset*2 < rdev->data_offset) {
+				goto bad_alignment;
+		} else if (rdev->sb_start < rdev->data_offset) {
 			/* METADATA BITMAP DATA */
-			if (rdev->sb_offset*2
+			if (rdev->sb_start
 			    + bitmap->offset
 			    + page->index*(PAGE_SIZE/512) + size/512
 			    > rdev->data_offset)
 				/* bitmap runs in to data */
-				return -EINVAL;
+				goto bad_alignment;
 		} else {
 			/* DATA METADATA BITMAP - no problems */
 		}
 		md_super_write(mddev, rdev,
-			       (rdev->sb_offset<<1) + bitmap->offset
+			       rdev->sb_start + bitmap->offset
 			       + page->index * (PAGE_SIZE/512),
 			       size,
 			       page);
 	}
+	rcu_read_unlock();
 
 	if (wait)
 		md_super_wait(mddev);
 	return 0;
+
+ bad_alignment:
+	rcu_read_unlock();
+	return -EINVAL;
 }
 
 static void bitmap_file_kick(struct bitmap *bitmap);
···
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
 	sb->events = cpu_to_le64(bitmap->mddev->events);
-	if (!bitmap->mddev->degraded)
-		sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+	if (bitmap->mddev->events < bitmap->events_cleared) {
+		/* rocking back to read-only */
+		bitmap->events_cleared = bitmap->mddev->events;
+		sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+	}
 	kunmap_atomic(sb, KM_USER0);
 	write_page(bitmap, bitmap->sb_page, 1);
 }
···
 	} else
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 	lastpage = page;
-	/*
-	printk("bitmap clean at page %lu\n", j);
-	*/
+
+	/* We are possibly going to clear some bits, so make
+	 * sure that events_cleared is up-to-date.
+	 */
+	if (bitmap->need_sync) {
+		bitmap_super_t *sb;
+		bitmap->need_sync = 0;
+		sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+		sb->events_cleared =
+			cpu_to_le64(bitmap->events_cleared);
+		kunmap_atomic(sb, KM_USER0);
+		write_page(bitmap, bitmap->sb_page, 1);
+	}
 	spin_lock_irqsave(&bitmap->lock, flags);
 	clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
 }
···
 	if (!bmc) {
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 		return;
+	}
+
+	if (success &&
+	    bitmap->events_cleared < bitmap->mddev->events) {
+		bitmap->events_cleared = bitmap->mddev->events;
+		bitmap->need_sync = 1;
 	}
 
 	if (!success && ! (*bmc & NEEDED_MASK))
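For reference, the rdev_for_each_rcu() conversion above follows the standard RCU-protected list pattern: readers bracket the traversal with rcu_read_lock()/rcu_read_unlock() and must not sleep or return while inside (hence the bad_alignment unwind label), while writers use list_add_rcu()/list_del_rcu() and wait with synchronize_rcu() before freeing, as the md.c hunks below now do when unbinding an rdev. A generic sketch with illustrative names, not md's own types:

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct item {
		struct list_head entry;
		int in_sync;
	};

	/* Reader: may run concurrently with list_add_rcu()/list_del_rcu(). */
	static int any_in_sync(struct list_head *head)
	{
		struct item *it;
		int ret = 0;

		rcu_read_lock();	/* no sleeping, no early return without unlock */
		list_for_each_entry_rcu(it, head, entry)
			if (it->in_sync) {
				ret = 1;
				break;
			}
		rcu_read_unlock();
		return ret;
	}

	/* Writer: unlink, wait for readers, then free. */
	static void drop_item(struct item *it)
	{
		list_del_rcu(&it->entry);
		synchronize_rcu();	/* all outstanding read-side sections finish */
		kfree(it);
	}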
+1 -1
drivers/md/faulty.c
···
 	rdev_for_each(rdev, tmp, mddev)
 		conf->rdev = rdev;
 
-	mddev->array_size = mddev->size;
+	mddev->array_sectors = mddev->size * 2;
 	mddev->private = conf;
 
 	reconfig(mddev, mddev->layout, -1);
+10 -10
drivers/md/linear.c
···
 		return NULL;
 
 	cnt = 0;
-	conf->array_size = 0;
+	conf->array_sectors = 0;
 
 	rdev_for_each(rdev, tmp, mddev) {
 		int j = rdev->raid_disk;
 		dev_info_t *disk = conf->disks + j;
 
-		if (j < 0 || j > raid_disks || disk->rdev) {
+		if (j < 0 || j >= raid_disks || disk->rdev) {
 			printk("linear: disk numbering problem. Aborting!\n");
 			goto out;
 		}
···
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->size = rdev->size;
-		conf->array_size += rdev->size;
+		conf->array_sectors += rdev->size * 2;
 
 		cnt++;
 	}
···
 		goto out;
 	}
 
-	min_spacing = conf->array_size;
+	min_spacing = conf->array_sectors / 2;
 	sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
 
 	/* min_spacing is the minimum spacing that will fit the hash
···
 	 * that is larger than min_spacing as use the size of that as
 	 * the actual spacing
 	 */
-	conf->hash_spacing = conf->array_size;
+	conf->hash_spacing = conf->array_sectors / 2;
 	for (i=0; i < cnt-1 ; i++) {
 		sector_t sz = 0;
 		int j;
···
 		unsigned round;
 		unsigned long base;
 
-		sz = conf->array_size >> conf->preshift;
+		sz = conf->array_sectors >> (conf->preshift + 1);
 		sz += 1; /* force round-up */
 		base = conf->hash_spacing >> conf->preshift;
 		round = sector_div(sz, base);
···
 	curr_offset = 0;
 	i = 0;
 	for (curr_offset = 0;
-	     curr_offset < conf->array_size;
+	     curr_offset < conf->array_sectors / 2;
 	     curr_offset += conf->hash_spacing) {
 
 		while (i < raid_disks-1 &&
···
 	if (!conf)
 		return 1;
 	mddev->private = conf;
-	mddev->array_size = conf->array_size;
+	mddev->array_sectors = conf->array_sectors;
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
 	mddev->queue->unplug_fn = linear_unplug;
···
 	newconf->prev = mddev_to_conf(mddev);
 	mddev->private = newconf;
 	mddev->raid_disks++;
-	mddev->array_size = newconf->array_size;
-	set_capacity(mddev->gendisk, mddev->array_size << 1);
+	mddev->array_sectors = newconf->array_sectors;
+	set_capacity(mddev->gendisk, mddev->array_sectors);
 	return 0;
 }
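The mechanical rule behind these conversions: mddev->size and rdev->size are per-device sizes in KiB (1024-byte blocks), while the new *_sectors quantities count 512-byte sectors, so KiB values are doubled going in and halved coming out, and set_capacity() now takes array_sectors directly instead of a shifted KiB count. A trivial standalone illustration of the unit arithmetic:

	#include <assert.h>

	int main(void)
	{
		unsigned long long size_kib = 4ULL * 1024 * 1024; /* per-device KiB, as in mddev->size */
		unsigned long long sectors  = size_kib * 2;       /* 512-byte sectors, as in array_sectors */
		unsigned long long bytes    = sectors * 512;      /* what set_capacity() ultimately exposes */

		assert(bytes == size_kib * 1024);
		return 0;
	}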
+398 -217
drivers/md/md.c
···
 {
 	atomic_inc(&md_event_count);
 	wake_up(&md_event_waiters);
-	sysfs_notify(&mddev->kobj, NULL, "sync_action");
 }
 EXPORT_SYMBOL_GPL(md_new_event);
···
 	INIT_LIST_HEAD(&new->all_mddevs);
 	init_timer(&new->safemode_timer);
 	atomic_set(&new->active, 1);
+	atomic_set(&new->openers, 0);
 	spin_lock_init(&new->write_lock);
 	init_waitqueue_head(&new->sb_wait);
 	init_waitqueue_head(&new->recovery_wait);
 	new->reshape_position = MaxSector;
+	new->resync_min = 0;
 	new->resync_max = MaxSector;
 	new->level = LEVEL_NONE;
···
 	return NULL;
 }
 
+/* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
-	return MD_NEW_SIZE_BLOCKS(size);
+	sector_t num_sectors = bdev->bd_inode->i_size / 512;
+	return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
-static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
+static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
 {
-	sector_t size;
-
-	size = rdev->sb_offset;
+	sector_t num_sectors = rdev->sb_start;
 
 	if (chunk_size)
-		size &= ~((sector_t)chunk_size/1024 - 1);
-	return size;
+		num_sectors &= ~((sector_t)chunk_size/512 - 1);
+	return num_sectors;
 }
 
 static int alloc_disk_sb(mdk_rdev_t * rdev)
···
 	rdev->sb_page = alloc_page(GFP_KERNEL);
 	if (!rdev->sb_page) {
 		printk(KERN_ALERT "md: out of memory.\n");
-		return -EINVAL;
+		return -ENOMEM;
 	}
 
 	return 0;
···
 		put_page(rdev->sb_page);
 		rdev->sb_loaded = 0;
 		rdev->sb_page = NULL;
-		rdev->sb_offset = 0;
+		rdev->sb_start = 0;
 		rdev->size = 0;
 	}
 }
···
 		return 0;
 
 
-	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
+	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
 		goto fail;
 	rdev->sb_loaded = 1;
 	return 0;
···
 
 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
-	if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
-	     (sb1->set_uuid1 == sb2->set_uuid1) &&
-	     (sb1->set_uuid2 == sb2->set_uuid2) &&
-	     (sb1->set_uuid3 == sb2->set_uuid3))
-
-		return 1;
-
-	return 0;
+	return	sb1->set_uuid0 == sb2->set_uuid0 &&
+		sb1->set_uuid1 == sb2->set_uuid1 &&
+		sb1->set_uuid2 == sb2->set_uuid2 &&
+		sb1->set_uuid3 == sb2->set_uuid3;
 }
-
 
 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
···
 
 	if (!tmp1 || !tmp2) {
 		ret = 0;
-		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
+		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
 		goto abort;
 	}
 
···
 	tmp1->nr_disks = 0;
 	tmp2->nr_disks = 0;
 
-	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
-		ret = 0;
-	else
-		ret = 1;
-
+	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
 abort:
 	kfree(tmp1);
 	kfree(tmp2);
···
 */
 
 struct super_type  {
-	char *name;
-	struct module *owner;
-	int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
-	int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
-	void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+	char		    *name;
+	struct module	    *owner;
+	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
+					  int minor_version);
+	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
+						sector_t num_sectors);
 };
 
 /*
···
 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
 	mdp_super_t *sb;
 	int ret;
-	sector_t sb_offset;
 
 	/*
-	 * Calculate the position of the superblock,
+	 * Calculate the position of the superblock (512byte sectors),
 	 * it's at the end of the disk.
 	 *
 	 * It also happens to be a multiple of 4Kb.
 	 */
-	sb_offset = calc_dev_sboffset(rdev->bdev);
-	rdev->sb_offset = sb_offset;
+	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 
 	ret = read_disk_sb(rdev, MD_SB_BYTES);
 	if (ret) return ret;
···
 	else
 		ret = 0;
 	}
-	rdev->size = calc_dev_size(rdev, sb->chunk_size);
+	rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;
 
 	if (rdev->size < sb->size && sb->level > 1)
 		/* "this cannot possibly happen" ... */
···
 }
 
 /*
+ * rdev_size_change for 0.90.0
+ */
+static unsigned long long
+super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+	if (num_sectors && num_sectors < rdev->mddev->size * 2)
+		return 0; /* component must fit device */
+	if (rdev->mddev->bitmap_offset)
+		return 0; /* can't move bitmap */
+	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	if (!num_sectors || num_sectors > rdev->sb_start)
+		num_sectors = rdev->sb_start;
+	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+		       rdev->sb_page);
+	md_super_wait(rdev->mddev);
+	return num_sectors / 2; /* kB for sysfs */
+}
+
+
+/*
  * version 1 superblock
  */
···
 {
 	struct mdp_superblock_1 *sb;
 	int ret;
-	sector_t sb_offset;
+	sector_t sb_start;
 	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
 	int bmask;
 
 	/*
-	 * Calculate the position of the superblock.
+	 * Calculate the position of the superblock in 512byte sectors.
 	 * It is always aligned to a 4K boundary and
 	 * depeding on minor_version, it can be:
 	 * 0: At least 8K, but less than 12K, from end of device
···
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
-		sb_offset -= 8*2;
-		sb_offset &= ~(sector_t)(4*2-1);
-		/* convert from sectors to K */
-		sb_offset /= 2;
+		sb_start = rdev->bdev->bd_inode->i_size >> 9;
+		sb_start -= 8*2;
+		sb_start &= ~(sector_t)(4*2-1);
 		break;
 	case 1:
-		sb_offset = 0;
+		sb_start = 0;
 		break;
 	case 2:
-		sb_offset = 4;
+		sb_start = 8;
 		break;
 	default:
 		return -EINVAL;
 	}
-	rdev->sb_offset = sb_offset;
+	rdev->sb_start = sb_start;
 
 	/* superblock is rarely larger than 1K, but it can be larger,
 	 * and it is safe to read 4k, so we do that
···
 	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
 	    sb->major_version != cpu_to_le32(1) ||
 	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
-	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
+	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
 	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
 		return -EINVAL;
···
 	rdev->sb_size = (rdev->sb_size | bmask) + 1;
 
 	if (minor_version
-	    && rdev->data_offset < sb_offset + (rdev->sb_size/512))
+	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
 		return -EINVAL;
 
 	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
···
 	if (minor_version)
 		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
 	else
-		rdev->size = rdev->sb_offset;
+		rdev->size = rdev->sb_start / 2;
 	if (rdev->size < le64_to_cpu(sb->data_size)/2)
 		return -EINVAL;
 	rdev->size = le64_to_cpu(sb->data_size)/2;
···
 	sb->sb_csum = calc_sb_1_csum(sb);
 }
 
+static unsigned long long
+super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+	struct mdp_superblock_1 *sb;
+	sector_t max_sectors;
+	if (num_sectors && num_sectors < rdev->mddev->size * 2)
+		return 0; /* component must fit device */
+	if (rdev->sb_start < rdev->data_offset) {
+		/* minor versions 1 and 2; superblock before data */
+		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+		max_sectors -= rdev->data_offset;
+		if (!num_sectors || num_sectors > max_sectors)
+			num_sectors = max_sectors;
+	} else if (rdev->mddev->bitmap_offset) {
+		/* minor version 0 with bitmap we can't move */
+		return 0;
+	} else {
+		/* minor version 0; superblock after data */
+		sector_t sb_start;
+		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+		sb_start &= ~(sector_t)(4*2 - 1);
+		max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
+		if (!num_sectors || num_sectors > max_sectors)
+			num_sectors = max_sectors;
+		rdev->sb_start = sb_start;
+	}
+	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+	sb->data_size = cpu_to_le64(num_sectors);
+	sb->super_offset = rdev->sb_start;
+	sb->sb_csum = calc_sb_1_csum(sb);
+	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+		       rdev->sb_page);
+	md_super_wait(rdev->mddev);
+	return num_sectors / 2; /* kB for sysfs */
+}
 
 static struct super_type super_types[] = {
 	[0] = {
 		.name	= "0.90.0",
 		.owner	= THIS_MODULE,
-		.load_super	= super_90_load,
-		.validate_super	= super_90_validate,
-		.sync_super	= super_90_sync,
+		.load_super	    = super_90_load,
+		.validate_super	    = super_90_validate,
+		.sync_super	    = super_90_sync,
+		.rdev_size_change   = super_90_rdev_size_change,
 	},
 	[1] = {
 		.name	= "md-1",
 		.owner	= THIS_MODULE,
-		.load_super	= super_1_load,
-		.validate_super	= super_1_validate,
-		.sync_super	= super_1_sync,
+		.load_super	    = super_1_load,
+		.validate_super	    = super_1_validate,
+		.sync_super	    = super_1_sync,
+		.rdev_size_change   = super_1_rdev_size_change,
 	},
 };
 
 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 {
-	struct list_head *tmp, *tmp2;
 	mdk_rdev_t *rdev, *rdev2;
 
-	rdev_for_each(rdev, tmp, mddev1)
-		rdev_for_each(rdev2, tmp2, mddev2)
+	rcu_read_lock();
+	rdev_for_each_rcu(rdev, mddev1)
+		rdev_for_each_rcu(rdev2, mddev2)
 			if (rdev->bdev->bd_contains ==
-			    rdev2->bdev->bd_contains)
+			    rdev2->bdev->bd_contains) {
+				rcu_read_unlock();
 				return 1;
-
+			}
+	rcu_read_unlock();
 	return 0;
 }
···
 		kobject_del(&rdev->kobj);
 		goto fail;
 	}
-	list_add(&rdev->same_set, &mddev->disks);
+	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
 	return 0;
···
 		return;
 	}
 	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
-	list_del_init(&rdev->same_set);
+	list_del_rcu(&rdev->same_set);
 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
 	sysfs_remove_link(&rdev->kobj, "block");
 
 	/* We need to delay this, otherwise we can deadlock when
-	 * writing to 'remove' to "dev/state"
+	 * writing to 'remove' to "dev/state". We also need
+	 * to delay it due to rcu usage.
 	 */
+	synchronize_rcu();
 	INIT_WORK(&rdev->del_work, md_delayed_delete);
 	kobject_get(&rdev->kobj);
 	schedule_work(&rdev->del_work);
···
 	if (rdev->mddev)
 		MD_BUG();
 	free_disk_sb(rdev);
-	list_del_init(&rdev->same_set);
 #ifndef MODULE
 	if (test_bit(AutoDetected, &rdev->flags))
 		md_autodetect_dev(rdev->bdev->bd_dev);
···
 		dprintk("%s ", bdevname(rdev->bdev,b));
 		if (!test_bit(Faulty, &rdev->flags)) {
 			md_super_write(mddev,rdev,
-				       rdev->sb_offset<<1, rdev->sb_size,
+				       rdev->sb_start, rdev->sb_size,
 				       rdev->sb_page);
 			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
 				bdevname(rdev->bdev,b),
-				(unsigned long long)rdev->sb_offset);
+				(unsigned long long)rdev->sb_start);
 			rdev->sb_events = mddev->events;
 
 		} else
···
 
 }
 
-/* words written to sysfs files may, or my not, be \n terminated.
+/* words written to sysfs files may, or may not, be \n terminated.
  * We want to accept with case. For this we use cmd_match.
  */
 static int cmd_match(const char *cmd, const char *str)
···
 
 		err = 0;
 	}
+	if (!err)
+		sysfs_notify(&rdev->kobj, NULL, "state");
 	return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
···
 		slot = -1;
 	else if (e==buf || (*e && *e!= '\n'))
 		return -EINVAL;
-	if (rdev->mddev->pers) {
+	if (rdev->mddev->pers && slot == -1) {
 		/* Setting 'slot' on an active array requires also
 		 * updating the 'rd%d' link, and communicating
 		 * with the personality with ->hot_*_disk.
···
 		 * failed/spare devices.  This normally happens automatically,
 		 * but not when the metadata is externally managed.
 		 */
-		if (slot != -1)
-			return -EBUSY;
 		if (rdev->raid_disk == -1)
 			return -EEXIST;
 		/* personality does all needed checks */
···
 		sysfs_remove_link(&rdev->mddev->kobj, nm);
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
 		md_wakeup_thread(rdev->mddev->thread);
+	} else if (rdev->mddev->pers) {
+		mdk_rdev_t *rdev2;
+		struct list_head *tmp;
+		/* Activating a spare .. or possibly reactivating
+		 * if we every get bitmaps working here.
+		 */
+
+		if (rdev->raid_disk != -1)
+			return -EBUSY;
+
+		if (rdev->mddev->pers->hot_add_disk == NULL)
+			return -EINVAL;
+
+		rdev_for_each(rdev2, tmp, rdev->mddev)
+			if (rdev2->raid_disk == slot)
+				return -EEXIST;
+
+		rdev->raid_disk = slot;
+		if (test_bit(In_sync, &rdev->flags))
+			rdev->saved_raid_disk = slot;
+		else
+			rdev->saved_raid_disk = -1;
+		err = rdev->mddev->pers->
+			hot_add_disk(rdev->mddev, rdev);
+		if (err) {
+			rdev->raid_disk = -1;
+			return err;
+		} else
+			sysfs_notify(&rdev->kobj, NULL, "state");
+		sprintf(nm, "rd%d", rdev->raid_disk);
+		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+			printk(KERN_WARNING
+			       "md: cannot register "
+			       "%s for %s\n",
+			       nm, mdname(rdev->mddev));
+
+		/* don't wakeup anyone, leave that to userspace. */
 	} else {
 		if (slot >= rdev->mddev->raid_disks)
 			return -ENOSPC;
···
 		clear_bit(Faulty, &rdev->flags);
 		clear_bit(WriteMostly, &rdev->flags);
 		set_bit(In_sync, &rdev->flags);
+		sysfs_notify(&rdev->kobj, NULL, "state");
 	}
 	return len;
 }
···
 	unsigned long long offset = simple_strtoull(buf, &e, 10);
 	if (e==buf || (*e && *e != '\n'))
 		return -EINVAL;
-	if (rdev->mddev->pers)
+	if (rdev->mddev->pers && rdev->raid_disk >= 0)
 		return -EBUSY;
 	if (rdev->size && rdev->mddev->external)
 		/* Must set offset before size, so overlap checks
···
 static ssize_t
 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 {
-	char *e;
-	unsigned long long size = simple_strtoull(buf, &e, 10);
+	unsigned long long size;
 	unsigned long long oldsize = rdev->size;
 	mddev_t *my_mddev = rdev->mddev;
 
-	if (e==buf || (*e && *e != '\n'))
+	if (strict_strtoull(buf, 10, &size) < 0)
 		return -EINVAL;
-	if (my_mddev->pers)
-		return -EBUSY;
+	if (size < my_mddev->size)
+		return -EINVAL;
+	if (my_mddev->pers && rdev->raid_disk >= 0) {
+		if (my_mddev->persistent) {
+			size = super_types[my_mddev->major_version].
+				rdev_size_change(rdev, size * 2);
+			if (!size)
+				return -EBUSY;
+		} else if (!size) {
+			size = (rdev->bdev->bd_inode->i_size >> 10);
+			size -= rdev->data_offset/2;
+		}
+		if (size < my_mddev->size)
+			return -EINVAL; /* component must fit device */
+	}
+
 	rdev->size = size;
-	if (size > oldsize && rdev->mddev->external) {
+	if (size > oldsize && my_mddev->external) {
 		/* need to check that all other rdevs with the same ->bdev
 		 * do not overlap.  We need to unlock the mddev to avoid
 		 * a deadlock.  We have already changed rdev->size, and if
···
 			if (test_bit(AllReserved, &rdev2->flags) ||
 			    (rdev->bdev == rdev2->bdev &&
 			     rdev != rdev2 &&
-			     overlaps(rdev->data_offset, rdev->size,
-				      rdev2->data_offset, rdev2->size))) {
+			     overlaps(rdev->data_offset, rdev->size * 2,
+				      rdev2->data_offset,
+				      rdev2->size * 2))) {
 				overlap = 1;
 				break;
 			}
···
 			return -EBUSY;
 		}
 	}
-	if (size < my_mddev->size || my_mddev->size == 0)
-		my_mddev->size = size;
 	return len;
 }
···
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
-*     Writing this, if accepted, will block until array is quiessent
+*     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
···
 	return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(mddev_t * mddev, int ro);
+static int do_md_stop(mddev_t * mddev, int ro, int is_open);
 static int do_md_run(mddev_t * mddev);
 static int restart_array(mddev_t *mddev);
···
 		break;
 	case clear:
 		/* stopping an active array */
-		if (atomic_read(&mddev->active) > 1)
+		if (atomic_read(&mddev->openers) > 0)
 			return -EBUSY;
-		err = do_md_stop(mddev, 0);
+		err = do_md_stop(mddev, 0, 0);
 		break;
 	case inactive:
 		/* stopping an active array */
 		if (mddev->pers) {
-			if (atomic_read(&mddev->active) > 1)
+			if (atomic_read(&mddev->openers) > 0)
 				return -EBUSY;
-			err = do_md_stop(mddev, 2);
+			err = do_md_stop(mddev, 2, 0);
 		} else
 			err = 0; /* already inactive */
 		break;
···
 		break; /* not supported yet */
 	case readonly:
 		if (mddev->pers)
-			err = do_md_stop(mddev, 1);
+			err = do_md_stop(mddev, 1, 0);
 		else {
 			mddev->ro = 1;
 			set_disk_ro(mddev->gendisk, 1);
···
 	case read_auto:
 		if (mddev->pers) {
 			if (mddev->ro != 1)
-				err = do_md_stop(mddev, 1);
+				err = do_md_stop(mddev, 1, 0);
 			else
 				err = restart_array(mddev);
 			if (err == 0) {
···
 	}
 	if (err)
 		return err;
-	else
+	else {
+		sysfs_notify(&mddev->kobj, NULL, "array_state");
 		return len;
+	}
 }
 static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
···
 	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
 }
 
-static int update_size(mddev_t *mddev, unsigned long size);
+static int update_size(mddev_t *mddev, sector_t num_sectors);
 
 static ssize_t
 size_store(mddev_t *mddev, const char *buf, size_t len)
···
 		return -EINVAL;
 
 	if (mddev->pers) {
-		err = update_size(mddev, size);
+		err = update_size(mddev, size * 2);
 		md_update_sb(mddev, 1);
 	} else {
 		if (mddev->size == 0 ||
···
 			type = "check";
 		else
 			type = "repair";
-	} else
+	} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
 		type = "recover";
 	}
 	return sprintf(page, "%s\n", type);
···
 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
 		return -EBUSY;
-	else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+	else if (cmd_match(page, "resync"))
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-	else if (cmd_match(page, "reshape")) {
+	else if (cmd_match(page, "recover")) {
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	} else if (cmd_match(page, "reshape")) {
 		int err;
 		if (mddev->pers->start_reshape == NULL)
 			return -EINVAL;
 		err = mddev->pers->start_reshape(mddev);
 		if (err)
 			return err;
+		sysfs_notify(&mddev->kobj, NULL, "degraded");
 	} else {
 		if (cmd_match(page, "check"))
 			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
···
 	}
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
+	sysfs_notify(&mddev->kobj, NULL, "sync_action");
 	return len;
 }
···
 sync_speed_show(mddev_t *mddev, char *page)
 {
 	unsigned long resync, dt, db;
-	resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
-	dt = ((jiffies - mddev->resync_mark) / HZ);
+	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
+	dt = (jiffies - mddev->resync_mark) / HZ;
 	if (!dt) dt++;
-	db = resync - (mddev->resync_mark_cnt);
-	return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
+	db = resync - mddev->resync_mark_cnt;
+	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
 }
 
 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
···
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
 
 static ssize_t
+min_sync_show(mddev_t *mddev, char *page)
+{
+	return sprintf(page, "%llu\n",
+		       (unsigned long long)mddev->resync_min);
+}
+static ssize_t
+min_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+	unsigned long long min;
+	if (strict_strtoull(buf, 10, &min))
+		return -EINVAL;
+	if (min > mddev->resync_max)
+		return -EINVAL;
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		return -EBUSY;
+
+	/* Must be a multiple of chunk_size */
+	if (mddev->chunk_size) {
+		if (min & (sector_t)((mddev->chunk_size>>9)-1))
+			return -EINVAL;
+	}
+	mddev->resync_min = min;
+
+	return len;
+}
+
+static struct md_sysfs_entry md_min_sync =
+__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
+
+static ssize_t
 max_sync_show(mddev_t *mddev, char *page)
 {
 	if (mddev->resync_max == MaxSector)
···
 	if (strncmp(buf, "max", 3) == 0)
 		mddev->resync_max = MaxSector;
 	else {
-		char *ep;
-		unsigned long long max = simple_strtoull(buf, &ep, 10);
-		if (ep == buf || (*ep != 0 && *ep != '\n'))
+		unsigned long long max;
+		if (strict_strtoull(buf, 10, &max))
+			return -EINVAL;
+		if (max < mddev->resync_min)
 			return -EINVAL;
 		if (max < mddev->resync_max &&
 		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
···
 	&md_sync_speed.attr,
 	&md_sync_force_parallel.attr,
 	&md_sync_completed.attr,
+	&md_min_sync.attr,
 	&md_max_sync.attr,
 	&md_suspend_lo.attr,
 	&md_suspend_hi.attr,
···
 	disk->queue = mddev->queue;
 	add_disk(disk);
 	mddev->gendisk = disk;
-	mutex_unlock(&disks_mutex);
 	error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
 				     "%s", "md");
+	mutex_unlock(&disks_mutex);
 	if (error)
 		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
 		       disk->disk_name);
···
 {
 	mddev_t *mddev = (mddev_t *) data;
 
-	mddev->safemode = 1;
+	if (!atomic_read(&mddev->writes_pending)) {
+		mddev->safemode = 1;
+		if (mddev->external)
+			sysfs_notify(&mddev->kobj, NULL, "array_state");
+	}
 	md_wakeup_thread(mddev->thread);
 }
···
 		 * We don't want the data to overlap the metadata,
 		 * Internal Bitmap issues has handled elsewhere.
 		 */
-		if (rdev->data_offset < rdev->sb_offset) {
+		if (rdev->data_offset < rdev->sb_start) {
 			if (mddev->size &&
 			    rdev->data_offset + mddev->size*2
-			    > rdev->sb_offset*2) {
+			    > rdev->sb_start) {
 				printk("md: %s: data overlaps metadata\n",
 				       mdname(mddev));
 				return -EINVAL;
 			}
 		} else {
-			if (rdev->sb_offset*2 + rdev->sb_size/512
+			if (rdev->sb_start + rdev->sb_size/512
 			    > rdev->data_offset) {
 				printk("md: %s: metadata overlaps data\n",
 				       mdname(mddev));
 				return -EINVAL;
 			}
 		}
+		sysfs_notify(&rdev->kobj, NULL, "state");
 	}
 
 	md_probe(mddev->unit, NULL, NULL);
···
 		mddev->ro = 2; /* read-only, but switch on first write */
 
 	err = mddev->pers->run(mddev);
-	if (!err && mddev->pers->sync_request) {
+	if (err)
+		printk(KERN_ERR "md: pers->run() failed ...\n");
+	else if (mddev->pers->sync_request) {
 		err = bitmap_create(mddev);
 		if (err) {
 			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
···
 		}
 	}
 	if (err) {
-		printk(KERN_ERR "md: pers->run() failed ...\n");
 		module_put(mddev->pers->owner);
 		mddev->pers = NULL;
 		bitmap_destroy(mddev);
···
 	if (mddev->flags)
 		md_update_sb(mddev, 0);
 
-	set_capacity(disk, mddev->array_size<<1);
+	set_capacity(disk, mddev->array_sectors);
 
 	/* If we call blk_queue_make_request here, it will
 	 * re-initialise max_sectors etc which may have been
···
 
 	mddev->changed = 1;
 	md_new_event(mddev);
+	sysfs_notify(&mddev->kobj, NULL, "array_state");
+	sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
 	return 0;
 }
···
 static int restart_array(mddev_t *mddev)
 {
 	struct gendisk *disk = mddev->gendisk;
-	int err;
 
-	/*
-	 * Complain if it has no devices
-	 */
-	err = -ENXIO;
+	/* Complain if it has no devices */
 	if (list_empty(&mddev->disks))
-		goto out;
-
-	if (mddev->pers) {
-		err = -EBUSY;
-		if (!mddev->ro)
-			goto out;
-
-		mddev->safemode = 0;
-		mddev->ro = 0;
-		set_disk_ro(disk, 0);
-
-		printk(KERN_INFO "md: %s switched to read-write mode.\n",
-			mdname(mddev));
-		/*
-		 * Kick recovery or resync if necessary
-		 */
-		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-		md_wakeup_thread(mddev->thread);
-		md_wakeup_thread(mddev->sync_thread);
-		err = 0;
-	} else
-		err = -EINVAL;
-
-out:
-	return err;
+		return -ENXIO;
+	if (!mddev->pers)
+		return -EINVAL;
+	if (!mddev->ro)
+		return -EBUSY;
+	mddev->safemode = 0;
+	mddev->ro = 0;
+	set_disk_ro(disk, 0);
+	printk(KERN_INFO "md: %s switched to read-write mode.\n",
+		mdname(mddev));
+	/* Kick recovery or resync if necessary */
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	md_wakeup_thread(mddev->thread);
+	md_wakeup_thread(mddev->sync_thread);
+	sysfs_notify(&mddev->kobj, NULL, "array_state");
+	return 0;
 }
 
 /* similar to deny_write_access, but accounts for our holding a reference
···
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
-static int do_md_stop(mddev_t * mddev, int mode)
+static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 {
 	int err = 0;
 	struct gendisk *disk = mddev->gendisk;
 
+	if (atomic_read(&mddev->openers) > is_open) {
+		printk("md: %s still in use.\n",mdname(mddev));
+		return -EBUSY;
+	}
+
 	if (mddev->pers) {
-		if (atomic_read(&mddev->active)>2) {
-			printk("md: %s still in use.\n",mdname(mddev));
-			return -EBUSY;
-		}
 
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
···
 
 		export_array(mddev);
 
-		mddev->array_size = 0;
+		mddev->array_sectors = 0;
 		mddev->size = 0;
 		mddev->raid_disks = 0;
 		mddev->recovery_cp = 0;
+		mddev->resync_min = 0;
 		mddev->resync_max = MaxSector;
 		mddev->reshape_position = MaxSector;
 		mddev->external = 0;
···
 			mdname(mddev));
 	err = 0;
 	md_new_event(mddev);
+	sysfs_notify(&mddev->kobj, NULL, "array_state");
 out:
 	return err;
 }
···
 	err = do_md_run (mddev);
 	if (err) {
 		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-		do_md_stop (mddev, 0);
+		do_md_stop (mddev, 0, 0);
 	}
 }
···
 		/* on success, candidates will be empty, on error
 		 * it won't...
 		 */
-		rdev_for_each_list(rdev, tmp, candidates)
+		rdev_for_each_list(rdev, tmp, candidates) {
+			list_del_init(&rdev->same_set);
 			export_rdev(rdev);
+		}
 		mddev_put(mddev);
 	}
 	printk(KERN_INFO "md: ... autorun DONE.\n");
···
 	char *ptr, *buf = NULL;
 	int err = -ENOMEM;
 
-	md_allow_write(mddev);
+	if (md_allow_write(mddev))
+		file = kmalloc(sizeof(*file), GFP_NOIO);
+	else
+		file = kmalloc(sizeof(*file), GFP_KERNEL);
 
-	file = kmalloc(sizeof(*file), GFP_KERNEL);
 	if (!file)
 		goto out;
···
 static int get_disk_info(mddev_t * mddev, void __user * arg)
 {
 	mdu_disk_info_t info;
-	unsigned int nr;
 	mdk_rdev_t *rdev;
 
 	if (copy_from_user(&info, arg, sizeof(info)))
 		return -EFAULT;
 
-	nr = info.number;
-
-	rdev = find_rdev_nr(mddev, nr);
+	rdev = find_rdev_nr(mddev, info.number);
 	if (rdev) {
 		info.major = MAJOR(rdev->bdev->bd_dev);
 		info.minor = MINOR(rdev->bdev->bd_dev);
···
 	}
 	if (err)
 		export_rdev(rdev);
+	else
+		sysfs_notify(&rdev->kobj, NULL, "state");
 
 	md_update_sb(mddev, 1);
+	if (mddev->degraded)
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
 	return err;
···
 
 	if (!mddev->persistent) {
 		printk(KERN_INFO "md: nonpersistent superblock ...\n");
-		rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
 	} else
-		rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
-	rdev->size = calc_dev_size(rdev, mddev->chunk_size);
+		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
 
 	err = bind_rdev_to_array(rdev, mddev);
 	if (err) {
···
 {
 	char b[BDEVNAME_SIZE];
 	mdk_rdev_t *rdev;
-
-	if (!mddev->pers)
-		return -ENODEV;
 
 	rdev = find_rdev(mddev, dev);
 	if (!rdev)
···
 {
 	char b[BDEVNAME_SIZE];
 	int err;
-	unsigned int size;
 	mdk_rdev_t *rdev;
 
 	if (!mddev->pers)
···
 	}
 
 	if (mddev->persistent)
-		rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
+		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 	else
-		rdev->sb_offset =
-			rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
 
-	size = calc_dev_size(rdev, mddev->chunk_size);
-	rdev->size = size;
+	rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
 
 	if (test_bit(Faulty, &rdev->flags)) {
 		printk(KERN_WARNING
···
 	return 0;
 }
 
-static int update_size(mddev_t *mddev, unsigned long size)
+static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
 	mdk_rdev_t * rdev;
 	int rv;
 	struct list_head *tmp;
-	int fit = (size == 0);
+	int fit = (num_sectors == 0);
 
 	if (mddev->pers->resize == NULL)
 		return -EINVAL;
-	/* The "size" is the amount of each device that is used.
-	 * This can only make sense for arrays with redundancy.
-	 * linear and raid0 always use whatever space is available
-	 * We can only consider changing the size if no resync
-	 * or reconstruction is happening, and if the new size
-	 * is acceptable. It must fit before the sb_offset or,
-	 * if that is <data_offset, it must fit before the
-	 * size of each device.
-	 * If size is zero, we find the largest size that fits.
+	/* The "num_sectors" is the number of sectors of each device that
+	 * is used.  This can only make sense for arrays with redundancy.
+	 * linear and raid0 always use whatever space is available. We can only
+	 * consider changing this number if no resync or reconstruction is
+	 * happening, and if the new size is acceptable. It must fit before the
+	 * sb_start or, if that is <data_offset, it must fit before the size
+	 * of each device.  If num_sectors is zero, we find the largest size
+	 * that fits.
+
 	 */
 	if (mddev->sync_thread)
 		return -EBUSY;
···
 		sector_t avail;
 		avail = rdev->size * 2;
 
-		if (fit && (size == 0 || size > avail/2))
-			size = avail/2;
-		if (avail < ((sector_t)size << 1))
+		if (fit && (num_sectors == 0 || num_sectors > avail))
+			num_sectors = avail;
+		if (avail < num_sectors)
 			return -ENOSPC;
 	}
-	rv = mddev->pers->resize(mddev, (sector_t)size *2);
+	rv = mddev->pers->resize(mddev, num_sectors);
 	if (!rv) {
 		struct block_device *bdev;
 
 		bdev = bdget_disk(mddev->gendisk, 0);
 		if (bdev) {
 			mutex_lock(&bdev->bd_inode->i_mutex);
-			i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
+			i_size_write(bdev->bd_inode,
+				     (loff_t)mddev->array_sectors << 9);
 			mutex_unlock(&bdev->bd_inode->i_mutex);
 			bdput(bdev);
 		}
···
 		return mddev->pers->reconfig(mddev, info->layout, -1);
 	}
 	if (info->size >= 0 && mddev->size != info->size)
-		rv = update_size(mddev, info->size);
+		rv = update_size(mddev, (sector_t)info->size * 2);
 
 	if (mddev->raid_disks != info->raid_disks)
 		rv = update_raid_disks(mddev, info->raid_disks);
···
 	return 0;
 }
 
+/*
+ * We have a problem here : there is no easy way to give a CHS
+ * virtual geometry. We currently pretend that we have a 2 heads
+ * 4 sectors (with a BIG number of cylinders...). This drives
+ * dosfs just mad... ;-)
+ */
 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	mddev_t *mddev = bdev->bd_disk->private_data;
···
 		goto done_unlock;
 
 	case STOP_ARRAY:
-		err = do_md_stop (mddev, 0);
+		err = do_md_stop (mddev, 0, 1);
 		goto done_unlock;
 
 	case STOP_ARRAY_RO:
-		err = do_md_stop (mddev, 1);
+		err = do_md_stop (mddev, 1, 1);
 		goto done_unlock;
 
-	/*
-	 * We have a problem here : there is no easy way to give a CHS
-	 * virtual geometry. We currently pretend that we have a 2 heads
-	 * 4 sectors (with a BIG number of cylinders...). This drives
-	 * dosfs just mad... ;-)
-	 */
 	}
 
 	/*
···
 	 * here and hit the 'default' below, so only disallow
 	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
 	 */
-	if (_IOC_TYPE(cmd) == MD_MAJOR &&
-	    mddev->ro && mddev->pers) {
+	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
 		if (mddev->ro == 2) {
 			mddev->ro = 0;
-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			md_wakeup_thread(mddev->thread);
-
+			sysfs_notify(&mddev->kobj, NULL, "array_state");
+			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			md_wakeup_thread(mddev->thread);
 		} else {
 			err = -EROFS;
 			goto abort_unlock;
···
 
 	err = 0;
 	mddev_get(mddev);
+	atomic_inc(&mddev->openers);
 	mddev_unlock(mddev);
 
 	check_disk_change(inode->i_bdev);
···
 	mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
 
 	BUG_ON(!mddev);
+	atomic_dec(&mddev->openers);
 	mddev_put(mddev);
 
 	return 0;
···
 	if (!mddev->pers->error_handler)
 		return;
 	mddev->pers->error_handler(mddev,rdev);
+	if (mddev->degraded)
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+	set_bit(StateChanged, &rdev->flags);
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
···
 	if (!list_empty(&mddev->disks)) {
 		if (mddev->pers)
 			seq_printf(seq, "\n      %llu blocks",
-				   (unsigned long long)mddev->array_size);
+				   (unsigned long long)
+				   mddev->array_sectors / 2);
 		else
 			seq_printf(seq, "\n      %llu blocks",
-				(unsigned long long)size);
+				   (unsigned long long)size);
 	}
 	if (mddev->persistent) {
 		if (mddev->major_version != 0 ||
···
 static int is_mddev_idle(mddev_t *mddev)
 {
 	mdk_rdev_t * rdev;
-	struct list_head *tmp;
 	int idle;
 	long curr_events;
 
 	idle = 1;
-	rdev_for_each(rdev, tmp, mddev) {
+	rcu_read_lock();
+	rdev_for_each_rcu(rdev, mddev) {
 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
 		curr_events = disk_stat_read(disk, sectors[0]) +
 				disk_stat_read(disk, sectors[1]) -
···
 			idle = 0;
 		}
 	}
+	rcu_read_unlock();
 	return idle;
 }
···
 */
 void md_write_start(mddev_t *mddev, struct bio *bi)
 {
+	int did_change = 0;
 	if (bio_data_dir(bi) != WRITE)
 		return;
···
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 		md_wakeup_thread(mddev->sync_thread);
+		did_change = 1;
 	}
 	atomic_inc(&mddev->writes_pending);
 	if (mddev->safemode == 1)
···
 			mddev->in_sync = 0;
 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
 			md_wakeup_thread(mddev->thread);
+			did_change = 1;
 		}
 		spin_unlock_irq(&mddev->write_lock);
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
 	}
+	if (did_change)
+		sysfs_notify(&mddev->kobj, NULL, "array_state");
 	wait_event(mddev->sb_wait,
 		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
 		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
···
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
+*
+* In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
+* is dropped, so return -EAGAIN after notifying userspace.
 */
-void md_allow_write(mddev_t *mddev)
+int md_allow_write(mddev_t *mddev)
 {
 	if (!mddev->pers)
-		return;
+		return 0;
 	if (mddev->ro)
-		return;
+		return 0;
+	if (!mddev->pers->sync_request)
+		return 0;
 
 	spin_lock_irq(&mddev->write_lock);
 	if (mddev->in_sync) {
···
 		mddev->safemode = 1;
 		spin_unlock_irq(&mddev->write_lock);
 		md_update_sb(mddev, 0);
-
 		sysfs_notify(&mddev->kobj, NULL, "array_state");
-		/* wait for the dirty state to be recorded in the metadata */
-		wait_event(mddev->sb_wait,
-			   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
-			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 	} else
 		spin_unlock_irq(&mddev->write_lock);
+
+	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+		return -EAGAIN;
+	else
+		return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
···
 		max_sectors = mddev->resync_max_sectors;
 		mddev->resync_mismatches = 0;
 		/* we don't use the checkpoint if there's a bitmap */
-		if (!mddev->bitmap &&
-		    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+			j = mddev->resync_min;
+		else if (!mddev->bitmap)
 			j = mddev->recovery_cp;
+
 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 		max_sectors = mddev->size << 1;
 	else {
···
 
 skip:
 	mddev->curr_resync = 0;
+	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
···
 		if (rdev->raid_disk < 0
 		    && !test_bit(Faulty, &rdev->flags)) {
 			rdev->recovery_offset = 0;
-			if (mddev->pers->hot_add_disk(mddev,rdev)) {
+			if (mddev->pers->
+			    hot_add_disk(mddev, rdev) == 0) {
 				char nm[20];
 				sprintf(nm, "rd%d", rdev->raid_disk);
 				if (sysfs_create_link(&mddev->kobj,
···
 	int spares = 0;
 
 	if (!mddev->external) {
+		int did_change = 0;
 		spin_lock_irq(&mddev->write_lock);
 		if (mddev->safemode &&
 		    !atomic_read(&mddev->writes_pending) &&
 		    !mddev->in_sync &&
 		    mddev->recovery_cp == MaxSector) {
 			mddev->in_sync = 1;
+			did_change = 1;
 			if (mddev->persistent)
 				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
 		}
 		if (mddev->safemode == 1)
 			mddev->safemode = 0;
 		spin_unlock_irq(&mddev->write_lock);
+		if (did_change)
+			sysfs_notify(&mddev->kobj, NULL, "array_state");
 	}
 
 	if (mddev->flags)
 		md_update_sb(mddev, 0);
+
+	rdev_for_each(rdev, rtmp, mddev)
+		if (test_and_clear_bit(StateChanged, &rdev->flags))
+			sysfs_notify(&rdev->kobj, NULL, "state");
 
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
···
 		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 			/* success...*/
 			/* activate any spares */
-			mddev->pers->spare_active(mddev);
+			if (mddev->pers->spare_active(mddev))
+				sysfs_notify(&mddev->kobj, NULL,
+					     "degraded");
 		}
 		md_update_sb(mddev, 1);
···
 		mddev->recovery = 0;
 		/* flag recovery needed just to double check */
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+		sysfs_notify(&mddev->kobj, NULL, "sync_action");
 		md_new_event(mddev);
 		goto unlock;
 	}
+	/* Set RUNNING before clearing NEEDED to avoid
+	 * any transients in the value of "sync_action".
+	 */
+	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+	clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	/* Clear some bits that don't mean anything, but
 	 * might be left set
 	 */
-	clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
···
 			/* Cannot proceed */
 			goto unlock;
 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	} else if ((spares = remove_and_add_spares(mddev))) {
 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	} else if (mddev->recovery_cp < MaxSector) {
 		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 		/* nothing to be done ... */
 		goto unlock;
 
 	if (mddev->pers->sync_request) {
-		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 		if (spares && mddev->bitmap && ! mddev->bitmap->file) {
 			/* We are adding a device or devices to an array
 			 * which has the bitmap stored on all devices.
···
 			mddev->recovery = 0;
 		} else
 			md_wakeup_thread(mddev->sync_thread);
+		sysfs_notify(&mddev->kobj, NULL, "sync_action");
 		md_new_event(mddev);
 	}
 unlock:
+	if (!mddev->sync_thread) {
+		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
+				       &mddev->recovery))
+			sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	}
 	mddev_unlock(mddev);
 	}
 }
···
 
 	for_each_mddev(mddev, tmp)
 		if (mddev_trylock(mddev)) {
-			do_md_stop (mddev, 1);
+			do_md_stop (mddev, 1, 0);
 			mddev_unlock(mddev);
 		}
 	/*
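Together with the existing sync_max and sync_action files, the new sync_min attribute added above lets userspace restrict a requested check or repair to a window of the array. A hypothetical userspace sketch (paths assume the array is md0; per min_sync_store() the value must be a multiple of the chunk size in sectors, and is rejected while a sync is running):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static void write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		if (fd >= 0) {
			write(fd, val, strlen(val));
			close(fd);
		}
	}

	int main(void)
	{
		write_str("/sys/block/md0/md/sync_min", "0");
		write_str("/sys/block/md0/md/sync_max", "2097152"); /* first 1 GiB in sectors */
		write_str("/sys/block/md0/md/sync_action", "check");
		return 0;
	}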
+12 -5
drivers/md/multipath.c
··· 281 281 { 282 282 multipath_conf_t *conf = mddev->private; 283 283 struct request_queue *q; 284 - int found = 0; 284 + int err = -EEXIST; 285 285 int path; 286 286 struct multipath_info *p; 287 + int first = 0; 288 + int last = mddev->raid_disks - 1; 289 + 290 + if (rdev->raid_disk >= 0) 291 + first = last = rdev->raid_disk; 287 292 288 293 print_multipath_conf(conf); 289 294 290 - for (path=0; path<mddev->raid_disks; path++) 295 + for (path = first; path <= last; path++) 291 296 if ((p=conf->multipaths+path)->rdev == NULL) { 292 297 q = rdev->bdev->bd_disk->queue; 293 298 blk_queue_stack_limits(mddev->queue, q); ··· 312 307 rdev->raid_disk = path; 313 308 set_bit(In_sync, &rdev->flags); 314 309 rcu_assign_pointer(p->rdev, rdev); 315 - found = 1; 310 + err = 0; 311 + break; 316 312 } 317 313 318 314 print_multipath_conf(conf); 319 - return found; 315 + 316 + return err; 320 317 } 321 318 322 319 static int multipath_remove_disk(mddev_t *mddev, int number) ··· 504 497 /* 505 498 * Ok, everything is just fine now 506 499 */ 507 - mddev->array_size = mddev->size; 500 + mddev->array_sectors = mddev->size * 2; 508 501 509 502 mddev->queue->unplug_fn = multipath_unplug; 510 503 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
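The multipath_add_disk() rewrite above is the first instance of a pattern repeated in raid1, raid10 and raid5 below: ->hot_add_disk() now returns 0 or a negative errno instead of a 0/1 'found' flag, and if rdev->raid_disk was set beforehand (for example through the per-device sysfs 'slot' attribute) only that slot is considered. The shared shape, as a sketch in which try_slot() is a hypothetical stand-in for each personality's per-slot setup:

	int path;
	int err = -EEXIST;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)	/* a specific slot was requested */
		first = last = rdev->raid_disk;

	for (path = first; path <= last; path++)
		if (try_slot(path)) {	/* hypothetical helper */
			err = 0;
			break;
		}
	return err;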
+4 -4
drivers/md/raid0.c
··· 295 295 goto out_free_conf; 296 296 297 297 /* calculate array device size */ 298 - mddev->array_size = 0; 298 + mddev->array_sectors = 0; 299 299 rdev_for_each(rdev, tmp, mddev) 300 - mddev->array_size += rdev->size; 300 + mddev->array_sectors += rdev->size * 2; 301 301 302 302 printk("raid0 : md_size is %llu blocks.\n", 303 - (unsigned long long)mddev->array_size); 303 + (unsigned long long)mddev->array_sectors / 2); 304 304 printk("raid0 : conf->hash_spacing is %llu blocks.\n", 305 305 (unsigned long long)conf->hash_spacing); 306 306 { 307 - sector_t s = mddev->array_size; 307 + sector_t s = mddev->array_sectors / 2; 308 308 sector_t space = conf->hash_spacing; 309 309 int round; 310 310 conf->preshift = 0;
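The arithmetic here follows the unit convention used throughout this merge: rdev->size and mddev->size count 1 KiB blocks, while the new mddev->array_sectors counts 512-byte sectors, so the two differ by exactly a factor of two. In short:

	sector_t sectors = (sector_t)mddev->size * 2;		/* KiB -> sectors */
	unsigned long long kib = (unsigned long long)sectors / 2; /* sectors -> KiB */
	loff_t bytes = (loff_t)sectors << 9;			/* sectors -> bytes */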
+19 -11
drivers/md/raid1.c
··· 1100 1100 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 1101 1101 { 1102 1102 conf_t *conf = mddev->private; 1103 - int found = 0; 1103 + int err = -EEXIST; 1104 1104 int mirror = 0; 1105 1105 mirror_info_t *p; 1106 + int first = 0; 1107 + int last = mddev->raid_disks - 1; 1106 1108 1107 - for (mirror=0; mirror < mddev->raid_disks; mirror++) 1109 + if (rdev->raid_disk >= 0) 1110 + first = last = rdev->raid_disk; 1111 + 1112 + for (mirror = first; mirror <= last; mirror++) 1108 1113 if ( !(p=conf->mirrors+mirror)->rdev) { 1109 1114 1110 1115 blk_queue_stack_limits(mddev->queue, ··· 1124 1119 1125 1120 p->head_position = 0; 1126 1121 rdev->raid_disk = mirror; 1127 - found = 1; 1122 + err = 0; 1128 1123 /* As all devices are equivalent, we don't need a full recovery 1129 1124 * if this was recently any drive of the array 1130 1125 */ ··· 1135 1130 } 1136 1131 1137 1132 print_conf(conf); 1138 - return found; 1133 + return err; 1139 1134 } 1140 1135 1141 1136 static int raid1_remove_disk(mddev_t *mddev, int number) ··· 2043 2038 /* 2044 2039 * Ok, everything is just fine now 2045 2040 */ 2046 - mddev->array_size = mddev->size; 2041 + mddev->array_sectors = mddev->size * 2; 2047 2042 2048 2043 mddev->queue->unplug_fn = raid1_unplug; 2049 2044 mddev->queue->backing_dev_info.congested_fn = raid1_congested; ··· 2105 2100 * any io in the removed space completes, but it hardly seems 2106 2101 * worth it. 2107 2102 */ 2108 - mddev->array_size = sectors>>1; 2109 - set_capacity(mddev->gendisk, mddev->array_size << 1); 2103 + mddev->array_sectors = sectors; 2104 + set_capacity(mddev->gendisk, mddev->array_sectors); 2110 2105 mddev->changed = 1; 2111 - if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) { 2106 + if (mddev->array_sectors / 2 > mddev->size && 2107 + mddev->recovery_cp == MaxSector) { 2112 2108 mddev->recovery_cp = mddev->size << 1; 2113 2109 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2114 2110 } 2115 - mddev->size = mddev->array_size; 2111 + mddev->size = mddev->array_sectors / 2; 2116 2112 mddev->resync_max_sectors = sectors; 2117 2113 return 0; 2118 2114 } ··· 2137 2131 conf_t *conf = mddev_to_conf(mddev); 2138 2132 int cnt, raid_disks; 2139 2133 unsigned long flags; 2140 - int d, d2; 2134 + int d, d2, err; 2141 2135 2142 2136 /* Cannot change chunk_size, layout, or level */ 2143 2137 if (mddev->chunk_size != mddev->new_chunk || ··· 2149 2143 return -EINVAL; 2150 2144 } 2151 2145 2152 - md_allow_write(mddev); 2146 + err = md_allow_write(mddev); 2147 + if (err) 2148 + return err; 2153 2149 2154 2150 raid_disks = mddev->raid_disks + mddev->delta_disks; 2155 2151
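raid1_resize() now stays in sectors end to end. The interesting branch is growth: if the array was fully in sync (recovery_cp == MaxSector) and the new size exceeds the old one, only the added tail is scheduled for resync. The hunk above, annotated:

	mddev->array_sectors = sectors;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev->changed = 1;
	if (mddev->array_sectors / 2 > mddev->size &&	/* grew (size is in KiB) */
	    mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;	/* resync from the old end */
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = mddev->array_sectors / 2;
	mddev->resync_max_sectors = sectors;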
+14 -8
drivers/md/raid10.c
··· 1114 1114 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1115 1115 {
1116 1116 	conf_t *conf = mddev->private;
1117 - 	int found = 0;
1117 + 	int err = -EEXIST;
1118 1118 	int mirror;
1119 1119 	mirror_info_t *p;
1120 + 	int first = 0;
1121 + 	int last = mddev->raid_disks - 1;
1120 1122 
1121 1123 	if (mddev->recovery_cp < MaxSector)
1122 1124 		/* only hot-add to in-sync arrays, as recovery is
1123 1125 		 * very different from resync
1124 1126 		 */
1125 - 		return 0;
1127 + 		return -EBUSY;
1126 1128 	if (!enough(conf))
1127 - 		return 0;
1129 + 		return -EINVAL;
1130 + 
1131 + 	if (rdev->raid_disk >= 0)
1132 + 		first = last = rdev->raid_disk;
1128 1133 
1129 1134 	if (rdev->saved_raid_disk >= 0 &&
1135 + 	    rdev->saved_raid_disk >= first &&
1130 1136 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1131 1137 		mirror = rdev->saved_raid_disk;
1132 1138 	else
1133 - 		mirror = 0;
1134 - 	for ( ; mirror < mddev->raid_disks; mirror++)
1139 + 		mirror = first;
1140 + 	for ( ; mirror <= last ; mirror++)
1135 1141 		if ( !(p=conf->mirrors+mirror)->rdev) {
1136 1142 
1137 1143 			blk_queue_stack_limits(mddev->queue,
··· 1152 1146 
1153 1147 	p->head_position = 0;
1154 1148 	rdev->raid_disk = mirror;
1155 - 	found = 1;
1149 + 	err = 0;
1156 1150 	if (rdev->saved_raid_disk != mirror)
1157 1151 		conf->fullsync = 1;
1158 1152 	rcu_assign_pointer(p->rdev, rdev);
··· 1160 1154 	}
1161 1155 
1162 1156 	print_conf(conf);
1163 - 	return found;
1157 + 	return err;
1164 1158 }
1165 1159 
1166 1160 static int raid10_remove_disk(mddev_t *mddev, int number)
··· 2165 2159 	/*
2166 2160 	 * Ok, everything is just fine now
2167 2161 	 */
2168 - 	mddev->array_size = size << (conf->chunk_shift-1);
2162 + 	mddev->array_sectors = size << conf->chunk_shift;
2169 2163 	mddev->resync_max_sectors = size << conf->chunk_shift;
2170 2164 
2171 2165 	mddev->queue->unplug_fn = raid10_unplug;
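Two things to note in raid10. First, raid10_add_disk() now reports why a hot-add failed: -EBUSY while the array still needs recovery, -EINVAL when too many mirrors are already missing, -EEXIST when no permitted slot is free. Second, the size hunk at the end changes units but not value: 'size' counts chunks, and conf->chunk_shift (derived in raid10 from mddev->chunk_size) is the log2 of the chunk size in sectors, so the old code's one-bit-smaller shift produced KiB. For example:

	/* with a 64 KiB chunk: chunk_shift == 7, i.e. 128 sectors per chunk */
	sector_t sectors = size << conf->chunk_shift;	/* chunks -> sectors */
	sector_t kib = size << (conf->chunk_shift - 1);	/* the old unit */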
+256 -491
drivers/md/raid5.c
··· 115 115 return_bi = bi->bi_next; 116 116 bi->bi_next = NULL; 117 117 bi->bi_size = 0; 118 - bi->bi_end_io(bi, 119 - test_bit(BIO_UPTODATE, &bi->bi_flags) 120 - ? 0 : -EIO); 118 + bio_endio(bi, 0); 121 119 bi = return_bi; 122 120 } 123 121 } 124 122 125 123 static void print_raid5_conf (raid5_conf_t *conf); 124 + 125 + static int stripe_operations_active(struct stripe_head *sh) 126 + { 127 + return sh->check_state || sh->reconstruct_state || 128 + test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 129 + test_bit(STRIPE_COMPUTE_RUN, &sh->state); 130 + } 126 131 127 132 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 128 133 { ··· 148 143 } 149 144 md_wakeup_thread(conf->mddev->thread); 150 145 } else { 151 - BUG_ON(sh->ops.pending); 146 + BUG_ON(stripe_operations_active(sh)); 152 147 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 153 148 atomic_dec(&conf->preread_active_stripes); 154 149 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) ··· 250 245 251 246 BUG_ON(atomic_read(&sh->count) != 0); 252 247 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 253 - BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete); 248 + BUG_ON(stripe_operations_active(sh)); 254 249 255 250 CHECK_DEVLOCK(); 256 251 pr_debug("init_stripe called, stripe %llu\n", ··· 351 346 return sh; 352 347 } 353 348 354 - /* test_and_ack_op() ensures that we only dequeue an operation once */ 355 - #define test_and_ack_op(op, pend) \ 356 - do { \ 357 - if (test_bit(op, &sh->ops.pending) && \ 358 - !test_bit(op, &sh->ops.complete)) { \ 359 - if (test_and_set_bit(op, &sh->ops.ack)) \ 360 - clear_bit(op, &pend); \ 361 - else \ 362 - ack++; \ 363 - } else \ 364 - clear_bit(op, &pend); \ 365 - } while (0) 366 - 367 - /* find new work to run, do not resubmit work that is already 368 - * in flight 369 - */ 370 - static unsigned long get_stripe_work(struct stripe_head *sh) 371 - { 372 - unsigned long pending; 373 - int ack = 0; 374 - 375 - pending = sh->ops.pending; 376 - 377 - test_and_ack_op(STRIPE_OP_BIOFILL, pending); 378 - test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending); 379 - test_and_ack_op(STRIPE_OP_PREXOR, pending); 380 - test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 381 - test_and_ack_op(STRIPE_OP_POSTXOR, pending); 382 - test_and_ack_op(STRIPE_OP_CHECK, pending); 383 - if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending)) 384 - ack++; 385 - 386 - sh->ops.count -= ack; 387 - if (unlikely(sh->ops.count < 0)) { 388 - printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx " 389 - "ops.complete: %#lx\n", pending, sh->ops.pending, 390 - sh->ops.ack, sh->ops.complete); 391 - BUG(); 392 - } 393 - 394 - return pending; 395 - } 396 - 397 349 static void 398 350 raid5_end_read_request(struct bio *bi, int error); 399 351 static void 400 352 raid5_end_write_request(struct bio *bi, int error); 401 353 402 - static void ops_run_io(struct stripe_head *sh) 354 + static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 403 355 { 404 356 raid5_conf_t *conf = sh->raid_conf; 405 357 int i, disks = sh->disks; 406 358 407 359 might_sleep(); 408 360 409 - set_bit(STRIPE_IO_STARTED, &sh->state); 410 361 for (i = disks; i--; ) { 411 362 int rw; 412 363 struct bio *bi; ··· 391 430 rcu_read_unlock(); 392 431 393 432 if (rdev) { 394 - if (test_bit(STRIPE_SYNCING, &sh->state) || 395 - test_bit(STRIPE_EXPAND_SOURCE, &sh->state) || 396 - test_bit(STRIPE_EXPAND_READY, &sh->state)) 433 + if (s->syncing || s->expanding || s->expanded) 397 434 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 
435 + 436 + set_bit(STRIPE_IO_STARTED, &sh->state); 398 437 399 438 bi->bi_bdev = rdev->bdev; 400 439 pr_debug("%s: for %llu schedule op %ld on disc %d\n", ··· 489 528 (unsigned long long)sh->sector); 490 529 491 530 /* clear completed biofills */ 531 + spin_lock_irq(&conf->device_lock); 492 532 for (i = sh->disks; i--; ) { 493 533 struct r5dev *dev = &sh->dev[i]; 494 534 495 535 /* acknowledge completion of a biofill operation */ 496 536 /* and check if we need to reply to a read request, 497 537 * new R5_Wantfill requests are held off until 498 - * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) 538 + * !STRIPE_BIOFILL_RUN 499 539 */ 500 540 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 501 541 struct bio *rbi, *rbi2; 502 542 503 - /* The access to dev->read is outside of the 504 - * spin_lock_irq(&conf->device_lock), but is protected 505 - * by the STRIPE_OP_BIOFILL pending bit 506 - */ 507 543 BUG_ON(!dev->read); 508 544 rbi = dev->read; 509 545 dev->read = NULL; 510 546 while (rbi && rbi->bi_sector < 511 547 dev->sector + STRIPE_SECTORS) { 512 548 rbi2 = r5_next_bio(rbi, dev->sector); 513 - spin_lock_irq(&conf->device_lock); 514 549 if (--rbi->bi_phys_segments == 0) { 515 550 rbi->bi_next = return_bi; 516 551 return_bi = rbi; 517 552 } 518 - spin_unlock_irq(&conf->device_lock); 519 553 rbi = rbi2; 520 554 } 521 555 } 522 556 } 523 - set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 557 + spin_unlock_irq(&conf->device_lock); 558 + clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 524 559 525 560 return_io(return_bi); 526 561 ··· 567 610 set_bit(R5_UPTODATE, &tgt->flags); 568 611 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 569 612 clear_bit(R5_Wantcompute, &tgt->flags); 570 - set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 613 + clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 614 + if (sh->check_state == check_state_compute_run) 615 + sh->check_state = check_state_compute_result; 571 616 set_bit(STRIPE_HANDLE, &sh->state); 572 617 release_stripe(sh); 573 618 } 574 619 575 - static struct dma_async_tx_descriptor * 576 - ops_run_compute5(struct stripe_head *sh, unsigned long pending) 620 + static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) 577 621 { 578 622 /* kernel stack size limits the total number of disks */ 579 623 int disks = sh->disks; ··· 604 646 ASYNC_TX_XOR_ZERO_DST, NULL, 605 647 ops_complete_compute5, sh); 606 648 607 - /* ack now if postxor is not set to be run */ 608 - if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending)) 609 - async_tx_ack(tx); 610 - 611 649 return tx; 612 650 } 613 651 ··· 613 659 614 660 pr_debug("%s: stripe %llu\n", __func__, 615 661 (unsigned long long)sh->sector); 616 - 617 - set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 618 662 } 619 663 620 664 static struct dma_async_tx_descriptor * ··· 632 680 for (i = disks; i--; ) { 633 681 struct r5dev *dev = &sh->dev[i]; 634 682 /* Only process blocks that are known to be uptodate */ 635 - if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags)) 683 + if (test_bit(R5_Wantdrain, &dev->flags)) 636 684 xor_srcs[count++] = dev->page; 637 685 } 638 686 ··· 644 692 } 645 693 646 694 static struct dma_async_tx_descriptor * 647 - ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 648 - unsigned long pending) 695 + ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 649 696 { 650 697 int disks = sh->disks; 651 - int pd_idx = sh->pd_idx, i; 652 - 653 - /* check if prexor is active which means only process blocks 654 - * that are part of a 
read-modify-write (Wantprexor) 655 - */ 656 - int prexor = test_bit(STRIPE_OP_PREXOR, &pending); 698 + int i; 657 699 658 700 pr_debug("%s: stripe %llu\n", __func__, 659 701 (unsigned long long)sh->sector); ··· 655 709 for (i = disks; i--; ) { 656 710 struct r5dev *dev = &sh->dev[i]; 657 711 struct bio *chosen; 658 - int towrite; 659 712 660 - towrite = 0; 661 - if (prexor) { /* rmw */ 662 - if (dev->towrite && 663 - test_bit(R5_Wantprexor, &dev->flags)) 664 - towrite = 1; 665 - } else { /* rcw */ 666 - if (i != pd_idx && dev->towrite && 667 - test_bit(R5_LOCKED, &dev->flags)) 668 - towrite = 1; 669 - } 670 - 671 - if (towrite) { 713 + if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 672 714 struct bio *wbi; 673 715 674 716 spin_lock(&sh->lock); ··· 681 747 static void ops_complete_postxor(void *stripe_head_ref) 682 748 { 683 749 struct stripe_head *sh = stripe_head_ref; 684 - 685 - pr_debug("%s: stripe %llu\n", __func__, 686 - (unsigned long long)sh->sector); 687 - 688 - set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 689 - set_bit(STRIPE_HANDLE, &sh->state); 690 - release_stripe(sh); 691 - } 692 - 693 - static void ops_complete_write(void *stripe_head_ref) 694 - { 695 - struct stripe_head *sh = stripe_head_ref; 696 750 int disks = sh->disks, i, pd_idx = sh->pd_idx; 697 751 698 752 pr_debug("%s: stripe %llu\n", __func__, ··· 692 770 set_bit(R5_UPTODATE, &dev->flags); 693 771 } 694 772 695 - set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 696 - set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 773 + if (sh->reconstruct_state == reconstruct_state_drain_run) 774 + sh->reconstruct_state = reconstruct_state_drain_result; 775 + else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 776 + sh->reconstruct_state = reconstruct_state_prexor_drain_result; 777 + else { 778 + BUG_ON(sh->reconstruct_state != reconstruct_state_run); 779 + sh->reconstruct_state = reconstruct_state_result; 780 + } 697 781 698 782 set_bit(STRIPE_HANDLE, &sh->state); 699 783 release_stripe(sh); 700 784 } 701 785 702 786 static void 703 - ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 704 - unsigned long pending) 787 + ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 705 788 { 706 789 /* kernel stack size limits the total number of disks */ 707 790 int disks = sh->disks; ··· 714 787 715 788 int count = 0, pd_idx = sh->pd_idx, i; 716 789 struct page *xor_dest; 717 - int prexor = test_bit(STRIPE_OP_PREXOR, &pending); 790 + int prexor = 0; 718 791 unsigned long flags; 719 - dma_async_tx_callback callback; 720 792 721 793 pr_debug("%s: stripe %llu\n", __func__, 722 794 (unsigned long long)sh->sector); ··· 723 797 /* check if prexor is active which means only process blocks 724 798 * that are part of a read-modify-write (written) 725 799 */ 726 - if (prexor) { 800 + if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 801 + prexor = 1; 727 802 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 728 803 for (i = disks; i--; ) { 729 804 struct r5dev *dev = &sh->dev[i]; ··· 740 813 } 741 814 } 742 815 743 - /* check whether this postxor is part of a write */ 744 - callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ? 
745 - ops_complete_write : ops_complete_postxor; 746 - 747 816 /* 1/ if we prexor'd then the dest is reused as a source 748 817 * 2/ if we did not prexor then we are redoing the parity 749 818 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST ··· 753 830 if (unlikely(count == 1)) { 754 831 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 755 832 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 756 - flags, tx, callback, sh); 833 + flags, tx, ops_complete_postxor, sh); 757 834 } else 758 835 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 759 - flags, tx, callback, sh); 836 + flags, tx, ops_complete_postxor, sh); 760 837 } 761 838 762 839 static void ops_complete_check(void *stripe_head_ref) 763 840 { 764 841 struct stripe_head *sh = stripe_head_ref; 765 - int pd_idx = sh->pd_idx; 766 842 767 843 pr_debug("%s: stripe %llu\n", __func__, 768 844 (unsigned long long)sh->sector); 769 845 770 - if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && 771 - sh->ops.zero_sum_result == 0) 772 - set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 773 - 774 - set_bit(STRIPE_OP_CHECK, &sh->ops.complete); 846 + sh->check_state = check_state_check_result; 775 847 set_bit(STRIPE_HANDLE, &sh->state); 776 848 release_stripe(sh); 777 849 } ··· 793 875 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 794 876 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 795 877 796 - if (tx) 797 - set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 798 - else 799 - clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 800 - 801 878 atomic_inc(&sh->count); 802 879 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 803 880 ops_complete_check, sh); 804 881 } 805 882 806 - static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) 883 + static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) 807 884 { 808 885 int overlap_clear = 0, i, disks = sh->disks; 809 886 struct dma_async_tx_descriptor *tx = NULL; 810 887 811 - if (test_bit(STRIPE_OP_BIOFILL, &pending)) { 888 + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 812 889 ops_run_biofill(sh); 813 890 overlap_clear++; 814 891 } 815 892 816 - if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending)) 817 - tx = ops_run_compute5(sh, pending); 893 + if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 894 + tx = ops_run_compute5(sh); 895 + /* terminate the chain if postxor is not set to be run */ 896 + if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) 897 + async_tx_ack(tx); 898 + } 818 899 819 - if (test_bit(STRIPE_OP_PREXOR, &pending)) 900 + if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 820 901 tx = ops_run_prexor(sh, tx); 821 902 822 - if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 823 - tx = ops_run_biodrain(sh, tx, pending); 903 + if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 904 + tx = ops_run_biodrain(sh, tx); 824 905 overlap_clear++; 825 906 } 826 907 827 - if (test_bit(STRIPE_OP_POSTXOR, &pending)) 828 - ops_run_postxor(sh, tx, pending); 908 + if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) 909 + ops_run_postxor(sh, tx); 829 910 830 - if (test_bit(STRIPE_OP_CHECK, &pending)) 911 + if (test_bit(STRIPE_OP_CHECK, &ops_request)) 831 912 ops_run_check(sh); 832 - 833 - if (test_bit(STRIPE_OP_IO, &pending)) 834 - ops_run_io(sh); 835 913 836 914 if (overlap_clear) 837 915 for (i = disks; i--; ) { ··· 911 997 struct stripe_head *osh, *nsh; 912 998 LIST_HEAD(newstripes); 913 999 struct disk_info *ndisks; 914 - int err = 0; 1000 + int err; 915 1001 struct kmem_cache *sc; 916 1002 
int i; 917 1003 918 1004 if (newsize <= conf->pool_size) 919 1005 return 0; /* never bother to shrink */ 920 1006 921 - md_allow_write(conf->mddev); 1007 + err = md_allow_write(conf->mddev); 1008 + if (err) 1009 + return err; 922 1010 923 1011 /* Step 1 */ 924 1012 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], ··· 1619 1703 } 1620 1704 } 1621 1705 1622 - static int 1623 - handle_write_operations5(struct stripe_head *sh, int rcw, int expand) 1706 + static void 1707 + schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, 1708 + int rcw, int expand) 1624 1709 { 1625 1710 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1626 - int locked = 0; 1627 1711 1628 1712 if (rcw) { 1629 1713 /* if we are not expanding this is a proper write request, and ··· 1631 1715 * stripe cache 1632 1716 */ 1633 1717 if (!expand) { 1634 - set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1635 - sh->ops.count++; 1636 - } 1718 + sh->reconstruct_state = reconstruct_state_drain_run; 1719 + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1720 + } else 1721 + sh->reconstruct_state = reconstruct_state_run; 1637 1722 1638 - set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1639 - sh->ops.count++; 1723 + set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1640 1724 1641 1725 for (i = disks; i--; ) { 1642 1726 struct r5dev *dev = &sh->dev[i]; 1643 1727 1644 1728 if (dev->towrite) { 1645 1729 set_bit(R5_LOCKED, &dev->flags); 1730 + set_bit(R5_Wantdrain, &dev->flags); 1646 1731 if (!expand) 1647 1732 clear_bit(R5_UPTODATE, &dev->flags); 1648 - locked++; 1733 + s->locked++; 1649 1734 } 1650 1735 } 1651 - if (locked + 1 == disks) 1736 + if (s->locked + 1 == disks) 1652 1737 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1653 1738 atomic_inc(&sh->raid_conf->pending_full_writes); 1654 1739 } else { 1655 1740 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1656 1741 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1657 1742 1658 - set_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 1659 - set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1660 - set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1661 - 1662 - sh->ops.count += 3; 1743 + sh->reconstruct_state = reconstruct_state_prexor_drain_run; 1744 + set_bit(STRIPE_OP_PREXOR, &s->ops_request); 1745 + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 1746 + set_bit(STRIPE_OP_POSTXOR, &s->ops_request); 1663 1747 1664 1748 for (i = disks; i--; ) { 1665 1749 struct r5dev *dev = &sh->dev[i]; 1666 1750 if (i == pd_idx) 1667 1751 continue; 1668 1752 1669 - /* For a read-modify write there may be blocks that are 1670 - * locked for reading while others are ready to be 1671 - * written so we distinguish these blocks by the 1672 - * R5_Wantprexor bit 1673 - */ 1674 1753 if (dev->towrite && 1675 1754 (test_bit(R5_UPTODATE, &dev->flags) || 1676 - test_bit(R5_Wantcompute, &dev->flags))) { 1677 - set_bit(R5_Wantprexor, &dev->flags); 1755 + test_bit(R5_Wantcompute, &dev->flags))) { 1756 + set_bit(R5_Wantdrain, &dev->flags); 1678 1757 set_bit(R5_LOCKED, &dev->flags); 1679 1758 clear_bit(R5_UPTODATE, &dev->flags); 1680 - locked++; 1759 + s->locked++; 1681 1760 } 1682 1761 } 1683 1762 } ··· 1682 1771 */ 1683 1772 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1684 1773 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1685 - locked++; 1774 + s->locked++; 1686 1775 1687 - pr_debug("%s: stripe %llu locked: %d pending: %lx\n", 1776 + pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 1688 1777 __func__, (unsigned long long)sh->sector, 1689 - locked, sh->ops.pending); 1690 - 
1691 - return locked; 1778 + s->locked, s->ops_request); 1692 1779 } 1693 1780 1694 1781 /* ··· 1785 1876 } 1786 1877 1787 1878 static void 1788 - handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh, 1879 + handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, 1789 1880 struct stripe_head_state *s, int disks, 1790 1881 struct bio **return_bi) 1791 1882 { ··· 1876 1967 md_wakeup_thread(conf->mddev->thread); 1877 1968 } 1878 1969 1879 - /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks 1880 - * to process 1970 + /* fetch_block5 - checks the given member device to see if its data needs 1971 + * to be read or computed to satisfy a request. 1972 + * 1973 + * Returns 1 when no more member devices need to be checked, otherwise returns 1974 + * 0 to tell the loop in handle_stripe_fill5 to continue 1881 1975 */ 1882 - static int __handle_issuing_new_read_requests5(struct stripe_head *sh, 1883 - struct stripe_head_state *s, int disk_idx, int disks) 1976 + static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, 1977 + int disk_idx, int disks) 1884 1978 { 1885 1979 struct r5dev *dev = &sh->dev[disk_idx]; 1886 1980 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 1887 1981 1888 - /* don't schedule compute operations or reads on the parity block while 1889 - * a check is in flight 1890 - */ 1891 - if ((disk_idx == sh->pd_idx) && 1892 - test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 1893 - return ~0; 1894 - 1895 1982 /* is the data in this block needed, and can we get it? */ 1896 1983 if (!test_bit(R5_LOCKED, &dev->flags) && 1897 - !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || 1898 - (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1899 - s->syncing || s->expanding || (s->failed && 1900 - (failed_dev->toread || (failed_dev->towrite && 1901 - !test_bit(R5_OVERWRITE, &failed_dev->flags) 1902 - ))))) { 1903 - /* 1/ We would like to get this block, possibly by computing it, 1904 - * but we might not be able to. 1905 - * 1906 - * 2/ Since parity check operations potentially make the parity 1907 - * block !uptodate it will need to be refreshed before any 1908 - * compute operations on data disks are scheduled. 1909 - * 1910 - * 3/ We hold off parity block re-reads until check operations 1911 - * have quiesced. 1984 + !test_bit(R5_UPTODATE, &dev->flags) && 1985 + (dev->toread || 1986 + (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1987 + s->syncing || s->expanding || 1988 + (s->failed && 1989 + (failed_dev->toread || 1990 + (failed_dev->towrite && 1991 + !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { 1992 + /* We would like to get this block, possibly by computing it, 1993 + * otherwise read it if the backing disk is insync 1912 1994 */ 1913 1995 if ((s->uptodate == disks - 1) && 1914 - (s->failed && disk_idx == s->failed_num) && 1915 - !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 1916 - set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1996 + (s->failed && disk_idx == s->failed_num)) { 1997 + set_bit(STRIPE_COMPUTE_RUN, &sh->state); 1998 + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 1917 1999 set_bit(R5_Wantcompute, &dev->flags); 1918 2000 sh->ops.target = disk_idx; 1919 2001 s->req_compute = 1; 1920 - sh->ops.count++; 1921 2002 /* Careful: from this point on 'uptodate' is in the eye 1922 2003 * of raid5_run_ops which services 'compute' operations 1923 2004 * before writes. R5_Wantcompute flags a block that will ··· 1915 2016 * subsequent operation. 
1916 2017 */ 1917 2018 s->uptodate++; 1918 - return 0; /* uptodate + compute == disks */ 2019 + return 1; /* uptodate + compute == disks */ 1919 2020 } else if (test_bit(R5_Insync, &dev->flags)) { 1920 2021 set_bit(R5_LOCKED, &dev->flags); 1921 2022 set_bit(R5_Wantread, &dev->flags); 1922 - if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 1923 - sh->ops.count++; 1924 2023 s->locked++; 1925 2024 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 1926 2025 s->syncing); 1927 2026 } 1928 2027 } 1929 2028 1930 - return ~0; 2029 + return 0; 1931 2030 } 1932 2031 1933 - static void handle_issuing_new_read_requests5(struct stripe_head *sh, 2032 + /** 2033 + * handle_stripe_fill5 - read or compute data to satisfy pending requests. 2034 + */ 2035 + static void handle_stripe_fill5(struct stripe_head *sh, 1934 2036 struct stripe_head_state *s, int disks) 1935 2037 { 1936 2038 int i; 1937 - 1938 - /* Clear completed compute operations. Parity recovery 1939 - * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled 1940 - * later on in this routine 1941 - */ 1942 - if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 1943 - !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 1944 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 1945 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 1946 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1947 - } 1948 2039 1949 2040 /* look for blocks to read/compute, skip this if a compute 1950 2041 * is already in flight, or if the stripe contents are in the 1951 2042 * midst of changing due to a write 1952 2043 */ 1953 - if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 1954 - !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) && 1955 - !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2044 + if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2045 + !sh->reconstruct_state) 1956 2046 for (i = disks; i--; ) 1957 - if (__handle_issuing_new_read_requests5( 1958 - sh, s, i, disks) == 0) 2047 + if (fetch_block5(sh, s, i, disks)) 1959 2048 break; 1960 - } 1961 2049 set_bit(STRIPE_HANDLE, &sh->state); 1962 2050 } 1963 2051 1964 - static void handle_issuing_new_read_requests6(struct stripe_head *sh, 2052 + static void handle_stripe_fill6(struct stripe_head *sh, 1965 2053 struct stripe_head_state *s, struct r6_state *r6s, 1966 2054 int disks) 1967 2055 { ··· 2007 2121 } 2008 2122 2009 2123 2010 - /* handle_completed_write_requests 2124 + /* handle_stripe_clean_event 2011 2125 * any written block on an uptodate or failed drive can be returned. 2012 2126 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2013 2127 * never LOCKED, so we don't need to test 'failed' directly. 
2014 2128 */ 2015 - static void handle_completed_write_requests(raid5_conf_t *conf, 2129 + static void handle_stripe_clean_event(raid5_conf_t *conf, 2016 2130 struct stripe_head *sh, int disks, struct bio **return_bi) 2017 2131 { 2018 2132 int i; ··· 2057 2171 md_wakeup_thread(conf->mddev->thread); 2058 2172 } 2059 2173 2060 - static void handle_issuing_new_write_requests5(raid5_conf_t *conf, 2174 + static void handle_stripe_dirtying5(raid5_conf_t *conf, 2061 2175 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2062 2176 { 2063 2177 int rmw = 0, rcw = 0, i; ··· 2101 2215 "%d for r-m-w\n", i); 2102 2216 set_bit(R5_LOCKED, &dev->flags); 2103 2217 set_bit(R5_Wantread, &dev->flags); 2104 - if (!test_and_set_bit( 2105 - STRIPE_OP_IO, &sh->ops.pending)) 2106 - sh->ops.count++; 2107 2218 s->locked++; 2108 2219 } else { 2109 2220 set_bit(STRIPE_DELAYED, &sh->state); ··· 2124 2241 "%d for Reconstruct\n", i); 2125 2242 set_bit(R5_LOCKED, &dev->flags); 2126 2243 set_bit(R5_Wantread, &dev->flags); 2127 - if (!test_and_set_bit( 2128 - STRIPE_OP_IO, &sh->ops.pending)) 2129 - sh->ops.count++; 2130 2244 s->locked++; 2131 2245 } else { 2132 2246 set_bit(STRIPE_DELAYED, &sh->state); ··· 2141 2261 * simultaneously. If this is not the case then new writes need to be 2142 2262 * held off until the compute completes. 2143 2263 */ 2144 - if ((s->req_compute || 2145 - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) && 2146 - (s->locked == 0 && (rcw == 0 || rmw == 0) && 2147 - !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2148 - s->locked += handle_write_operations5(sh, rcw == 0, 0); 2264 + if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2265 + (s->locked == 0 && (rcw == 0 || rmw == 0) && 2266 + !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2267 + schedule_reconstruction5(sh, s, rcw == 0, 0); 2149 2268 } 2150 2269 2151 - static void handle_issuing_new_write_requests6(raid5_conf_t *conf, 2270 + static void handle_stripe_dirtying6(raid5_conf_t *conf, 2152 2271 struct stripe_head *sh, struct stripe_head_state *s, 2153 2272 struct r6_state *r6s, int disks) 2154 2273 { ··· 2250 2371 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2251 2372 struct stripe_head_state *s, int disks) 2252 2373 { 2253 - int canceled_check = 0; 2374 + struct r5dev *dev = NULL; 2254 2375 2255 2376 set_bit(STRIPE_HANDLE, &sh->state); 2256 2377 2257 - /* complete a check operation */ 2258 - if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) { 2259 - clear_bit(STRIPE_OP_CHECK, &sh->ops.ack); 2260 - clear_bit(STRIPE_OP_CHECK, &sh->ops.pending); 2378 + switch (sh->check_state) { 2379 + case check_state_idle: 2380 + /* start a new check operation if there are no failures */ 2261 2381 if (s->failed == 0) { 2262 - if (sh->ops.zero_sum_result == 0) 2263 - /* parity is correct (on disc, 2264 - * not in buffer any more) 2265 - */ 2266 - set_bit(STRIPE_INSYNC, &sh->state); 2267 - else { 2268 - conf->mddev->resync_mismatches += 2269 - STRIPE_SECTORS; 2270 - if (test_bit( 2271 - MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2272 - /* don't try to repair!! 
*/ 2273 - set_bit(STRIPE_INSYNC, &sh->state); 2274 - else { 2275 - set_bit(STRIPE_OP_COMPUTE_BLK, 2276 - &sh->ops.pending); 2277 - set_bit(STRIPE_OP_MOD_REPAIR_PD, 2278 - &sh->ops.pending); 2279 - set_bit(R5_Wantcompute, 2280 - &sh->dev[sh->pd_idx].flags); 2281 - sh->ops.target = sh->pd_idx; 2282 - sh->ops.count++; 2283 - s->uptodate++; 2284 - } 2285 - } 2286 - } else 2287 - canceled_check = 1; /* STRIPE_INSYNC is not set */ 2288 - } 2289 - 2290 - /* start a new check operation if there are no failures, the stripe is 2291 - * not insync, and a repair is not in flight 2292 - */ 2293 - if (s->failed == 0 && 2294 - !test_bit(STRIPE_INSYNC, &sh->state) && 2295 - !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2296 - if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 2297 2382 BUG_ON(s->uptodate != disks); 2383 + sh->check_state = check_state_run; 2384 + set_bit(STRIPE_OP_CHECK, &s->ops_request); 2298 2385 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2299 - sh->ops.count++; 2300 2386 s->uptodate--; 2387 + break; 2301 2388 } 2302 - } 2303 - 2304 - /* check if we can clear a parity disk reconstruct */ 2305 - if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2306 - test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2307 - 2308 - clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending); 2309 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2310 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2311 - clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2312 - } 2313 - 2314 - 2315 - /* Wait for check parity and compute block operations to complete 2316 - * before write-back. If a failure occurred while the check operation 2317 - * was in flight we need to cycle this stripe through handle_stripe 2318 - * since the parity block may not be uptodate 2319 - */ 2320 - if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) && 2321 - !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) && 2322 - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) { 2323 - struct r5dev *dev; 2324 - /* either failed parity check, or recovery is happening */ 2325 - if (s->failed == 0) 2326 - s->failed_num = sh->pd_idx; 2327 2389 dev = &sh->dev[s->failed_num]; 2390 + /* fall through */ 2391 + case check_state_compute_result: 2392 + sh->check_state = check_state_idle; 2393 + if (!dev) 2394 + dev = &sh->dev[sh->pd_idx]; 2395 + 2396 + /* check that a write has not made the stripe insync */ 2397 + if (test_bit(STRIPE_INSYNC, &sh->state)) 2398 + break; 2399 + 2400 + /* either failed parity check, or recovery is happening */ 2328 2401 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2329 2402 BUG_ON(s->uptodate != disks); 2330 2403 2331 2404 set_bit(R5_LOCKED, &dev->flags); 2405 + s->locked++; 2332 2406 set_bit(R5_Wantwrite, &dev->flags); 2333 - if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2334 - sh->ops.count++; 2335 2407 2336 2408 clear_bit(STRIPE_DEGRADED, &sh->state); 2337 - s->locked++; 2338 2409 set_bit(STRIPE_INSYNC, &sh->state); 2410 + break; 2411 + case check_state_run: 2412 + break; /* we will be called again upon completion */ 2413 + case check_state_check_result: 2414 + sh->check_state = check_state_idle; 2415 + 2416 + /* if a failure occurred during the check operation, leave 2417 + * STRIPE_INSYNC not set and let the stripe be handled again 2418 + */ 2419 + if (s->failed) 2420 + break; 2421 + 2422 + /* handle a successful check operation, if parity is correct 2423 + * we are done. 
Otherwise update the mismatch count and repair 2424 + * parity if !MD_RECOVERY_CHECK 2425 + */ 2426 + if (sh->ops.zero_sum_result == 0) 2427 + /* parity is correct (on disc, 2428 + * not in buffer any more) 2429 + */ 2430 + set_bit(STRIPE_INSYNC, &sh->state); 2431 + else { 2432 + conf->mddev->resync_mismatches += STRIPE_SECTORS; 2433 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2434 + /* don't try to repair!! */ 2435 + set_bit(STRIPE_INSYNC, &sh->state); 2436 + else { 2437 + sh->check_state = check_state_compute_run; 2438 + set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2439 + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2440 + set_bit(R5_Wantcompute, 2441 + &sh->dev[sh->pd_idx].flags); 2442 + sh->ops.target = sh->pd_idx; 2443 + s->uptodate++; 2444 + } 2445 + } 2446 + break; 2447 + case check_state_compute_run: 2448 + break; 2449 + default: 2450 + printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2451 + __func__, sh->check_state, 2452 + (unsigned long long) sh->sector); 2453 + BUG(); 2339 2454 } 2340 2455 } 2341 2456 ··· 2514 2641 struct bio *return_bi = NULL; 2515 2642 struct stripe_head_state s; 2516 2643 struct r5dev *dev; 2517 - unsigned long pending = 0; 2518 2644 mdk_rdev_t *blocked_rdev = NULL; 2519 2645 int prexor; 2520 2646 2521 2647 memset(&s, 0, sizeof(s)); 2522 - pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2523 - "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state, 2524 - atomic_read(&sh->count), sh->pd_idx, 2525 - sh->ops.pending, sh->ops.ack, sh->ops.complete); 2648 + pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " 2649 + "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, 2650 + atomic_read(&sh->count), sh->pd_idx, sh->check_state, 2651 + sh->reconstruct_state); 2526 2652 2527 2653 spin_lock(&sh->lock); 2528 2654 clear_bit(STRIPE_HANDLE, &sh->state); ··· 2530 2658 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2531 2659 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2532 2660 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2661 + 2533 2662 /* Now to look around and see what can be done */ 2534 - 2535 - /* clean-up completed biofill operations */ 2536 - if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) { 2537 - clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending); 2538 - clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 2539 - clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 2540 - } 2541 - 2542 2663 rcu_read_lock(); 2543 2664 for (i=disks; i--; ) { 2544 2665 mdk_rdev_t *rdev; ··· 2545 2680 /* maybe we can request a biofill operation 2546 2681 * 2547 2682 * new wantfill requests are only permitted while 2548 - * STRIPE_OP_BIOFILL is clear 2683 + * ops_complete_biofill is guaranteed to be inactive 2549 2684 */ 2550 2685 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2551 - !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2686 + !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 2552 2687 set_bit(R5_Wantfill, &dev->flags); 2553 2688 2554 2689 /* now count some things */ ··· 2592 2727 goto unlock; 2593 2728 } 2594 2729 2595 - if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2596 - sh->ops.count++; 2730 + if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2731 + set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 2732 + set_bit(STRIPE_BIOFILL_RUN, &sh->state); 2733 + } 2597 2734 2598 2735 pr_debug("locked=%d uptodate=%d to_read=%d" 2599 2736 " to_write=%d failed=%d failed_num=%d\n", ··· 2605 2738 * need to be failed 2606 2739 */ 2607 2740 if (s.failed > 
1 && s.to_read+s.to_write+s.written) 2608 - handle_requests_to_failed_array(conf, sh, &s, disks, 2609 - &return_bi); 2741 + handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2610 2742 if (s.failed > 1 && s.syncing) { 2611 2743 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2612 2744 clear_bit(STRIPE_SYNCING, &sh->state); ··· 2621 2755 !test_bit(R5_LOCKED, &dev->flags) && 2622 2756 test_bit(R5_UPTODATE, &dev->flags)) || 2623 2757 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2624 - handle_completed_write_requests(conf, sh, disks, &return_bi); 2758 + handle_stripe_clean_event(conf, sh, disks, &return_bi); 2625 2759 2626 2760 /* Now we might consider reading some blocks, either to check/generate 2627 2761 * parity, or to satisfy requests 2628 2762 * or to load a block that is being partially written. 2629 2763 */ 2630 2764 if (s.to_read || s.non_overwrite || 2631 - (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding || 2632 - test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2633 - handle_issuing_new_read_requests5(sh, &s, disks); 2765 + (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 2766 + handle_stripe_fill5(sh, &s, disks); 2634 2767 2635 2768 /* Now we check to see if any write operations have recently 2636 2769 * completed 2637 2770 */ 2638 - 2639 - /* leave prexor set until postxor is done, allows us to distinguish 2640 - * a rmw from a rcw during biodrain 2641 - */ 2642 2771 prexor = 0; 2643 - if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) && 2644 - test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2645 - 2772 + if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 2646 2773 prexor = 1; 2647 - clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 2648 - clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack); 2649 - clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 2650 - 2651 - for (i = disks; i--; ) 2652 - clear_bit(R5_Wantprexor, &sh->dev[i].flags); 2653 - } 2654 - 2655 - /* if only POSTXOR is set then this is an 'expand' postxor */ 2656 - if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) && 2657 - test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2658 - 2659 - clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 2660 - clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack); 2661 - clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 2662 - 2663 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2664 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2665 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2774 + if (sh->reconstruct_state == reconstruct_state_drain_result || 2775 + sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 2776 + sh->reconstruct_state = reconstruct_state_idle; 2666 2777 2667 2778 /* All the 'written' buffers and the parity block are ready to 2668 2779 * be written back to disk ··· 2651 2808 (i == sh->pd_idx || dev->written)) { 2652 2809 pr_debug("Writing block %d\n", i); 2653 2810 set_bit(R5_Wantwrite, &dev->flags); 2654 - if (!test_and_set_bit( 2655 - STRIPE_OP_IO, &sh->ops.pending)) 2656 - sh->ops.count++; 2657 2811 if (prexor) 2658 2812 continue; 2659 2813 if (!test_bit(R5_Insync, &dev->flags) || ··· 2672 2832 * 2/ A 'check' operation is in flight, as it may clobber the parity 2673 2833 * block. 
2674 2834 */ 2675 - if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) && 2676 - !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 2677 - handle_issuing_new_write_requests5(conf, sh, &s, disks); 2835 + if (s.to_write && !sh->reconstruct_state && !sh->check_state) 2836 + handle_stripe_dirtying5(conf, sh, &s, disks); 2678 2837 2679 2838 /* maybe we need to check and possibly fix the parity for this stripe 2680 2839 * Any reads will already have been scheduled, so we just see if enough 2681 2840 * data is available. The parity check is held off while parity 2682 2841 * dependent operations are in flight. 2683 2842 */ 2684 - if ((s.syncing && s.locked == 0 && 2685 - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2686 - !test_bit(STRIPE_INSYNC, &sh->state)) || 2687 - test_bit(STRIPE_OP_CHECK, &sh->ops.pending) || 2688 - test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) 2843 + if (sh->check_state || 2844 + (s.syncing && s.locked == 0 && 2845 + !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 2846 + !test_bit(STRIPE_INSYNC, &sh->state))) 2689 2847 handle_parity_checks5(conf, sh, &s, disks); 2690 2848 2691 2849 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { ··· 2702 2864 dev = &sh->dev[s.failed_num]; 2703 2865 if (!test_bit(R5_ReWrite, &dev->flags)) { 2704 2866 set_bit(R5_Wantwrite, &dev->flags); 2705 - if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2706 - sh->ops.count++; 2707 2867 set_bit(R5_ReWrite, &dev->flags); 2708 2868 set_bit(R5_LOCKED, &dev->flags); 2709 2869 s.locked++; 2710 2870 } else { 2711 2871 /* let's read it back */ 2712 2872 set_bit(R5_Wantread, &dev->flags); 2713 - if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2714 - sh->ops.count++; 2715 2873 set_bit(R5_LOCKED, &dev->flags); 2716 2874 s.locked++; 2717 2875 } 2718 2876 } 2719 2877 2720 - /* Finish postxor operations initiated by the expansion 2721 - * process 2722 - */ 2723 - if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) && 2724 - !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) { 2725 - 2878 + /* Finish reconstruct operations initiated by the expansion process */ 2879 + if (sh->reconstruct_state == reconstruct_state_result) { 2880 + sh->reconstruct_state = reconstruct_state_idle; 2726 2881 clear_bit(STRIPE_EXPANDING, &sh->state); 2727 - 2728 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2729 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2730 - clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2731 - 2732 - for (i = conf->raid_disks; i--; ) { 2882 + for (i = conf->raid_disks; i--; ) 2733 2883 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2734 2884 set_bit(R5_LOCKED, &dev->flags); 2735 2885 s.locked++; 2736 - if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2737 - sh->ops.count++; 2738 - } 2739 2886 } 2740 2887 2741 2888 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2742 - !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2889 + !sh->reconstruct_state) { 2743 2890 /* Need to write out all blocks after computing parity */ 2744 2891 sh->disks = conf->raid_disks; 2745 2892 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2746 2893 conf->raid_disks); 2747 - s.locked += handle_write_operations5(sh, 1, 1); 2748 - } else if (s.expanded && 2749 - s.locked == 0 && 2750 - !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2894 + schedule_reconstruction5(sh, &s, 1, 1); 2895 + } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 2751 2896 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2752 2897 atomic_dec(&conf->reshape_stripes); 2753 2898 
wake_up(&conf->wait_for_overlap); ··· 2738 2917 } 2739 2918 2740 2919 if (s.expanding && s.locked == 0 && 2741 - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2920 + !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2742 2921 handle_stripe_expansion(conf, sh, NULL); 2743 - 2744 - if (sh->ops.count) 2745 - pending = get_stripe_work(sh); 2746 2922 2747 2923 unlock: 2748 2924 spin_unlock(&sh->lock); ··· 2748 2930 if (unlikely(blocked_rdev)) 2749 2931 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2750 2932 2751 - if (pending) 2752 - raid5_run_ops(sh, pending); 2933 + if (s.ops_request) 2934 + raid5_run_ops(sh, s.ops_request); 2935 + 2936 + ops_run_io(sh, &s); 2753 2937 2754 2938 return_io(return_bi); 2755 - 2756 2939 } 2757 2940 2758 2941 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) ··· 2861 3042 * might need to be failed 2862 3043 */ 2863 3044 if (s.failed > 2 && s.to_read+s.to_write+s.written) 2864 - handle_requests_to_failed_array(conf, sh, &s, disks, 2865 - &return_bi); 3045 + handle_failed_stripe(conf, sh, &s, disks, &return_bi); 2866 3046 if (s.failed > 2 && s.syncing) { 2867 3047 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2868 3048 clear_bit(STRIPE_SYNCING, &sh->state); ··· 2886 3068 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 2887 3069 && !test_bit(R5_LOCKED, &qdev->flags) 2888 3070 && test_bit(R5_UPTODATE, &qdev->flags))))) 2889 - handle_completed_write_requests(conf, sh, disks, &return_bi); 3071 + handle_stripe_clean_event(conf, sh, disks, &return_bi); 2890 3072 2891 3073 /* Now we might consider reading some blocks, either to check/generate 2892 3074 * parity, or to satisfy requests ··· 2894 3076 */ 2895 3077 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 2896 3078 (s.syncing && (s.uptodate < disks)) || s.expanding) 2897 - handle_issuing_new_read_requests6(sh, &s, &r6s, disks); 3079 + handle_stripe_fill6(sh, &s, &r6s, disks); 2898 3080 2899 3081 /* now to consider writing and what else, if anything should be read */ 2900 3082 if (s.to_write) 2901 - handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks); 3083 + handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); 2902 3084 2903 3085 /* maybe we need to check and possibly fix the parity for this stripe 2904 3086 * Any reads will already have been scheduled, so we just see if enough ··· 2954 3136 } 2955 3137 2956 3138 if (s.expanding && s.locked == 0 && 2957 - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 3139 + !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 2958 3140 handle_stripe_expansion(conf, sh, &r6s); 2959 3141 2960 3142 unlock: ··· 2964 3146 if (unlikely(blocked_rdev)) 2965 3147 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2966 3148 3149 + ops_run_io(sh, &s); 3150 + 2967 3151 return_io(return_bi); 2968 - 2969 - for (i=disks; i-- ;) { 2970 - int rw; 2971 - struct bio *bi; 2972 - mdk_rdev_t *rdev; 2973 - if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 2974 - rw = WRITE; 2975 - else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 2976 - rw = READ; 2977 - else 2978 - continue; 2979 - 2980 - set_bit(STRIPE_IO_STARTED, &sh->state); 2981 - 2982 - bi = &sh->dev[i].req; 2983 - 2984 - bi->bi_rw = rw; 2985 - if (rw == WRITE) 2986 - bi->bi_end_io = raid5_end_write_request; 2987 - else 2988 - bi->bi_end_io = raid5_end_read_request; 2989 - 2990 - rcu_read_lock(); 2991 - rdev = rcu_dereference(conf->disks[i].rdev); 2992 - if (rdev && test_bit(Faulty, &rdev->flags)) 2993 - rdev = NULL; 2994 - if (rdev) 2995 - atomic_inc(&rdev->nr_pending); 2996 - 
rcu_read_unlock(); 2997 - 2998 - if (rdev) { 2999 - if (s.syncing || s.expanding || s.expanded) 3000 - md_sync_acct(rdev->bdev, STRIPE_SECTORS); 3001 - 3002 - bi->bi_bdev = rdev->bdev; 3003 - pr_debug("for %llu schedule op %ld on disc %d\n", 3004 - (unsigned long long)sh->sector, bi->bi_rw, i); 3005 - atomic_inc(&sh->count); 3006 - bi->bi_sector = sh->sector + rdev->data_offset; 3007 - bi->bi_flags = 1 << BIO_UPTODATE; 3008 - bi->bi_vcnt = 1; 3009 - bi->bi_max_vecs = 1; 3010 - bi->bi_idx = 0; 3011 - bi->bi_io_vec = &sh->dev[i].vec; 3012 - bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 3013 - bi->bi_io_vec[0].bv_offset = 0; 3014 - bi->bi_size = STRIPE_SIZE; 3015 - bi->bi_next = NULL; 3016 - if (rw == WRITE && 3017 - test_bit(R5_ReWrite, &sh->dev[i].flags)) 3018 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 3019 - generic_make_request(bi); 3020 - } else { 3021 - if (rw == WRITE) 3022 - set_bit(STRIPE_DEGRADED, &sh->state); 3023 - pr_debug("skip op %ld on disc %d for sector %llu\n", 3024 - bi->bi_rw, i, (unsigned long long)sh->sector); 3025 - clear_bit(R5_LOCKED, &sh->dev[i].flags); 3026 - set_bit(STRIPE_HANDLE, &sh->state); 3027 - } 3028 - } 3029 3152 } 3030 3153 3031 3154 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) ··· 3456 3697 if ( rw == WRITE ) 3457 3698 md_write_end(mddev); 3458 3699 3459 - bi->bi_end_io(bi, 3460 - test_bit(BIO_UPTODATE, &bi->bi_flags) 3461 - ? 0 : -EIO); 3700 + bio_endio(bi, 0); 3462 3701 } 3463 3702 return 0; 3464 3703 } ··· 3542 3785 j == raid6_next_disk(sh->pd_idx, sh->disks)) 3543 3786 continue; 3544 3787 s = compute_blocknr(sh, j); 3545 - if (s < (mddev->array_size<<1)) { 3788 + if (s < mddev->array_sectors) { 3546 3789 skipped = 1; 3547 3790 continue; 3548 3791 } ··· 3759 4002 spin_lock_irq(&conf->device_lock); 3760 4003 remaining = --raid_bio->bi_phys_segments; 3761 4004 spin_unlock_irq(&conf->device_lock); 3762 - if (remaining == 0) { 3763 - 3764 - raid_bio->bi_end_io(raid_bio, 3765 - test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3766 - ? 
0 : -EIO); 3767 - } 4005 + if (remaining == 0) 4006 + bio_endio(raid_bio, 0); 3768 4007 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3769 4008 wake_up(&conf->wait_for_stripe); 3770 4009 return handled; ··· 3847 4094 { 3848 4095 raid5_conf_t *conf = mddev_to_conf(mddev); 3849 4096 unsigned long new; 4097 + int err; 4098 + 3850 4099 if (len >= PAGE_SIZE) 3851 4100 return -EINVAL; 3852 4101 if (!conf) ··· 3864 4109 else 3865 4110 break; 3866 4111 } 3867 - md_allow_write(mddev); 4112 + err = md_allow_write(mddev); 4113 + if (err) 4114 + return err; 3868 4115 while (new > conf->max_nr_stripes) { 3869 4116 if (grow_one_stripe(conf)) 3870 4117 conf->max_nr_stripes++; ··· 4191 4434 mddev->queue->backing_dev_info.congested_data = mddev; 4192 4435 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4193 4436 4194 - mddev->array_size = mddev->size * (conf->previous_raid_disks - 4437 + mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks - 4195 4438 conf->max_degraded); 4196 4439 4197 4440 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); ··· 4366 4609 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4367 4610 { 4368 4611 raid5_conf_t *conf = mddev->private; 4369 - int found = 0; 4612 + int err = -EEXIST; 4370 4613 int disk; 4371 4614 struct disk_info *p; 4615 + int first = 0; 4616 + int last = conf->raid_disks - 1; 4372 4617 4373 4618 if (mddev->degraded > conf->max_degraded) 4374 4619 /* no point adding a device */ 4375 - return 0; 4620 + return -EINVAL; 4621 + 4622 + if (rdev->raid_disk >= 0) 4623 + first = last = rdev->raid_disk; 4376 4624 4377 4625 /* 4378 4626 * find the disk ... but prefer rdev->saved_raid_disk 4379 4627 * if possible. 4380 4628 */ 4381 4629 if (rdev->saved_raid_disk >= 0 && 4630 + rdev->saved_raid_disk >= first && 4382 4631 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4383 4632 disk = rdev->saved_raid_disk; 4384 4633 else 4385 - disk = 0; 4386 - for ( ; disk < conf->raid_disks; disk++) 4634 + disk = first; 4635 + for ( ; disk <= last ; disk++) 4387 4636 if ((p=conf->disks + disk)->rdev == NULL) { 4388 4637 clear_bit(In_sync, &rdev->flags); 4389 4638 rdev->raid_disk = disk; 4390 - found = 1; 4639 + err = 0; 4391 4640 if (rdev->saved_raid_disk != disk) 4392 4641 conf->fullsync = 1; 4393 4642 rcu_assign_pointer(p->rdev, rdev); 4394 4643 break; 4395 4644 } 4396 4645 print_raid5_conf(conf); 4397 - return found; 4646 + return err; 4398 4647 } 4399 4648 4400 4649 static int raid5_resize(mddev_t *mddev, sector_t sectors) ··· 4415 4652 raid5_conf_t *conf = mddev_to_conf(mddev); 4416 4653 4417 4654 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4418 - mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 4419 - set_capacity(mddev->gendisk, mddev->array_size << 1); 4655 + mddev->array_sectors = sectors * (mddev->raid_disks 4656 + - conf->max_degraded); 4657 + set_capacity(mddev->gendisk, mddev->array_sectors); 4420 4658 mddev->changed = 1; 4421 4659 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4422 4660 mddev->recovery_cp = mddev->size << 1; ··· 4502 4738 rdev_for_each(rdev, rtmp, mddev) 4503 4739 if (rdev->raid_disk < 0 && 4504 4740 !test_bit(Faulty, &rdev->flags)) { 4505 - if (raid5_add_disk(mddev, rdev)) { 4741 + if (raid5_add_disk(mddev, rdev) == 0) { 4506 4742 char nm[20]; 4507 4743 set_bit(In_sync, &rdev->flags); 4508 4744 added_devices++; ··· 4550 4786 struct block_device *bdev; 4551 4787 4552 4788 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4553 - 
conf->mddev->array_size = conf->mddev->size * 4789 + conf->mddev->array_sectors = 2 * conf->mddev->size * 4554 4790 (conf->raid_disks - conf->max_degraded); 4555 - set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4791 + set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors); 4556 4792 conf->mddev->changed = 1; 4557 4793 4558 4794 bdev = bdget_disk(conf->mddev->gendisk, 0); 4559 4795 if (bdev) { 4560 4796 mutex_lock(&bdev->bd_inode->i_mutex); 4561 - i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10); 4797 + i_size_write(bdev->bd_inode, 4798 + (loff_t)conf->mddev->array_sectors << 9); 4562 4799 mutex_unlock(&bdev->bd_inode->i_mutex); 4563 4800 bdput(bdev); 4564 4801 }
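The raid5.c rework above retires the sh->ops.{pending,ack,complete} bitmask protocol: each handle_stripe pass now collects work in a local s->ops_request mask for raid5_run_ops(), issues I/O directly via ops_run_io(), and tracks long-running operations in two small per-stripe state machines, sh->check_state and sh->reconstruct_state. The states, reconstructed here from their uses in the diff (they are declared in this series' raid5 header), for orientation:

	/* parity-check flow, driven by handle_parity_checks5() */
	enum check_states {
		check_state_idle = 0,
		check_state_run,		/* xor zero-sum check in flight */
		check_state_check_result,
		check_state_compute_run,	/* parity repair in flight */
		check_state_compute_result,
	};

	/* write-out flow, driven by schedule_reconstruction5() */
	enum reconstruct_states {
		reconstruct_state_idle = 0,
		reconstruct_state_prexor_drain_run,	/* read-modify-write */
		reconstruct_state_drain_run,		/* reconstruct-write */
		reconstruct_state_run,			/* expand */
		reconstruct_state_prexor_drain_result,
		reconstruct_state_drain_result,
		reconstruct_state_result,
	};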
+1
include/linux/raid/bitmap.h
··· 221 221 unsigned long syncchunk; 222 222 223 223 __u64 events_cleared; 224 + int need_sync; 224 225 225 226 /* bitmap spinlock */ 226 227 spinlock_t lock;
+1 -1
include/linux/raid/linear.h
··· 16 16 struct linear_private_data *prev; /* earlier version */ 17 17 dev_info_t **hash_table; 18 18 sector_t hash_spacing; 19 - sector_t array_size; 19 + sector_t array_sectors; 20 20 int preshift; /* shift before dividing by hash_spacing */ 21 21 dev_info_t disks[0]; 22 22 };
+1 -1
include/linux/raid/md.h
··· 95 95 struct page *page, int rw); 96 96 extern void md_do_sync(mddev_t *mddev); 97 97 extern void md_new_event(mddev_t *mddev); 98 - extern void md_allow_write(mddev_t *mddev); 98 + extern int md_allow_write(mddev_t *mddev); 99 99 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); 100 100 101 101 #endif /* CONFIG_MD */
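
Since md_allow_write() now returns int instead of void, call sites must check for failure, as the raid5 stripe_cache_size hunk above already does. A hedged sketch of the new calling convention; the -EAGAIN value for the externally-managed-metadata case is an assumption drawn from the surrounding series, not something this hunk shows:

	/* Sketch only -- mirrors the caller pattern in the raid5 hunk above. */
	static int prepare_for_allocation(mddev_t *mddev)
	{
		int err = md_allow_write(mddev);	/* may fail now, e.g. -EAGAIN
							 * (assumed) when metadata is
							 * externally managed */
		if (err)
			return err;
		/* array is marked writable; safe to do allocations that may
		 * write back to the array under memory pressure */
		return 0;
	}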
+14 -3
include/linux/raid/md_k.h
··· 59 59 	int sb_loaded;
 60 60 	__u64 sb_events;
 61 61 	sector_t data_offset;	/* start of data in array */
 62 -	sector_t sb_offset;
 62 +	sector_t sb_start;	/* offset of the super block (in 512-byte sectors) */
 63 63 	int sb_size;		/* bytes in the superblock */
 64 64 	int preferred_minor;	/* autorun support */
 ··· 87 87 #define Blocked 8	/* An error occurred on an externally
 88 88 			 * managed array, don't allow writes
 89 89 			 * until it is cleared */
 90 +	#define StateChanged 9	/* Faulty or Blocked has changed during
 91 +			 * interrupt, so it needs to be
 92 +			 * notified by the thread */
 90 93 	wait_queue_head_t blocked_wait;
 91 94 
 92 95 	int desc_nr;	/* descriptor index in the superblock */
 ··· 150 147 	int raid_disks;
 151 148 	int max_disks;
 152 149 	sector_t size;		/* used size of component devices */
 153 -	sector_t array_size;	/* exported array size */
 150 +	sector_t array_sectors;	/* exported array size */
 154 151 	__u64 events;
 155 152 
 156 153 	char uuid[16];
 ··· 191 188 	* NEEDED: we might need to start a resync/recover
 192 189 	* RUNNING: a thread is running, or about to be started
 193 190 	* SYNC: actually doing a resync, not a recovery
 191 +	* RECOVER: doing recovery, or need to try it.
 194 192 	* INTR: resync needs to be aborted for some reason
 195 193 	* DONE: thread is done and is waiting to be reaped
 196 194 	* REQUEST: user-space has requested a sync (used with SYNC)
 ··· 202 198 	*/
 203 199 #define MD_RECOVERY_RUNNING 0
 204 200 #define MD_RECOVERY_SYNC 1
 201 + #define MD_RECOVERY_RECOVER 2
 205 202 #define MD_RECOVERY_INTR 3
 206 203 #define MD_RECOVERY_DONE 4
 207 204 #define MD_RECOVERY_NEEDED 5
 ··· 215 210 
 216 211 	int in_sync;	/* known to not need resync */
 217 212 	struct mutex reconfig_mutex;
 218 -	atomic_t active;
 213 +	atomic_t active;	/* general refcount */
 214 +	atomic_t openers;	/* number of active opens */
 219 215 
 220 216 	int changed;	/* true if we might need to reread partition info */
 221 217 	int degraded;	/* whether md should consider
 ··· 233 227 	atomic_t recovery_active;	/* blocks scheduled, but not written */
 234 228 	wait_queue_head_t recovery_wait;
 235 229 	sector_t recovery_cp;
 230 +	sector_t resync_min;	/* user requested sync
 231 +				 * starts here */
 236 232 	sector_t resync_max;	/* resync should pause
 237 233 				 * when it gets here */
 ··· 338 330 	*/
 339 331 #define rdev_for_each(rdev, tmp, mddev) \
 340 332 	rdev_for_each_list(rdev, tmp, (mddev)->disks)
 333 + 
 334 + #define rdev_for_each_rcu(rdev, mddev) \
 335 +	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
 341 336 
 342 337 typedef struct mdk_thread_s {
 343 338 	void (*run) (mddev_t *mddev);
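
The new rdev_for_each_rcu() iterator backs the "Protect access to mddev->disks list using RCU" change in this merge: readers can now walk the disk list under rcu_read_lock() instead of taking reconfig_mutex. A hedged usage sketch; the counting loop is illustrative, not taken from this commit:

	mdk_rdev_t *rdev;
	int working = 0;

	rcu_read_lock();		/* list membership is stable inside
					 * the RCU read-side section */
	rdev_for_each_rcu(rdev, mddev)
		if (!test_bit(Faulty, &rdev->flags))
			working++;
	rcu_read_unlock();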
-3
include/linux/raid/md_p.h
··· 43 43 */ 44 44 #define MD_RESERVED_BYTES (64 * 1024) 45 45 #define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512) 46 - #define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE) 47 46 48 47 #define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS) 49 - #define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS) 50 48 51 49 #define MD_SB_BYTES 4096 52 50 #define MD_SB_WORDS (MD_SB_BYTES / 4) 53 - #define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE) 54 51 #define MD_SB_SECTORS (MD_SB_BYTES / 512) 55 52 56 53 /*
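
With the BLOCK_SIZE-based variants gone, the remaining macros are pure sector arithmetic and easy to check by hand: MD_RESERVED_SECTORS is 64*1024/512 = 128, so MD_NEW_SIZE_SECTORS(x) rounds the device size down to a 128-sector boundary and steps back one reservation, which is where the v0.90 superblock lives (cf. "md: Make calc_dev_sboffset() return a sector count" in the merge log). A worked example:

	MD_NEW_SIZE_SECTORS(x) = (x & ~127) - 128

	x = 1000000 sectors:
		1000000 & ~127 = 999936
		999936 - 128   = 999808   (superblock offset in sectors)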
+44 -20
include/linux/raid/raid5.h
··· 158 158 	* the compute block completes.
 159 159 	*/
 160 160 
 161 + /*
 162 +  * Operations state - intermediate states that are visible outside of sh->lock
 163 +  * In general _idle indicates nothing is running, _run indicates a data
 164 +  * processing operation is active, and _result means the data processing result
 165 +  * is stable and can be acted upon.  For simple operations like biofill and
 166 +  * compute that only have an _idle and _run state, they are indicated with
 167 +  * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 168 +  */
 169 + /**
 170 +  * enum check_states - handles syncing / repairing a stripe
 171 +  * @check_state_idle - check operations are quiesced
 172 +  * @check_state_check_result - set outside lock when check result is valid
 173 +  * @check_state_run - check operation is running
 174 +  * @check_state_compute_run - check failed and we are repairing
 175 +  * @check_state_compute_result - set outside lock when compute result is valid
 176 +  */
 177 + enum check_states {
 178 +	check_state_idle = 0,
 179 +	check_state_run,		/* parity check */
 180 +	check_state_check_result,
 181 +	check_state_compute_run,	/* parity repair */
 182 +	check_state_compute_result,
 183 + };
 184 + 
 185 + /**
 186 +  * enum reconstruct_states - handles writing or expanding a stripe
 187 +  */
 188 + enum reconstruct_states {
 189 +	reconstruct_state_idle = 0,
 190 +	reconstruct_state_prexor_drain_run,	/* prexor-write */
 191 +	reconstruct_state_drain_run,		/* write */
 192 +	reconstruct_state_run,			/* expand */
 193 +	reconstruct_state_prexor_drain_result,
 194 +	reconstruct_state_drain_result,
 195 +	reconstruct_state_result,
 196 + };
 197 + 
 161 198 struct stripe_head {
 162 199 	struct hlist_node hash;
 163 200 	struct list_head lru;	/* inactive_list or handle_list */
 ··· 206 169 	spinlock_t lock;
 207 170 	int bm_seq;	/* sequence number for bitmap flushes */
 208 171 	int disks;	/* disks in stripe */
 172 +	enum check_states check_state;
 173 +	enum reconstruct_states reconstruct_state;
 209 174 	/* stripe_operations
 210 -	 * @pending - pending ops flags (set for request->issue->complete)
 211 -	 * @ack - submitted ops flags (set for issue->complete)
 212 -	 * @complete - completed ops flags (set for complete)
 213 175 	 * @target - STRIPE_OP_COMPUTE_BLK target
 214 -	 * @count - raid5_runs_ops is set to run when this is non-zero
 215 176 	 */
 216 177 	struct stripe_operations {
 217 -		unsigned long pending;
 218 -		unsigned long ack;
 219 -		unsigned long complete;
 220 178 		int target;
 221 -		int count;
 222 179 		u32 zero_sum_result;
 223 180 	} ops;
 224 181 	struct r5dev {
 ··· 233 202 	int locked, uptodate, to_read, to_write, failed, written;
 234 203 	int to_fill, compute, req_compute, non_overwrite;
 235 204 	int failed_num;
 205 +	unsigned long ops_request;
 236 206 };
 237 207 
 238 208 /* r6_state - extra state data only relevant to r6 */
 ··· 260 228 #define R5_Wantfill 12 /* dev->toread contains a bio that needs
 261 229 			* filling
 262 230 			*/
 263 - #define R5_Wantprexor 13 /* distinguish blocks ready for rmw from
 264 -			* other "towrites"
 265 -			*/
 231 + #define R5_Wantdrain 13 /* dev->towrite needs to be drained */
 266 232 /*
 267 233  * Write method
 268 234  */
 ··· 284 254 #define STRIPE_EXPAND_READY 11
 285 255 #define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
 286 256 #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
 257 + #define STRIPE_BIOFILL_RUN 14
 258 + #define STRIPE_COMPUTE_RUN 15
 287 259 /*
 288 -  * Operations flags (in issue order)
 260 +  * Operation request flags
 289 261  */
 290 262 #define STRIPE_OP_BIOFILL 0
 291 263 #define STRIPE_OP_COMPUTE_BLK 1
 ··· 295 263 #define STRIPE_OP_BIODRAIN 3
 296 264 #define STRIPE_OP_POSTXOR 4
 297 265 #define STRIPE_OP_CHECK 5
 298 - #define STRIPE_OP_IO 6
 299 - 
 300 - /* modifiers to the base operations
 301 -  * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back
 302 -  * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check
 303 -  */
 304 - #define STRIPE_OP_MOD_REPAIR_PD 7
 305 - #define STRIPE_OP_MOD_DMA_CHECK 8
 306 267 
 307 268 /*
 308 269  * Plugging:
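
The enums above replace the old ops.pending/ack/complete bitmask triple with an explicit per-stripe state machine, with ops_request in stripe_head_state carrying the work requested from the async-ops path. A hedged sketch of how a parity-check pass might walk check_states; the transitions are paraphrased from the design described in the comments, not copied from the kernel's handlers:

	/* Sketch only: illustrative transition logic for enum check_states. */
	switch (sh->check_state) {
	case check_state_idle:
		sh->check_state = check_state_run;		/* start parity check */
		set_bit(STRIPE_OP_CHECK, &s->ops_request);	/* request the op */
		break;
	case check_state_check_result:
		if (sh->ops.zero_sum_result == 0)
			sh->check_state = check_state_idle;	/* parity was good */
		else
			sh->check_state = check_state_compute_run; /* repair parity */
		break;
	case check_state_compute_result:
		sh->check_state = check_state_idle;		/* repaired block ready */
		break;
	default:
		break;	/* _run states complete asynchronously */
	}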