Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md: use list_for_each_entry macro directly

The rdev_for_each macro defined in <linux/raid/md_k.h> is identical to
list_for_each_entry_safe from <linux/list.h>; it should be defined in
terms of list_for_each_entry_safe instead of reinventing the wheel.

But some uses of rdev_for_each don't really need the safe version; a
plain list_for_each_entry is enough there, which saves a temporary
variable (tmp) in every function that used rdev_for_each.

In this patch, most rdev_for_each loops are replaced by list_for_each_entry,
eliminating many tmp variables; the safe version is kept only in the
situations where the loop calls list_del to delete an entry.

Signed-off-by: Cheng Renquan <crquan@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>

authored by

Cheng Renquan and committed by
NeilBrown
159ec1fc ccacc7d2

+60 -94
+1 -2
drivers/md/bitmap.c
··· 215 215 /* choose a good rdev and read the page from there */ 216 216 217 217 mdk_rdev_t *rdev; 218 - struct list_head *tmp; 219 218 sector_t target; 220 219 221 220 if (!page) ··· 222 223 if (!page) 223 224 return ERR_PTR(-ENOMEM); 224 225 225 - rdev_for_each(rdev, tmp, mddev) { 226 + list_for_each_entry(rdev, &mddev->disks, same_set) { 226 227 if (! test_bit(In_sync, &rdev->flags) 227 228 || test_bit(Faulty, &rdev->flags)) 228 229 continue;
+1 -2
drivers/md/faulty.c
··· 283 283 static int run(mddev_t *mddev) 284 284 { 285 285 mdk_rdev_t *rdev; 286 - struct list_head *tmp; 287 286 int i; 288 287 289 288 conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); ··· 295 296 } 296 297 conf->nfaults = 0; 297 298 298 - rdev_for_each(rdev, tmp, mddev) 299 + list_for_each_entry(rdev, &mddev->disks, same_set) 299 300 conf->rdev = rdev; 300 301 301 302 mddev->array_sectors = mddev->size * 2;
+1 -2
drivers/md/linear.c
··· 105 105 int i, nb_zone, cnt; 106 106 sector_t min_sectors; 107 107 sector_t curr_sector; 108 - struct list_head *tmp; 109 108 110 109 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), 111 110 GFP_KERNEL); ··· 114 115 cnt = 0; 115 116 conf->array_sectors = 0; 116 117 117 - rdev_for_each(rdev, tmp, mddev) { 118 + list_for_each_entry(rdev, &mddev->disks, same_set) { 118 119 int j = rdev->raid_disk; 119 120 dev_info_t *disk = conf->disks + j; 120 121
+43 -64
drivers/md/md.c
··· 307 307 308 308 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 309 309 { 310 - mdk_rdev_t * rdev; 311 - struct list_head *tmp; 310 + mdk_rdev_t *rdev; 312 311 313 - rdev_for_each(rdev, tmp, mddev) { 312 + list_for_each_entry(rdev, &mddev->disks, same_set) 314 313 if (rdev->desc_nr == nr) 315 314 return rdev; 316 - } 315 + 317 316 return NULL; 318 317 } 319 318 320 319 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 321 320 { 322 - struct list_head *tmp; 323 321 mdk_rdev_t *rdev; 324 322 325 - rdev_for_each(rdev, tmp, mddev) { 323 + list_for_each_entry(rdev, &mddev->disks, same_set) 326 324 if (rdev->bdev->bd_dev == dev) 327 325 return rdev; 328 - } 326 + 329 327 return NULL; 330 328 } 331 329 ··· 859 861 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 860 862 { 861 863 mdp_super_t *sb; 862 - struct list_head *tmp; 863 864 mdk_rdev_t *rdev2; 864 865 int next_spare = mddev->raid_disks; 865 866 ··· 930 933 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 931 934 932 935 sb->disks[0].state = (1<<MD_DISK_REMOVED); 933 - rdev_for_each(rdev2, tmp, mddev) { 936 + list_for_each_entry(rdev2, &mddev->disks, same_set) { 934 937 mdp_disk_t *d; 935 938 int desc_nr; 936 939 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) ··· 1256 1259 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1257 1260 { 1258 1261 struct mdp_superblock_1 *sb; 1259 - struct list_head *tmp; 1260 1262 mdk_rdev_t *rdev2; 1261 1263 int max_dev, i; 1262 1264 /* make rdev->sb match mddev and rdev data. 
*/ ··· 1303 1307 } 1304 1308 1305 1309 max_dev = 0; 1306 - rdev_for_each(rdev2, tmp, mddev) 1310 + list_for_each_entry(rdev2, &mddev->disks, same_set) 1307 1311 if (rdev2->desc_nr+1 > max_dev) 1308 1312 max_dev = rdev2->desc_nr+1; 1309 1313 ··· 1312 1316 for (i=0; i<max_dev;i++) 1313 1317 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1314 1318 1315 - rdev_for_each(rdev2, tmp, mddev) { 1319 + list_for_each_entry(rdev2, &mddev->disks, same_set) { 1316 1320 i = rdev2->desc_nr; 1317 1321 if (test_bit(Faulty, &rdev2->flags)) 1318 1322 sb->dev_roles[i] = cpu_to_le16(0xfffe); ··· 1567 1571 1568 1572 static void export_array(mddev_t *mddev) 1569 1573 { 1570 - struct list_head *tmp; 1571 - mdk_rdev_t *rdev; 1574 + mdk_rdev_t *rdev, *tmp; 1572 1575 1573 1576 rdev_for_each(rdev, tmp, mddev) { 1574 1577 if (!rdev->mddev) { ··· 1638 1643 1639 1644 static void md_print_devices(void) 1640 1645 { 1641 - struct list_head *tmp, *tmp2; 1646 + struct list_head *tmp; 1642 1647 mdk_rdev_t *rdev; 1643 1648 mddev_t *mddev; 1644 1649 char b[BDEVNAME_SIZE]; ··· 1653 1658 bitmap_print_sb(mddev->bitmap); 1654 1659 else 1655 1660 printk("%s: ", mdname(mddev)); 1656 - rdev_for_each(rdev, tmp2, mddev) 1661 + list_for_each_entry(rdev, &mddev->disks, same_set) 1657 1662 printk("<%s>", bdevname(rdev->bdev,b)); 1658 1663 printk("\n"); 1659 1664 1660 - rdev_for_each(rdev, tmp2, mddev) 1665 + list_for_each_entry(rdev, &mddev->disks, same_set) 1661 1666 print_rdev(rdev); 1662 1667 } 1663 1668 printk("md: **********************************\n"); ··· 1674 1679 * with the rest of the array) 1675 1680 */ 1676 1681 mdk_rdev_t *rdev; 1677 - struct list_head *tmp; 1678 1682 1679 - rdev_for_each(rdev, tmp, mddev) { 1683 + list_for_each_entry(rdev, &mddev->disks, same_set) { 1680 1684 if (rdev->sb_events == mddev->events || 1681 1685 (nospares && 1682 1686 rdev->raid_disk < 0 && ··· 1693 1699 1694 1700 static void md_update_sb(mddev_t * mddev, int force_change) 1695 1701 { 1696 - struct list_head *tmp; 1697 1702 
mdk_rdev_t *rdev; 1698 1703 int sync_req; 1699 1704 int nospares = 0; ··· 1783 1790 mdname(mddev),mddev->in_sync); 1784 1791 1785 1792 bitmap_update_sb(mddev->bitmap); 1786 - rdev_for_each(rdev, tmp, mddev) { 1793 + list_for_each_entry(rdev, &mddev->disks, same_set) { 1787 1794 char b[BDEVNAME_SIZE]; 1788 1795 dprintk(KERN_INFO "md: "); 1789 1796 if (rdev->sb_loaded != 1) ··· 1992 1999 md_wakeup_thread(rdev->mddev->thread); 1993 2000 } else if (rdev->mddev->pers) { 1994 2001 mdk_rdev_t *rdev2; 1995 - struct list_head *tmp; 1996 2002 /* Activating a spare .. or possibly reactivating 1997 2003 * if we every get bitmaps working here. 1998 2004 */ ··· 2002 2010 if (rdev->mddev->pers->hot_add_disk == NULL) 2003 2011 return -EINVAL; 2004 2012 2005 - rdev_for_each(rdev2, tmp, rdev->mddev) 2013 + list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2006 2014 if (rdev2->raid_disk == slot) 2007 2015 return -EEXIST; 2008 2016 ··· 2117 2125 */ 2118 2126 mddev_t *mddev; 2119 2127 int overlap = 0; 2120 - struct list_head *tmp, *tmp2; 2128 + struct list_head *tmp; 2121 2129 2122 2130 mddev_unlock(my_mddev); 2123 2131 for_each_mddev(mddev, tmp) { 2124 2132 mdk_rdev_t *rdev2; 2125 2133 2126 2134 mddev_lock(mddev); 2127 - rdev_for_each(rdev2, tmp2, mddev) 2135 + list_for_each_entry(rdev2, &mddev->disks, same_set) 2128 2136 if (test_bit(AllReserved, &rdev2->flags) || 2129 2137 (rdev->bdev == rdev2->bdev && 2130 2138 rdev != rdev2 && ··· 2320 2328 static void analyze_sbs(mddev_t * mddev) 2321 2329 { 2322 2330 int i; 2323 - struct list_head *tmp; 2324 - mdk_rdev_t *rdev, *freshest; 2331 + mdk_rdev_t *rdev, *freshest, *tmp; 2325 2332 char b[BDEVNAME_SIZE]; 2326 2333 2327 2334 freshest = NULL; ··· 3492 3501 { 3493 3502 int err; 3494 3503 int chunk_size; 3495 - struct list_head *tmp; 3496 3504 mdk_rdev_t *rdev; 3497 3505 struct gendisk *disk; 3498 3506 struct mdk_personality *pers; ··· 3530 3540 } 3531 3541 3532 3542 /* devices must have minimum size of one chunk */ 3533 - 
rdev_for_each(rdev, tmp, mddev) { 3543 + list_for_each_entry(rdev, &mddev->disks, same_set) { 3534 3544 if (test_bit(Faulty, &rdev->flags)) 3535 3545 continue; 3536 3546 if (rdev->size < chunk_size / 1024) { ··· 3555 3565 * the only valid external interface is through the md 3556 3566 * device. 3557 3567 */ 3558 - rdev_for_each(rdev, tmp, mddev) { 3568 + list_for_each_entry(rdev, &mddev->disks, same_set) { 3559 3569 if (test_bit(Faulty, &rdev->flags)) 3560 3570 continue; 3561 3571 sync_blockdev(rdev->bdev); ··· 3620 3630 */ 3621 3631 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3622 3632 mdk_rdev_t *rdev2; 3623 - struct list_head *tmp2; 3624 3633 int warned = 0; 3625 - rdev_for_each(rdev, tmp, mddev) { 3626 - rdev_for_each(rdev2, tmp2, mddev) { 3634 + 3635 + list_for_each_entry(rdev, &mddev->disks, same_set) 3636 + list_for_each_entry(rdev2, &mddev->disks, same_set) { 3627 3637 if (rdev < rdev2 && 3628 3638 rdev->bdev->bd_contains == 3629 3639 rdev2->bdev->bd_contains) { ··· 3637 3647 warned = 1; 3638 3648 } 3639 3649 } 3640 - } 3650 + 3641 3651 if (warned) 3642 3652 printk(KERN_WARNING 3643 3653 "True protection against single-disk" ··· 3685 3695 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 3686 3696 mddev->in_sync = 1; 3687 3697 3688 - rdev_for_each(rdev, tmp, mddev) 3698 + list_for_each_entry(rdev, &mddev->disks, same_set) 3689 3699 if (rdev->raid_disk >= 0) { 3690 3700 char nm[20]; 3691 3701 sprintf(nm, "rd%d", rdev->raid_disk); ··· 3716 3726 * it will remove the drives and not do the right thing 3717 3727 */ 3718 3728 if (mddev->degraded && !mddev->sync_thread) { 3719 - struct list_head *rtmp; 3720 3729 int spares = 0; 3721 - rdev_for_each(rdev, rtmp, mddev) 3730 + list_for_each_entry(rdev, &mddev->disks, same_set) 3722 3731 if (rdev->raid_disk >= 0 && 3723 3732 !test_bit(In_sync, &rdev->flags) && 3724 3733 !test_bit(Faulty, &rdev->flags)) ··· 3877 3888 */ 3878 3889 if (mode == 0) { 3879 3890 mdk_rdev_t *rdev; 3880 - struct list_head *tmp; 
3881 3891 3882 3892 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 3883 3893 ··· 3888 3900 } 3889 3901 mddev->bitmap_offset = 0; 3890 3902 3891 - rdev_for_each(rdev, tmp, mddev) 3903 + list_for_each_entry(rdev, &mddev->disks, same_set) 3892 3904 if (rdev->raid_disk >= 0) { 3893 3905 char nm[20]; 3894 3906 sprintf(nm, "rd%d", rdev->raid_disk); ··· 3949 3961 static void autorun_array(mddev_t *mddev) 3950 3962 { 3951 3963 mdk_rdev_t *rdev; 3952 - struct list_head *tmp; 3953 3964 int err; 3954 3965 3955 3966 if (list_empty(&mddev->disks)) ··· 3956 3969 3957 3970 printk(KERN_INFO "md: running: "); 3958 3971 3959 - rdev_for_each(rdev, tmp, mddev) { 3972 + list_for_each_entry(rdev, &mddev->disks, same_set) { 3960 3973 char b[BDEVNAME_SIZE]; 3961 3974 printk("<%s>", bdevname(rdev->bdev,b)); 3962 3975 } ··· 3983 3996 */ 3984 3997 static void autorun_devices(int part) 3985 3998 { 3986 - struct list_head *tmp; 3987 - mdk_rdev_t *rdev0, *rdev; 3999 + mdk_rdev_t *rdev0, *rdev, *tmp; 3988 4000 mddev_t *mddev; 3989 4001 char b[BDEVNAME_SIZE]; 3990 4002 ··· 3998 4012 printk(KERN_INFO "md: considering %s ...\n", 3999 4013 bdevname(rdev0->bdev,b)); 4000 4014 INIT_LIST_HEAD(&candidates); 4001 - rdev_for_each_list(rdev, tmp, pending_raid_disks) 4015 + rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4002 4016 if (super_90_load(rdev, rdev0, 0) >= 0) { 4003 4017 printk(KERN_INFO "md: adding %s ...\n", 4004 4018 bdevname(rdev->bdev,b)); ··· 4044 4058 } else { 4045 4059 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4046 4060 mddev->persistent = 1; 4047 - rdev_for_each_list(rdev, tmp, candidates) { 4061 + rdev_for_each_list(rdev, tmp, &candidates) { 4048 4062 list_del_init(&rdev->same_set); 4049 4063 if (bind_rdev_to_array(rdev, mddev)) 4050 4064 export_rdev(rdev); ··· 4055 4069 /* on success, candidates will be empty, on error 4056 4070 * it won't... 
4057 4071 */ 4058 - rdev_for_each_list(rdev, tmp, candidates) { 4072 + rdev_for_each_list(rdev, tmp, &candidates) { 4059 4073 list_del_init(&rdev->same_set); 4060 4074 export_rdev(rdev); 4061 4075 } ··· 4084 4098 mdu_array_info_t info; 4085 4099 int nr,working,active,failed,spare; 4086 4100 mdk_rdev_t *rdev; 4087 - struct list_head *tmp; 4088 4101 4089 4102 nr=working=active=failed=spare=0; 4090 - rdev_for_each(rdev, tmp, mddev) { 4103 + list_for_each_entry(rdev, &mddev->disks, same_set) { 4091 4104 nr++; 4092 4105 if (test_bit(Faulty, &rdev->flags)) 4093 4106 failed++; ··· 4604 4619 4605 4620 static int update_size(mddev_t *mddev, sector_t num_sectors) 4606 4621 { 4607 - mdk_rdev_t * rdev; 4622 + mdk_rdev_t *rdev; 4608 4623 int rv; 4609 - struct list_head *tmp; 4610 4624 int fit = (num_sectors == 0); 4611 4625 4612 4626 if (mddev->pers->resize == NULL) ··· 4627 4643 * grow, and re-add. 4628 4644 */ 4629 4645 return -EBUSY; 4630 - rdev_for_each(rdev, tmp, mddev) { 4646 + list_for_each_entry(rdev, &mddev->disks, same_set) { 4631 4647 sector_t avail; 4632 4648 avail = rdev->size * 2; 4633 4649 ··· 5176 5192 { 5177 5193 int i = 0; 5178 5194 mdk_rdev_t *rdev; 5179 - struct list_head *tmp; 5180 5195 5181 5196 seq_printf(seq, "unused devices: "); 5182 5197 5183 - rdev_for_each_list(rdev, tmp, pending_raid_disks) { 5198 + list_for_each_entry(rdev, &pending_raid_disks, same_set) { 5184 5199 char b[BDEVNAME_SIZE]; 5185 5200 i++; 5186 5201 seq_printf(seq, "%s ", ··· 5338 5355 { 5339 5356 mddev_t *mddev = v; 5340 5357 sector_t size; 5341 - struct list_head *tmp2; 5342 5358 mdk_rdev_t *rdev; 5343 5359 struct mdstat_info *mi = seq->private; 5344 5360 struct bitmap *bitmap; ··· 5374 5392 } 5375 5393 5376 5394 size = 0; 5377 - rdev_for_each(rdev, tmp2, mddev) { 5395 + list_for_each_entry(rdev, &mddev->disks, same_set) { 5378 5396 char b[BDEVNAME_SIZE]; 5379 5397 seq_printf(seq, " %s[%d]", 5380 5398 bdevname(rdev->bdev,b), rdev->desc_nr); ··· 5681 5699 struct list_head *tmp; 5682 
5700 sector_t last_check; 5683 5701 int skipped = 0; 5684 - struct list_head *rtmp; 5685 5702 mdk_rdev_t *rdev; 5686 5703 char *desc; 5687 5704 ··· 5785 5804 /* recovery follows the physical size of devices */ 5786 5805 max_sectors = mddev->size << 1; 5787 5806 j = MaxSector; 5788 - rdev_for_each(rdev, rtmp, mddev) 5807 + list_for_each_entry(rdev, &mddev->disks, same_set) 5789 5808 if (rdev->raid_disk >= 0 && 5790 5809 !test_bit(Faulty, &rdev->flags) && 5791 5810 !test_bit(In_sync, &rdev->flags) && ··· 5935 5954 } else { 5936 5955 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5937 5956 mddev->curr_resync = MaxSector; 5938 - rdev_for_each(rdev, rtmp, mddev) 5957 + list_for_each_entry(rdev, &mddev->disks, same_set) 5939 5958 if (rdev->raid_disk >= 0 && 5940 5959 !test_bit(Faulty, &rdev->flags) && 5941 5960 !test_bit(In_sync, &rdev->flags) && ··· 5971 5990 static int remove_and_add_spares(mddev_t *mddev) 5972 5991 { 5973 5992 mdk_rdev_t *rdev; 5974 - struct list_head *rtmp; 5975 5993 int spares = 0; 5976 5994 5977 - rdev_for_each(rdev, rtmp, mddev) 5995 + list_for_each_entry(rdev, &mddev->disks, same_set) 5978 5996 if (rdev->raid_disk >= 0 && 5979 5997 !test_bit(Blocked, &rdev->flags) && 5980 5998 (test_bit(Faulty, &rdev->flags) || ··· 5989 6009 } 5990 6010 5991 6011 if (mddev->degraded && ! 
mddev->ro) { 5992 - rdev_for_each(rdev, rtmp, mddev) { 6012 + list_for_each_entry(rdev, &mddev->disks, same_set) { 5993 6013 if (rdev->raid_disk >= 0 && 5994 6014 !test_bit(In_sync, &rdev->flags) && 5995 6015 !test_bit(Blocked, &rdev->flags)) ··· 6041 6061 void md_check_recovery(mddev_t *mddev) 6042 6062 { 6043 6063 mdk_rdev_t *rdev; 6044 - struct list_head *rtmp; 6045 6064 6046 6065 6047 6066 if (mddev->bitmap) ··· 6104 6125 if (mddev->flags) 6105 6126 md_update_sb(mddev, 0); 6106 6127 6107 - rdev_for_each(rdev, rtmp, mddev) 6128 + list_for_each_entry(rdev, &mddev->disks, same_set) 6108 6129 if (test_and_clear_bit(StateChanged, &rdev->flags)) 6109 6130 sysfs_notify_dirent(rdev->sysfs_state); 6110 6131 ··· 6133 6154 * information must be scrapped 6134 6155 */ 6135 6156 if (!mddev->degraded) 6136 - rdev_for_each(rdev, rtmp, mddev) 6157 + list_for_each_entry(rdev, &mddev->disks, same_set) 6137 6158 rdev->saved_raid_disk = -1; 6138 6159 6139 6160 mddev->recovery = 0;
+1 -2
drivers/md/multipath.c
··· 408 408 int disk_idx; 409 409 struct multipath_info *disk; 410 410 mdk_rdev_t *rdev; 411 - struct list_head *tmp; 412 411 413 412 if (mddev->level != LEVEL_MULTIPATH) { 414 413 printk("multipath: %s: raid level not set to multipath IO (%d)\n", ··· 440 441 } 441 442 442 443 conf->working_disks = 0; 443 - rdev_for_each(rdev, tmp, mddev) { 444 + list_for_each_entry(rdev, &mddev->disks, same_set) { 444 445 disk_idx = rdev->raid_disk; 445 446 if (disk_idx < 0 || 446 447 disk_idx >= mddev->raid_disks)
+4 -6
drivers/md/raid0.c
··· 57 57 sector_t min_spacing; 58 58 raid0_conf_t *conf = mddev_to_conf(mddev); 59 59 mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; 60 - struct list_head *tmp1, *tmp2; 61 60 struct strip_zone *zone; 62 61 int cnt; 63 62 char b[BDEVNAME_SIZE]; ··· 66 67 */ 67 68 conf->nr_strip_zones = 0; 68 69 69 - rdev_for_each(rdev1, tmp1, mddev) { 70 + list_for_each_entry(rdev1, &mddev->disks, same_set) { 70 71 printk(KERN_INFO "raid0: looking at %s\n", 71 72 bdevname(rdev1->bdev,b)); 72 73 c = 0; 73 - rdev_for_each(rdev2, tmp2, mddev) { 74 + list_for_each_entry(rdev2, &mddev->disks, same_set) { 74 75 printk(KERN_INFO "raid0: comparing %s(%llu)", 75 76 bdevname(rdev1->bdev,b), 76 77 (unsigned long long)rdev1->size); ··· 119 120 cnt = 0; 120 121 smallest = NULL; 121 122 zone->dev = conf->devlist; 122 - rdev_for_each(rdev1, tmp1, mddev) { 123 + list_for_each_entry(rdev1, &mddev->disks, same_set) { 123 124 int j = rdev1->raid_disk; 124 125 125 126 if (j < 0 || j >= mddev->raid_disks) { ··· 267 268 s64 sectors; 268 269 raid0_conf_t *conf; 269 270 mdk_rdev_t *rdev; 270 - struct list_head *tmp; 271 271 272 272 if (mddev->chunk_size == 0) { 273 273 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); ··· 292 294 293 295 /* calculate array device size */ 294 296 mddev->array_sectors = 0; 295 - rdev_for_each(rdev, tmp, mddev) 297 + list_for_each_entry(rdev, &mddev->disks, same_set) 296 298 mddev->array_sectors += rdev->size * 2; 297 299 298 300 printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
+1 -2
drivers/md/raid1.c
··· 1919 1919 int i, j, disk_idx; 1920 1920 mirror_info_t *disk; 1921 1921 mdk_rdev_t *rdev; 1922 - struct list_head *tmp; 1923 1922 1924 1923 if (mddev->level != 1) { 1925 1924 printk("raid1: %s: raid level not set to mirroring (%d)\n", ··· 1963 1964 spin_lock_init(&conf->device_lock); 1964 1965 mddev->queue->queue_lock = &conf->device_lock; 1965 1966 1966 - rdev_for_each(rdev, tmp, mddev) { 1967 + list_for_each_entry(rdev, &mddev->disks, same_set) { 1967 1968 disk_idx = rdev->raid_disk; 1968 1969 if (disk_idx >= mddev->raid_disks 1969 1970 || disk_idx < 0)
+1 -2
drivers/md/raid10.c
··· 2025 2025 int i, disk_idx; 2026 2026 mirror_info_t *disk; 2027 2027 mdk_rdev_t *rdev; 2028 - struct list_head *tmp; 2029 2028 int nc, fc, fo; 2030 2029 sector_t stride, size; 2031 2030 ··· 2107 2108 spin_lock_init(&conf->device_lock); 2108 2109 mddev->queue->queue_lock = &conf->device_lock; 2109 2110 2110 - rdev_for_each(rdev, tmp, mddev) { 2111 + list_for_each_entry(rdev, &mddev->disks, same_set) { 2111 2112 disk_idx = rdev->raid_disk; 2112 2113 if (disk_idx >= mddev->raid_disks 2113 2114 || disk_idx < 0)
+3 -5
drivers/md/raid5.c
··· 3998 3998 int raid_disk, memory; 3999 3999 mdk_rdev_t *rdev; 4000 4000 struct disk_info *disk; 4001 - struct list_head *tmp; 4002 4001 int working_disks = 0; 4003 4002 4004 4003 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { ··· 4107 4108 4108 4109 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4109 4110 4110 - rdev_for_each(rdev, tmp, mddev) { 4111 + list_for_each_entry(rdev, &mddev->disks, same_set) { 4111 4112 raid_disk = rdev->raid_disk; 4112 4113 if (raid_disk >= conf->raid_disks 4113 4114 || raid_disk < 0) ··· 4532 4533 { 4533 4534 raid5_conf_t *conf = mddev_to_conf(mddev); 4534 4535 mdk_rdev_t *rdev; 4535 - struct list_head *rtmp; 4536 4536 int spares = 0; 4537 4537 int added_devices = 0; 4538 4538 unsigned long flags; ··· 4539 4541 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4540 4542 return -EBUSY; 4541 4543 4542 - rdev_for_each(rdev, rtmp, mddev) 4544 + list_for_each_entry(rdev, &mddev->disks, same_set) 4543 4545 if (rdev->raid_disk < 0 && 4544 4546 !test_bit(Faulty, &rdev->flags)) 4545 4547 spares++; ··· 4561 4563 /* Add some new drives, as many as will fit. 4562 4564 * We know there are enough to make the newly sized array work. 4563 4565 */ 4564 - rdev_for_each(rdev, rtmp, mddev) 4566 + list_for_each_entry(rdev, &mddev->disks, same_set) 4565 4567 if (rdev->raid_disk < 0 && 4566 4568 !test_bit(Faulty, &rdev->flags)) { 4567 4569 if (raid5_add_disk(mddev, rdev) == 0) {
+4 -7
include/linux/raid/md_k.h
··· 335 335 * iterates through some rdev ringlist. It's safe to remove the 336 336 * current 'rdev'. Dont touch 'tmp' though. 337 337 */ 338 - #define rdev_for_each_list(rdev, tmp, list) \ 339 - \ 340 - for ((tmp) = (list).next; \ 341 - (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ 342 - (tmp) = (tmp)->next, (tmp)->prev != &(list) \ 343 - ; ) 338 + #define rdev_for_each_list(rdev, tmp, head) \ 339 + list_for_each_entry_safe(rdev, tmp, head, same_set) 340 + 344 341 /* 345 342 * iterates through the 'same array disks' ringlist 346 343 */ 347 344 #define rdev_for_each(rdev, tmp, mddev) \ 348 - rdev_for_each_list(rdev, tmp, (mddev)->disks) 345 + list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) 349 346 350 347 #define rdev_for_each_rcu(rdev, mddev) \ 351 348 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)