git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

md: remove typedefs: mddev_t -> struct mddev

Having both 'mddev_t' and 'struct mddev_s' is ugly and not preferred; use 'struct mddev' directly everywhere.

Signed-off-by: NeilBrown <neilb@suse.de>

Author: NeilBrown — commit fd01b88c (parent 3cb03002)

+416 -419
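
The change is almost entirely mechanical: every use of the 'mddev_t' typedef becomes the struct tag 'struct mddev', and the old '_s' suffix on the tag goes away. A minimal sketch of the before/after pattern — the field and the disks() helper are illustrative only, not taken from the patch:

/* Before: an _s-suffixed tag exists only so a typedef can hide it. */
#if 0
struct mddev_s { int raid_disks; };
typedef struct mddev_s mddev_t;
static inline int disks(mddev_t *mddev) { return mddev->raid_disks; }
#endif

/* After (this commit's style): the struct tag is spelled out, as
 * kernel coding style prefers for non-opaque types. */
struct mddev { int raid_disks; };
static inline int disks(struct mddev *mddev) { return mddev->raid_disks; }
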
+22 -22
drivers/md/bitmap.c
···
  */

 /* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
+static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
				 struct page *page,
				 unsigned long index, int size)
 {
···

 }

-static struct md_rdev *next_active_rdev(struct md_rdev *rdev, mddev_t *mddev)
+static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
···
 {
	struct md_rdev *rdev = NULL;
	struct block_device *bdev;
-	mddev_t *mddev = bitmap->mddev;
+	struct mddev *mddev = bitmap->mddev;

	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
···
  * out to disk
  */

-void bitmap_daemon_work(mddev_t *mddev)
+void bitmap_daemon_work(struct mddev *mddev)
 {
	struct bitmap *bitmap;
	unsigned long j;
···
 /*
  * flush out any pending updates
  */
-void bitmap_flush(mddev_t *mddev)
+void bitmap_flush(struct mddev *mddev)
 {
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;
···
	kfree(bitmap);
 }

-void bitmap_destroy(mddev_t *mddev)
+void bitmap_destroy(struct mddev *mddev)
 {
	struct bitmap *bitmap = mddev->bitmap;

···
  * initialize the bitmap structure
  * if this returns an error, bitmap_destroy must be called to do clean up
  */
-int bitmap_create(mddev_t *mddev)
+int bitmap_create(struct mddev *mddev)
 {
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
···
	return err;
 }

-int bitmap_load(mddev_t *mddev)
+int bitmap_load(struct mddev *mddev)
 {
	int err = 0;
	sector_t start = 0;
···
 EXPORT_SYMBOL_GPL(bitmap_load);

 static ssize_t
-location_show(mddev_t *mddev, char *page)
+location_show(struct mddev *mddev, char *page)
 {
	ssize_t len;
	if (mddev->bitmap_info.file)
···
 }

 static ssize_t
-location_store(mddev_t *mddev, const char *buf, size_t len)
+location_store(struct mddev *mddev, const char *buf, size_t len)
 {

	if (mddev->pers) {
···
	__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

 static ssize_t
-timeout_show(mddev_t *mddev, char *page)
+timeout_show(struct mddev *mddev, char *page)
 {
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
···
 }

 static ssize_t
-timeout_store(mddev_t *mddev, const char *buf, size_t len)
+timeout_store(struct mddev *mddev, const char *buf, size_t len)
 {
	/* timeout can be set at any time */
	unsigned long timeout;
···
	__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

 static ssize_t
-backlog_show(mddev_t *mddev, char *page)
+backlog_show(struct mddev *mddev, char *page)
 {
	return sprintf(page, "%lu\n",
		       mddev->bitmap_info.max_write_behind);
 }

 static ssize_t
-backlog_store(mddev_t *mddev, const char *buf, size_t len)
+backlog_store(struct mddev *mddev, const char *buf, size_t len)
 {
	unsigned long backlog;
	int rv = strict_strtoul(buf, 10, &backlog);
···
	__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

 static ssize_t
-chunksize_show(mddev_t *mddev, char *page)
+chunksize_show(struct mddev *mddev, char *page)
 {
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
 }

 static ssize_t
-chunksize_store(mddev_t *mddev, const char *buf, size_t len)
+chunksize_store(struct mddev *mddev, const char *buf, size_t len)
 {
	/* Can only be changed when no bitmap is active */
	int rv;
···
 static struct md_sysfs_entry bitmap_chunksize =
 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

-static ssize_t metadata_show(mddev_t *mddev, char *page)
+static ssize_t metadata_show(struct mddev *mddev, char *page)
 {
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
 }

-static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
+static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
 {
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
···
 static struct md_sysfs_entry bitmap_metadata =
 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

-static ssize_t can_clear_show(mddev_t *mddev, char *page)
+static ssize_t can_clear_show(struct mddev *mddev, char *page)
 {
	int len;
	if (mddev->bitmap)
···
	return len;
 }

-static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
+static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
 {
	if (mddev->bitmap == NULL)
		return -ENOENT;
···
 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

 static ssize_t
-behind_writes_used_show(mddev_t *mddev, char *page)
+behind_writes_used_show(struct mddev *mddev, char *page)
 {
	if (mddev->bitmap == NULL)
		return sprintf(page, "0\n");
···
 }

 static ssize_t
-behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
 {
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
+6 -6
drivers/md/bitmap.h
···
	unsigned long pages; /* total number of pages in the bitmap */
	unsigned long missing_pages; /* number of pages not yet allocated */

-	mddev_t *mddev; /* the md device that the bitmap is for */
+	struct mddev *mddev; /* the md device that the bitmap is for */

	/* bitmap chunksize -- how much data does each bit represent? */
	unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
···
 /* the bitmap API */

 /* these are used only by md/bitmap */
-int bitmap_create(mddev_t *mddev);
-int bitmap_load(mddev_t *mddev);
-void bitmap_flush(mddev_t *mddev);
-void bitmap_destroy(mddev_t *mddev);
+int bitmap_create(struct mddev *mddev);
+int bitmap_load(struct mddev *mddev);
+void bitmap_flush(struct mddev *mddev);
+void bitmap_destroy(struct mddev *mddev);

 void bitmap_print_sb(struct bitmap *bitmap);
 void bitmap_update_sb(struct bitmap *bitmap);
···
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);

 void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(mddev_t *mddev);
+void bitmap_daemon_work(struct mddev *mddev);
 #endif

 #endif
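
Worth noting: a header like this never needs the struct body at all. C allows declaring and passing pointers to an incomplete type, so a bare forward declaration of struct mddev is enough for every prototype above — the typedef bought no extra decoupling. A minimal sketch, with toy_ops as a hypothetical stand-in for the real API tables:

struct mddev;	/* forward declaration; the body lives in md.h */

struct toy_ops {
	int  (*create)(struct mddev *mddev);	/* pointers to an incomplete */
	void (*flush)(struct mddev *mddev);	/* type are perfectly legal  */
};
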
+6 -6
drivers/md/dm-raid.c
···

	uint64_t print_flags;

-	struct mddev_s md;
+	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

···
	return 0;
 }

-static void super_sync(mddev_t *mddev, struct md_rdev *rdev)
+static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 {
	struct md_rdev *r, *t;
	uint64_t failed_devices;
···
	return (events_sb > events_refsb) ? 1 : 0;
 }

-static int super_init_validation(mddev_t *mddev, struct md_rdev *rdev)
+static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 {
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
···
	return 0;
 }

-static int super_validate(mddev_t *mddev, struct md_rdev *rdev)
+static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 {
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

···
 {
	int ret;
	struct md_rdev *rdev, *freshest, *tmp;
-	mddev_t *mddev = &rs->md;
+	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev) {
···
 static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
 {
	struct raid_set *rs = ti->private;
-	mddev_t *mddev = &rs->md;
+	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

+6 -6
drivers/md/faulty.c
···
	conf->nfaults = n+1;
 }

-static int make_request(mddev_t *mddev, struct bio *bio)
+static int make_request(struct mddev *mddev, struct bio *bio)
 {
	conf_t *conf = mddev->private;
	int failit = 0;
···
	}
 }

-static void status(struct seq_file *seq, mddev_t *mddev)
+static void status(struct seq_file *seq, struct mddev *mddev)
 {
	conf_t *conf = mddev->private;
	int n;
···
 }


-static int reshape(mddev_t *mddev)
+static int reshape(struct mddev *mddev)
 {
	int mode = mddev->new_layout & ModeMask;
	int count = mddev->new_layout >> ModeShift;
···
	return 0;
 }

-static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);
···
	return sectors;
 }

-static int run(mddev_t *mddev)
+static int run(struct mddev *mddev)
 {
	struct md_rdev *rdev;
	int i;
···
	return 0;
 }

-static int stop(mddev_t *mddev)
+static int stop(struct mddev *mddev)
 {
	conf_t *conf = mddev->private;

+10 -10
drivers/md/linear.c
···
 /*
  * find which device holds a particular offset
  */
-static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
+static inline dev_info_t *which_dev(struct mddev *mddev, sector_t sector)
 {
	int lo, mid, hi;
	linear_conf_t *conf;
···
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
 {
-	mddev_t *mddev = q->queuedata;
+	struct mddev *mddev = q->queuedata;
	dev_info_t *dev0;
	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
···

 static int linear_congested(void *data, int bits)
 {
-	mddev_t *mddev = data;
+	struct mddev *mddev = data;
	linear_conf_t *conf;
	int i, ret = 0;

···
	return ret;
 }

-static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
	linear_conf_t *conf;
	sector_t array_sectors;
···
	return array_sectors;
 }

-static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
+static linear_conf_t *linear_conf(struct mddev *mddev, int raid_disks)
 {
	linear_conf_t *conf;
	struct md_rdev *rdev;
···
	return NULL;
 }

-static int linear_run (mddev_t *mddev)
+static int linear_run (struct mddev *mddev)
 {
	linear_conf_t *conf;

···
	return md_integrity_register(mddev);
 }

-static int linear_add(mddev_t *mddev, struct md_rdev *rdev)
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 {
	/* Adding a drive to a linear array allows the array to grow.
	 * It is permitted if the new drive has a matching superblock
···
	return 0;
 }

-static int linear_stop (mddev_t *mddev)
+static int linear_stop (struct mddev *mddev)
 {
	linear_conf_t *conf = mddev->private;

···
	return 0;
 }

-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static int linear_make_request (struct mddev *mddev, struct bio *bio)
 {
	dev_info_t *tmp_dev;
	sector_t start_sector;
···
	return 1;
 }

-static void linear_status (struct seq_file *seq, mddev_t *mddev)
+static void linear_status (struct seq_file *seq, struct mddev *mddev)
 {

	seq_printf(seq, "  %dk rounding", mddev->chunk_sectors / 2);
+180 -180
drivers/md/md.c
··· 95 95 96 96 static int sysctl_speed_limit_min = 1000; 97 97 static int sysctl_speed_limit_max = 200000; 98 - static inline int speed_min(mddev_t *mddev) 98 + static inline int speed_min(struct mddev *mddev) 99 99 { 100 100 return mddev->sync_speed_min ? 101 101 mddev->sync_speed_min : sysctl_speed_limit_min; 102 102 } 103 103 104 - static inline int speed_max(mddev_t *mddev) 104 + static inline int speed_max(struct mddev *mddev) 105 105 { 106 106 return mddev->sync_speed_max ? 107 107 mddev->sync_speed_max : sysctl_speed_limit_max; ··· 157 157 158 158 static void mddev_bio_destructor(struct bio *bio) 159 159 { 160 - mddev_t *mddev, **mddevp; 160 + struct mddev *mddev, **mddevp; 161 161 162 162 mddevp = (void*)bio; 163 163 mddev = mddevp[-1]; ··· 166 166 } 167 167 168 168 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 169 - mddev_t *mddev) 169 + struct mddev *mddev) 170 170 { 171 171 struct bio *b; 172 - mddev_t **mddevp; 172 + struct mddev **mddevp; 173 173 174 174 if (!mddev || !mddev->bio_set) 175 175 return bio_alloc(gfp_mask, nr_iovecs); ··· 186 186 EXPORT_SYMBOL_GPL(bio_alloc_mddev); 187 187 188 188 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 189 - mddev_t *mddev) 189 + struct mddev *mddev) 190 190 { 191 191 struct bio *b; 192 - mddev_t **mddevp; 192 + struct mddev **mddevp; 193 193 194 194 if (!mddev || !mddev->bio_set) 195 195 return bio_clone(bio, gfp_mask); ··· 278 278 */ 279 279 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 280 280 static atomic_t md_event_count; 281 - void md_new_event(mddev_t *mddev) 281 + void md_new_event(struct mddev *mddev) 282 282 { 283 283 atomic_inc(&md_event_count); 284 284 wake_up(&md_event_waiters); ··· 288 288 /* Alternate version that can be called from interrupts 289 289 * when calling sysfs_notify isn't needed. 290 290 */ 291 - static void md_new_event_inintr(mddev_t *mddev) 291 + static void md_new_event_inintr(struct mddev *mddev) 292 292 { 293 293 atomic_inc(&md_event_count); 294 294 wake_up(&md_event_waiters); ··· 309 309 * Any code which breaks out of this loop while own 310 310 * a reference to the current mddev and must mddev_put it. 311 311 */ 312 - #define for_each_mddev(mddev,tmp) \ 312 + #define for_each_mddev(_mddev,_tmp) \ 313 313 \ 314 314 for (({ spin_lock(&all_mddevs_lock); \ 315 - tmp = all_mddevs.next; \ 316 - mddev = NULL;}); \ 317 - ({ if (tmp != &all_mddevs) \ 318 - mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ 315 + _tmp = all_mddevs.next; \ 316 + _mddev = NULL;}); \ 317 + ({ if (_tmp != &all_mddevs) \ 318 + mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\ 319 319 spin_unlock(&all_mddevs_lock); \ 320 - if (mddev) mddev_put(mddev); \ 321 - mddev = list_entry(tmp, mddev_t, all_mddevs); \ 322 - tmp != &all_mddevs;}); \ 320 + if (_mddev) mddev_put(_mddev); \ 321 + _mddev = list_entry(_tmp, struct mddev, all_mddevs); \ 322 + _tmp != &all_mddevs;}); \ 323 323 ({ spin_lock(&all_mddevs_lock); \ 324 - tmp = tmp->next;}) \ 324 + _tmp = _tmp->next;}) \ 325 325 ) 326 326 327 327 ··· 335 335 static int md_make_request(struct request_queue *q, struct bio *bio) 336 336 { 337 337 const int rw = bio_data_dir(bio); 338 - mddev_t *mddev = q->queuedata; 338 + struct mddev *mddev = q->queuedata; 339 339 int rv; 340 340 int cpu; 341 341 unsigned int sectors; ··· 387 387 * Once ->stop is called and completes, the module will be completely 388 388 * unused. 
389 389 */ 390 - void mddev_suspend(mddev_t *mddev) 390 + void mddev_suspend(struct mddev *mddev) 391 391 { 392 392 BUG_ON(mddev->suspended); 393 393 mddev->suspended = 1; ··· 397 397 } 398 398 EXPORT_SYMBOL_GPL(mddev_suspend); 399 399 400 - void mddev_resume(mddev_t *mddev) 400 + void mddev_resume(struct mddev *mddev) 401 401 { 402 402 mddev->suspended = 0; 403 403 wake_up(&mddev->sb_wait); ··· 408 408 } 409 409 EXPORT_SYMBOL_GPL(mddev_resume); 410 410 411 - int mddev_congested(mddev_t *mddev, int bits) 411 + int mddev_congested(struct mddev *mddev, int bits) 412 412 { 413 413 return mddev->suspended; 414 414 } ··· 421 421 static void md_end_flush(struct bio *bio, int err) 422 422 { 423 423 struct md_rdev *rdev = bio->bi_private; 424 - mddev_t *mddev = rdev->mddev; 424 + struct mddev *mddev = rdev->mddev; 425 425 426 426 rdev_dec_pending(rdev, mddev); 427 427 ··· 436 436 437 437 static void submit_flushes(struct work_struct *ws) 438 438 { 439 - mddev_t *mddev = container_of(ws, mddev_t, flush_work); 439 + struct mddev *mddev = container_of(ws, struct mddev, flush_work); 440 440 struct md_rdev *rdev; 441 441 442 442 INIT_WORK(&mddev->flush_work, md_submit_flush_data); ··· 469 469 470 470 static void md_submit_flush_data(struct work_struct *ws) 471 471 { 472 - mddev_t *mddev = container_of(ws, mddev_t, flush_work); 472 + struct mddev *mddev = container_of(ws, struct mddev, flush_work); 473 473 struct bio *bio = mddev->flush_bio; 474 474 475 475 if (bio->bi_size == 0) ··· 485 485 wake_up(&mddev->sb_wait); 486 486 } 487 487 488 - void md_flush_request(mddev_t *mddev, struct bio *bio) 488 + void md_flush_request(struct mddev *mddev, struct bio *bio) 489 489 { 490 490 spin_lock_irq(&mddev->write_lock); 491 491 wait_event_lock_irq(mddev->sb_wait, ··· 509 509 */ 510 510 struct md_plug_cb { 511 511 struct blk_plug_cb cb; 512 - mddev_t *mddev; 512 + struct mddev *mddev; 513 513 }; 514 514 515 515 static void plugger_unplug(struct blk_plug_cb *cb) ··· 523 523 /* Check that an unplug wakeup will come shortly. 
524 524 * If not, wakeup the md thread immediately 525 525 */ 526 - int mddev_check_plugged(mddev_t *mddev) 526 + int mddev_check_plugged(struct mddev *mddev) 527 527 { 528 528 struct blk_plug *plug = current->plug; 529 529 struct md_plug_cb *mdcb; ··· 555 555 } 556 556 EXPORT_SYMBOL_GPL(mddev_check_plugged); 557 557 558 - static inline mddev_t *mddev_get(mddev_t *mddev) 558 + static inline struct mddev *mddev_get(struct mddev *mddev) 559 559 { 560 560 atomic_inc(&mddev->active); 561 561 return mddev; ··· 563 563 564 564 static void mddev_delayed_delete(struct work_struct *ws); 565 565 566 - static void mddev_put(mddev_t *mddev) 566 + static void mddev_put(struct mddev *mddev) 567 567 { 568 568 struct bio_set *bs = NULL; 569 569 ··· 592 592 bioset_free(bs); 593 593 } 594 594 595 - void mddev_init(mddev_t *mddev) 595 + void mddev_init(struct mddev *mddev) 596 596 { 597 597 mutex_init(&mddev->open_mutex); 598 598 mutex_init(&mddev->reconfig_mutex); ··· 615 615 } 616 616 EXPORT_SYMBOL_GPL(mddev_init); 617 617 618 - static mddev_t * mddev_find(dev_t unit) 618 + static struct mddev * mddev_find(dev_t unit) 619 619 { 620 - mddev_t *mddev, *new = NULL; 620 + struct mddev *mddev, *new = NULL; 621 621 622 622 if (unit && MAJOR(unit) != MD_MAJOR) 623 623 unit &= ~((1<<MdpMinorShift)-1); ··· 689 689 goto retry; 690 690 } 691 691 692 - static inline int mddev_lock(mddev_t * mddev) 692 + static inline int mddev_lock(struct mddev * mddev) 693 693 { 694 694 return mutex_lock_interruptible(&mddev->reconfig_mutex); 695 695 } 696 696 697 - static inline int mddev_is_locked(mddev_t *mddev) 697 + static inline int mddev_is_locked(struct mddev *mddev) 698 698 { 699 699 return mutex_is_locked(&mddev->reconfig_mutex); 700 700 } 701 701 702 - static inline int mddev_trylock(mddev_t * mddev) 702 + static inline int mddev_trylock(struct mddev * mddev) 703 703 { 704 704 return mutex_trylock(&mddev->reconfig_mutex); 705 705 } 706 706 707 707 static struct attribute_group md_redundancy_group; 708 708 709 - static void mddev_unlock(mddev_t * mddev) 709 + static void mddev_unlock(struct mddev * mddev) 710 710 { 711 711 if (mddev->to_remove) { 712 712 /* These cannot be removed under reconfig_mutex as ··· 749 749 spin_unlock(&pers_lock); 750 750 } 751 751 752 - static struct md_rdev * find_rdev_nr(mddev_t *mddev, int nr) 752 + static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr) 753 753 { 754 754 struct md_rdev *rdev; 755 755 ··· 760 760 return NULL; 761 761 } 762 762 763 - static struct md_rdev * find_rdev(mddev_t * mddev, dev_t dev) 763 + static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) 764 764 { 765 765 struct md_rdev *rdev; 766 766 ··· 823 823 static void super_written(struct bio *bio, int error) 824 824 { 825 825 struct md_rdev *rdev = bio->bi_private; 826 - mddev_t *mddev = rdev->mddev; 826 + struct mddev *mddev = rdev->mddev; 827 827 828 828 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 829 829 printk("md: super_written gets error=%d, uptodate=%d\n", ··· 837 837 bio_put(bio); 838 838 } 839 839 840 - void md_super_write(mddev_t *mddev, struct md_rdev *rdev, 840 + void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 841 841 sector_t sector, int size, struct page *page) 842 842 { 843 843 /* write first size bytes of page to sector of rdev ··· 858 858 submit_bio(WRITE_FLUSH_FUA, bio); 859 859 } 860 860 861 - void md_super_wait(mddev_t *mddev) 861 + void md_super_wait(struct mddev *mddev) 862 862 { 863 863 /* wait for all superblock writes that were scheduled to 
complete */ 864 864 DEFINE_WAIT(wq); ··· 1021 1021 * -EINVAL superblock incompatible or invalid 1022 1022 * -othererror e.g. -EIO 1023 1023 * 1024 - * int validate_super(mddev_t *mddev, struct md_rdev *dev) 1024 + * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1025 1025 * Verify that dev is acceptable into mddev. 1026 1026 * The first time, mddev->raid_disks will be 0, and data from 1027 1027 * dev should be merged in. Subsequent calls check that dev 1028 1028 * is new enough. Return 0 or -EINVAL 1029 1029 * 1030 - * void sync_super(mddev_t *mddev, struct md_rdev *dev) 1030 + * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1031 1031 * Update the superblock for rdev with data in mddev 1032 1032 * This does not write to disc. 1033 1033 * ··· 1038 1038 struct module *owner; 1039 1039 int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, 1040 1040 int minor_version); 1041 - int (*validate_super)(mddev_t *mddev, struct md_rdev *rdev); 1042 - void (*sync_super)(mddev_t *mddev, struct md_rdev *rdev); 1041 + int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev); 1042 + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); 1043 1043 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1044 1044 sector_t num_sectors); 1045 1045 }; ··· 1052 1052 * has a bitmap. Otherwise, it returns 0. 1053 1053 * 1054 1054 */ 1055 - int md_check_no_bitmap(mddev_t *mddev) 1055 + int md_check_no_bitmap(struct mddev *mddev) 1056 1056 { 1057 1057 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1058 1058 return 0; ··· 1160 1160 /* 1161 1161 * validate_super for 0.90.0 1162 1162 */ 1163 - static int super_90_validate(mddev_t *mddev, struct md_rdev *rdev) 1163 + static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) 1164 1164 { 1165 1165 mdp_disk_t *desc; 1166 1166 mdp_super_t *sb = page_address(rdev->sb_page); ··· 1272 1272 /* 1273 1273 * sync_super for 0.90.0 1274 1274 */ 1275 - static void super_90_sync(mddev_t *mddev, struct md_rdev *rdev) 1275 + static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1276 1276 { 1277 1277 mdp_super_t *sb; 1278 1278 struct md_rdev *rdev2; ··· 1622 1622 return ret; 1623 1623 } 1624 1624 1625 - static int super_1_validate(mddev_t *mddev, struct md_rdev *rdev) 1625 + static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) 1626 1626 { 1627 1627 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 1628 1628 __u64 ev1 = le64_to_cpu(sb->events); ··· 1723 1723 return 0; 1724 1724 } 1725 1725 1726 - static void super_1_sync(mddev_t *mddev, struct md_rdev *rdev) 1726 + static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 1727 1727 { 1728 1728 struct mdp_superblock_1 *sb; 1729 1729 struct md_rdev *rdev2; ··· 1902 1902 }, 1903 1903 }; 1904 1904 1905 - static void sync_super(mddev_t *mddev, struct md_rdev *rdev) 1905 + static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 1906 1906 { 1907 1907 if (mddev->sync_super) { 1908 1908 mddev->sync_super(mddev, rdev); ··· 1914 1914 super_types[mddev->major_version].sync_super(mddev, rdev); 1915 1915 } 1916 1916 1917 - static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1917 + static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 1918 1918 { 1919 1919 struct md_rdev *rdev, *rdev2; 1920 1920 ··· 1939 1939 * from the array. It only succeeds if all working and active component devices 1940 1940 * are integrity capable with matching profiles. 
1941 1941 */ 1942 - int md_integrity_register(mddev_t *mddev) 1942 + int md_integrity_register(struct mddev *mddev) 1943 1943 { 1944 1944 struct md_rdev *rdev, *reference = NULL; 1945 1945 ··· 1986 1986 EXPORT_SYMBOL(md_integrity_register); 1987 1987 1988 1988 /* Disable data integrity if non-capable/non-matching disk is being added */ 1989 - void md_integrity_add_rdev(struct md_rdev *rdev, mddev_t *mddev) 1989 + void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 1990 1990 { 1991 1991 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1992 1992 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); ··· 2003 2003 } 2004 2004 EXPORT_SYMBOL(md_integrity_add_rdev); 2005 2005 2006 - static int bind_rdev_to_array(struct md_rdev * rdev, mddev_t * mddev) 2006 + static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev) 2007 2007 { 2008 2008 char b[BDEVNAME_SIZE]; 2009 2009 struct kobject *ko; ··· 2170 2170 export_rdev(rdev); 2171 2171 } 2172 2172 2173 - static void export_array(mddev_t *mddev) 2173 + static void export_array(struct mddev *mddev) 2174 2174 { 2175 2175 struct md_rdev *rdev, *tmp; 2176 2176 ··· 2293 2293 { 2294 2294 struct list_head *tmp; 2295 2295 struct md_rdev *rdev; 2296 - mddev_t *mddev; 2296 + struct mddev *mddev; 2297 2297 char b[BDEVNAME_SIZE]; 2298 2298 2299 2299 printk("\n"); ··· 2318 2318 } 2319 2319 2320 2320 2321 - static void sync_sbs(mddev_t * mddev, int nospares) 2321 + static void sync_sbs(struct mddev * mddev, int nospares) 2322 2322 { 2323 2323 /* Update each superblock (in-memory image), but 2324 2324 * if we are allowed to, skip spares which already ··· 2341 2341 } 2342 2342 } 2343 2343 2344 - static void md_update_sb(mddev_t * mddev, int force_change) 2344 + static void md_update_sb(struct mddev * mddev, int force_change) 2345 2345 { 2346 2346 struct md_rdev *rdev; 2347 2347 int sync_req; ··· 2586 2586 if (rdev->raid_disk >= 0) 2587 2587 err = -EBUSY; 2588 2588 else { 2589 - mddev_t *mddev = rdev->mddev; 2589 + struct mddev *mddev = rdev->mddev; 2590 2590 kick_rdev_from_array(rdev); 2591 2591 if (mddev->pers) 2592 2592 md_update_sb(mddev, 1); ··· 2814 2814 static ssize_t 2815 2815 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 2816 2816 { 2817 - mddev_t *my_mddev = rdev->mddev; 2817 + struct mddev *my_mddev = rdev->mddev; 2818 2818 sector_t oldsectors = rdev->sectors; 2819 2819 sector_t sectors; 2820 2820 ··· 2840 2840 * a deadlock. We have already changed rdev->sectors, and if 2841 2841 * we have to change it back, we will have the lock again. 
2842 2842 */ 2843 - mddev_t *mddev; 2843 + struct mddev *mddev; 2844 2844 int overlap = 0; 2845 2845 struct list_head *tmp; 2846 2846 ··· 2967 2967 { 2968 2968 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2969 2969 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 2970 - mddev_t *mddev = rdev->mddev; 2970 + struct mddev *mddev = rdev->mddev; 2971 2971 ssize_t rv; 2972 2972 2973 2973 if (!entry->show) ··· 2991 2991 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2992 2992 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 2993 2993 ssize_t rv; 2994 - mddev_t *mddev = rdev->mddev; 2994 + struct mddev *mddev = rdev->mddev; 2995 2995 2996 2996 if (!entry->store) 2997 2997 return -EIO; ··· 3139 3139 */ 3140 3140 3141 3141 3142 - static void analyze_sbs(mddev_t * mddev) 3142 + static void analyze_sbs(struct mddev * mddev) 3143 3143 { 3144 3144 int i; 3145 3145 struct md_rdev *rdev, *freshest, *tmp; ··· 3242 3242 static void md_safemode_timeout(unsigned long data); 3243 3243 3244 3244 static ssize_t 3245 - safe_delay_show(mddev_t *mddev, char *page) 3245 + safe_delay_show(struct mddev *mddev, char *page) 3246 3246 { 3247 3247 int msec = (mddev->safemode_delay*1000)/HZ; 3248 3248 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3249 3249 } 3250 3250 static ssize_t 3251 - safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 3251 + safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3252 3252 { 3253 3253 unsigned long msec; 3254 3254 ··· 3270 3270 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3271 3271 3272 3272 static ssize_t 3273 - level_show(mddev_t *mddev, char *page) 3273 + level_show(struct mddev *mddev, char *page) 3274 3274 { 3275 3275 struct mdk_personality *p = mddev->pers; 3276 3276 if (p) ··· 3284 3284 } 3285 3285 3286 3286 static ssize_t 3287 - level_store(mddev_t *mddev, const char *buf, size_t len) 3287 + level_store(struct mddev *mddev, const char *buf, size_t len) 3288 3288 { 3289 3289 char clevel[16]; 3290 3290 ssize_t rv = len; ··· 3465 3465 3466 3466 3467 3467 static ssize_t 3468 - layout_show(mddev_t *mddev, char *page) 3468 + layout_show(struct mddev *mddev, char *page) 3469 3469 { 3470 3470 /* just a number, not meaningful for all levels */ 3471 3471 if (mddev->reshape_position != MaxSector && ··· 3476 3476 } 3477 3477 3478 3478 static ssize_t 3479 - layout_store(mddev_t *mddev, const char *buf, size_t len) 3479 + layout_store(struct mddev *mddev, const char *buf, size_t len) 3480 3480 { 3481 3481 char *e; 3482 3482 unsigned long n = simple_strtoul(buf, &e, 10); ··· 3506 3506 3507 3507 3508 3508 static ssize_t 3509 - raid_disks_show(mddev_t *mddev, char *page) 3509 + raid_disks_show(struct mddev *mddev, char *page) 3510 3510 { 3511 3511 if (mddev->raid_disks == 0) 3512 3512 return 0; ··· 3517 3517 return sprintf(page, "%d\n", mddev->raid_disks); 3518 3518 } 3519 3519 3520 - static int update_raid_disks(mddev_t *mddev, int raid_disks); 3520 + static int update_raid_disks(struct mddev *mddev, int raid_disks); 3521 3521 3522 3522 static ssize_t 3523 - raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 3523 + raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3524 3524 { 3525 3525 char *e; 3526 3526 int rv = 0; ··· 3543 3543 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3544 3544 3545 3545 static ssize_t 3546 - chunk_size_show(mddev_t *mddev, char *page) 3546 + 
chunk_size_show(struct mddev *mddev, char *page) 3547 3547 { 3548 3548 if (mddev->reshape_position != MaxSector && 3549 3549 mddev->chunk_sectors != mddev->new_chunk_sectors) ··· 3554 3554 } 3555 3555 3556 3556 static ssize_t 3557 - chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 3557 + chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3558 3558 { 3559 3559 char *e; 3560 3560 unsigned long n = simple_strtoul(buf, &e, 10); ··· 3583 3583 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3584 3584 3585 3585 static ssize_t 3586 - resync_start_show(mddev_t *mddev, char *page) 3586 + resync_start_show(struct mddev *mddev, char *page) 3587 3587 { 3588 3588 if (mddev->recovery_cp == MaxSector) 3589 3589 return sprintf(page, "none\n"); ··· 3591 3591 } 3592 3592 3593 3593 static ssize_t 3594 - resync_start_store(mddev_t *mddev, const char *buf, size_t len) 3594 + resync_start_store(struct mddev *mddev, const char *buf, size_t len) 3595 3595 { 3596 3596 char *e; 3597 3597 unsigned long long n = simple_strtoull(buf, &e, 10); ··· 3661 3661 } 3662 3662 3663 3663 static ssize_t 3664 - array_state_show(mddev_t *mddev, char *page) 3664 + array_state_show(struct mddev *mddev, char *page) 3665 3665 { 3666 3666 enum array_state st = inactive; 3667 3667 ··· 3694 3694 return sprintf(page, "%s\n", array_states[st]); 3695 3695 } 3696 3696 3697 - static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3698 - static int md_set_readonly(mddev_t * mddev, int is_open); 3699 - static int do_md_run(mddev_t * mddev); 3700 - static int restart_array(mddev_t *mddev); 3697 + static int do_md_stop(struct mddev * mddev, int ro, int is_open); 3698 + static int md_set_readonly(struct mddev * mddev, int is_open); 3699 + static int do_md_run(struct mddev * mddev); 3700 + static int restart_array(struct mddev *mddev); 3701 3701 3702 3702 static ssize_t 3703 - array_state_store(mddev_t *mddev, const char *buf, size_t len) 3703 + array_state_store(struct mddev *mddev, const char *buf, size_t len) 3704 3704 { 3705 3705 int err = -EINVAL; 3706 3706 enum array_state st = match_word(buf, array_states); ··· 3794 3794 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3795 3795 3796 3796 static ssize_t 3797 - max_corrected_read_errors_show(mddev_t *mddev, char *page) { 3797 + max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3798 3798 return sprintf(page, "%d\n", 3799 3799 atomic_read(&mddev->max_corr_read_errors)); 3800 3800 } 3801 3801 3802 3802 static ssize_t 3803 - max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len) 3803 + max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 3804 3804 { 3805 3805 char *e; 3806 3806 unsigned long n = simple_strtoul(buf, &e, 10); ··· 3817 3817 max_corrected_read_errors_store); 3818 3818 3819 3819 static ssize_t 3820 - null_show(mddev_t *mddev, char *page) 3820 + null_show(struct mddev *mddev, char *page) 3821 3821 { 3822 3822 return -EINVAL; 3823 3823 } 3824 3824 3825 3825 static ssize_t 3826 - new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3826 + new_dev_store(struct mddev *mddev, const char *buf, size_t len) 3827 3827 { 3828 3828 /* buf must be %d:%d\n? giving major and minor numbers */ 3829 3829 /* The new device is added to the array. 
··· 3880 3880 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3881 3881 3882 3882 static ssize_t 3883 - bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3883 + bitmap_store(struct mddev *mddev, const char *buf, size_t len) 3884 3884 { 3885 3885 char *end; 3886 3886 unsigned long chunk, end_chunk; ··· 3909 3909 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3910 3910 3911 3911 static ssize_t 3912 - size_show(mddev_t *mddev, char *page) 3912 + size_show(struct mddev *mddev, char *page) 3913 3913 { 3914 3914 return sprintf(page, "%llu\n", 3915 3915 (unsigned long long)mddev->dev_sectors / 2); 3916 3916 } 3917 3917 3918 - static int update_size(mddev_t *mddev, sector_t num_sectors); 3918 + static int update_size(struct mddev *mddev, sector_t num_sectors); 3919 3919 3920 3920 static ssize_t 3921 - size_store(mddev_t *mddev, const char *buf, size_t len) 3921 + size_store(struct mddev *mddev, const char *buf, size_t len) 3922 3922 { 3923 3923 /* If array is inactive, we can reduce the component size, but 3924 3924 * not increase it (except from 0). ··· 3953 3953 * or N.M for internally known formats 3954 3954 */ 3955 3955 static ssize_t 3956 - metadata_show(mddev_t *mddev, char *page) 3956 + metadata_show(struct mddev *mddev, char *page) 3957 3957 { 3958 3958 if (mddev->persistent) 3959 3959 return sprintf(page, "%d.%d\n", ··· 3965 3965 } 3966 3966 3967 3967 static ssize_t 3968 - metadata_store(mddev_t *mddev, const char *buf, size_t len) 3968 + metadata_store(struct mddev *mddev, const char *buf, size_t len) 3969 3969 { 3970 3970 int major, minor; 3971 3971 char *e; ··· 4019 4019 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4020 4020 4021 4021 static ssize_t 4022 - action_show(mddev_t *mddev, char *page) 4022 + action_show(struct mddev *mddev, char *page) 4023 4023 { 4024 4024 char *type = "idle"; 4025 4025 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) ··· 4041 4041 return sprintf(page, "%s\n", type); 4042 4042 } 4043 4043 4044 - static void reap_sync_thread(mddev_t *mddev); 4044 + static void reap_sync_thread(struct mddev *mddev); 4045 4045 4046 4046 static ssize_t 4047 - action_store(mddev_t *mddev, const char *page, size_t len) 4047 + action_store(struct mddev *mddev, const char *page, size_t len) 4048 4048 { 4049 4049 if (!mddev->pers || !mddev->pers->sync_request) 4050 4050 return -EINVAL; ··· 4090 4090 } 4091 4091 4092 4092 static ssize_t 4093 - mismatch_cnt_show(mddev_t *mddev, char *page) 4093 + mismatch_cnt_show(struct mddev *mddev, char *page) 4094 4094 { 4095 4095 return sprintf(page, "%llu\n", 4096 4096 (unsigned long long) mddev->resync_mismatches); ··· 4103 4103 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4104 4104 4105 4105 static ssize_t 4106 - sync_min_show(mddev_t *mddev, char *page) 4106 + sync_min_show(struct mddev *mddev, char *page) 4107 4107 { 4108 4108 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4109 4109 mddev->sync_speed_min ? 
"local": "system"); 4110 4110 } 4111 4111 4112 4112 static ssize_t 4113 - sync_min_store(mddev_t *mddev, const char *buf, size_t len) 4113 + sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4114 4114 { 4115 4115 int min; 4116 4116 char *e; ··· 4129 4129 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4130 4130 4131 4131 static ssize_t 4132 - sync_max_show(mddev_t *mddev, char *page) 4132 + sync_max_show(struct mddev *mddev, char *page) 4133 4133 { 4134 4134 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4135 4135 mddev->sync_speed_max ? "local": "system"); 4136 4136 } 4137 4137 4138 4138 static ssize_t 4139 - sync_max_store(mddev_t *mddev, const char *buf, size_t len) 4139 + sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4140 4140 { 4141 4141 int max; 4142 4142 char *e; ··· 4155 4155 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4156 4156 4157 4157 static ssize_t 4158 - degraded_show(mddev_t *mddev, char *page) 4158 + degraded_show(struct mddev *mddev, char *page) 4159 4159 { 4160 4160 return sprintf(page, "%d\n", mddev->degraded); 4161 4161 } 4162 4162 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4163 4163 4164 4164 static ssize_t 4165 - sync_force_parallel_show(mddev_t *mddev, char *page) 4165 + sync_force_parallel_show(struct mddev *mddev, char *page) 4166 4166 { 4167 4167 return sprintf(page, "%d\n", mddev->parallel_resync); 4168 4168 } 4169 4169 4170 4170 static ssize_t 4171 - sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 4171 + sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4172 4172 { 4173 4173 long n; 4174 4174 ··· 4192 4192 sync_force_parallel_show, sync_force_parallel_store); 4193 4193 4194 4194 static ssize_t 4195 - sync_speed_show(mddev_t *mddev, char *page) 4195 + sync_speed_show(struct mddev *mddev, char *page) 4196 4196 { 4197 4197 unsigned long resync, dt, db; 4198 4198 if (mddev->curr_resync == 0) ··· 4207 4207 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4208 4208 4209 4209 static ssize_t 4210 - sync_completed_show(mddev_t *mddev, char *page) 4210 + sync_completed_show(struct mddev *mddev, char *page) 4211 4211 { 4212 4212 unsigned long long max_sectors, resync; 4213 4213 ··· 4226 4226 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4227 4227 4228 4228 static ssize_t 4229 - min_sync_show(mddev_t *mddev, char *page) 4229 + min_sync_show(struct mddev *mddev, char *page) 4230 4230 { 4231 4231 return sprintf(page, "%llu\n", 4232 4232 (unsigned long long)mddev->resync_min); 4233 4233 } 4234 4234 static ssize_t 4235 - min_sync_store(mddev_t *mddev, const char *buf, size_t len) 4235 + min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4236 4236 { 4237 4237 unsigned long long min; 4238 4238 if (strict_strtoull(buf, 10, &min)) ··· 4257 4257 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4258 4258 4259 4259 static ssize_t 4260 - max_sync_show(mddev_t *mddev, char *page) 4260 + max_sync_show(struct mddev *mddev, char *page) 4261 4261 { 4262 4262 if (mddev->resync_max == MaxSector) 4263 4263 return sprintf(page, "max\n"); ··· 4266 4266 (unsigned long long)mddev->resync_max); 4267 4267 } 4268 4268 static ssize_t 4269 - max_sync_store(mddev_t *mddev, const char *buf, size_t len) 4269 + max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4270 4270 { 4271 4271 if (strncmp(buf, "max", 3) == 0) 4272 4272 mddev->resync_max = MaxSector; 
··· 4297 4297 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4298 4298 4299 4299 static ssize_t 4300 - suspend_lo_show(mddev_t *mddev, char *page) 4300 + suspend_lo_show(struct mddev *mddev, char *page) 4301 4301 { 4302 4302 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4303 4303 } 4304 4304 4305 4305 static ssize_t 4306 - suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 4306 + suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4307 4307 { 4308 4308 char *e; 4309 4309 unsigned long long new = simple_strtoull(buf, &e, 10); ··· 4331 4331 4332 4332 4333 4333 static ssize_t 4334 - suspend_hi_show(mddev_t *mddev, char *page) 4334 + suspend_hi_show(struct mddev *mddev, char *page) 4335 4335 { 4336 4336 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4337 4337 } 4338 4338 4339 4339 static ssize_t 4340 - suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 4340 + suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4341 4341 { 4342 4342 char *e; 4343 4343 unsigned long long new = simple_strtoull(buf, &e, 10); ··· 4364 4364 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4365 4365 4366 4366 static ssize_t 4367 - reshape_position_show(mddev_t *mddev, char *page) 4367 + reshape_position_show(struct mddev *mddev, char *page) 4368 4368 { 4369 4369 if (mddev->reshape_position != MaxSector) 4370 4370 return sprintf(page, "%llu\n", ··· 4374 4374 } 4375 4375 4376 4376 static ssize_t 4377 - reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 4377 + reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4378 4378 { 4379 4379 char *e; 4380 4380 unsigned long long new = simple_strtoull(buf, &e, 10); ··· 4395 4395 reshape_position_store); 4396 4396 4397 4397 static ssize_t 4398 - array_size_show(mddev_t *mddev, char *page) 4398 + array_size_show(struct mddev *mddev, char *page) 4399 4399 { 4400 4400 if (mddev->external_size) 4401 4401 return sprintf(page, "%llu\n", ··· 4405 4405 } 4406 4406 4407 4407 static ssize_t 4408 - array_size_store(mddev_t *mddev, const char *buf, size_t len) 4408 + array_size_store(struct mddev *mddev, const char *buf, size_t len) 4409 4409 { 4410 4410 sector_t sectors; 4411 4411 ··· 4480 4480 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4481 4481 { 4482 4482 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4483 - mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4483 + struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4484 4484 ssize_t rv; 4485 4485 4486 4486 if (!entry->show) ··· 4498 4498 const char *page, size_t length) 4499 4499 { 4500 4500 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4501 - mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4501 + struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4502 4502 ssize_t rv; 4503 4503 4504 4504 if (!entry->store) ··· 4517 4517 4518 4518 static void md_free(struct kobject *ko) 4519 4519 { 4520 - mddev_t *mddev = container_of(ko, mddev_t, kobj); 4520 + struct mddev *mddev = container_of(ko, struct mddev, kobj); 4521 4521 4522 4522 if (mddev->sysfs_state) 4523 4523 sysfs_put(mddev->sysfs_state); ··· 4546 4546 4547 4547 static void mddev_delayed_delete(struct work_struct *ws) 4548 4548 { 4549 - mddev_t *mddev = container_of(ws, mddev_t, del_work); 4549 + struct mddev *mddev = container_of(ws, struct mddev, del_work); 4550 4550 4551 4551 
sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4552 4552 kobject_del(&mddev->kobj); ··· 4556 4556 static int md_alloc(dev_t dev, char *name) 4557 4557 { 4558 4558 static DEFINE_MUTEX(disks_mutex); 4559 - mddev_t *mddev = mddev_find(dev); 4559 + struct mddev *mddev = mddev_find(dev); 4560 4560 struct gendisk *disk; 4561 4561 int partitioned; 4562 4562 int shift; ··· 4583 4583 if (name) { 4584 4584 /* Need to ensure that 'name' is not a duplicate. 4585 4585 */ 4586 - mddev_t *mddev2; 4586 + struct mddev *mddev2; 4587 4587 spin_lock(&all_mddevs_lock); 4588 4588 4589 4589 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) ··· 4684 4684 4685 4685 static void md_safemode_timeout(unsigned long data) 4686 4686 { 4687 - mddev_t *mddev = (mddev_t *) data; 4687 + struct mddev *mddev = (struct mddev *) data; 4688 4688 4689 4689 if (!atomic_read(&mddev->writes_pending)) { 4690 4690 mddev->safemode = 1; ··· 4696 4696 4697 4697 static int start_dirty_degraded; 4698 4698 4699 - int md_run(mddev_t *mddev) 4699 + int md_run(struct mddev *mddev) 4700 4700 { 4701 4701 int err; 4702 4702 struct md_rdev *rdev; ··· 4764 4764 4765 4765 if (mddev->bio_set == NULL) 4766 4766 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 4767 - sizeof(mddev_t *)); 4767 + sizeof(struct mddev *)); 4768 4768 4769 4769 spin_lock(&pers_lock); 4770 4770 pers = find_pers(mddev->level, mddev->clevel); ··· 4898 4898 } 4899 4899 EXPORT_SYMBOL_GPL(md_run); 4900 4900 4901 - static int do_md_run(mddev_t *mddev) 4901 + static int do_md_run(struct mddev *mddev) 4902 4902 { 4903 4903 int err; 4904 4904 ··· 4922 4922 return err; 4923 4923 } 4924 4924 4925 - static int restart_array(mddev_t *mddev) 4925 + static int restart_array(struct mddev *mddev) 4926 4926 { 4927 4927 struct gendisk *disk = mddev->gendisk; 4928 4928 ··· 4972 4972 spin_unlock(&inode->i_lock); 4973 4973 } 4974 4974 4975 - static void md_clean(mddev_t *mddev) 4975 + static void md_clean(struct mddev *mddev) 4976 4976 { 4977 4977 mddev->array_sectors = 0; 4978 4978 mddev->external_size = 0; ··· 5015 5015 mddev->bitmap_info.max_write_behind = 0; 5016 5016 } 5017 5017 5018 - static void __md_stop_writes(mddev_t *mddev) 5018 + static void __md_stop_writes(struct mddev *mddev) 5019 5019 { 5020 5020 if (mddev->sync_thread) { 5021 5021 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ··· 5035 5035 } 5036 5036 } 5037 5037 5038 - void md_stop_writes(mddev_t *mddev) 5038 + void md_stop_writes(struct mddev *mddev) 5039 5039 { 5040 5040 mddev_lock(mddev); 5041 5041 __md_stop_writes(mddev); ··· 5043 5043 } 5044 5044 EXPORT_SYMBOL_GPL(md_stop_writes); 5045 5045 5046 - void md_stop(mddev_t *mddev) 5046 + void md_stop(struct mddev *mddev) 5047 5047 { 5048 5048 mddev->ready = 0; 5049 5049 mddev->pers->stop(mddev); ··· 5055 5055 } 5056 5056 EXPORT_SYMBOL_GPL(md_stop); 5057 5057 5058 - static int md_set_readonly(mddev_t *mddev, int is_open) 5058 + static int md_set_readonly(struct mddev *mddev, int is_open) 5059 5059 { 5060 5060 int err = 0; 5061 5061 mutex_lock(&mddev->open_mutex); ··· 5085 5085 * 0 - completely stop and dis-assemble array 5086 5086 * 2 - stop but do not disassemble array 5087 5087 */ 5088 - static int do_md_stop(mddev_t * mddev, int mode, int is_open) 5088 + static int do_md_stop(struct mddev * mddev, int mode, int is_open) 5089 5089 { 5090 5090 struct gendisk *disk = mddev->gendisk; 5091 5091 struct md_rdev *rdev; ··· 5151 5151 } 5152 5152 5153 5153 #ifndef MODULE 5154 - static void autorun_array(mddev_t *mddev) 5154 + static void autorun_array(struct mddev *mddev) 5155 5155 { 
5156 5156 struct md_rdev *rdev; 5157 5157 int err; ··· 5189 5189 static void autorun_devices(int part) 5190 5190 { 5191 5191 struct md_rdev *rdev0, *rdev, *tmp; 5192 - mddev_t *mddev; 5192 + struct mddev *mddev; 5193 5193 char b[BDEVNAME_SIZE]; 5194 5194 5195 5195 printk(KERN_INFO "md: autorun ...\n"); ··· 5284 5284 return 0; 5285 5285 } 5286 5286 5287 - static int get_array_info(mddev_t * mddev, void __user * arg) 5287 + static int get_array_info(struct mddev * mddev, void __user * arg) 5288 5288 { 5289 5289 mdu_array_info_t info; 5290 5290 int nr,working,insync,failed,spare; ··· 5337 5337 return 0; 5338 5338 } 5339 5339 5340 - static int get_bitmap_file(mddev_t * mddev, void __user * arg) 5340 + static int get_bitmap_file(struct mddev * mddev, void __user * arg) 5341 5341 { 5342 5342 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5343 5343 char *ptr, *buf = NULL; ··· 5377 5377 return err; 5378 5378 } 5379 5379 5380 - static int get_disk_info(mddev_t * mddev, void __user * arg) 5380 + static int get_disk_info(struct mddev * mddev, void __user * arg) 5381 5381 { 5382 5382 mdu_disk_info_t info; 5383 5383 struct md_rdev *rdev; ··· 5411 5411 return 0; 5412 5412 } 5413 5413 5414 - static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 5414 + static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) 5415 5415 { 5416 5416 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5417 5417 struct md_rdev *rdev; ··· 5583 5583 return 0; 5584 5584 } 5585 5585 5586 - static int hot_remove_disk(mddev_t * mddev, dev_t dev) 5586 + static int hot_remove_disk(struct mddev * mddev, dev_t dev) 5587 5587 { 5588 5588 char b[BDEVNAME_SIZE]; 5589 5589 struct md_rdev *rdev; ··· 5606 5606 return -EBUSY; 5607 5607 } 5608 5608 5609 - static int hot_add_disk(mddev_t * mddev, dev_t dev) 5609 + static int hot_add_disk(struct mddev * mddev, dev_t dev) 5610 5610 { 5611 5611 char b[BDEVNAME_SIZE]; 5612 5612 int err; ··· 5680 5680 return err; 5681 5681 } 5682 5682 5683 - static int set_bitmap_file(mddev_t *mddev, int fd) 5683 + static int set_bitmap_file(struct mddev *mddev, int fd) 5684 5684 { 5685 5685 int err; 5686 5686 ··· 5753 5753 * The minor and patch _version numbers are also kept incase the 5754 5754 * super_block handler wishes to interpret them. 5755 5755 */ 5756 - static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5756 + static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) 5757 5757 { 5758 5758 5759 5759 if (info->raid_disks == 0) { ··· 5823 5823 return 0; 5824 5824 } 5825 5825 5826 - void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5826 + void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 5827 5827 { 5828 5828 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5829 5829 ··· 5834 5834 } 5835 5835 EXPORT_SYMBOL(md_set_array_sectors); 5836 5836 5837 - static int update_size(mddev_t *mddev, sector_t num_sectors) 5837 + static int update_size(struct mddev *mddev, sector_t num_sectors) 5838 5838 { 5839 5839 struct md_rdev *rdev; 5840 5840 int rv; ··· 5872 5872 return rv; 5873 5873 } 5874 5874 5875 - static int update_raid_disks(mddev_t *mddev, int raid_disks) 5875 + static int update_raid_disks(struct mddev *mddev, int raid_disks) 5876 5876 { 5877 5877 int rv; 5878 5878 /* change the number of raid disks */ ··· 5900 5900 * Any differences that cannot be handled will cause an error. 5901 5901 * Normally, only one change can be managed at a time. 
5902 5902 */ 5903 - static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5903 + static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 5904 5904 { 5905 5905 int rv = 0; 5906 5906 int cnt = 0; ··· 5993 5993 return rv; 5994 5994 } 5995 5995 5996 - static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5996 + static int set_disk_faulty(struct mddev *mddev, dev_t dev) 5997 5997 { 5998 5998 struct md_rdev *rdev; 5999 5999 ··· 6018 6018 */ 6019 6019 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6020 6020 { 6021 - mddev_t *mddev = bdev->bd_disk->private_data; 6021 + struct mddev *mddev = bdev->bd_disk->private_data; 6022 6022 6023 6023 geo->heads = 2; 6024 6024 geo->sectors = 4; ··· 6031 6031 { 6032 6032 int err = 0; 6033 6033 void __user *argp = (void __user *)arg; 6034 - mddev_t *mddev = NULL; 6034 + struct mddev *mddev = NULL; 6035 6035 int ro; 6036 6036 6037 6037 if (!capable(CAP_SYS_ADMIN)) ··· 6294 6294 * Succeed if we can lock the mddev, which confirms that 6295 6295 * it isn't being stopped right now. 6296 6296 */ 6297 - mddev_t *mddev = mddev_find(bdev->bd_dev); 6297 + struct mddev *mddev = mddev_find(bdev->bd_dev); 6298 6298 int err; 6299 6299 6300 6300 if (mddev->gendisk != bdev->bd_disk) { ··· 6323 6323 6324 6324 static int md_release(struct gendisk *disk, fmode_t mode) 6325 6325 { 6326 - mddev_t *mddev = disk->private_data; 6326 + struct mddev *mddev = disk->private_data; 6327 6327 6328 6328 BUG_ON(!mddev); 6329 6329 atomic_dec(&mddev->openers); ··· 6334 6334 6335 6335 static int md_media_changed(struct gendisk *disk) 6336 6336 { 6337 - mddev_t *mddev = disk->private_data; 6337 + struct mddev *mddev = disk->private_data; 6338 6338 6339 6339 return mddev->changed; 6340 6340 } 6341 6341 6342 6342 static int md_revalidate(struct gendisk *disk) 6343 6343 { 6344 - mddev_t *mddev = disk->private_data; 6344 + struct mddev *mddev = disk->private_data; 6345 6345 6346 6346 mddev->changed = 0; 6347 6347 return 0; ··· 6410 6410 } 6411 6411 } 6412 6412 6413 - mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 6413 + mdk_thread_t *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, 6414 6414 const char *name) 6415 6415 { 6416 6416 mdk_thread_t *thread; ··· 6452 6452 kfree(thread); 6453 6453 } 6454 6454 6455 - void md_error(mddev_t *mddev, struct md_rdev *rdev) 6455 + void md_error(struct mddev *mddev, struct md_rdev *rdev) 6456 6456 { 6457 6457 if (!mddev) { 6458 6458 MD_BUG(); ··· 6498 6498 } 6499 6499 6500 6500 6501 - static void status_resync(struct seq_file *seq, mddev_t * mddev) 6501 + static void status_resync(struct seq_file *seq, struct mddev * mddev) 6502 6502 { 6503 6503 sector_t max_sectors, resync, res; 6504 6504 unsigned long dt, db; ··· 6589 6589 { 6590 6590 struct list_head *tmp; 6591 6591 loff_t l = *pos; 6592 - mddev_t *mddev; 6592 + struct mddev *mddev; 6593 6593 6594 6594 if (l >= 0x10000) 6595 6595 return NULL; ··· 6600 6600 spin_lock(&all_mddevs_lock); 6601 6601 list_for_each(tmp,&all_mddevs) 6602 6602 if (!l--) { 6603 - mddev = list_entry(tmp, mddev_t, all_mddevs); 6603 + mddev = list_entry(tmp, struct mddev, all_mddevs); 6604 6604 mddev_get(mddev); 6605 6605 spin_unlock(&all_mddevs_lock); 6606 6606 return mddev; ··· 6614 6614 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6615 6615 { 6616 6616 struct list_head *tmp; 6617 - mddev_t *next_mddev, *mddev = v; 6617 + struct mddev *next_mddev, *mddev = v; 6618 6618 6619 6619 ++*pos; 6620 6620 if (v == 
(void*)2) ··· 6626 6626 else 6627 6627 tmp = mddev->all_mddevs.next; 6628 6628 if (tmp != &all_mddevs) 6629 - next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 6629 + next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 6630 6630 else { 6631 6631 next_mddev = (void*)2; 6632 6632 *pos = 0x10000; ··· 6641 6641 6642 6642 static void md_seq_stop(struct seq_file *seq, void *v) 6643 6643 { 6644 - mddev_t *mddev = v; 6644 + struct mddev *mddev = v; 6645 6645 6646 6646 if (mddev && v != (void*)1 && v != (void*)2) 6647 6647 mddev_put(mddev); ··· 6649 6649 6650 6650 static int md_seq_show(struct seq_file *seq, void *v) 6651 6651 { 6652 - mddev_t *mddev = v; 6652 + struct mddev *mddev = v; 6653 6653 sector_t sectors; 6654 6654 struct md_rdev *rdev; 6655 6655 struct bitmap *bitmap; ··· 6829 6829 return 0; 6830 6830 } 6831 6831 6832 - static int is_mddev_idle(mddev_t *mddev, int init) 6832 + static int is_mddev_idle(struct mddev *mddev, int init) 6833 6833 { 6834 6834 struct md_rdev * rdev; 6835 6835 int idle; ··· 6873 6873 return idle; 6874 6874 } 6875 6875 6876 - void md_done_sync(mddev_t *mddev, int blocks, int ok) 6876 + void md_done_sync(struct mddev *mddev, int blocks, int ok) 6877 6877 { 6878 6878 /* another "blocks" (512byte) blocks have been synced */ 6879 6879 atomic_sub(blocks, &mddev->recovery_active); ··· 6891 6891 * in superblock) before writing, schedule a superblock update 6892 6892 * and wait for it to complete. 6893 6893 */ 6894 - void md_write_start(mddev_t *mddev, struct bio *bi) 6894 + void md_write_start(struct mddev *mddev, struct bio *bi) 6895 6895 { 6896 6896 int did_change = 0; 6897 6897 if (bio_data_dir(bi) != WRITE) ··· 6926 6926 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6927 6927 } 6928 6928 6929 - void md_write_end(mddev_t *mddev) 6929 + void md_write_end(struct mddev *mddev) 6930 6930 { 6931 6931 if (atomic_dec_and_test(&mddev->writes_pending)) { 6932 6932 if (mddev->safemode == 2) ··· 6945 6945 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 6946 6946 * is dropped, so return -EAGAIN after notifying userspace. 6947 6947 */ 6948 - int md_allow_write(mddev_t *mddev) 6948 + int md_allow_write(struct mddev *mddev) 6949 6949 { 6950 6950 if (!mddev->pers) 6951 6951 return 0; ··· 6977 6977 6978 6978 #define SYNC_MARKS 10 6979 6979 #define SYNC_MARK_STEP (3*HZ) 6980 - void md_do_sync(mddev_t *mddev) 6980 + void md_do_sync(struct mddev *mddev) 6981 6981 { 6982 - mddev_t *mddev2; 6982 + struct mddev *mddev2; 6983 6983 unsigned int currspeed = 0, 6984 6984 window; 6985 6985 sector_t max_sectors,j, io_sectors; ··· 7304 7304 } 7305 7305 EXPORT_SYMBOL_GPL(md_do_sync); 7306 7306 7307 - static int remove_and_add_spares(mddev_t *mddev) 7307 + static int remove_and_add_spares(struct mddev *mddev) 7308 7308 { 7309 7309 struct md_rdev *rdev; 7310 7310 int spares = 0; ··· 7348 7348 return spares; 7349 7349 } 7350 7350 7351 - static void reap_sync_thread(mddev_t *mddev) 7351 + static void reap_sync_thread(struct mddev *mddev) 7352 7352 { 7353 7353 struct md_rdev *rdev; 7354 7354 ··· 7409 7409 * 5/ If array is degraded, try to add spares devices 7410 7410 * 6/ If array has spares or is not in-sync, start a resync thread. 
7411 7411 */ 7412 - void md_check_recovery(mddev_t *mddev) 7412 + void md_check_recovery(struct mddev *mddev) 7413 7413 { 7414 7414 if (mddev->suspended) 7415 7415 return; ··· 7569 7569 } 7570 7570 } 7571 7571 7572 - void md_wait_for_blocked_rdev(struct md_rdev *rdev, mddev_t *mddev) 7572 + void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 7573 7573 { 7574 7574 sysfs_notify_dirent_safe(rdev->sysfs_state); 7575 7575 wait_event_timeout(rdev->blocked_wait, ··· 8070 8070 unsigned long code, void *x) 8071 8071 { 8072 8072 struct list_head *tmp; 8073 - mddev_t *mddev; 8073 + struct mddev *mddev; 8074 8074 int need_delay = 0; 8075 8075 8076 8076 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { ··· 8223 8223 8224 8224 static __exit void md_exit(void) 8225 8225 { 8226 - mddev_t *mddev; 8226 + struct mddev *mddev; 8227 8227 struct list_head *tmp; 8228 8228 8229 8229 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
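
The md_seq_start()/md_seq_next() hunks above show why a rename like this touches so many call sites: list_entry() is container_of() in disguise, so the containing type is spelled out at every traversal of all_mddevs, and each spelling changes from mddev_t to struct mddev. A minimal user-space sketch of the same embedded-list pattern follows; the list_head and list_entry() here are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel's list_head and list_entry(). */
struct list_head { struct list_head *next; };

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy analogue of struct mddev with an embedded list node. */
struct mddev {
    int unit;
    struct list_head all_mddevs;
};

int main(void)
{
    struct mddev a = { .unit = 0 }, b = { .unit = 1 };
    struct list_head all = { &a.all_mddevs };

    a.all_mddevs.next = &b.all_mddevs;
    b.all_mddevs.next = &all;          /* circular list, as in the kernel */

    /* Walk the list and recover each container, md_seq_next-style. */
    for (struct list_head *p = all.next; p != &all; p = p->next) {
        struct mddev *m = list_entry(p, struct mddev, all_mddevs);
        printf("md%d\n", m->unit);
    }
    return 0;
}
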
+55 -58
drivers/md/md.h
··· 26 26 27 27 #define MaxSector (~(sector_t)0) 28 28 29 - typedef struct mddev_s mddev_t; 30 - 31 29 /* Bad block numbers are stored sorted in a single page. 32 30 * 64bits is used for each block or extent. 33 31 * 54 bits are sector number, 9 bits are extent size, ··· 40 42 struct list_head same_set; /* RAID devices within the same set */ 41 43 42 44 sector_t sectors; /* Device size (in 512bytes sectors) */ 43 - mddev_t *mddev; /* RAID array if running */ 45 + struct mddev *mddev; /* RAID array if running */ 44 46 int last_events; /* IO event timestamp */ 45 47 46 48 /* ··· 182 184 extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors); 183 185 extern void md_ack_all_badblocks(struct badblocks *bb); 184 186 185 - struct mddev_s 186 - { 187 + struct mddev { 187 188 void *private; 188 189 struct mdk_personality *pers; 189 190 dev_t unit; ··· 397 400 atomic_t flush_pending; 398 401 struct work_struct flush_work; 399 402 struct work_struct event_work; /* used by dm to report failure event */ 400 - void (*sync_super)(mddev_t *mddev, struct md_rdev *rdev); 403 + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); 401 404 }; 402 405 403 406 404 - static inline void rdev_dec_pending(struct md_rdev *rdev, mddev_t *mddev) 407 + static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) 405 408 { 406 409 int faulty = test_bit(Faulty, &rdev->flags); 407 410 if (atomic_dec_and_test(&rdev->nr_pending) && faulty) ··· 419 422 int level; 420 423 struct list_head list; 421 424 struct module *owner; 422 - int (*make_request)(mddev_t *mddev, struct bio *bio); 423 - int (*run)(mddev_t *mddev); 424 - int (*stop)(mddev_t *mddev); 425 - void (*status)(struct seq_file *seq, mddev_t *mddev); 425 + int (*make_request)(struct mddev *mddev, struct bio *bio); 426 + int (*run)(struct mddev *mddev); 427 + int (*stop)(struct mddev *mddev); 428 + void (*status)(struct seq_file *seq, struct mddev *mddev); 426 429 /* error_handler must set ->faulty and clear ->in_sync 427 430 * if appropriate, and should abort recovery if needed 428 431 */ 429 - void (*error_handler)(mddev_t *mddev, struct md_rdev *rdev); 430 - int (*hot_add_disk) (mddev_t *mddev, struct md_rdev *rdev); 431 - int (*hot_remove_disk) (mddev_t *mddev, int number); 432 - int (*spare_active) (mddev_t *mddev); 433 - sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); 434 - int (*resize) (mddev_t *mddev, sector_t sectors); 435 - sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks); 436 - int (*check_reshape) (mddev_t *mddev); 437 - int (*start_reshape) (mddev_t *mddev); 438 - void (*finish_reshape) (mddev_t *mddev); 432 + void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev); 433 + int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev); 434 + int (*hot_remove_disk) (struct mddev *mddev, int number); 435 + int (*spare_active) (struct mddev *mddev); 436 + sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster); 437 + int (*resize) (struct mddev *mddev, sector_t sectors); 438 + sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); 439 + int (*check_reshape) (struct mddev *mddev); 440 + int (*start_reshape) (struct mddev *mddev); 441 + void (*finish_reshape) (struct mddev *mddev); 439 442 /* quiesce moves between quiescence states 440 443 * 0 - fully active 441 444 * 1 - no new requests allowed 442 445 * others - reserved 443 446 */ 444 - void (*quiesce) (mddev_t *mddev, int 
state); 447 + void (*quiesce) (struct mddev *mddev, int state); 445 448 /* takeover is used to transition an array from one 446 449 * personality to another. The new personality must be able 447 450 * to handle the data in the current layout. ··· 451 454 * This needs to be installed and then ->run used to activate the 452 455 * array. 453 456 */ 454 - void *(*takeover) (mddev_t *mddev); 457 + void *(*takeover) (struct mddev *mddev); 455 458 }; 456 459 457 460 458 461 struct md_sysfs_entry { 459 462 struct attribute attr; 460 - ssize_t (*show)(mddev_t *, char *); 461 - ssize_t (*store)(mddev_t *, const char *, size_t); 463 + ssize_t (*show)(struct mddev *, char *); 464 + ssize_t (*store)(struct mddev *, const char *, size_t); 462 465 }; 463 466 extern struct attribute_group md_bitmap_group; 464 467 ··· 474 477 sysfs_notify_dirent(sd); 475 478 } 476 479 477 - static inline char * mdname (mddev_t * mddev) 480 + static inline char * mdname (struct mddev * mddev) 478 481 { 479 482 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; 480 483 } 481 484 482 - static inline int sysfs_link_rdev(mddev_t *mddev, struct md_rdev *rdev) 485 + static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 483 486 { 484 487 char nm[20]; 485 488 sprintf(nm, "rd%d", rdev->raid_disk); 486 489 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 487 490 } 488 491 489 - static inline void sysfs_unlink_rdev(mddev_t *mddev, struct md_rdev *rdev) 492 + static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 490 493 { 491 494 char nm[20]; 492 495 sprintf(nm, "rd%d", rdev->raid_disk); ··· 510 513 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) 511 514 512 515 typedef struct mdk_thread_s { 513 - void (*run) (mddev_t *mddev); 514 - mddev_t *mddev; 516 + void (*run) (struct mddev *mddev); 517 + struct mddev *mddev; 515 518 wait_queue_head_t wqueue; 516 519 unsigned long flags; 517 520 struct task_struct *tsk; ··· 553 556 554 557 extern int register_md_personality(struct mdk_personality *p); 555 558 extern int unregister_md_personality(struct mdk_personality *p); 556 - extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), 557 - mddev_t *mddev, const char *name); 559 + extern mdk_thread_t * md_register_thread(void (*run) (struct mddev *mddev), 560 + struct mddev *mddev, const char *name); 558 561 extern void md_unregister_thread(mdk_thread_t **threadp); 559 562 extern void md_wakeup_thread(mdk_thread_t *thread); 560 - extern void md_check_recovery(mddev_t *mddev); 561 - extern void md_write_start(mddev_t *mddev, struct bio *bi); 562 - extern void md_write_end(mddev_t *mddev); 563 - extern void md_done_sync(mddev_t *mddev, int blocks, int ok); 564 - extern void md_error(mddev_t *mddev, struct md_rdev *rdev); 563 + extern void md_check_recovery(struct mddev *mddev); 564 + extern void md_write_start(struct mddev *mddev, struct bio *bi); 565 + extern void md_write_end(struct mddev *mddev); 566 + extern void md_done_sync(struct mddev *mddev, int blocks, int ok); 567 + extern void md_error(struct mddev *mddev, struct md_rdev *rdev); 565 568 566 - extern int mddev_congested(mddev_t *mddev, int bits); 567 - extern void md_flush_request(mddev_t *mddev, struct bio *bio); 568 - extern void md_super_write(mddev_t *mddev, struct md_rdev *rdev, 569 + extern int mddev_congested(struct mddev *mddev, int bits); 570 + extern void md_flush_request(struct mddev *mddev, struct bio *bio); 571 + extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 569 
572 sector_t sector, int size, struct page *page); 570 - extern void md_super_wait(mddev_t *mddev); 573 + extern void md_super_wait(struct mddev *mddev); 571 574 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 572 575 struct page *page, int rw, bool metadata_op); 573 - extern void md_do_sync(mddev_t *mddev); 574 - extern void md_new_event(mddev_t *mddev); 575 - extern int md_allow_write(mddev_t *mddev); 576 - extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, mddev_t *mddev); 577 - extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); 578 - extern int md_check_no_bitmap(mddev_t *mddev); 579 - extern int md_integrity_register(mddev_t *mddev); 580 - extern void md_integrity_add_rdev(struct md_rdev *rdev, mddev_t *mddev); 576 + extern void md_do_sync(struct mddev *mddev); 577 + extern void md_new_event(struct mddev *mddev); 578 + extern int md_allow_write(struct mddev *mddev); 579 + extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 580 + extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 581 + extern int md_check_no_bitmap(struct mddev *mddev); 582 + extern int md_integrity_register(struct mddev *mddev); 583 + extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); 581 584 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); 582 585 extern void restore_bitmap_write_access(struct file *file); 583 586 584 - extern void mddev_init(mddev_t *mddev); 585 - extern int md_run(mddev_t *mddev); 586 - extern void md_stop(mddev_t *mddev); 587 - extern void md_stop_writes(mddev_t *mddev); 587 + extern void mddev_init(struct mddev *mddev); 588 + extern int md_run(struct mddev *mddev); 589 + extern void md_stop(struct mddev *mddev); 590 + extern void md_stop_writes(struct mddev *mddev); 588 591 extern int md_rdev_init(struct md_rdev *rdev); 589 592 590 - extern void mddev_suspend(mddev_t *mddev); 591 - extern void mddev_resume(mddev_t *mddev); 593 + extern void mddev_suspend(struct mddev *mddev); 594 + extern void mddev_resume(struct mddev *mddev); 592 595 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 593 - mddev_t *mddev); 596 + struct mddev *mddev); 594 597 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 595 - mddev_t *mddev); 596 - extern int mddev_check_plugged(mddev_t *mddev); 598 + struct mddev *mddev); 599 + extern int mddev_check_plugged(struct mddev *mddev); 597 600 extern void md_trim_bio(struct bio *bio, int offset, int size); 598 601 #endif /* _MD_MD_H */
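
The header hunk above is the heart of the commit: the 'typedef struct mddev_s mddev_t;' line is deleted and the structure becomes plain struct mddev. Beyond taste, one concrete advantage of a bare struct tag is that any header which only passes pointers can forward-declare it, with no include-order dependency on md.h. A hedged sketch of that property (the two "files" are collapsed into one translation unit for brevity; names are illustrative):

/* consumer.h -- only passes pointers around, so a forward
 * declaration is enough; no include of the defining header needed. */
struct mddev;
void md_check_recovery(struct mddev *mddev);

/* md_impl.c -- the one place that needs the full layout. */
#include <stdio.h>

struct mddev {
    int ro;
    unsigned long flags;
};

void md_check_recovery(struct mddev *mddev)
{
    /* The real function inspects recovery state; we just touch fields. */
    printf("ro=%d flags=%lu\n", mddev->ro, mddev->flags);
}

int main(void)
{
    struct mddev m = { .ro = 0, .flags = 0 };
    md_check_recovery(&m);
    return 0;
}
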
+11 -11
drivers/md/multipath.c
··· 58 58 static void multipath_reschedule_retry (struct multipath_bh *mp_bh) 59 59 { 60 60 unsigned long flags; 61 - mddev_t *mddev = mp_bh->mddev; 61 + struct mddev *mddev = mp_bh->mddev; 62 62 multipath_conf_t *conf = mddev->private; 63 63 64 64 spin_lock_irqsave(&conf->device_lock, flags); ··· 106 106 rdev_dec_pending(rdev, conf->mddev); 107 107 } 108 108 109 - static int multipath_make_request(mddev_t *mddev, struct bio * bio) 109 + static int multipath_make_request(struct mddev *mddev, struct bio * bio) 110 110 { 111 111 multipath_conf_t *conf = mddev->private; 112 112 struct multipath_bh * mp_bh; ··· 140 140 return 0; 141 141 } 142 142 143 - static void multipath_status (struct seq_file *seq, mddev_t *mddev) 143 + static void multipath_status (struct seq_file *seq, struct mddev *mddev) 144 144 { 145 145 multipath_conf_t *conf = mddev->private; 146 146 int i; ··· 156 156 157 157 static int multipath_congested(void *data, int bits) 158 158 { 159 - mddev_t *mddev = data; 159 + struct mddev *mddev = data; 160 160 multipath_conf_t *conf = mddev->private; 161 161 int i, ret = 0; 162 162 ··· 183 183 /* 184 184 * Careful, this can execute in IRQ contexts as well! 185 185 */ 186 - static void multipath_error (mddev_t *mddev, struct md_rdev *rdev) 186 + static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) 187 187 { 188 188 multipath_conf_t *conf = mddev->private; 189 189 char b[BDEVNAME_SIZE]; ··· 242 242 } 243 243 244 244 245 - static int multipath_add_disk(mddev_t *mddev, struct md_rdev *rdev) 245 + static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev) 246 246 { 247 247 multipath_conf_t *conf = mddev->private; 248 248 struct request_queue *q; ··· 291 291 return err; 292 292 } 293 293 294 - static int multipath_remove_disk(mddev_t *mddev, int number) 294 + static int multipath_remove_disk(struct mddev *mddev, int number) 295 295 { 296 296 multipath_conf_t *conf = mddev->private; 297 297 int err = 0; ··· 335 335 * 3. Performs writes following reads for array synchronising. 336 336 */ 337 337 338 - static void multipathd (mddev_t *mddev) 338 + static void multipathd (struct mddev *mddev) 339 339 { 340 340 struct multipath_bh *mp_bh; 341 341 struct bio *bio; ··· 379 379 spin_unlock_irqrestore(&conf->device_lock, flags); 380 380 } 381 381 382 - static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks) 382 + static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) 383 383 { 384 384 WARN_ONCE(sectors || raid_disks, 385 385 "%s does not support generic reshape\n", __func__); ··· 387 387 return mddev->dev_sectors; 388 388 } 389 389 390 - static int multipath_run (mddev_t *mddev) 390 + static int multipath_run (struct mddev *mddev) 391 391 { 392 392 multipath_conf_t *conf; 393 393 int disk_idx; ··· 510 510 } 511 511 512 512 513 - static int multipath_stop (mddev_t *mddev) 513 + static int multipath_stop (struct mddev *mddev) 514 514 { 515 515 multipath_conf_t *conf = mddev->private; 516 516
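
multipath_reschedule_retry() and multipathd() above form a classic producer/daemon pair: failed requests are queued on conf->retry_list under conf->device_lock, and the daemon thread pops one entry at a time, dropping the lock while it does the actual retry work. A rough user-space sketch of that shape, assuming a pthread mutex in place of the spinlock and a bare singly-linked list in place of the kernel's list_head:

#include <stdio.h>
#include <pthread.h>

/* Toy retry queue mirroring the shape of conf->retry_list:
 * producers queue failed requests under a lock, a daemon drains them. */
struct retry_item {
    int id;
    struct retry_item *next;
};

static struct retry_item *retry_list;
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

static void reschedule_retry(struct retry_item *it)
{
    pthread_mutex_lock(&device_lock);
    it->next = retry_list;          /* push under the lock */
    retry_list = it;
    pthread_mutex_unlock(&device_lock);
}

/* Drain loop in the style of multipathd(): pop one item under the
 * lock, drop the lock to do the (possibly slow) work, repeat. */
static void daemon_drain(void)
{
    for (;;) {
        pthread_mutex_lock(&device_lock);
        struct retry_item *it = retry_list;
        if (!it) {
            pthread_mutex_unlock(&device_lock);
            return;
        }
        retry_list = it->next;
        pthread_mutex_unlock(&device_lock);

        printf("retrying request %d\n", it->id);   /* the "work" */
    }
}

int main(void)
{
    struct retry_item a = { .id = 1 }, b = { .id = 2 };
    reschedule_retry(&a);
    reschedule_retry(&b);
    daemon_drain();
    return 0;
}
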
+2 -2
drivers/md/multipath.h
··· 6 6 }; 7 7 8 8 struct multipath_private_data { 9 - mddev_t *mddev; 9 + struct mddev *mddev; 10 10 struct multipath_info *multipaths; 11 11 int raid_disks; 12 12 spinlock_t device_lock; ··· 24 24 */ 25 25 26 26 struct multipath_bh { 27 - mddev_t *mddev; 27 + struct mddev *mddev; 28 28 struct bio *master_bio; 29 29 struct bio bio; 30 30 int path;
+16 -16
drivers/md/raid0.c
··· 27 27 28 28 static int raid0_congested(void *data, int bits) 29 29 { 30 - mddev_t *mddev = data; 30 + struct mddev *mddev = data; 31 31 raid0_conf_t *conf = mddev->private; 32 32 struct md_rdev **devlist = conf->devlist; 33 33 int raid_disks = conf->strip_zone[0].nb_dev; ··· 47 47 /* 48 48 * inform the user of the raid configuration 49 49 */ 50 - static void dump_zones(mddev_t *mddev) 50 + static void dump_zones(struct mddev *mddev) 51 51 { 52 52 int j, k; 53 53 sector_t zone_size = 0; ··· 77 77 printk(KERN_INFO "\n"); 78 78 } 79 79 80 - static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) 80 + static int create_strip_zones(struct mddev *mddev, raid0_conf_t **private_conf) 81 81 { 82 82 int i, c, err; 83 83 sector_t curr_zone_end, sectors; ··· 301 301 struct bvec_merge_data *bvm, 302 302 struct bio_vec *biovec) 303 303 { 304 - mddev_t *mddev = q->queuedata; 304 + struct mddev *mddev = q->queuedata; 305 305 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 306 306 int max; 307 307 unsigned int chunk_sectors = mddev->chunk_sectors; ··· 320 320 return max; 321 321 } 322 322 323 - static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) 323 + static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) 324 324 { 325 325 sector_t array_sectors = 0; 326 326 struct md_rdev *rdev; ··· 334 334 return array_sectors; 335 335 } 336 336 337 - static int raid0_run(mddev_t *mddev) 337 + static int raid0_run(struct mddev *mddev) 338 338 { 339 339 raid0_conf_t *conf; 340 340 int ret; ··· 384 384 return md_integrity_register(mddev); 385 385 } 386 386 387 - static int raid0_stop(mddev_t *mddev) 387 + static int raid0_stop(struct mddev *mddev) 388 388 { 389 389 raid0_conf_t *conf = mddev->private; 390 390 ··· 419 419 * remaps the bio to the target device. We separate two flows: 420 420 * a power-of-2 flow and a general flow, for the sake of performance. 421 421 */ 422 - static struct md_rdev *map_sector(mddev_t *mddev, struct strip_zone *zone, 422 + static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, 423 423 sector_t sector, sector_t *sector_offset) 424 424 { 425 425 unsigned int sect_in_chunk; ··· 455 455 /* 456 456 * Is the IO distributed over 1 or more chunks?
457 457 */ 458 - static inline int is_io_in_chunk_boundary(mddev_t *mddev, 458 + static inline int is_io_in_chunk_boundary(struct mddev *mddev, 459 459 unsigned int chunk_sects, struct bio *bio) 460 460 { 461 461 if (likely(is_power_of_2(chunk_sects))) { ··· 468 468 } 469 469 } 470 470 471 - static int raid0_make_request(mddev_t *mddev, struct bio *bio) 471 + static int raid0_make_request(struct mddev *mddev, struct bio *bio) 472 472 { 473 473 unsigned int chunk_sects; 474 474 sector_t sector_offset; ··· 528 528 return 0; 529 529 } 530 530 531 - static void raid0_status(struct seq_file *seq, mddev_t *mddev) 531 + static void raid0_status(struct seq_file *seq, struct mddev *mddev) 532 532 { 533 533 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); 534 534 return; 535 535 } 536 536 537 - static void *raid0_takeover_raid45(mddev_t *mddev) 537 + static void *raid0_takeover_raid45(struct mddev *mddev) 538 538 { 539 539 struct md_rdev *rdev; 540 540 raid0_conf_t *priv_conf; ··· 568 568 return priv_conf; 569 569 } 570 570 571 - static void *raid0_takeover_raid10(mddev_t *mddev) 571 + static void *raid0_takeover_raid10(struct mddev *mddev) 572 572 { 573 573 raid0_conf_t *priv_conf; 574 574 ··· 609 609 return priv_conf; 610 610 } 611 611 612 - static void *raid0_takeover_raid1(mddev_t *mddev) 612 + static void *raid0_takeover_raid1(struct mddev *mddev) 613 613 { 614 614 raid0_conf_t *priv_conf; 615 615 ··· 635 635 return priv_conf; 636 636 } 637 637 638 - static void *raid0_takeover(mddev_t *mddev) 638 + static void *raid0_takeover(struct mddev *mddev) 639 639 { 640 640 /* raid0 can take over: 641 641 * raid4 - if all data disks are active. ··· 666 666 return ERR_PTR(-EINVAL); 667 667 } 668 668 669 - static void raid0_quiesce(mddev_t *mddev, int state) 669 + static void raid0_quiesce(struct mddev *mddev, int state) 670 670 { 671 671 } 672 672
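
is_io_in_chunk_boundary() above is the "power-of-2 flow and a general flow" split in miniature: when chunk_sects is a power of two, the offset within the chunk is a cheap mask operation; otherwise it falls back to a modulo. A small self-contained version of that check (types and names here are local stand-ins):

#include <stdio.h>

typedef unsigned long long sector_t;

static int is_power_of_2(unsigned int n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/* Does [sector, sector + nr_sects) stay inside one chunk?  Power-of-2
 * chunk sizes get the mask path, as in is_io_in_chunk_boundary(). */
static int io_in_chunk_boundary(unsigned int chunk_sects,
                                sector_t sector, unsigned int nr_sects)
{
    if (is_power_of_2(chunk_sects))
        return chunk_sects >= ((sector & (chunk_sects - 1)) + nr_sects);
    return chunk_sects >= (sector % chunk_sects) + nr_sects;
}

int main(void)
{
    /* 128-sector (64k) chunks: an 8-sector IO at sector 120 fits... */
    printf("%d\n", io_in_chunk_boundary(128, 120, 8));   /* prints 1 */
    /* ...but the same IO at sector 124 would straddle two chunks. */
    printf("%d\n", io_in_chunk_boundary(128, 124, 8));   /* prints 0 */
    return 0;
}
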
+26 -26
drivers/md/raid1.c
··· 193 193 static void reschedule_retry(r1bio_t *r1_bio) 194 194 { 195 195 unsigned long flags; 196 - mddev_t *mddev = r1_bio->mddev; 196 + struct mddev *mddev = r1_bio->mddev; 197 197 conf_t *conf = mddev->private; 198 198 199 199 spin_lock_irqsave(&conf->device_lock, flags); ··· 593 593 return best_disk; 594 594 } 595 595 596 - int md_raid1_congested(mddev_t *mddev, int bits) 596 + int md_raid1_congested(struct mddev *mddev, int bits) 597 597 { 598 598 conf_t *conf = mddev->private; 599 599 int i, ret = 0; ··· 622 622 623 623 static int raid1_congested(void *data, int bits) 624 624 { 625 - mddev_t *mddev = data; 625 + struct mddev *mddev = data; 626 626 627 627 return mddev_congested(mddev, bits) || 628 628 md_raid1_congested(mddev, bits); ··· 796 796 pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 797 797 } 798 798 799 - static int make_request(mddev_t *mddev, struct bio * bio) 799 + static int make_request(struct mddev *mddev, struct bio * bio) 800 800 { 801 801 conf_t *conf = mddev->private; 802 802 mirror_info_t *mirror; ··· 1138 1138 return 0; 1139 1139 } 1140 1140 1141 - static void status(struct seq_file *seq, mddev_t *mddev) 1141 + static void status(struct seq_file *seq, struct mddev *mddev) 1142 1142 { 1143 1143 conf_t *conf = mddev->private; 1144 1144 int i; ··· 1156 1156 } 1157 1157 1158 1158 1159 - static void error(mddev_t *mddev, struct md_rdev *rdev) 1159 + static void error(struct mddev *mddev, struct md_rdev *rdev) 1160 1160 { 1161 1161 char b[BDEVNAME_SIZE]; 1162 1162 conf_t *conf = mddev->private; ··· 1233 1233 conf->r1buf_pool = NULL; 1234 1234 } 1235 1235 1236 - static int raid1_spare_active(mddev_t *mddev) 1236 + static int raid1_spare_active(struct mddev *mddev) 1237 1237 { 1238 1238 int i; 1239 1239 conf_t *conf = mddev->private; ··· 1263 1263 } 1264 1264 1265 1265 1266 - static int raid1_add_disk(mddev_t *mddev, struct md_rdev *rdev) 1266 + static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1267 1267 { 1268 1268 conf_t *conf = mddev->private; 1269 1269 int err = -EEXIST; ··· 1311 1311 return err; 1312 1312 } 1313 1313 1314 - static int raid1_remove_disk(mddev_t *mddev, int number) 1314 + static int raid1_remove_disk(struct mddev *mddev, int number) 1315 1315 { 1316 1316 conf_t *conf = mddev->private; 1317 1317 int err = 0; ··· 1374 1374 { 1375 1375 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1376 1376 r1bio_t *r1_bio = bio->bi_private; 1377 - mddev_t *mddev = r1_bio->mddev; 1377 + struct mddev *mddev = r1_bio->mddev; 1378 1378 conf_t *conf = mddev->private; 1379 1379 int mirror=0; 1380 1380 sector_t first_bad; ··· 1446 1446 * made sure that anything with a bad block in range 1447 1447 * will have bi_end_io clear. 
1448 1448 */ 1449 - mddev_t *mddev = r1_bio->mddev; 1449 + struct mddev *mddev = r1_bio->mddev; 1450 1450 conf_t *conf = mddev->private; 1451 1451 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 1452 1452 sector_t sect = r1_bio->sector; ··· 1562 1562 * If any blocks failed to read, then we need to 1563 1563 * attempt an over-write 1564 1564 */ 1565 - mddev_t *mddev = r1_bio->mddev; 1565 + struct mddev *mddev = r1_bio->mddev; 1566 1566 conf_t *conf = mddev->private; 1567 1567 int primary; 1568 1568 int i; ··· 1635 1635 return 0; 1636 1636 } 1637 1637 1638 - static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1638 + static void sync_request_write(struct mddev *mddev, r1bio_t *r1_bio) 1639 1639 { 1640 1640 conf_t *conf = mddev->private; 1641 1641 int i; ··· 1690 1690 static void fix_read_error(conf_t *conf, int read_disk, 1691 1691 sector_t sect, int sectors) 1692 1692 { 1693 - mddev_t *mddev = conf->mddev; 1693 + struct mddev *mddev = conf->mddev; 1694 1694 while(sectors) { 1695 1695 int s = sectors; 1696 1696 int d = read_disk; ··· 1792 1792 1793 1793 static int narrow_write_error(r1bio_t *r1_bio, int i) 1794 1794 { 1795 - mddev_t *mddev = r1_bio->mddev; 1795 + struct mddev *mddev = r1_bio->mddev; 1796 1796 conf_t *conf = mddev->private; 1797 1797 struct md_rdev *rdev = conf->mirrors[i].rdev; 1798 1798 int vcnt, idx; ··· 1922 1922 { 1923 1923 int disk; 1924 1924 int max_sectors; 1925 - mddev_t *mddev = conf->mddev; 1925 + struct mddev *mddev = conf->mddev; 1926 1926 struct bio *bio; 1927 1927 char b[BDEVNAME_SIZE]; 1928 1928 struct md_rdev *rdev; ··· 2008 2008 } 2009 2009 } 2010 2010 2011 - static void raid1d(mddev_t *mddev) 2011 + static void raid1d(struct mddev *mddev) 2012 2012 { 2013 2013 r1bio_t *r1_bio; 2014 2014 unsigned long flags; ··· 2085 2085 * that can be installed to exclude normal IO requests. 2086 2086 */ 2087 2087 2088 - static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 2088 + static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 2089 2089 { 2090 2090 conf_t *conf = mddev->private; 2091 2091 r1bio_t *r1_bio; ··· 2357 2357 return nr_sectors; 2358 2358 } 2359 2359 2360 - static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks) 2360 + static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) 2361 2361 { 2362 2362 if (sectors) 2363 2363 return sectors; ··· 2365 2365 return mddev->dev_sectors; 2366 2366 } 2367 2367 2368 - static conf_t *setup_conf(mddev_t *mddev) 2368 + static conf_t *setup_conf(struct mddev *mddev) 2369 2369 { 2370 2370 conf_t *conf; 2371 2371 int i; ··· 2466 2466 return ERR_PTR(err); 2467 2467 } 2468 2468 2469 - static int run(mddev_t *mddev) 2469 + static int run(struct mddev *mddev) 2470 2470 { 2471 2471 conf_t *conf; 2472 2472 int i; ··· 2546 2546 return md_integrity_register(mddev); 2547 2547 } 2548 2548 2549 - static int stop(mddev_t *mddev) 2549 + static int stop(struct mddev *mddev) 2550 2550 { 2551 2551 conf_t *conf = mddev->private; 2552 2552 struct bitmap *bitmap = mddev->bitmap; ··· 2573 2573 return 0; 2574 2574 } 2575 2575 2576 - static int raid1_resize(mddev_t *mddev, sector_t sectors) 2576 + static int raid1_resize(struct mddev *mddev, sector_t sectors) 2577 2577 { 2578 2578 /* no resync is happening, and there is enough space 2579 2579 * on all devices, so we can resize. 
··· 2597 2597 return 0; 2598 2598 } 2599 2599 2600 - static int raid1_reshape(mddev_t *mddev) 2600 + static int raid1_reshape(struct mddev *mddev) 2601 2601 { 2602 2602 /* We need to: 2603 2603 * 1/ resize the r1bio_pool ··· 2703 2703 return 0; 2704 2704 } 2705 2705 2706 - static void raid1_quiesce(mddev_t *mddev, int state) 2706 + static void raid1_quiesce(struct mddev *mddev, int state) 2707 2707 { 2708 2708 conf_t *conf = mddev->private; 2709 2709 ··· 2720 2720 } 2721 2721 } 2722 2722 2723 - static void *raid1_takeover(mddev_t *mddev) 2723 + static void *raid1_takeover(struct mddev *mddev) 2724 2724 { 2725 2725 /* raid1 can take over: 2726 2726 * raid5 with 2 devices, any layout or chunk size
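
A pattern worth noting in every raid1 method above: the personality recovers its own state with conf_t *conf = mddev->private;, because the md core only stores an opaque pointer plus the mdk_personality ops table declared in md.h. A compact sketch of that arrangement, using hypothetical trimmed-down types:

#include <stdio.h>
#include <stdlib.h>

/* Trimmed-down analogues of struct mddev and struct mdk_personality:
 * the core keeps an opaque private pointer, each personality casts
 * it back to its own conf type, exactly as raid1's conf_t is used. */
struct mddev {
    void *private;
    const struct personality *pers;
};

struct personality {
    const char *name;
    int  (*run)(struct mddev *mddev);
    void (*status)(struct mddev *mddev);
};

struct r1conf {                        /* stand-in for raid1's conf_t */
    int raid_disks;
};

static int raid1_run(struct mddev *mddev)
{
    struct r1conf *conf = malloc(sizeof(*conf));
    if (!conf)
        return -1;
    conf->raid_disks = 2;
    mddev->private = conf;             /* stash the personality state */
    return 0;
}

static void raid1_status(struct mddev *mddev)
{
    struct r1conf *conf = mddev->private;  /* recover it in every method */
    printf("%s: %d disks\n", mddev->pers->name, conf->raid_disks);
}

static const struct personality raid1_personality = {
    .name = "raid1", .run = raid1_run, .status = raid1_status,
};

int main(void)
{
    struct mddev m = { .pers = &raid1_personality };
    if (m.pers->run(&m) == 0)
        m.pers->status(&m);
    free(m.private);
    return 0;
}
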
+4 -4
drivers/md/raid1.h
··· 17 17 */ 18 18 19 19 struct pool_info { 20 - mddev_t *mddev; 20 + struct mddev *mddev; 21 21 int raid_disks; 22 22 }; 23 23 ··· 25 25 typedef struct r1bio_s r1bio_t; 26 26 27 27 struct r1_private_data_s { 28 - mddev_t *mddev; 28 + struct mddev *mddev; 29 29 mirror_info_t *mirrors; 30 30 int raid_disks; 31 31 ··· 114 114 sector_t sector; 115 115 int sectors; 116 116 unsigned long state; 117 - mddev_t *mddev; 117 + struct mddev *mddev; 118 118 /* 119 119 * original bio going to /dev/mdx 120 120 */ ··· 173 173 #define R1BIO_MadeGood 7 174 174 #define R1BIO_WriteError 8 175 175 176 - extern int md_raid1_congested(mddev_t *mddev, int bits); 176 + extern int md_raid1_congested(struct mddev *mddev, int bits); 177 177 178 178 #endif
+27 -27
drivers/md/raid10.c
··· 207 207 static void reschedule_retry(r10bio_t *r10_bio) 208 208 { 209 209 unsigned long flags; 210 - mddev_t *mddev = r10_bio->mddev; 210 + struct mddev *mddev = r10_bio->mddev; 211 211 conf_t *conf = mddev->private; 212 212 213 213 spin_lock_irqsave(&conf->device_lock, flags); ··· 522 522 struct bvec_merge_data *bvm, 523 523 struct bio_vec *biovec) 524 524 { 525 - mddev_t *mddev = q->queuedata; 525 + struct mddev *mddev = q->queuedata; 526 526 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 527 527 int max; 528 528 unsigned int chunk_sectors = mddev->chunk_sectors; ··· 677 677 678 678 static int raid10_congested(void *data, int bits) 679 679 { 680 - mddev_t *mddev = data; 680 + struct mddev *mddev = data; 681 681 conf_t *conf = mddev->private; 682 682 int i, ret = 0; 683 683 ··· 830 830 spin_unlock_irq(&conf->resync_lock); 831 831 } 832 832 833 - static int make_request(mddev_t *mddev, struct bio * bio) 833 + static int make_request(struct mddev *mddev, struct bio * bio) 834 834 { 835 835 conf_t *conf = mddev->private; 836 836 mirror_info_t *mirror; ··· 1161 1161 return 0; 1162 1162 } 1163 1163 1164 - static void status(struct seq_file *seq, mddev_t *mddev) 1164 + static void status(struct seq_file *seq, struct mddev *mddev) 1165 1165 { 1166 1166 conf_t *conf = mddev->private; 1167 1167 int i; ··· 1209 1209 return 1; 1210 1210 } 1211 1211 1212 - static void error(mddev_t *mddev, struct md_rdev *rdev) 1212 + static void error(struct mddev *mddev, struct md_rdev *rdev) 1213 1213 { 1214 1214 char b[BDEVNAME_SIZE]; 1215 1215 conf_t *conf = mddev->private; ··· 1279 1279 conf->r10buf_pool = NULL; 1280 1280 } 1281 1281 1282 - static int raid10_spare_active(mddev_t *mddev) 1282 + static int raid10_spare_active(struct mddev *mddev) 1283 1283 { 1284 1284 int i; 1285 1285 conf_t *conf = mddev->private; ··· 1309 1309 } 1310 1310 1311 1311 1312 - static int raid10_add_disk(mddev_t *mddev, struct md_rdev *rdev) 1312 + static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1313 1313 { 1314 1314 conf_t *conf = mddev->private; 1315 1315 int err = -EEXIST; ··· 1368 1368 return err; 1369 1369 } 1370 1370 1371 - static int raid10_remove_disk(mddev_t *mddev, int number) 1371 + static int raid10_remove_disk(struct mddev *mddev, int number) 1372 1372 { 1373 1373 conf_t *conf = mddev->private; 1374 1374 int err = 0; ··· 1441 1441 1442 1442 static void end_sync_request(r10bio_t *r10_bio) 1443 1443 { 1444 - mddev_t *mddev = r10_bio->mddev; 1444 + struct mddev *mddev = r10_bio->mddev; 1445 1445 1446 1446 while (atomic_dec_and_test(&r10_bio->remaining)) { 1447 1447 if (r10_bio->master_bio == NULL) { ··· 1470 1470 { 1471 1471 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1472 1472 r10bio_t *r10_bio = bio->bi_private; 1473 - mddev_t *mddev = r10_bio->mddev; 1473 + struct mddev *mddev = r10_bio->mddev; 1474 1474 conf_t *conf = mddev->private; 1475 1475 int d; 1476 1476 sector_t first_bad; ··· 1509 1509 * We check if all blocks are in-sync and only write to blocks that 1510 1510 * aren't in sync 1511 1511 */ 1512 - static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) 1512 + static void sync_request_write(struct mddev *mddev, r10bio_t *r10_bio) 1513 1513 { 1514 1514 conf_t *conf = mddev->private; 1515 1515 int i, first; ··· 1618 1618 * If a read fails, record a bad block on both old and 1619 1619 * new devices. 
1620 1620 */ 1621 - mddev_t *mddev = r10_bio->mddev; 1621 + struct mddev *mddev = r10_bio->mddev; 1622 1622 conf_t *conf = mddev->private; 1623 1623 struct bio *bio = r10_bio->devs[0].bio; 1624 1624 sector_t sect = 0; ··· 1688 1688 } 1689 1689 } 1690 1690 1691 - static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) 1691 + static void recovery_request_write(struct mddev *mddev, r10bio_t *r10_bio) 1692 1692 { 1693 1693 conf_t *conf = mddev->private; 1694 1694 int d; ··· 1719 1719 * since the last recorded read error. 1720 1720 * 1721 1721 */ 1722 - static void check_decay_read_errors(mddev_t *mddev, struct md_rdev *rdev) 1722 + static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) 1723 1723 { 1724 1724 struct timespec cur_time_mon; 1725 1725 unsigned long hours_since_last; ··· 1778 1778 * 3. Performs writes following reads for array synchronising. 1779 1779 */ 1780 1780 1781 - static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio) 1781 + static void fix_read_error(conf_t *conf, struct mddev *mddev, r10bio_t *r10_bio) 1782 1782 { 1783 1783 int sect = 0; /* Offset from r10_bio->sector */ 1784 1784 int sectors = r10_bio->sectors; ··· 1986 1986 static int narrow_write_error(r10bio_t *r10_bio, int i) 1987 1987 { 1988 1988 struct bio *bio = r10_bio->master_bio; 1989 - mddev_t *mddev = r10_bio->mddev; 1989 + struct mddev *mddev = r10_bio->mddev; 1990 1990 conf_t *conf = mddev->private; 1991 1991 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 1992 1992 /* bio has the data to be written to slot 'i' where ··· 2040 2040 return ok; 2041 2041 } 2042 2042 2043 - static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio) 2043 + static void handle_read_error(struct mddev *mddev, r10bio_t *r10_bio) 2044 2044 { 2045 2045 int slot = r10_bio->read_slot; 2046 2046 int mirror = r10_bio->devs[slot].devnum; ··· 2200 2200 } 2201 2201 } 2202 2202 2203 - static void raid10d(mddev_t *mddev) 2203 + static void raid10d(struct mddev *mddev) 2204 2204 { 2205 2205 r10bio_t *r10_bio; 2206 2206 unsigned long flags; ··· 2297 2297 * 2298 2298 */ 2299 2299 2300 - static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, 2300 + static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 2301 2301 int *skipped, int go_faster) 2302 2302 { 2303 2303 conf_t *conf = mddev->private; ··· 2714 2714 } 2715 2715 2716 2716 static sector_t 2717 - raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) 2717 + raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 2718 2718 { 2719 2719 sector_t size; 2720 2720 conf_t *conf = mddev->private; ··· 2733 2733 } 2734 2734 2735 2735 2736 - static conf_t *setup_conf(mddev_t *mddev) 2736 + static conf_t *setup_conf(struct mddev *mddev) 2737 2737 { 2738 2738 conf_t *conf = NULL; 2739 2739 int nc, fc, fo; ··· 2836 2836 return ERR_PTR(err); 2837 2837 } 2838 2838 2839 - static int run(mddev_t *mddev) 2839 + static int run(struct mddev *mddev) 2840 2840 { 2841 2841 conf_t *conf; 2842 2842 int i, disk_idx, chunk_size; ··· 2966 2966 return -EIO; 2967 2967 } 2968 2968 2969 - static int stop(mddev_t *mddev) 2969 + static int stop(struct mddev *mddev) 2970 2970 { 2971 2971 conf_t *conf = mddev->private; 2972 2972 ··· 2983 2983 return 0; 2984 2984 } 2985 2985 2986 - static void raid10_quiesce(mddev_t *mddev, int state) 2986 + static void raid10_quiesce(struct mddev *mddev, int state) 2987 2987 { 2988 2988 conf_t *conf = mddev->private; 2989 2989 ··· 2997 2997 } 2998 2998 } 2999 2999 3000 - 
static void *raid10_takeover_raid0(mddev_t *mddev) 3000 + static void *raid10_takeover_raid0(struct mddev *mddev) 3001 3001 { 3002 3002 struct md_rdev *rdev; 3003 3003 conf_t *conf; ··· 3029 3029 return conf; 3030 3030 } 3031 3031 3032 - static void *raid10_takeover(mddev_t *mddev) 3032 + static void *raid10_takeover(struct mddev *mddev) 3033 3033 { 3034 3034 struct raid0_private_data *raid0_priv; 3035 3035
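
check_decay_read_errors() above ages rdev->read_errors based on the time "since the last recorded read error", so occasional media errors on a long-lived raid10 device do not slowly accumulate into a kick-out. The exact policy is not visible in the hunks shown; the halving-per-quiet-hour rule in this sketch is an assumption for illustration only:

#include <stdio.h>

/* Decay a persistent read-error count before charging a new error,
 * in the style of check_decay_read_errors(): the count is halved
 * once per hour that has passed since the last recorded error.
 * (The halving policy here is an assumption, not taken from the diff.) */
static unsigned int decay_read_errors(unsigned int read_errors,
                                      unsigned int hours_since_last)
{
    if (hours_since_last >= 32)    /* shifting by >= width is undefined */
        return 0;
    return read_errors >> hours_since_last;
}

int main(void)
{
    /* 40 accumulated errors, 3 quiet hours -> 5 remain before the
     * new error is added and compared against the tolerated maximum. */
    printf("%u\n", decay_read_errors(40, 3) + 1);   /* prints 6 */
    return 0;
}
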
+2 -2
drivers/md/raid10.h
··· 16 16 typedef struct r10bio_s r10bio_t; 17 17 18 18 struct r10_private_data_s { 19 - mddev_t *mddev; 19 + struct mddev *mddev; 20 20 mirror_info_t *mirrors; 21 21 int raid_disks; 22 22 spinlock_t device_lock; ··· 87 87 sector_t sector; /* virtual sector number */ 88 88 int sectors; 89 89 unsigned long state; 90 - mddev_t *mddev; 90 + struct mddev *mddev; 91 91 /* 92 92 * original bio going to /dev/mdx 93 93 */
+40 -40
drivers/md/raid5.c
··· 1719 1719 dev->sector = compute_blocknr(sh, i, previous); 1720 1720 } 1721 1721 1722 - static void error(mddev_t *mddev, struct md_rdev *rdev) 1722 + static void error(struct mddev *mddev, struct md_rdev *rdev) 1723 1723 { 1724 1724 char b[BDEVNAME_SIZE]; 1725 1725 raid5_conf_t *conf = mddev->private; ··· 3403 3403 } 3404 3404 } 3405 3405 3406 - int md_raid5_congested(mddev_t *mddev, int bits) 3406 + int md_raid5_congested(struct mddev *mddev, int bits) 3407 3407 { 3408 3408 raid5_conf_t *conf = mddev->private; 3409 3409 ··· 3424 3424 3425 3425 static int raid5_congested(void *data, int bits) 3426 3426 { 3427 - mddev_t *mddev = data; 3427 + struct mddev *mddev = data; 3428 3428 3429 3429 return mddev_congested(mddev, bits) || 3430 3430 md_raid5_congested(mddev, bits); ··· 3437 3437 struct bvec_merge_data *bvm, 3438 3438 struct bio_vec *biovec) 3439 3439 { 3440 - mddev_t *mddev = q->queuedata; 3440 + struct mddev *mddev = q->queuedata; 3441 3441 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3442 3442 int max; 3443 3443 unsigned int chunk_sectors = mddev->chunk_sectors; ··· 3457 3457 } 3458 3458 3459 3459 3460 - static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3460 + static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 3461 3461 { 3462 3462 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3463 3463 unsigned int chunk_sectors = mddev->chunk_sectors; ··· 3520 3520 static void raid5_align_endio(struct bio *bi, int error) 3521 3521 { 3522 3522 struct bio* raid_bi = bi->bi_private; 3523 - mddev_t *mddev; 3523 + struct mddev *mddev; 3524 3524 raid5_conf_t *conf; 3525 3525 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3526 3526 struct md_rdev *rdev; ··· 3567 3567 } 3568 3568 3569 3569 3570 - static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) 3570 + static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) 3571 3571 { 3572 3572 raid5_conf_t *conf = mddev->private; 3573 3573 int dd_idx; ··· 3688 3688 return sh; 3689 3689 } 3690 3690 3691 - static int make_request(mddev_t *mddev, struct bio * bi) 3691 + static int make_request(struct mddev *mddev, struct bio * bi) 3692 3692 { 3693 3693 raid5_conf_t *conf = mddev->private; 3694 3694 int dd_idx; ··· 3848 3848 return 0; 3849 3849 } 3850 3850 3851 - static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); 3851 + static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 3852 3852 3853 - static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3853 + static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 3854 3854 { 3855 3855 /* reshaping is quite different to recovery/resync so it is 3856 3856 * handled quite separately ... here. ··· 4068 4068 } 4069 4069 4070 4070 /* FIXME go_faster isn't used */ 4071 - static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 4071 + static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 4072 4072 { 4073 4073 raid5_conf_t *conf = mddev->private; 4074 4074 struct stripe_head *sh; ··· 4227 4227 * During the scan, completed stripes are saved for us by the interrupt 4228 4228 * handler, so that they will not have to wait for our next wakeup. 
4229 4229 */ 4230 - static void raid5d(mddev_t *mddev) 4230 + static void raid5d(struct mddev *mddev) 4231 4231 { 4232 4232 struct stripe_head *sh; 4233 4233 raid5_conf_t *conf = mddev->private; ··· 4294 4294 } 4295 4295 4296 4296 static ssize_t 4297 - raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4297 + raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 4298 4298 { 4299 4299 raid5_conf_t *conf = mddev->private; 4300 4300 if (conf) ··· 4304 4304 } 4305 4305 4306 4306 int 4307 - raid5_set_cache_size(mddev_t *mddev, int size) 4307 + raid5_set_cache_size(struct mddev *mddev, int size) 4308 4308 { 4309 4309 raid5_conf_t *conf = mddev->private; 4310 4310 int err; ··· 4330 4330 EXPORT_SYMBOL(raid5_set_cache_size); 4331 4331 4332 4332 static ssize_t 4333 - raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4333 + raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 4334 4334 { 4335 4335 raid5_conf_t *conf = mddev->private; 4336 4336 unsigned long new; ··· 4355 4355 raid5_store_stripe_cache_size); 4356 4356 4357 4357 static ssize_t 4358 - raid5_show_preread_threshold(mddev_t *mddev, char *page) 4358 + raid5_show_preread_threshold(struct mddev *mddev, char *page) 4359 4359 { 4360 4360 raid5_conf_t *conf = mddev->private; 4361 4361 if (conf) ··· 4365 4365 } 4366 4366 4367 4367 static ssize_t 4368 - raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4368 + raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 4369 4369 { 4370 4370 raid5_conf_t *conf = mddev->private; 4371 4371 unsigned long new; ··· 4389 4389 raid5_store_preread_threshold); 4390 4390 4391 4391 static ssize_t 4392 - stripe_cache_active_show(mddev_t *mddev, char *page) 4392 + stripe_cache_active_show(struct mddev *mddev, char *page) 4393 4393 { 4394 4394 raid5_conf_t *conf = mddev->private; 4395 4395 if (conf) ··· 4413 4413 }; 4414 4414 4415 4415 static sector_t 4416 - raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4416 + raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 4417 4417 { 4418 4418 raid5_conf_t *conf = mddev->private; 4419 4419 ··· 4540 4540 return err; 4541 4541 } 4542 4542 4543 - static raid5_conf_t *setup_conf(mddev_t *mddev) 4543 + static raid5_conf_t *setup_conf(struct mddev *mddev) 4544 4544 { 4545 4545 raid5_conf_t *conf; 4546 4546 int raid_disk, memory, max_disks; ··· 4705 4705 return 0; 4706 4706 } 4707 4707 4708 - static int run(mddev_t *mddev) 4708 + static int run(struct mddev *mddev) 4709 4709 { 4710 4710 raid5_conf_t *conf; 4711 4711 int working_disks = 0; ··· 4942 4942 return -EIO; 4943 4943 } 4944 4944 4945 - static int stop(mddev_t *mddev) 4945 + static int stop(struct mddev *mddev) 4946 4946 { 4947 4947 raid5_conf_t *conf = mddev->private; 4948 4948 ··· 4955 4955 return 0; 4956 4956 } 4957 4957 4958 - static void status(struct seq_file *seq, mddev_t *mddev) 4958 + static void status(struct seq_file *seq, struct mddev *mddev) 4959 4959 { 4960 4960 raid5_conf_t *conf = mddev->private; 4961 4961 int i; ··· 4994 4994 } 4995 4995 } 4996 4996 4997 - static int raid5_spare_active(mddev_t *mddev) 4997 + static int raid5_spare_active(struct mddev *mddev) 4998 4998 { 4999 4999 int i; 5000 5000 raid5_conf_t *conf = mddev->private; ··· 5019 5019 return count; 5020 5020 } 5021 5021 5022 - static int raid5_remove_disk(mddev_t *mddev, int number) 5022 + static int raid5_remove_disk(struct mddev *mddev, int number) 5023 5023 { 5024 5024 raid5_conf_t *conf = 
mddev->private; 5025 5025 int err = 0; ··· 5062 5062 return err; 5063 5063 } 5064 5064 5065 - static int raid5_add_disk(mddev_t *mddev, struct md_rdev *rdev) 5065 + static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 5066 5066 { 5067 5067 raid5_conf_t *conf = mddev->private; 5068 5068 int err = -EEXIST; ··· 5105 5105 return err; 5106 5106 } 5107 5107 5108 - static int raid5_resize(mddev_t *mddev, sector_t sectors) 5108 + static int raid5_resize(struct mddev *mddev, sector_t sectors) 5109 5109 { 5110 5110 /* no resync is happening, and there is enough space 5111 5111 * on all devices, so we can resize. ··· 5132 5132 return 0; 5133 5133 } 5134 5134 5135 - static int check_stripe_cache(mddev_t *mddev) 5135 + static int check_stripe_cache(struct mddev *mddev) 5136 5136 { 5137 5137 /* Can only proceed if there are plenty of stripe_heads. 5138 5138 * We need a minimum of one full stripe, and for sensible progress ··· 5156 5156 return 1; 5157 5157 } 5158 5158 5159 - static int check_reshape(mddev_t *mddev) 5159 + static int check_reshape(struct mddev *mddev) 5160 5160 { 5161 5161 raid5_conf_t *conf = mddev->private; 5162 5162 ··· 5188 5188 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 5189 5189 } 5190 5190 5191 - static int raid5_start_reshape(mddev_t *mddev) 5191 + static int raid5_start_reshape(struct mddev *mddev) 5192 5192 { 5193 5193 raid5_conf_t *conf = mddev->private; 5194 5194 struct md_rdev *rdev; ··· 5332 5332 /* This is called from the raid5d thread with mddev_lock held. 5333 5333 * It makes config changes to the device. 5334 5334 */ 5335 - static void raid5_finish_reshape(mddev_t *mddev) 5335 + static void raid5_finish_reshape(struct mddev *mddev) 5336 5336 { 5337 5337 raid5_conf_t *conf = mddev->private; 5338 5338 ··· 5367 5367 } 5368 5368 } 5369 5369 5370 - static void raid5_quiesce(mddev_t *mddev, int state) 5370 + static void raid5_quiesce(struct mddev *mddev, int state) 5371 5371 { 5372 5372 raid5_conf_t *conf = mddev->private; 5373 5373 ··· 5403 5403 } 5404 5404 5405 5405 5406 - static void *raid45_takeover_raid0(mddev_t *mddev, int level) 5406 + static void *raid45_takeover_raid0(struct mddev *mddev, int level) 5407 5407 { 5408 5408 struct raid0_private_data *raid0_priv = mddev->private; 5409 5409 sector_t sectors; ··· 5430 5430 } 5431 5431 5432 5432 5433 - static void *raid5_takeover_raid1(mddev_t *mddev) 5433 + static void *raid5_takeover_raid1(struct mddev *mddev) 5434 5434 { 5435 5435 int chunksect; 5436 5436 ··· 5457 5457 return setup_conf(mddev); 5458 5458 } 5459 5459 5460 - static void *raid5_takeover_raid6(mddev_t *mddev) 5460 + static void *raid5_takeover_raid6(struct mddev *mddev) 5461 5461 { 5462 5462 int new_layout; 5463 5463 ··· 5491 5491 } 5492 5492 5493 5493 5494 - static int raid5_check_reshape(mddev_t *mddev) 5494 + static int raid5_check_reshape(struct mddev *mddev) 5495 5495 { 5496 5496 /* For a 2-drive array, the layout and chunk size can be changed 5497 5497 * immediately as no restriping is needed.
··· 5531 5531 return check_reshape(mddev); 5532 5532 } 5533 5533 5534 - static int raid6_check_reshape(mddev_t *mddev) 5534 + static int raid6_check_reshape(struct mddev *mddev) 5535 5535 { 5536 5536 int new_chunk = mddev->new_chunk_sectors; 5537 5537 ··· 5551 5551 return check_reshape(mddev); 5552 5552 } 5553 5553 5554 - static void *raid5_takeover(mddev_t *mddev) 5554 + static void *raid5_takeover(struct mddev *mddev) 5555 5555 { 5556 5556 /* raid5 can take over: 5557 5557 * raid0 - if there is only one strip zone - make it a raid4 layout ··· 5574 5574 return ERR_PTR(-EINVAL); 5575 5575 } 5576 5576 5577 - static void *raid4_takeover(mddev_t *mddev) 5577 + static void *raid4_takeover(struct mddev *mddev) 5578 5578 { 5579 5579 /* raid4 can take over: 5580 5580 * raid0 - if there is only one strip zone ··· 5593 5593 5594 5594 static struct mdk_personality raid5_personality; 5595 5595 5596 - static void *raid6_takeover(mddev_t *mddev) 5596 + static void *raid6_takeover(struct mddev *mddev) 5597 5597 { 5598 5598 /* Currently can only take over a raid5. We map the 5599 5599 * personality to an equivalent raid6 personality
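
raid5_show_stripe_cache_size() and raid5_store_stripe_cache_size() above implement the show/store pair that md_sysfs_entry (in md.h) declares: show formats the current value into a page-sized buffer and returns its length, while store parses and validates the user's string and returns the number of bytes consumed. A user-space sketch of that contract; the 16..32768 bounds below are an assumption for illustration, not taken from the hunks shown:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

#define PAGE_SIZE 4096

static int stripe_cache_size = 256;   /* stand-in for conf->max_nr_stripes */

/* show(): format the current value into the page buffer, return its
 * length, mirroring raid5_show_stripe_cache_size(). */
static ssize_t show_stripe_cache_size(char *page)
{
    return snprintf(page, PAGE_SIZE, "%d\n", stripe_cache_size);
}

/* store(): parse and bounds-check the new value, mirroring the
 * raid5_store_stripe_cache_size()/raid5_set_cache_size() split.
 * (Bounds are illustrative assumptions.) */
static ssize_t store_stripe_cache_size(const char *buf, size_t len)
{
    char *end;
    unsigned long new = strtoul(buf, &end, 10);

    if (end == buf || (*end && *end != '\n'))
        return -22;                    /* -EINVAL */
    if (new <= 16 || new > 32768)
        return -22;
    stripe_cache_size = (int)new;
    return (ssize_t)len;               /* sysfs stores return bytes consumed */
}

int main(void)
{
    char page[PAGE_SIZE];

    store_stripe_cache_size("1024\n", 5);
    show_stripe_cache_size(page);
    fputs(page, stdout);               /* prints 1024 */
    return 0;
}
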
+3 -3
drivers/md/raid5.h
··· 349 349 350 350 struct raid5_private_data { 351 351 struct hlist_head *stripe_hashtbl; 352 - mddev_t *mddev; 352 + struct mddev *mddev; 353 353 struct disk_info *spare; 354 354 int chunk_sectors; 355 355 int level, algorithm; ··· 503 503 return layout >= 8 && layout <= 10; 504 504 } 505 505 506 - extern int md_raid5_congested(mddev_t *mddev, int bits); 506 + extern int md_raid5_congested(struct mddev *mddev, int bits); 507 507 extern void md_raid5_kick_device(raid5_conf_t *conf); 508 - extern int raid5_set_cache_size(mddev_t *mddev, int size); 508 + extern int raid5_set_cache_size(struct mddev *mddev, int size); 509 509 #endif
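
Several of the takeover hunks above end in return ERR_PTR(-EINVAL);: a takeover function hands back either a freshly built conf or a small negative errno, encoded in the same pointer value. A self-contained re-creation of that idiom (the accepted source levels below are illustrative, not raid5_takeover()'s real rules):

#include <stdio.h>
#include <stdlib.h>

#define EINVAL 22

/* User-space re-creation of the kernel's ERR_PTR()/IS_ERR() trick:
 * small negative errno values are smuggled through a pointer, so a
 * takeover function can return either a conf or an error in one value. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-4095;
}

struct r5conf { int level; };

/* Shaped like raid5_takeover(): only some source levels are accepted. */
static void *raid5_takeover(int level)
{
    if (level != 4 && level != 6)
        return ERR_PTR(-EINVAL);

    struct r5conf *conf = malloc(sizeof(*conf));
    if (!conf)
        return ERR_PTR(-12);           /* -ENOMEM */
    conf->level = 5;
    return conf;
}

int main(void)
{
    void *conf = raid5_takeover(1);
    if (IS_ERR(conf))
        printf("takeover refused: errno %ld\n", -PTR_ERR(conf));

    conf = raid5_takeover(6);
    if (!IS_ERR(conf)) {
        printf("took over to level %d\n", ((struct r5conf *)conf)->level);
        free(conf);
    }
    return 0;
}
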