Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drbd: Rename struct drbd_conf -> struct drbd_device

sed -i -e 's:\<drbd_conf\>:drbd_device:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
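
The \< and \> word-boundary anchors in the sed expression above are what keep the substitution from touching longer identifiers that merely contain the old name. A small demonstration (the second identifier is made up for illustration and does not come from the DRBD sources):

# Only whole-word occurrences of drbd_conf are rewritten.
echo 'struct drbd_conf *mdev; int drbd_confusion;' \
    | sed -e 's:\<drbd_conf\>:drbd_device:g'
# prints: struct drbd_device *mdev; int drbd_confusion;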

Authored by Andreas Gruenbacher, committed by Philipp Reisner
54761697 a3603a6e
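
The commit message does not record how the substitution was driven across the tree; below is a minimal sketch of one way to apply it to the DRBD sources (the find/xargs invocation is an assumption, not the authors' actual command):

# Hypothetical invocation: run the rename over every DRBD C and header file.
find drivers/block/drbd -name '*.[ch]' -print0 \
    | xargs -0 sed -i -e 's:\<drbd_conf\>:drbd_device:g'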

+562 -562

drivers/block/drbd/drbd_actlog.c  +34 -34
··· 105 105 }; 106 106 107 107 108 - void *drbd_md_get_buffer(struct drbd_conf *mdev) 108 + void *drbd_md_get_buffer(struct drbd_device *mdev) 109 109 { 110 110 int r; 111 111 ··· 116 116 return r ? NULL : page_address(mdev->md_io_page); 117 117 } 118 118 119 - void drbd_md_put_buffer(struct drbd_conf *mdev) 119 + void drbd_md_put_buffer(struct drbd_device *mdev) 120 120 { 121 121 if (atomic_dec_and_test(&mdev->md_io_in_use)) 122 122 wake_up(&mdev->misc_wait); 123 123 } 124 124 125 - void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, 125 + void wait_until_done_or_force_detached(struct drbd_device *mdev, struct drbd_backing_dev *bdev, 126 126 unsigned int *done) 127 127 { 128 128 long dt; ··· 142 142 } 143 143 } 144 144 145 - static int _drbd_md_sync_page_io(struct drbd_conf *mdev, 145 + static int _drbd_md_sync_page_io(struct drbd_device *mdev, 146 146 struct drbd_backing_dev *bdev, 147 147 struct page *page, sector_t sector, 148 148 int rw, int size) ··· 192 192 return err; 193 193 } 194 194 195 - int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, 195 + int drbd_md_sync_page_io(struct drbd_device *mdev, struct drbd_backing_dev *bdev, 196 196 sector_t sector, int rw) 197 197 { 198 198 int err; ··· 222 222 return err; 223 223 } 224 224 225 - static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr) 225 + static struct bm_extent *find_active_resync_extent(struct drbd_device *mdev, unsigned int enr) 226 226 { 227 227 struct lc_element *tmp; 228 228 tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT); ··· 234 234 return NULL; 235 235 } 236 236 237 - static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock) 237 + static struct lc_element *_al_get(struct drbd_device *mdev, unsigned int enr, bool nonblock) 238 238 { 239 239 struct lc_element *al_ext; 240 240 struct bm_extent *bm_ext; ··· 257 257 return al_ext; 258 258 } 259 259 260 - bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i) 260 + bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i) 261 261 { 262 262 /* for bios crossing activity log extent boundaries, 263 263 * we may need to activate two extents in one go */ ··· 275 275 } 276 276 277 277 static 278 - bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i) 278 + bool drbd_al_begin_io_prepare(struct drbd_device *mdev, struct drbd_interval *i) 279 279 { 280 280 /* for bios crossing activity log extent boundaries, 281 281 * we may need to activate two extents in one go */ ··· 297 297 return need_transaction; 298 298 } 299 299 300 - static int al_write_transaction(struct drbd_conf *mdev, bool delegate); 300 + static int al_write_transaction(struct drbd_device *mdev, bool delegate); 301 301 302 302 /* When called through generic_make_request(), we must delegate 303 303 * activity log I/O to the worker thread: a further request ··· 311 311 /* 312 312 * @delegate: delegate activity log I/O to the worker thread 313 313 */ 314 - void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate) 314 + void drbd_al_begin_io_commit(struct drbd_device *mdev, bool delegate) 315 315 { 316 316 bool locked = false; 317 317 ··· 352 352 /* 353 353 * @delegate: delegate activity log I/O to the worker thread 354 354 */ 355 - void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate) 355 + void drbd_al_begin_io(struct drbd_device *mdev, struct drbd_interval *i, 
bool delegate) 356 356 { 357 357 BUG_ON(delegate && current == mdev->tconn->worker.task); 358 358 ··· 360 360 drbd_al_begin_io_commit(mdev, delegate); 361 361 } 362 362 363 - int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i) 363 + int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i) 364 364 { 365 365 struct lru_cache *al = mdev->act_log; 366 366 /* for bios crossing activity log extent boundaries, ··· 409 409 return 0; 410 410 } 411 411 412 - void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i) 412 + void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i) 413 413 { 414 414 /* for bios crossing activity log extent boundaries, 415 415 * we may need to activate two extents in one go */ ··· 461 461 (BM_EXT_SHIFT - BM_BLOCK_SHIFT)); 462 462 } 463 463 464 - static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev) 464 + static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *mdev) 465 465 { 466 466 const unsigned int stripes = mdev->ldev->md.al_stripes; 467 467 const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k; ··· 480 480 } 481 481 482 482 static int 483 - _al_write_transaction(struct drbd_conf *mdev) 483 + _al_write_transaction(struct drbd_device *mdev) 484 484 { 485 485 struct al_transaction_on_disk *buffer; 486 486 struct lc_element *e; ··· 594 594 static int w_al_write_transaction(struct drbd_work *w, int unused) 595 595 { 596 596 struct update_al_work *aw = container_of(w, struct update_al_work, w); 597 - struct drbd_conf *mdev = w->mdev; 597 + struct drbd_device *mdev = w->mdev; 598 598 int err; 599 599 600 600 err = _al_write_transaction(mdev); ··· 607 607 /* Calls from worker context (see w_restart_disk_io()) need to write the 608 608 transaction directly. Others came through generic_make_request(), 609 609 those need to delegate it to the worker. 
*/ 610 - static int al_write_transaction(struct drbd_conf *mdev, bool delegate) 610 + static int al_write_transaction(struct drbd_device *mdev, bool delegate) 611 611 { 612 612 if (delegate) { 613 613 struct update_al_work al_work; ··· 621 621 return _al_write_transaction(mdev); 622 622 } 623 623 624 - static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext) 624 + static int _try_lc_del(struct drbd_device *mdev, struct lc_element *al_ext) 625 625 { 626 626 int rv; 627 627 ··· 643 643 * 644 644 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock() 645 645 */ 646 - void drbd_al_shrink(struct drbd_conf *mdev) 646 + void drbd_al_shrink(struct drbd_device *mdev) 647 647 { 648 648 struct lc_element *al_ext; 649 649 int i; ··· 660 660 wake_up(&mdev->al_wait); 661 661 } 662 662 663 - int drbd_initialize_al(struct drbd_conf *mdev, void *buffer) 663 + int drbd_initialize_al(struct drbd_device *mdev, void *buffer) 664 664 { 665 665 struct al_transaction_on_disk *al = buffer; 666 666 struct drbd_md *md = &mdev->ldev->md; ··· 684 684 static int w_update_odbm(struct drbd_work *w, int unused) 685 685 { 686 686 struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w); 687 - struct drbd_conf *mdev = w->mdev; 687 + struct drbd_device *mdev = w->mdev; 688 688 struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, }; 689 689 690 690 if (!get_ldev(mdev)) { ··· 721 721 * 722 722 * TODO will be obsoleted once we have a caching lru of the on disk bitmap 723 723 */ 724 - static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, 724 + static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector, 725 725 int count, int success) 726 726 { 727 727 struct lc_element *e; ··· 809 809 } 810 810 } 811 811 812 - void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go) 812 + void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go) 813 813 { 814 814 unsigned long now = jiffies; 815 815 unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; ··· 832 832 * called by worker on C_SYNC_TARGET and receiver on SyncSource. 833 833 * 834 834 */ 835 - void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size, 835 + void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, int size, 836 836 const char *file, const unsigned int line) 837 837 { 838 838 /* Is called from worker and receiver context _only_ */ ··· 904 904 * called by tl_clear and drbd_send_dblock (==drbd_make_request). 905 905 * so this can be _any_ process. 906 906 */ 907 - int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, 907 + int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, int size, 908 908 const char *file, const unsigned int line) 909 909 { 910 910 unsigned long sbnr, ebnr, flags; ··· 956 956 } 957 957 958 958 static 959 - struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr) 959 + struct bm_extent *_bme_get(struct drbd_device *mdev, unsigned int enr) 960 960 { 961 961 struct lc_element *e; 962 962 struct bm_extent *bm_ext; ··· 996 996 return bm_ext; 997 997 } 998 998 999 - static int _is_in_al(struct drbd_conf *mdev, unsigned int enr) 999 + static int _is_in_al(struct drbd_device *mdev, unsigned int enr) 1000 1000 { 1001 1001 int rv; 1002 1002 ··· 1014 1014 * 1015 1015 * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted. 
1016 1016 */ 1017 - int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) 1017 + int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector) 1018 1018 { 1019 1019 unsigned int enr = BM_SECT_TO_EXT(sector); 1020 1020 struct bm_extent *bm_ext; ··· 1067 1067 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN 1068 1068 * if there is still application IO going on in this area. 1069 1069 */ 1070 - int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector) 1070 + int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector) 1071 1071 { 1072 1072 unsigned int enr = BM_SECT_TO_EXT(sector); 1073 1073 const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT; ··· 1166 1166 return -EAGAIN; 1167 1167 } 1168 1168 1169 - void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector) 1169 + void drbd_rs_complete_io(struct drbd_device *mdev, sector_t sector) 1170 1170 { 1171 1171 unsigned int enr = BM_SECT_TO_EXT(sector); 1172 1172 struct lc_element *e; ··· 1204 1204 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED) 1205 1205 * @mdev: DRBD device. 1206 1206 */ 1207 - void drbd_rs_cancel_all(struct drbd_conf *mdev) 1207 + void drbd_rs_cancel_all(struct drbd_device *mdev) 1208 1208 { 1209 1209 spin_lock_irq(&mdev->al_lock); 1210 1210 ··· 1225 1225 * Returns 0 upon success, -EAGAIN if at least one reference count was 1226 1226 * not zero. 1227 1227 */ 1228 - int drbd_rs_del_all(struct drbd_conf *mdev) 1228 + int drbd_rs_del_all(struct drbd_device *mdev) 1229 1229 { 1230 1230 struct lc_element *e; 1231 1231 struct bm_extent *bm_ext; ··· 1276 1276 * @sector: The sector number. 1277 1277 * @size: Size of failed IO operation, in byte. 1278 1278 */ 1279 - void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size) 1279 + void drbd_rs_failed_io(struct drbd_device *mdev, sector_t sector, int size) 1280 1280 { 1281 1281 /* Is called from worker and receiver context _only_ */ 1282 1282 unsigned long sbnr, ebnr, lbnr;
drivers/block/drbd/drbd_bitmap.c  +43 -43
··· 113 113 }; 114 114 115 115 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 116 - static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) 116 + static void __bm_print_lock_info(struct drbd_device *mdev, const char *func) 117 117 { 118 118 struct drbd_bitmap *b = mdev->bitmap; 119 119 if (!__ratelimit(&drbd_ratelimit_state)) ··· 124 124 drbd_task_to_thread_name(mdev->tconn, b->bm_task)); 125 125 } 126 126 127 - void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) 127 + void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags) 128 128 { 129 129 struct drbd_bitmap *b = mdev->bitmap; 130 130 int trylock_failed; ··· 151 151 b->bm_task = current; 152 152 } 153 153 154 - void drbd_bm_unlock(struct drbd_conf *mdev) 154 + void drbd_bm_unlock(struct drbd_device *mdev) 155 155 { 156 156 struct drbd_bitmap *b = mdev->bitmap; 157 157 if (!b) { ··· 211 211 /* As is very unlikely that the same page is under IO from more than one 212 212 * context, we can get away with a bit per page and one wait queue per bitmap. 213 213 */ 214 - static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) 214 + static void bm_page_lock_io(struct drbd_device *mdev, int page_nr) 215 215 { 216 216 struct drbd_bitmap *b = mdev->bitmap; 217 217 void *addr = &page_private(b->bm_pages[page_nr]); 218 218 wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 219 219 } 220 220 221 - static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) 221 + static void bm_page_unlock_io(struct drbd_device *mdev, int page_nr) 222 222 { 223 223 struct drbd_bitmap *b = mdev->bitmap; 224 224 void *addr = &page_private(b->bm_pages[page_nr]); ··· 249 249 * hints, then call drbd_bm_write_hinted(), which will only write out changed 250 250 * pages which are flagged with this mark. 251 251 */ 252 - void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr) 252 + void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr) 253 253 { 254 254 struct page *page; 255 255 if (page_nr >= mdev->bitmap->bm_number_of_pages) { ··· 340 340 341 341 /* 342 342 * actually most functions herein should take a struct drbd_bitmap*, not a 343 - * struct drbd_conf*, but for the debug macros I like to have the mdev around 343 + * struct drbd_device*, but for the debug macros I like to have the mdev around 344 344 * to be able to report device specific. 345 345 */ 346 346 ··· 438 438 * called on driver init only. TODO call when a device is created. 439 439 * allocates the drbd_bitmap, and stores it in mdev->bitmap. 440 440 */ 441 - int drbd_bm_init(struct drbd_conf *mdev) 441 + int drbd_bm_init(struct drbd_device *mdev) 442 442 { 443 443 struct drbd_bitmap *b = mdev->bitmap; 444 444 WARN_ON(b != NULL); ··· 454 454 return 0; 455 455 } 456 456 457 - sector_t drbd_bm_capacity(struct drbd_conf *mdev) 457 + sector_t drbd_bm_capacity(struct drbd_device *mdev) 458 458 { 459 459 if (!expect(mdev->bitmap)) 460 460 return 0; ··· 463 463 464 464 /* called on driver unload. TODO: call when a device is destroyed. 465 465 */ 466 - void drbd_bm_cleanup(struct drbd_conf *mdev) 466 + void drbd_bm_cleanup(struct drbd_device *mdev) 467 467 { 468 468 if (!expect(mdev->bitmap)) 469 469 return; ··· 631 631 * In case this is actually a resize, we copy the old bitmap into the new one. 632 632 * Otherwise, the bitmap is initialized to all bits set. 
633 633 */ 634 - int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) 634 + int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits) 635 635 { 636 636 struct drbd_bitmap *b = mdev->bitmap; 637 637 unsigned long bits, words, owords, obits; ··· 757 757 * 758 758 * maybe bm_set should be atomic_t ? 759 759 */ 760 - unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev) 760 + unsigned long _drbd_bm_total_weight(struct drbd_device *mdev) 761 761 { 762 762 struct drbd_bitmap *b = mdev->bitmap; 763 763 unsigned long s; ··· 775 775 return s; 776 776 } 777 777 778 - unsigned long drbd_bm_total_weight(struct drbd_conf *mdev) 778 + unsigned long drbd_bm_total_weight(struct drbd_device *mdev) 779 779 { 780 780 unsigned long s; 781 781 /* if I don't have a disk, I don't know about out-of-sync status */ ··· 786 786 return s; 787 787 } 788 788 789 - size_t drbd_bm_words(struct drbd_conf *mdev) 789 + size_t drbd_bm_words(struct drbd_device *mdev) 790 790 { 791 791 struct drbd_bitmap *b = mdev->bitmap; 792 792 if (!expect(b)) ··· 797 797 return b->bm_words; 798 798 } 799 799 800 - unsigned long drbd_bm_bits(struct drbd_conf *mdev) 800 + unsigned long drbd_bm_bits(struct drbd_device *mdev) 801 801 { 802 802 struct drbd_bitmap *b = mdev->bitmap; 803 803 if (!expect(b)) ··· 811 811 * bitmap must be locked by drbd_bm_lock. 812 812 * currently only used from receive_bitmap. 813 813 */ 814 - void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, 814 + void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number, 815 815 unsigned long *buffer) 816 816 { 817 817 struct drbd_bitmap *b = mdev->bitmap; ··· 860 860 /* copy number words from the bitmap starting at offset into the buffer. 861 861 * buffer[i] will be little endian unsigned long. 
862 862 */ 863 - void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, 863 + void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number, 864 864 unsigned long *buffer) 865 865 { 866 866 struct drbd_bitmap *b = mdev->bitmap; ··· 897 897 } 898 898 899 899 /* set all bits in the bitmap */ 900 - void drbd_bm_set_all(struct drbd_conf *mdev) 900 + void drbd_bm_set_all(struct drbd_device *mdev) 901 901 { 902 902 struct drbd_bitmap *b = mdev->bitmap; 903 903 if (!expect(b)) ··· 913 913 } 914 914 915 915 /* clear all bits in the bitmap */ 916 - void drbd_bm_clear_all(struct drbd_conf *mdev) 916 + void drbd_bm_clear_all(struct drbd_device *mdev) 917 917 { 918 918 struct drbd_bitmap *b = mdev->bitmap; 919 919 if (!expect(b)) ··· 928 928 } 929 929 930 930 struct bm_aio_ctx { 931 - struct drbd_conf *mdev; 931 + struct drbd_device *mdev; 932 932 atomic_t in_flight; 933 933 unsigned int done; 934 934 unsigned flags; ··· 951 951 static void bm_async_io_complete(struct bio *bio, int error) 952 952 { 953 953 struct bm_aio_ctx *ctx = bio->bi_private; 954 - struct drbd_conf *mdev = ctx->mdev; 954 + struct drbd_device *mdev = ctx->mdev; 955 955 struct drbd_bitmap *b = mdev->bitmap; 956 956 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); 957 957 int uptodate = bio_flagged(bio, BIO_UPTODATE); ··· 1000 1000 static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) 1001 1001 { 1002 1002 struct bio *bio = bio_alloc_drbd(GFP_NOIO); 1003 - struct drbd_conf *mdev = ctx->mdev; 1003 + struct drbd_device *mdev = ctx->mdev; 1004 1004 struct drbd_bitmap *b = mdev->bitmap; 1005 1005 struct page *page; 1006 1006 unsigned int len; ··· 1049 1049 /* 1050 1050 * bm_rw: read/write the whole bitmap from/to its on disk location. 1051 1051 */ 1052 - static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local) 1052 + static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local) 1053 1053 { 1054 1054 struct bm_aio_ctx *ctx; 1055 1055 struct drbd_bitmap *b = mdev->bitmap; ··· 1173 1173 * drbd_bm_read() - Read the whole bitmap from its on disk location. 1174 1174 * @mdev: DRBD device. 1175 1175 */ 1176 - int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) 1176 + int drbd_bm_read(struct drbd_device *mdev) __must_hold(local) 1177 1177 { 1178 1178 return bm_rw(mdev, READ, 0, 0); 1179 1179 } ··· 1184 1184 * 1185 1185 * Will only write pages that have changed since last IO. 1186 1186 */ 1187 - int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) 1187 + int drbd_bm_write(struct drbd_device *mdev) __must_hold(local) 1188 1188 { 1189 1189 return bm_rw(mdev, WRITE, 0, 0); 1190 1190 } ··· 1195 1195 * 1196 1196 * Will write all pages. 1197 1197 */ 1198 - int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local) 1198 + int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local) 1199 1199 { 1200 1200 return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0); 1201 1201 } ··· 1211 1211 * verify is aborted due to a failed peer disk, while local IO continues, or 1212 1212 * pending resync acks are still being processed. 
1213 1213 */ 1214 - int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local) 1214 + int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local) 1215 1215 { 1216 1216 return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0); 1217 1217 } ··· 1220 1220 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed. 1221 1221 * @mdev: DRBD device. 1222 1222 */ 1223 - int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local) 1223 + int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local) 1224 1224 { 1225 1225 return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0); 1226 1226 } ··· 1237 1237 * In case this becomes an issue on systems with larger PAGE_SIZE, 1238 1238 * we may want to change this again to write 4k aligned 4k pieces. 1239 1239 */ 1240 - int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) 1240 + int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local) 1241 1241 { 1242 1242 struct bm_aio_ctx *ctx; 1243 1243 int err; ··· 1288 1288 * 1289 1289 * this returns a bit number, NOT a sector! 1290 1290 */ 1291 - static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, 1291 + static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_fo, 1292 1292 const int find_zero_bit) 1293 1293 { 1294 1294 struct drbd_bitmap *b = mdev->bitmap; ··· 1328 1328 return bm_fo; 1329 1329 } 1330 1330 1331 - static unsigned long bm_find_next(struct drbd_conf *mdev, 1331 + static unsigned long bm_find_next(struct drbd_device *mdev, 1332 1332 unsigned long bm_fo, const int find_zero_bit) 1333 1333 { 1334 1334 struct drbd_bitmap *b = mdev->bitmap; ··· 1349 1349 return i; 1350 1350 } 1351 1351 1352 - unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) 1352 + unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo) 1353 1353 { 1354 1354 return bm_find_next(mdev, bm_fo, 0); 1355 1355 } 1356 1356 1357 1357 #if 0 1358 1358 /* not yet needed for anything. */ 1359 - unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) 1359 + unsigned long drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo) 1360 1360 { 1361 1361 return bm_find_next(mdev, bm_fo, 1); 1362 1362 } ··· 1364 1364 1365 1365 /* does not spin_lock_irqsave. 1366 1366 * you must take drbd_bm_lock() first */ 1367 - unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) 1367 + unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo) 1368 1368 { 1369 1369 /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1370 1370 return __bm_find_next(mdev, bm_fo, 0); 1371 1371 } 1372 1372 1373 - unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) 1373 + unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo) 1374 1374 { 1375 1375 /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1376 1376 return __bm_find_next(mdev, bm_fo, 1); ··· 1382 1382 * wants bitnr, not sector. 1383 1383 * expected to be called for only a few bits (e - s about BITS_PER_LONG). 1384 1384 * Must hold bitmap lock already. 
*/ 1385 - static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 1385 + static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s, 1386 1386 unsigned long e, int val) 1387 1387 { 1388 1388 struct drbd_bitmap *b = mdev->bitmap; ··· 1431 1431 * for val != 0, we change 0 -> 1, return code positive 1432 1432 * for val == 0, we change 1 -> 0, return code negative 1433 1433 * wants bitnr, not sector */ 1434 - static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 1434 + static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s, 1435 1435 const unsigned long e, int val) 1436 1436 { 1437 1437 unsigned long flags; ··· 1454 1454 } 1455 1455 1456 1456 /* returns number of bits changed 0 -> 1 */ 1457 - int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1457 + int drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e) 1458 1458 { 1459 1459 return bm_change_bits_to(mdev, s, e, 1); 1460 1460 } 1461 1461 1462 1462 /* returns number of bits changed 1 -> 0 */ 1463 - int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1463 + int drbd_bm_clear_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e) 1464 1464 { 1465 1465 return -bm_change_bits_to(mdev, s, e, 0); 1466 1466 } ··· 1494 1494 * You must first drbd_bm_lock(). 1495 1495 * Can be called to set the whole bitmap in one go. 1496 1496 * Sets bits from s to e _inclusive_. */ 1497 - void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1497 + void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e) 1498 1498 { 1499 1499 /* First set_bit from the first bit (s) 1500 1500 * up to the next long boundary (sl), ··· 1574 1574 * 0 ... bit not set 1575 1575 * -1 ... first out of bounds access, stop testing for bits! 1576 1576 */ 1577 - int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) 1577 + int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr) 1578 1578 { 1579 1579 unsigned long flags; 1580 1580 struct drbd_bitmap *b = mdev->bitmap; ··· 1605 1605 } 1606 1606 1607 1607 /* returns number of bits set in the range [s, e] */ 1608 - int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1608 + int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e) 1609 1609 { 1610 1610 unsigned long flags; 1611 1611 struct drbd_bitmap *b = mdev->bitmap; ··· 1660 1660 * reference count of some bitmap extent element from some lru instead... 1661 1661 * 1662 1662 */ 1663 - int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) 1663 + int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr) 1664 1664 { 1665 1665 struct drbd_bitmap *b = mdev->bitmap; 1666 1666 int count, s, e;
drivers/block/drbd/drbd_int.h  +173 -173
··· 66 66 extern unsigned int minor_count; 67 67 extern bool disable_sendpage; 68 68 extern bool allow_oos; 69 - void tl_abort_disk_io(struct drbd_conf *mdev); 69 + void tl_abort_disk_io(struct drbd_device *mdev); 70 70 71 71 #ifdef CONFIG_DRBD_FAULT_INJECTION 72 72 extern int enable_faults; ··· 97 97 98 98 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) 99 99 100 - struct drbd_conf; 100 + struct drbd_device; 101 101 struct drbd_tconn; 102 102 103 103 ··· 147 147 }; 148 148 149 149 extern unsigned int 150 - _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); 150 + _drbd_insert_fault(struct drbd_device *mdev, unsigned int type); 151 151 152 152 static inline int 153 - drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { 153 + drbd_insert_fault(struct drbd_device *mdev, unsigned int type) { 154 154 #ifdef CONFIG_DRBD_FAULT_INJECTION 155 155 return fault_rate && 156 156 (enable_faults & (1<<type)) && ··· 189 189 unsigned bytes[2]; 190 190 }; 191 191 192 - extern void INFO_bm_xfer_stats(struct drbd_conf *mdev, 192 + extern void INFO_bm_xfer_stats(struct drbd_device *mdev, 193 193 const char *direction, struct bm_xfer_ctx *c); 194 194 195 195 static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c) ··· 246 246 struct list_head list; 247 247 int (*cb)(struct drbd_work *, int cancel); 248 248 union { 249 - struct drbd_conf *mdev; 249 + struct drbd_device *mdev; 250 250 struct drbd_tconn *tconn; 251 251 }; 252 252 }; 253 253 254 254 #include "drbd_interval.h" 255 255 256 - extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *); 256 + extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *); 257 257 258 258 struct drbd_request { 259 259 struct drbd_work w; ··· 409 409 READ_BALANCE_RR, 410 410 }; 411 411 412 - struct drbd_bitmap; /* opaque for drbd_conf */ 412 + struct drbd_bitmap; /* opaque for drbd_device */ 413 413 414 414 /* definition of bits in bm_flags to be used in drbd_bm_lock 415 415 * and drbd_bitmap_io and friends. 
*/ ··· 496 496 struct drbd_work w; 497 497 char *why; 498 498 enum bm_flag flags; 499 - int (*io_fn)(struct drbd_conf *mdev); 500 - void (*done)(struct drbd_conf *mdev, int rv); 499 + int (*io_fn)(struct drbd_device *mdev); 500 + void (*done)(struct drbd_device *mdev, int rv); 501 501 }; 502 502 503 503 enum write_ordering_e { ··· 617 617 struct list_head writes; 618 618 }; 619 619 620 - struct drbd_conf { 620 + struct drbd_device { 621 621 struct drbd_tconn *tconn; 622 622 int vnr; /* volume number within the connection */ 623 623 struct kref kref; ··· 763 763 struct submit_worker submit; 764 764 }; 765 765 766 - static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 766 + static inline struct drbd_device *minor_to_mdev(unsigned int minor) 767 767 { 768 - return (struct drbd_conf *)idr_find(&minors, minor); 768 + return (struct drbd_device *)idr_find(&minors, minor); 769 769 } 770 770 771 - static inline unsigned int mdev_to_minor(struct drbd_conf *mdev) 771 + static inline unsigned int mdev_to_minor(struct drbd_device *mdev) 772 772 { 773 773 return mdev->minor; 774 774 } 775 775 776 - static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr) 776 + static inline struct drbd_device *vnr_to_mdev(struct drbd_tconn *tconn, int vnr) 777 777 { 778 - return (struct drbd_conf *)idr_find(&tconn->volumes, vnr); 778 + return (struct drbd_device *)idr_find(&tconn->volumes, vnr); 779 779 } 780 780 781 781 /* ··· 789 789 DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */ 790 790 }; 791 791 792 - extern void drbd_init_set_defaults(struct drbd_conf *mdev); 792 + extern void drbd_init_set_defaults(struct drbd_device *mdev); 793 793 extern int drbd_thread_start(struct drbd_thread *thi); 794 794 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait); 795 795 extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task); ··· 811 811 812 812 extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd); 813 813 extern int drbd_send_protocol(struct drbd_tconn *tconn); 814 - extern int drbd_send_uuids(struct drbd_conf *mdev); 815 - extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); 816 - extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); 817 - extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); 818 - extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s); 819 - extern int drbd_send_current_state(struct drbd_conf *mdev); 820 - extern int drbd_send_sync_param(struct drbd_conf *mdev); 814 + extern int drbd_send_uuids(struct drbd_device *mdev); 815 + extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *mdev); 816 + extern void drbd_gen_and_send_sync_uuid(struct drbd_device *mdev); 817 + extern int drbd_send_sizes(struct drbd_device *mdev, int trigger_reply, enum dds_flags flags); 818 + extern int drbd_send_state(struct drbd_device *mdev, union drbd_state s); 819 + extern int drbd_send_current_state(struct drbd_device *mdev); 820 + extern int drbd_send_sync_param(struct drbd_device *mdev); 821 821 extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, 822 822 u32 set_size); 823 - extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet, 823 + extern int drbd_send_ack(struct drbd_device *, enum drbd_packet, 824 824 struct drbd_peer_request *); 825 - extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd, 825 + extern void drbd_send_ack_rp(struct drbd_device *mdev, 
enum drbd_packet cmd, 826 826 struct p_block_req *rp); 827 - extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd, 827 + extern void drbd_send_ack_dp(struct drbd_device *mdev, enum drbd_packet cmd, 828 828 struct p_data *dp, int data_size); 829 - extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd, 829 + extern int drbd_send_ack_ex(struct drbd_device *mdev, enum drbd_packet cmd, 830 830 sector_t sector, int blksize, u64 block_id); 831 - extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *); 832 - extern int drbd_send_block(struct drbd_conf *, enum drbd_packet, 831 + extern int drbd_send_out_of_sync(struct drbd_device *, struct drbd_request *); 832 + extern int drbd_send_block(struct drbd_device *, enum drbd_packet, 833 833 struct drbd_peer_request *); 834 - extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); 835 - extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, 834 + extern int drbd_send_dblock(struct drbd_device *mdev, struct drbd_request *req); 835 + extern int drbd_send_drequest(struct drbd_device *mdev, int cmd, 836 836 sector_t sector, int size, u64 block_id); 837 - extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, 837 + extern int drbd_send_drequest_csum(struct drbd_device *mdev, sector_t sector, 838 838 int size, void *digest, int digest_size, 839 839 enum drbd_packet cmd); 840 - extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size); 840 + extern int drbd_send_ov_request(struct drbd_device *mdev, sector_t sector, int size); 841 841 842 - extern int drbd_send_bitmap(struct drbd_conf *mdev); 843 - extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); 842 + extern int drbd_send_bitmap(struct drbd_device *mdev); 843 + extern void drbd_send_sr_reply(struct drbd_device *mdev, enum drbd_state_rv retcode); 844 844 extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode); 845 845 extern void drbd_free_bc(struct drbd_backing_dev *ldev); 846 - extern void drbd_mdev_cleanup(struct drbd_conf *mdev); 847 - void drbd_print_uuids(struct drbd_conf *mdev, const char *text); 846 + extern void drbd_mdev_cleanup(struct drbd_device *mdev); 847 + void drbd_print_uuids(struct drbd_device *mdev, const char *text); 848 848 849 849 extern void conn_md_sync(struct drbd_tconn *tconn); 850 - extern void drbd_md_write(struct drbd_conf *mdev, void *buffer); 851 - extern void drbd_md_sync(struct drbd_conf *mdev); 852 - extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); 853 - extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 854 - extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 855 - extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); 856 - extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local); 857 - extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local); 858 - extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 859 - extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local); 860 - extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local); 850 + extern void drbd_md_write(struct drbd_device *mdev, void *buffer); 851 + extern void drbd_md_sync(struct drbd_device *mdev); 852 + extern int drbd_md_read(struct drbd_device *mdev, struct 
drbd_backing_dev *bdev); 853 + extern void drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local); 854 + extern void _drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local); 855 + extern void drbd_uuid_new_current(struct drbd_device *mdev) __must_hold(local); 856 + extern void drbd_uuid_set_bm(struct drbd_device *mdev, u64 val) __must_hold(local); 857 + extern void drbd_uuid_move_history(struct drbd_device *mdev) __must_hold(local); 858 + extern void __drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local); 859 + extern void drbd_md_set_flag(struct drbd_device *mdev, int flags) __must_hold(local); 860 + extern void drbd_md_clear_flag(struct drbd_device *mdev, int flags)__must_hold(local); 861 861 extern int drbd_md_test_flag(struct drbd_backing_dev *, int); 862 862 #ifndef DRBD_DEBUG_MD_SYNC 863 - extern void drbd_md_mark_dirty(struct drbd_conf *mdev); 863 + extern void drbd_md_mark_dirty(struct drbd_device *mdev); 864 864 #else 865 865 #define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ ) 866 - extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, 866 + extern void drbd_md_mark_dirty_(struct drbd_device *mdev, 867 867 unsigned int line, const char *func); 868 868 #endif 869 - extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, 870 - int (*io_fn)(struct drbd_conf *), 871 - void (*done)(struct drbd_conf *, int), 869 + extern void drbd_queue_bitmap_io(struct drbd_device *mdev, 870 + int (*io_fn)(struct drbd_device *), 871 + void (*done)(struct drbd_device *, int), 872 872 char *why, enum bm_flag flags); 873 - extern int drbd_bitmap_io(struct drbd_conf *mdev, 874 - int (*io_fn)(struct drbd_conf *), 873 + extern int drbd_bitmap_io(struct drbd_device *mdev, 874 + int (*io_fn)(struct drbd_device *), 875 875 char *why, enum bm_flag flags); 876 - extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, 877 - int (*io_fn)(struct drbd_conf *), 876 + extern int drbd_bitmap_io_from_worker(struct drbd_device *mdev, 877 + int (*io_fn)(struct drbd_device *), 878 878 char *why, enum bm_flag flags); 879 - extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); 880 - extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 881 - extern void drbd_ldev_destroy(struct drbd_conf *mdev); 879 + extern int drbd_bmio_set_n_write(struct drbd_device *mdev); 880 + extern int drbd_bmio_clear_n_write(struct drbd_device *mdev); 881 + extern void drbd_ldev_destroy(struct drbd_device *mdev); 882 882 883 883 /* Meta data layout 884 884 * ··· 1064 1064 #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ 1065 1065 #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ 1066 1066 1067 - extern int drbd_bm_init(struct drbd_conf *mdev); 1068 - extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits); 1069 - extern void drbd_bm_cleanup(struct drbd_conf *mdev); 1070 - extern void drbd_bm_set_all(struct drbd_conf *mdev); 1071 - extern void drbd_bm_clear_all(struct drbd_conf *mdev); 1067 + extern int drbd_bm_init(struct drbd_device *mdev); 1068 + extern int drbd_bm_resize(struct drbd_device *mdev, sector_t sectors, int set_new_bits); 1069 + extern void drbd_bm_cleanup(struct drbd_device *mdev); 1070 + extern void drbd_bm_set_all(struct drbd_device *mdev); 1071 + extern void drbd_bm_clear_all(struct drbd_device *mdev); 1072 1072 /* set/clear/test only a few bits at a time */ 1073 1073 extern int drbd_bm_set_bits( 1074 - 
struct drbd_conf *mdev, unsigned long s, unsigned long e); 1074 + struct drbd_device *mdev, unsigned long s, unsigned long e); 1075 1075 extern int drbd_bm_clear_bits( 1076 - struct drbd_conf *mdev, unsigned long s, unsigned long e); 1076 + struct drbd_device *mdev, unsigned long s, unsigned long e); 1077 1077 extern int drbd_bm_count_bits( 1078 - struct drbd_conf *mdev, const unsigned long s, const unsigned long e); 1078 + struct drbd_device *mdev, const unsigned long s, const unsigned long e); 1079 1079 /* bm_set_bits variant for use while holding drbd_bm_lock, 1080 1080 * may process the whole bitmap in one go */ 1081 - extern void _drbd_bm_set_bits(struct drbd_conf *mdev, 1081 + extern void _drbd_bm_set_bits(struct drbd_device *mdev, 1082 1082 const unsigned long s, const unsigned long e); 1083 - extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); 1084 - extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); 1085 - extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1086 - extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1087 - extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr); 1088 - extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1089 - extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local); 1090 - extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local); 1091 - extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); 1092 - extern size_t drbd_bm_words(struct drbd_conf *mdev); 1093 - extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); 1094 - extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); 1083 + extern int drbd_bm_test_bit(struct drbd_device *mdev, unsigned long bitnr); 1084 + extern int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr); 1085 + extern int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local); 1086 + extern int drbd_bm_read(struct drbd_device *mdev) __must_hold(local); 1087 + extern void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr); 1088 + extern int drbd_bm_write(struct drbd_device *mdev) __must_hold(local); 1089 + extern int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local); 1090 + extern int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local); 1091 + extern int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local); 1092 + extern size_t drbd_bm_words(struct drbd_device *mdev); 1093 + extern unsigned long drbd_bm_bits(struct drbd_device *mdev); 1094 + extern sector_t drbd_bm_capacity(struct drbd_device *mdev); 1095 1095 1096 1096 #define DRBD_END_OF_BITMAP (~(unsigned long)0) 1097 - extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1097 + extern unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo); 1098 1098 /* bm_find_next variants for use while you hold drbd_bm_lock() */ 1099 - extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1100 - extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo); 1101 - extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev); 1102 - extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev); 1103 - extern int drbd_bm_rs_done(struct drbd_conf *mdev); 1099 + extern unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo); 1100 + extern unsigned long 
_drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo); 1101 + extern unsigned long _drbd_bm_total_weight(struct drbd_device *mdev); 1102 + extern unsigned long drbd_bm_total_weight(struct drbd_device *mdev); 1103 + extern int drbd_bm_rs_done(struct drbd_device *mdev); 1104 1104 /* for receive_bitmap */ 1105 - extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, 1105 + extern void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, 1106 1106 size_t number, unsigned long *buffer); 1107 1107 /* for _drbd_send_bitmap */ 1108 - extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, 1108 + extern void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, 1109 1109 size_t number, unsigned long *buffer); 1110 1110 1111 - extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); 1112 - extern void drbd_bm_unlock(struct drbd_conf *mdev); 1111 + extern void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags); 1112 + extern void drbd_bm_unlock(struct drbd_device *mdev); 1113 1113 /* drbd_main.c */ 1114 1114 1115 1115 extern struct kmem_cache *drbd_request_cache; ··· 1169 1169 1170 1170 /* drbd_req */ 1171 1171 extern void do_submit(struct work_struct *ws); 1172 - extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long); 1172 + extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); 1173 1173 extern void drbd_make_request(struct request_queue *q, struct bio *bio); 1174 - extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); 1174 + extern int drbd_read_remote(struct drbd_device *mdev, struct drbd_request *req); 1175 1175 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); 1176 1176 extern int is_valid_ar_handle(struct drbd_request *, sector_t); 1177 1177 1178 1178 1179 1179 /* drbd_nl.c */ 1180 1180 extern int drbd_msg_put_info(const char *info); 1181 - extern void drbd_suspend_io(struct drbd_conf *mdev); 1182 - extern void drbd_resume_io(struct drbd_conf *mdev); 1181 + extern void drbd_suspend_io(struct drbd_device *mdev); 1182 + extern void drbd_resume_io(struct drbd_device *mdev); 1183 1183 extern char *ppsize(char *buf, unsigned long long size); 1184 - extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int); 1184 + extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int); 1185 1185 enum determine_dev_size { 1186 1186 DS_ERROR_SHRINK = -3, 1187 1187 DS_ERROR_SPACE_MD = -2, ··· 1192 1192 DS_GREW_FROM_ZERO = 3, 1193 1193 }; 1194 1194 extern enum determine_dev_size 1195 - drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local); 1196 - extern void resync_after_online_grow(struct drbd_conf *); 1197 - extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); 1198 - extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1195 + drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local); 1196 + extern void resync_after_online_grow(struct drbd_device *); 1197 + extern void drbd_reconsider_max_bio_size(struct drbd_device *mdev); 1198 + extern enum drbd_state_rv drbd_set_role(struct drbd_device *mdev, 1199 1199 enum drbd_role new_role, 1200 1200 int force); 1201 1201 extern bool conn_try_outdate_peer(struct drbd_tconn *tconn); 1202 1202 extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn); 
1203 - extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); 1203 + extern int drbd_khelper(struct drbd_device *mdev, char *cmd); 1204 1204 1205 1205 /* drbd_worker.c */ 1206 1206 extern int drbd_worker(struct drbd_thread *thi); 1207 - enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor); 1208 - void drbd_resync_after_changed(struct drbd_conf *mdev); 1209 - extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side); 1210 - extern void resume_next_sg(struct drbd_conf *mdev); 1211 - extern void suspend_other_sg(struct drbd_conf *mdev); 1212 - extern int drbd_resync_finished(struct drbd_conf *mdev); 1207 + enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *mdev, int o_minor); 1208 + void drbd_resync_after_changed(struct drbd_device *mdev); 1209 + extern void drbd_start_resync(struct drbd_device *mdev, enum drbd_conns side); 1210 + extern void resume_next_sg(struct drbd_device *mdev); 1211 + extern void suspend_other_sg(struct drbd_device *mdev); 1212 + extern int drbd_resync_finished(struct drbd_device *mdev); 1213 1213 /* maybe rather drbd_main.c ? */ 1214 - extern void *drbd_md_get_buffer(struct drbd_conf *mdev); 1215 - extern void drbd_md_put_buffer(struct drbd_conf *mdev); 1216 - extern int drbd_md_sync_page_io(struct drbd_conf *mdev, 1214 + extern void *drbd_md_get_buffer(struct drbd_device *mdev); 1215 + extern void drbd_md_put_buffer(struct drbd_device *mdev); 1216 + extern int drbd_md_sync_page_io(struct drbd_device *mdev, 1217 1217 struct drbd_backing_dev *bdev, sector_t sector, int rw); 1218 - extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int); 1219 - extern void wait_until_done_or_force_detached(struct drbd_conf *mdev, 1218 + extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); 1219 + extern void wait_until_done_or_force_detached(struct drbd_device *mdev, 1220 1220 struct drbd_backing_dev *bdev, unsigned int *done); 1221 - extern void drbd_rs_controller_reset(struct drbd_conf *mdev); 1221 + extern void drbd_rs_controller_reset(struct drbd_device *mdev); 1222 1222 1223 - static inline void ov_out_of_sync_print(struct drbd_conf *mdev) 1223 + static inline void ov_out_of_sync_print(struct drbd_device *mdev) 1224 1224 { 1225 1225 if (mdev->ov_last_oos_size) { 1226 1226 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n", ··· 1231 1231 } 1232 1232 1233 1233 1234 - extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *); 1235 - extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, 1234 + extern void drbd_csum_bio(struct drbd_device *, struct crypto_hash *, struct bio *, void *); 1235 + extern void drbd_csum_ee(struct drbd_device *, struct crypto_hash *, 1236 1236 struct drbd_peer_request *, void *); 1237 1237 /* worker callbacks */ 1238 1238 extern int w_e_end_data_req(struct drbd_work *, int); ··· 1256 1256 extern void start_resync_timer_fn(unsigned long data); 1257 1257 1258 1258 /* drbd_receiver.c */ 1259 - extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); 1260 - extern int drbd_submit_peer_request(struct drbd_conf *, 1259 + extern int drbd_rs_should_slow_down(struct drbd_device *mdev, sector_t sector); 1260 + extern int drbd_submit_peer_request(struct drbd_device *, 1261 1261 struct drbd_peer_request *, const unsigned, 1262 1262 const int); 1263 - extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *); 1264 - extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, 
u64, 1263 + extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); 1264 + extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_device *, u64, 1265 1265 sector_t, unsigned int, 1266 1266 gfp_t) __must_hold(local); 1267 - extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *, 1267 + extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, 1268 1268 int); 1269 1269 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) 1270 1270 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) 1271 - extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool); 1272 - extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled); 1273 - extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed); 1271 + extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool); 1272 + extern void drbd_set_recv_tcq(struct drbd_device *mdev, int tcq_enabled); 1273 + extern void _drbd_clear_done_ee(struct drbd_device *mdev, struct list_head *to_be_freed); 1274 1274 extern void conn_flush_workqueue(struct drbd_tconn *tconn); 1275 - extern int drbd_connected(struct drbd_conf *mdev); 1276 - static inline void drbd_flush_workqueue(struct drbd_conf *mdev) 1275 + extern int drbd_connected(struct drbd_device *mdev); 1276 + static inline void drbd_flush_workqueue(struct drbd_device *mdev) 1277 1277 { 1278 1278 conn_flush_workqueue(mdev->tconn); 1279 1279 } ··· 1336 1336 extern const char *drbd_role_str(enum drbd_role s); 1337 1337 1338 1338 /* drbd_actlog.c */ 1339 - extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i); 1340 - extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate); 1341 - extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i); 1342 - extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate); 1343 - extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i); 1344 - extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector); 1345 - extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector); 1346 - extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector); 1347 - extern void drbd_rs_cancel_all(struct drbd_conf *mdev); 1348 - extern int drbd_rs_del_all(struct drbd_conf *mdev); 1349 - extern void drbd_rs_failed_io(struct drbd_conf *mdev, 1339 + extern int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i); 1340 + extern void drbd_al_begin_io_commit(struct drbd_device *mdev, bool delegate); 1341 + extern bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i); 1342 + extern void drbd_al_begin_io(struct drbd_device *mdev, struct drbd_interval *i, bool delegate); 1343 + extern void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i); 1344 + extern void drbd_rs_complete_io(struct drbd_device *mdev, sector_t sector); 1345 + extern int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector); 1346 + extern int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector); 1347 + extern void drbd_rs_cancel_all(struct drbd_device *mdev); 1348 + extern int drbd_rs_del_all(struct drbd_device *mdev); 1349 + extern void drbd_rs_failed_io(struct drbd_device *mdev, 1350 1350 sector_t sector, int size); 1351 - extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); 1352 - extern 
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, 1351 + extern void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go); 1352 + extern void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, 1353 1353 int size, const char *file, const unsigned int line); 1354 1354 #define drbd_set_in_sync(mdev, sector, size) \ 1355 1355 __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) 1356 - extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, 1356 + extern int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, 1357 1357 int size, const char *file, const unsigned int line); 1358 1358 #define drbd_set_out_of_sync(mdev, sector, size) \ 1359 1359 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) 1360 - extern void drbd_al_shrink(struct drbd_conf *mdev); 1361 - extern int drbd_initialize_al(struct drbd_conf *, void *); 1360 + extern void drbd_al_shrink(struct drbd_device *mdev); 1361 + extern int drbd_initialize_al(struct drbd_device *, void *); 1362 1362 1363 1363 /* drbd_nl.c */ 1364 1364 /* state info broadcast */ ··· 1375 1375 }; 1376 1376 }; 1377 1377 }; 1378 - void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib); 1378 + void drbd_bcast_event(struct drbd_device *mdev, const struct sib_info *sib); 1379 1379 1380 1380 /* 1381 1381 * inline helper functions ··· 1404 1404 } 1405 1405 1406 1406 static inline enum drbd_state_rv 1407 - _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, 1407 + _drbd_set_state(struct drbd_device *mdev, union drbd_state ns, 1408 1408 enum chg_state_flags flags, struct completion *done) 1409 1409 { 1410 1410 enum drbd_state_rv rv; ··· 1416 1416 return rv; 1417 1417 } 1418 1418 1419 - static inline union drbd_state drbd_read_state(struct drbd_conf *mdev) 1419 + static inline union drbd_state drbd_read_state(struct drbd_device *mdev) 1420 1420 { 1421 1421 union drbd_state rv; 1422 1422 ··· 1436 1436 }; 1437 1437 1438 1438 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) 1439 - static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, 1439 + static inline void __drbd_chk_io_error_(struct drbd_device *mdev, 1440 1440 enum drbd_force_detach_flags df, 1441 1441 const char *where) 1442 1442 { ··· 1500 1500 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED) 1501 1501 */ 1502 1502 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) 1503 - static inline void drbd_chk_io_error_(struct drbd_conf *mdev, 1503 + static inline void drbd_chk_io_error_(struct drbd_device *mdev, 1504 1504 int error, enum drbd_force_detach_flags forcedetach, const char *where) 1505 1505 { 1506 1506 if (error) { ··· 1643 1643 } 1644 1644 1645 1645 extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *); 1646 - extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *); 1646 + extern void *drbd_prepare_command(struct drbd_device *, struct drbd_socket *); 1647 1647 extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *, 1648 1648 enum drbd_packet, unsigned int, void *, 1649 1649 unsigned int); 1650 - extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *, 1650 + extern int drbd_send_command(struct drbd_device *, struct drbd_socket *, 1651 1651 enum drbd_packet, unsigned int, void *, 1652 1652 unsigned int); 1653 1653 1654 1654 extern int drbd_send_ping(struct drbd_tconn *tconn); 1655 1655 extern int drbd_send_ping_ack(struct drbd_tconn 
*tconn); 1656 - extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state); 1656 + extern int drbd_send_state_req(struct drbd_device *, union drbd_state, union drbd_state); 1657 1657 extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state); 1658 1658 1659 1659 static inline void drbd_thread_stop(struct drbd_thread *thi) ··· 1693 1693 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING) 1694 1694 * [from tl_clear_barrier] 1695 1695 */ 1696 - static inline void inc_ap_pending(struct drbd_conf *mdev) 1696 + static inline void inc_ap_pending(struct drbd_device *mdev) 1697 1697 { 1698 1698 atomic_inc(&mdev->ap_pending_cnt); 1699 1699 } ··· 1705 1705 atomic_read(&mdev->which)) 1706 1706 1707 1707 #define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__) 1708 - static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line) 1708 + static inline void _dec_ap_pending(struct drbd_device *mdev, const char *func, int line) 1709 1709 { 1710 1710 if (atomic_dec_and_test(&mdev->ap_pending_cnt)) 1711 1711 wake_up(&mdev->misc_wait); ··· 1718 1718 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER) 1719 1719 * (or P_NEG_ACK with ID_SYNCER) 1720 1720 */ 1721 - static inline void inc_rs_pending(struct drbd_conf *mdev) 1721 + static inline void inc_rs_pending(struct drbd_device *mdev) 1722 1722 { 1723 1723 atomic_inc(&mdev->rs_pending_cnt); 1724 1724 } 1725 1725 1726 1726 #define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__) 1727 - static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line) 1727 + static inline void _dec_rs_pending(struct drbd_device *mdev, const char *func, int line) 1728 1728 { 1729 1729 atomic_dec(&mdev->rs_pending_cnt); 1730 1730 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line); ··· 1739 1739 * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA 1740 1740 * receive_Barrier_* we need to send a P_BARRIER_ACK 1741 1741 */ 1742 - static inline void inc_unacked(struct drbd_conf *mdev) 1742 + static inline void inc_unacked(struct drbd_device *mdev) 1743 1743 { 1744 1744 atomic_inc(&mdev->unacked_cnt); 1745 1745 } 1746 1746 1747 1747 #define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__) 1748 - static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line) 1748 + static inline void _dec_unacked(struct drbd_device *mdev, const char *func, int line) 1749 1749 { 1750 1750 atomic_dec(&mdev->unacked_cnt); 1751 1751 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); 1752 1752 } 1753 1753 1754 1754 #define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__) 1755 - static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line) 1755 + static inline void _sub_unacked(struct drbd_device *mdev, int n, const char *func, int line) 1756 1756 { 1757 1757 atomic_sub(n, &mdev->unacked_cnt); 1758 1758 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); ··· 1767 1767 #define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT)) 1768 1768 #define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS)) 1769 1769 1770 - static inline void put_ldev(struct drbd_conf *mdev) 1770 + static inline void put_ldev(struct drbd_device *mdev) 1771 1771 { 1772 1772 int i = atomic_dec_return(&mdev->local_cnt); 1773 1773 ··· 1790 1790 } 1791 1791 1792 1792 #ifndef __CHECKER__ 1793 - static inline int _get_ldev_if_state(struct drbd_conf *mdev, 
enum drbd_disk_state mins) 1793 + static inline int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins) 1794 1794 { 1795 1795 int io_allowed; 1796 1796 ··· 1805 1805 return io_allowed; 1806 1806 } 1807 1807 #else 1808 - extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins); 1808 + extern int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins); 1809 1809 #endif 1810 1810 1811 1811 /* you must have an "get_ldev" reference */ 1812 - static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, 1812 + static inline void drbd_get_syncer_progress(struct drbd_device *mdev, 1813 1813 unsigned long *bits_left, unsigned int *per_mil_done) 1814 1814 { 1815 1815 /* this is to break it at compile time when we change that, in case we ··· 1859 1859 /* this throttles on-the-fly application requests 1860 1860 * according to max_buffers settings; 1861 1861 * maybe re-implement using semaphores? */ 1862 - static inline int drbd_get_max_buffers(struct drbd_conf *mdev) 1862 + static inline int drbd_get_max_buffers(struct drbd_device *mdev) 1863 1863 { 1864 1864 struct net_conf *nc; 1865 1865 int mxb; ··· 1872 1872 return mxb; 1873 1873 } 1874 1874 1875 - static inline int drbd_state_is_stable(struct drbd_conf *mdev) 1875 + static inline int drbd_state_is_stable(struct drbd_device *mdev) 1876 1876 { 1877 1877 union drbd_dev_state s = mdev->state; 1878 1878 ··· 1942 1942 return 1; 1943 1943 } 1944 1944 1945 - static inline int drbd_suspended(struct drbd_conf *mdev) 1945 + static inline int drbd_suspended(struct drbd_device *mdev) 1946 1946 { 1947 1947 struct drbd_tconn *tconn = mdev->tconn; 1948 1948 1949 1949 return tconn->susp || tconn->susp_fen || tconn->susp_nod; 1950 1950 } 1951 1951 1952 - static inline bool may_inc_ap_bio(struct drbd_conf *mdev) 1952 + static inline bool may_inc_ap_bio(struct drbd_device *mdev) 1953 1953 { 1954 1954 int mxb = drbd_get_max_buffers(mdev); 1955 1955 ··· 1975 1975 return true; 1976 1976 } 1977 1977 1978 - static inline bool inc_ap_bio_cond(struct drbd_conf *mdev) 1978 + static inline bool inc_ap_bio_cond(struct drbd_device *mdev) 1979 1979 { 1980 1980 bool rv = false; 1981 1981 ··· 1988 1988 return rv; 1989 1989 } 1990 1990 1991 - static inline void inc_ap_bio(struct drbd_conf *mdev) 1991 + static inline void inc_ap_bio(struct drbd_device *mdev) 1992 1992 { 1993 1993 /* we wait here 1994 1994 * as long as the device is suspended ··· 2001 2001 wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev)); 2002 2002 } 2003 2003 2004 - static inline void dec_ap_bio(struct drbd_conf *mdev) 2004 + static inline void dec_ap_bio(struct drbd_device *mdev) 2005 2005 { 2006 2006 int mxb = drbd_get_max_buffers(mdev); 2007 2007 int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); ··· 2020 2020 wake_up(&mdev->misc_wait); 2021 2021 } 2022 2022 2023 - static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev) 2023 + static inline bool verify_can_do_stop_sector(struct drbd_device *mdev) 2024 2024 { 2025 2025 return mdev->tconn->agreed_pro_version >= 97 && 2026 2026 mdev->tconn->agreed_pro_version != 100; 2027 2027 } 2028 2028 2029 - static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) 2029 + static inline int drbd_set_ed_uuid(struct drbd_device *mdev, u64 val) 2030 2030 { 2031 2031 int changed = mdev->ed_uuid != val; 2032 2032 mdev->ed_uuid = val; 2033 2033 return changed; 2034 2034 } 2035 2035 2036 - static inline int drbd_queue_order_type(struct drbd_conf *mdev) 2036 + static inline int 
drbd_queue_order_type(struct drbd_device *mdev) 2037 2037 { 2038 2038 /* sorry, we currently have no working implementation 2039 2039 * of distributed TCQ stuff */ ··· 2043 2043 return QUEUE_ORDERED_NONE; 2044 2044 } 2045 2045 2046 - static inline void drbd_md_flush(struct drbd_conf *mdev) 2046 + static inline void drbd_md_flush(struct drbd_device *mdev) 2047 2047 { 2048 2048 int r; 2049 2049
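
The header hunks above only touch declarations; the inline helpers keep their shape: a per-device atomic_t counts one kind of in-flight object (ap_pending_cnt, rs_pending_cnt, unacked_cnt, local_cnt, ap_bio_cnt), and whoever drops a count to zero wakes sleepers on mdev->misc_wait. A minimal stand-alone sketch of that idiom follows; all demo_* names are invented for illustration and are not DRBD symbols, and setup (init_waitqueue_head(), atomic_set()) is assumed to have happened elsewhere.

	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct demo_device {			/* stand-in for the renamed struct drbd_device */
		atomic_t	  pending_cnt;	/* in-flight objects of one kind */
		wait_queue_head_t misc_wait;	/* woken when the count drains */
	};

	static inline void demo_inc_pending(struct demo_device *dev)
	{
		atomic_inc(&dev->pending_cnt);
	}

	static inline void demo_dec_pending(struct demo_device *dev)
	{
		/* the last decrement wakes anyone waiting for the device to go idle */
		if (atomic_dec_and_test(&dev->pending_cnt))
			wake_up(&dev->misc_wait);
	}

	static inline void demo_wait_idle(struct demo_device *dev)
	{
		wait_event(dev->misc_wait, atomic_read(&dev->pending_cnt) == 0);
	}

The rename changes none of this logic; only the struct name in the parameter lists differs.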
+79 -79
drivers/block/drbd/drbd_main.c
··· 165 165 /* When checking with sparse, and this is an inline function, sparse will 166 166 give tons of false positives. When this is a real functions sparse works. 167 167 */ 168 - int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) 168 + int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins) 169 169 { 170 170 int io_allowed; 171 171 ··· 306 306 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL 307 307 * @mdev: DRBD device. 308 308 */ 309 - void tl_abort_disk_io(struct drbd_conf *mdev) 309 + void tl_abort_disk_io(struct drbd_device *mdev) 310 310 { 311 311 struct drbd_tconn *tconn = mdev->tconn; 312 312 struct drbd_request *req, *r; ··· 495 495 496 496 int conn_lowest_minor(struct drbd_tconn *tconn) 497 497 { 498 - struct drbd_conf *mdev; 498 + struct drbd_device *mdev; 499 499 int vnr = 0, m; 500 500 501 501 rcu_read_lock(); ··· 631 631 return p; 632 632 } 633 633 634 - void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock) 634 + void *drbd_prepare_command(struct drbd_device *mdev, struct drbd_socket *sock) 635 635 { 636 636 return conn_prepare_command(mdev->tconn, sock); 637 637 } ··· 680 680 return err; 681 681 } 682 682 683 - int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock, 683 + int drbd_send_command(struct drbd_device *mdev, struct drbd_socket *sock, 684 684 enum drbd_packet cmd, unsigned int header_size, 685 685 void *data, unsigned int size) 686 686 { ··· 712 712 return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0); 713 713 } 714 714 715 - int drbd_send_sync_param(struct drbd_conf *mdev) 715 + int drbd_send_sync_param(struct drbd_device *mdev) 716 716 { 717 717 struct drbd_socket *sock; 718 718 struct p_rs_param_95 *p; ··· 822 822 return err; 823 823 } 824 824 825 - static int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags) 825 + static int _drbd_send_uuids(struct drbd_device *mdev, u64 uuid_flags) 826 826 { 827 827 struct drbd_socket *sock; 828 828 struct p_uuids *p; ··· 855 855 return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0); 856 856 } 857 857 858 - int drbd_send_uuids(struct drbd_conf *mdev) 858 + int drbd_send_uuids(struct drbd_device *mdev) 859 859 { 860 860 return _drbd_send_uuids(mdev, 0); 861 861 } 862 862 863 - int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) 863 + int drbd_send_uuids_skip_initial_sync(struct drbd_device *mdev) 864 864 { 865 865 return _drbd_send_uuids(mdev, 8); 866 866 } 867 867 868 - void drbd_print_uuids(struct drbd_conf *mdev, const char *text) 868 + void drbd_print_uuids(struct drbd_device *mdev, const char *text) 869 869 { 870 870 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 871 871 u64 *uuid = mdev->ldev->md.uuid; ··· 883 883 } 884 884 } 885 885 886 - void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) 886 + void drbd_gen_and_send_sync_uuid(struct drbd_device *mdev) 887 887 { 888 888 struct drbd_socket *sock; 889 889 struct p_rs_uuid *p; ··· 908 908 } 909 909 } 910 910 911 - int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) 911 + int drbd_send_sizes(struct drbd_device *mdev, int trigger_reply, enum dds_flags flags) 912 912 { 913 913 struct drbd_socket *sock; 914 914 struct p_sizes *p; ··· 956 956 * drbd_send_current_state() - Sends the drbd state to the peer 957 957 * @mdev: DRBD device. 
958 958 */ 959 - int drbd_send_current_state(struct drbd_conf *mdev) 959 + int drbd_send_current_state(struct drbd_device *mdev) 960 960 { 961 961 struct drbd_socket *sock; 962 962 struct p_state *p; ··· 979 979 * between queuing and processing of the after_state_ch work, we still 980 980 * want to send each intermediary state in the order it occurred. 981 981 */ 982 - int drbd_send_state(struct drbd_conf *mdev, union drbd_state state) 982 + int drbd_send_state(struct drbd_device *mdev, union drbd_state state) 983 983 { 984 984 struct drbd_socket *sock; 985 985 struct p_state *p; ··· 992 992 return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0); 993 993 } 994 994 995 - int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val) 995 + int drbd_send_state_req(struct drbd_device *mdev, union drbd_state mask, union drbd_state val) 996 996 { 997 997 struct drbd_socket *sock; 998 998 struct p_req_state *p; ··· 1022 1022 return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0); 1023 1023 } 1024 1024 1025 - void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) 1025 + void drbd_send_sr_reply(struct drbd_device *mdev, enum drbd_state_rv retcode) 1026 1026 { 1027 1027 struct drbd_socket *sock; 1028 1028 struct p_req_state_reply *p; ··· 1066 1066 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4); 1067 1067 } 1068 1068 1069 - static int fill_bitmap_rle_bits(struct drbd_conf *mdev, 1069 + static int fill_bitmap_rle_bits(struct drbd_device *mdev, 1070 1070 struct p_compressed_bm *p, 1071 1071 unsigned int size, 1072 1072 struct bm_xfer_ctx *c) ··· 1170 1170 * code upon failure. 1171 1171 */ 1172 1172 static int 1173 - send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c) 1173 + send_bitmap_rle_or_plain(struct drbd_device *mdev, struct bm_xfer_ctx *c) 1174 1174 { 1175 1175 struct drbd_socket *sock = &mdev->tconn->data; 1176 1176 unsigned int header_size = drbd_header_size(mdev->tconn); ··· 1226 1226 } 1227 1227 1228 1228 /* See the comment at receive_bitmap() */ 1229 - static int _drbd_send_bitmap(struct drbd_conf *mdev) 1229 + static int _drbd_send_bitmap(struct drbd_device *mdev) 1230 1230 { 1231 1231 struct bm_xfer_ctx c; 1232 1232 int err; ··· 1263 1263 return err == 0; 1264 1264 } 1265 1265 1266 - int drbd_send_bitmap(struct drbd_conf *mdev) 1266 + int drbd_send_bitmap(struct drbd_device *mdev) 1267 1267 { 1268 1268 struct drbd_socket *sock = &mdev->tconn->data; 1269 1269 int err = -1; ··· 1300 1300 * @blksize: size in byte, needs to be in big endian byte order 1301 1301 * @block_id: Id, big endian byte order 1302 1302 */ 1303 - static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd, 1303 + static int _drbd_send_ack(struct drbd_device *mdev, enum drbd_packet cmd, 1304 1304 u64 sector, u32 blksize, u64 block_id) 1305 1305 { 1306 1306 struct drbd_socket *sock; ··· 1323 1323 /* dp->sector and dp->block_id already/still in network byte order, 1324 1324 * data_size is payload size according to dp->head, 1325 1325 * and may need to be corrected for digest size. 
*/ 1326 - void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd, 1326 + void drbd_send_ack_dp(struct drbd_device *mdev, enum drbd_packet cmd, 1327 1327 struct p_data *dp, int data_size) 1328 1328 { 1329 1329 if (mdev->tconn->peer_integrity_tfm) ··· 1332 1332 dp->block_id); 1333 1333 } 1334 1334 1335 - void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd, 1335 + void drbd_send_ack_rp(struct drbd_device *mdev, enum drbd_packet cmd, 1336 1336 struct p_block_req *rp) 1337 1337 { 1338 1338 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id); ··· 1344 1344 * @cmd: packet command code 1345 1345 * @peer_req: peer request 1346 1346 */ 1347 - int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd, 1347 + int drbd_send_ack(struct drbd_device *mdev, enum drbd_packet cmd, 1348 1348 struct drbd_peer_request *peer_req) 1349 1349 { 1350 1350 return _drbd_send_ack(mdev, cmd, ··· 1355 1355 1356 1356 /* This function misuses the block_id field to signal if the blocks 1357 1357 * are is sync or not. */ 1358 - int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd, 1358 + int drbd_send_ack_ex(struct drbd_device *mdev, enum drbd_packet cmd, 1359 1359 sector_t sector, int blksize, u64 block_id) 1360 1360 { 1361 1361 return _drbd_send_ack(mdev, cmd, ··· 1364 1364 cpu_to_be64(block_id)); 1365 1365 } 1366 1366 1367 - int drbd_send_drequest(struct drbd_conf *mdev, int cmd, 1367 + int drbd_send_drequest(struct drbd_device *mdev, int cmd, 1368 1368 sector_t sector, int size, u64 block_id) 1369 1369 { 1370 1370 struct drbd_socket *sock; ··· 1380 1380 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0); 1381 1381 } 1382 1382 1383 - int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size, 1383 + int drbd_send_drequest_csum(struct drbd_device *mdev, sector_t sector, int size, 1384 1384 void *digest, int digest_size, enum drbd_packet cmd) 1385 1385 { 1386 1386 struct drbd_socket *sock; ··· 1399 1399 digest, digest_size); 1400 1400 } 1401 1401 1402 - int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) 1402 + int drbd_send_ov_request(struct drbd_device *mdev, sector_t sector, int size) 1403 1403 { 1404 1404 struct drbd_socket *sock; 1405 1405 struct p_block_req *p; ··· 1469 1469 * As a workaround, we disable sendpage on pages 1470 1470 * with page_count == 0 or PageSlab. 
1471 1471 */ 1472 - static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, 1472 + static int _drbd_no_send_page(struct drbd_device *mdev, struct page *page, 1473 1473 int offset, size_t size, unsigned msg_flags) 1474 1474 { 1475 1475 struct socket *socket; ··· 1485 1485 return err; 1486 1486 } 1487 1487 1488 - static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, 1488 + static int _drbd_send_page(struct drbd_device *mdev, struct page *page, 1489 1489 int offset, size_t size, unsigned msg_flags) 1490 1490 { 1491 1491 struct socket *socket = mdev->tconn->data.socket; ··· 1534 1534 return err; 1535 1535 } 1536 1536 1537 - static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) 1537 + static int _drbd_send_bio(struct drbd_device *mdev, struct bio *bio) 1538 1538 { 1539 1539 struct bio_vec bvec; 1540 1540 struct bvec_iter iter; ··· 1553 1553 return 0; 1554 1554 } 1555 1555 1556 - static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) 1556 + static int _drbd_send_zc_bio(struct drbd_device *mdev, struct bio *bio) 1557 1557 { 1558 1558 struct bio_vec bvec; 1559 1559 struct bvec_iter iter; ··· 1571 1571 return 0; 1572 1572 } 1573 1573 1574 - static int _drbd_send_zc_ee(struct drbd_conf *mdev, 1574 + static int _drbd_send_zc_ee(struct drbd_device *mdev, 1575 1575 struct drbd_peer_request *peer_req) 1576 1576 { 1577 1577 struct page *page = peer_req->pages; ··· 1591 1591 return 0; 1592 1592 } 1593 1593 1594 - static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) 1594 + static u32 bio_flags_to_wire(struct drbd_device *mdev, unsigned long bi_rw) 1595 1595 { 1596 1596 if (mdev->tconn->agreed_pro_version >= 95) 1597 1597 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | ··· 1605 1605 /* Used to send write requests 1606 1606 * R_PRIMARY -> Peer (P_DATA) 1607 1607 */ 1608 - int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) 1608 + int drbd_send_dblock(struct drbd_device *mdev, struct drbd_request *req) 1609 1609 { 1610 1610 struct drbd_socket *sock; 1611 1611 struct p_data *p; ··· 1677 1677 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY) 1678 1678 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY) 1679 1679 */ 1680 - int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd, 1680 + int drbd_send_block(struct drbd_device *mdev, enum drbd_packet cmd, 1681 1681 struct drbd_peer_request *peer_req) 1682 1682 { 1683 1683 struct drbd_socket *sock; ··· 1706 1706 return err; 1707 1707 } 1708 1708 1709 - int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req) 1709 + int drbd_send_out_of_sync(struct drbd_device *mdev, struct drbd_request *req) 1710 1710 { 1711 1711 struct drbd_socket *sock; 1712 1712 struct p_block_desc *p; ··· 1827 1827 1828 1828 static int drbd_open(struct block_device *bdev, fmode_t mode) 1829 1829 { 1830 - struct drbd_conf *mdev = bdev->bd_disk->private_data; 1830 + struct drbd_device *mdev = bdev->bd_disk->private_data; 1831 1831 unsigned long flags; 1832 1832 int rv = 0; 1833 1833 ··· 1853 1853 1854 1854 static void drbd_release(struct gendisk *gd, fmode_t mode) 1855 1855 { 1856 - struct drbd_conf *mdev = gd->private_data; 1856 + struct drbd_device *mdev = gd->private_data; 1857 1857 mutex_lock(&drbd_main_mutex); 1858 1858 mdev->open_cnt--; 1859 1859 mutex_unlock(&drbd_main_mutex); 1860 1860 } 1861 1861 1862 - static void drbd_set_defaults(struct drbd_conf *mdev) 1862 + static void drbd_set_defaults(struct drbd_device *mdev) 1863 1863 { 1864 1864 /* Beware! 
The actual layout differs 1865 1865 * between big endian and little endian */ ··· 1872 1872 } }; 1873 1873 } 1874 1874 1875 - void drbd_init_set_defaults(struct drbd_conf *mdev) 1875 + void drbd_init_set_defaults(struct drbd_device *mdev) 1876 1876 { 1877 1877 /* the memset(,0,) did most of this. 1878 1878 * note: only assignments, no allocation in here */ ··· 1947 1947 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; 1948 1948 } 1949 1949 1950 - void drbd_mdev_cleanup(struct drbd_conf *mdev) 1950 + void drbd_mdev_cleanup(struct drbd_device *mdev) 1951 1951 { 1952 1952 int i; 1953 1953 if (mdev->tconn->receiver.t_state != NONE) ··· 2130 2130 .notifier_call = drbd_notify_sys, 2131 2131 }; 2132 2132 2133 - static void drbd_release_all_peer_reqs(struct drbd_conf *mdev) 2133 + static void drbd_release_all_peer_reqs(struct drbd_device *mdev) 2134 2134 { 2135 2135 int rr; 2136 2136 ··· 2158 2158 /* caution. no locking. */ 2159 2159 void drbd_minor_destroy(struct kref *kref) 2160 2160 { 2161 - struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref); 2161 + struct drbd_device *mdev = container_of(kref, struct drbd_device, kref); 2162 2162 struct drbd_tconn *tconn = mdev->tconn; 2163 2163 2164 2164 del_timer_sync(&mdev->request_timer); ··· 2217 2217 spin_unlock_irq(&retry->lock); 2218 2218 2219 2219 list_for_each_entry_safe(req, tmp, &writes, tl_requests) { 2220 - struct drbd_conf *mdev = req->w.mdev; 2220 + struct drbd_device *mdev = req->w.mdev; 2221 2221 struct bio *bio = req->master_bio; 2222 2222 unsigned long start_time = req->start_time; 2223 2223 bool expected; ··· 2277 2277 static void drbd_cleanup(void) 2278 2278 { 2279 2279 unsigned int i; 2280 - struct drbd_conf *mdev; 2280 + struct drbd_device *mdev; 2281 2281 struct drbd_tconn *tconn, *tmp; 2282 2282 2283 2283 unregister_reboot_notifier(&drbd_notifier); ··· 2331 2331 */ 2332 2332 static int drbd_congested(void *congested_data, int bdi_bits) 2333 2333 { 2334 - struct drbd_conf *mdev = congested_data; 2334 + struct drbd_device *mdev = congested_data; 2335 2335 struct request_queue *q; 2336 2336 char reason = '-'; 2337 2337 int r = 0; ··· 2591 2591 kfree(tconn); 2592 2592 } 2593 2593 2594 - static int init_submitter(struct drbd_conf *mdev) 2594 + static int init_submitter(struct drbd_device *mdev) 2595 2595 { 2596 2596 /* opencoded create_singlethread_workqueue(), 2597 2597 * to be able to say "drbd%d", ..., minor */ ··· 2608 2608 2609 2609 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr) 2610 2610 { 2611 - struct drbd_conf *mdev; 2611 + struct drbd_device *mdev; 2612 2612 struct gendisk *disk; 2613 2613 struct request_queue *q; 2614 2614 int vnr_got = vnr; ··· 2620 2620 return ERR_MINOR_EXISTS; 2621 2621 2622 2622 /* GFP_KERNEL, we are outside of all write-out paths */ 2623 - mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL); 2623 + mdev = kzalloc(sizeof(struct drbd_device), GFP_KERNEL); 2624 2624 if (!mdev) 2625 2625 return ERR_NOMEM; 2626 2626 ··· 2843 2843 2844 2844 void conn_md_sync(struct drbd_tconn *tconn) 2845 2845 { 2846 - struct drbd_conf *mdev; 2846 + struct drbd_device *mdev; 2847 2847 int vnr; 2848 2848 2849 2849 rcu_read_lock(); ··· 2882 2882 2883 2883 2884 2884 2885 - void drbd_md_write(struct drbd_conf *mdev, void *b) 2885 + void drbd_md_write(struct drbd_device *mdev, void *b) 2886 2886 { 2887 2887 struct meta_data_on_disk *buffer = b; 2888 2888 sector_t sector; ··· 2922 2922 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set 2923 
2923 * @mdev: DRBD device. 2924 2924 */ 2925 - void drbd_md_sync(struct drbd_conf *mdev) 2925 + void drbd_md_sync(struct drbd_device *mdev) 2926 2926 { 2927 2927 struct meta_data_on_disk *buffer; 2928 2928 ··· 2955 2955 put_ldev(mdev); 2956 2956 } 2957 2957 2958 - static int check_activity_log_stripe_size(struct drbd_conf *mdev, 2958 + static int check_activity_log_stripe_size(struct drbd_device *mdev, 2959 2959 struct meta_data_on_disk *on_disk, 2960 2960 struct drbd_md *in_core) 2961 2961 { ··· 3000 3000 return -EINVAL; 3001 3001 } 3002 3002 3003 - static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 3003 + static int check_offsets_and_sizes(struct drbd_device *mdev, struct drbd_backing_dev *bdev) 3004 3004 { 3005 3005 sector_t capacity = drbd_get_capacity(bdev->md_bdev); 3006 3006 struct drbd_md *in_core = &bdev->md; ··· 3091 3091 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS, 3092 3092 * even before @bdev is assigned to @mdev->ldev. 3093 3093 */ 3094 - int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 3094 + int drbd_md_read(struct drbd_device *mdev, struct drbd_backing_dev *bdev) 3095 3095 { 3096 3096 struct meta_data_on_disk *buffer; 3097 3097 u32 magic, flags; ··· 3196 3196 * timer that ensures that within five seconds you have to call drbd_md_sync(). 3197 3197 */ 3198 3198 #ifdef DEBUG 3199 - void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) 3199 + void drbd_md_mark_dirty_(struct drbd_device *mdev, unsigned int line, const char *func) 3200 3200 { 3201 3201 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { 3202 3202 mod_timer(&mdev->md_sync_timer, jiffies + HZ); ··· 3205 3205 } 3206 3206 } 3207 3207 #else 3208 - void drbd_md_mark_dirty(struct drbd_conf *mdev) 3208 + void drbd_md_mark_dirty(struct drbd_device *mdev) 3209 3209 { 3210 3210 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) 3211 3211 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); 3212 3212 } 3213 3213 #endif 3214 3214 3215 - void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) 3215 + void drbd_uuid_move_history(struct drbd_device *mdev) __must_hold(local) 3216 3216 { 3217 3217 int i; 3218 3218 ··· 3220 3220 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; 3221 3221 } 3222 3222 3223 - void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3223 + void __drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local) 3224 3224 { 3225 3225 if (idx == UI_CURRENT) { 3226 3226 if (mdev->state.role == R_PRIMARY) ··· 3235 3235 drbd_md_mark_dirty(mdev); 3236 3236 } 3237 3237 3238 - void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3238 + void _drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local) 3239 3239 { 3240 3240 unsigned long flags; 3241 3241 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); ··· 3243 3243 spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags); 3244 3244 } 3245 3245 3246 - void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3246 + void drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local) 3247 3247 { 3248 3248 unsigned long flags; 3249 3249 spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags); ··· 3262 3262 * Creates a new current UUID, and rotates the old current UUID into 3263 3263 * the bitmap slot. Causes an incremental resync upon next connect. 
3264 3264 */ 3265 - void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) 3265 + void drbd_uuid_new_current(struct drbd_device *mdev) __must_hold(local) 3266 3266 { 3267 3267 u64 val; 3268 3268 unsigned long long bm_uuid; ··· 3284 3284 drbd_md_sync(mdev); 3285 3285 } 3286 3286 3287 - void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) 3287 + void drbd_uuid_set_bm(struct drbd_device *mdev, u64 val) __must_hold(local) 3288 3288 { 3289 3289 unsigned long flags; 3290 3290 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) ··· 3313 3313 * 3314 3314 * Sets all bits in the bitmap and writes the whole bitmap to stable storage. 3315 3315 */ 3316 - int drbd_bmio_set_n_write(struct drbd_conf *mdev) 3316 + int drbd_bmio_set_n_write(struct drbd_device *mdev) 3317 3317 { 3318 3318 int rv = -EIO; 3319 3319 ··· 3341 3341 * 3342 3342 * Clears all bits in the bitmap and writes the whole bitmap to stable storage. 3343 3343 */ 3344 - int drbd_bmio_clear_n_write(struct drbd_conf *mdev) 3344 + int drbd_bmio_clear_n_write(struct drbd_device *mdev) 3345 3345 { 3346 3346 int rv = -EIO; 3347 3347 ··· 3358 3358 static int w_bitmap_io(struct drbd_work *w, int unused) 3359 3359 { 3360 3360 struct bm_io_work *work = container_of(w, struct bm_io_work, w); 3361 - struct drbd_conf *mdev = w->mdev; 3361 + struct drbd_device *mdev = w->mdev; 3362 3362 int rv = -EIO; 3363 3363 3364 3364 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); ··· 3383 3383 return 0; 3384 3384 } 3385 3385 3386 - void drbd_ldev_destroy(struct drbd_conf *mdev) 3386 + void drbd_ldev_destroy(struct drbd_device *mdev) 3387 3387 { 3388 3388 lc_destroy(mdev->resync); 3389 3389 mdev->resync = NULL; ··· 3398 3398 3399 3399 static int w_go_diskless(struct drbd_work *w, int unused) 3400 3400 { 3401 - struct drbd_conf *mdev = w->mdev; 3401 + struct drbd_device *mdev = w->mdev; 3402 3402 3403 3403 D_ASSERT(mdev->state.disk == D_FAILED); 3404 3404 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will ··· 3449 3449 * called from worker context. It MUST NOT be used while a previous such 3450 3450 * work is still pending! 3451 3451 */ 3452 - void drbd_queue_bitmap_io(struct drbd_conf *mdev, 3453 - int (*io_fn)(struct drbd_conf *), 3454 - void (*done)(struct drbd_conf *, int), 3452 + void drbd_queue_bitmap_io(struct drbd_device *mdev, 3453 + int (*io_fn)(struct drbd_device *), 3454 + void (*done)(struct drbd_device *, int), 3455 3455 char *why, enum bm_flag flags) 3456 3456 { 3457 3457 D_ASSERT(current == mdev->tconn->worker.task); ··· 3486 3486 * freezes application IO while that the actual IO operations runs. This 3487 3487 * functions MAY NOT be called from worker context. 
3488 3488 */ 3489 - int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), 3489 + int drbd_bitmap_io(struct drbd_device *mdev, int (*io_fn)(struct drbd_device *), 3490 3490 char *why, enum bm_flag flags) 3491 3491 { 3492 3492 int rv; ··· 3506 3506 return rv; 3507 3507 } 3508 3508 3509 - void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local) 3509 + void drbd_md_set_flag(struct drbd_device *mdev, int flag) __must_hold(local) 3510 3510 { 3511 3511 if ((mdev->ldev->md.flags & flag) != flag) { 3512 3512 drbd_md_mark_dirty(mdev); ··· 3514 3514 } 3515 3515 } 3516 3516 3517 - void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local) 3517 + void drbd_md_clear_flag(struct drbd_device *mdev, int flag) __must_hold(local) 3518 3518 { 3519 3519 if ((mdev->ldev->md.flags & flag) != 0) { 3520 3520 drbd_md_mark_dirty(mdev); ··· 3528 3528 3529 3529 static void md_sync_timer_fn(unsigned long data) 3530 3530 { 3531 - struct drbd_conf *mdev = (struct drbd_conf *) data; 3531 + struct drbd_device *mdev = (struct drbd_device *) data; 3532 3532 3533 3533 /* must not double-queue! */ 3534 3534 if (list_empty(&mdev->md_sync_work.list)) ··· 3537 3537 3538 3538 static int w_md_sync(struct drbd_work *w, int unused) 3539 3539 { 3540 - struct drbd_conf *mdev = w->mdev; 3540 + struct drbd_device *mdev = w->mdev; 3541 3541 3542 3542 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); 3543 3543 #ifdef DEBUG ··· 3624 3624 * @i: the struct drbd_interval embedded in struct drbd_request or 3625 3625 * struct drbd_peer_request 3626 3626 */ 3627 - int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i) 3627 + int drbd_wait_misc(struct drbd_device *mdev, struct drbd_interval *i) 3628 3628 { 3629 3629 struct net_conf *nc; 3630 3630 DEFINE_WAIT(wait); ··· 3702 3702 } 3703 3703 3704 3704 unsigned int 3705 - _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) 3705 + _drbd_insert_fault(struct drbd_device *mdev, unsigned int type) 3706 3706 { 3707 3707 static struct fault_random_state rrs = {0, 0}; 3708 3708
+34 -34
drivers/block/drbd/drbd_nl.c
··· 103 103 /* pointer into reply buffer */ 104 104 struct drbd_genlmsghdr *reply_dh; 105 105 /* resolved from attributes, if possible */ 106 - struct drbd_conf *mdev; 106 + struct drbd_device *mdev; 107 107 struct drbd_tconn *tconn; 108 108 } adm_ctx; 109 109 ··· 313 313 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs); 314 314 } 315 315 316 - int drbd_khelper(struct drbd_conf *mdev, char *cmd) 316 + int drbd_khelper(struct drbd_device *mdev, char *cmd) 317 317 { 318 318 char *envp[] = { "HOME=/", 319 319 "TERM=linux", ··· 400 400 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) 401 401 { 402 402 enum drbd_fencing_p fp = FP_NOT_AVAIL; 403 - struct drbd_conf *mdev; 403 + struct drbd_device *mdev; 404 404 int vnr; 405 405 406 406 rcu_read_lock(); ··· 534 534 } 535 535 536 536 enum drbd_state_rv 537 - drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 537 + drbd_set_role(struct drbd_device *mdev, enum drbd_role new_role, int force) 538 538 { 539 539 const int max_tries = 4; 540 540 enum drbd_state_rv rv = SS_UNKNOWN_ERROR; ··· 729 729 * Activity log size used to be fixed 32kB, 730 730 * but is about to become configurable. 731 731 */ 732 - static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, 732 + static void drbd_md_set_sector_offsets(struct drbd_device *mdev, 733 733 struct drbd_backing_dev *bdev) 734 734 { 735 735 sector_t md_size_sect = 0; ··· 807 807 * and can be long lived. 808 808 * This changes an mdev->flag, is triggered by drbd internals, 809 809 * and should be short-lived. */ 810 - void drbd_suspend_io(struct drbd_conf *mdev) 810 + void drbd_suspend_io(struct drbd_device *mdev) 811 811 { 812 812 set_bit(SUSPEND_IO, &mdev->flags); 813 813 if (drbd_suspended(mdev)) ··· 815 815 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 816 816 } 817 817 818 - void drbd_resume_io(struct drbd_conf *mdev) 818 + void drbd_resume_io(struct drbd_device *mdev) 819 819 { 820 820 clear_bit(SUSPEND_IO, &mdev->flags); 821 821 wake_up(&mdev->misc_wait); ··· 829 829 * You should call drbd_md_sync() after calling this function. 830 830 */ 831 831 enum determine_dev_size 832 - drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local) 832 + drbd_determine_dev_size(struct drbd_device *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local) 833 833 { 834 834 sector_t prev_first_sect, prev_size; /* previous meta location */ 835 835 sector_t la_size_sect, u_size; ··· 979 979 } 980 980 981 981 sector_t 982 - drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, 982 + drbd_new_dev_size(struct drbd_device *mdev, struct drbd_backing_dev *bdev, 983 983 sector_t u_size, int assume_peer_has_space) 984 984 { 985 985 sector_t p_size = mdev->p_size; /* partner's disk size. */ ··· 1033 1033 * failed, and 0 on success. You should call drbd_md_sync() after you called 1034 1034 * this function. 
1035 1035 */ 1036 - static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc) 1036 + static int drbd_check_al_size(struct drbd_device *mdev, struct disk_conf *dc) 1037 1037 { 1038 1038 struct lru_cache *n, *t; 1039 1039 struct lc_element *e; ··· 1078 1078 return 0; 1079 1079 } 1080 1080 1081 - static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) 1081 + static void drbd_setup_queue_param(struct drbd_device *mdev, unsigned int max_bio_size) 1082 1082 { 1083 1083 struct request_queue * const q = mdev->rq_queue; 1084 1084 unsigned int max_hw_sectors = max_bio_size >> 9; ··· 1115 1115 } 1116 1116 } 1117 1117 1118 - void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) 1118 + void drbd_reconsider_max_bio_size(struct drbd_device *mdev) 1119 1119 { 1120 1120 unsigned int now, new, local, peer; 1121 1121 ··· 1180 1180 } 1181 1181 1182 1182 /* Make sure IO is suspended before calling this function(). */ 1183 - static void drbd_suspend_al(struct drbd_conf *mdev) 1183 + static void drbd_suspend_al(struct drbd_device *mdev) 1184 1184 { 1185 1185 int s = 0; 1186 1186 ··· 1238 1238 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) 1239 1239 { 1240 1240 enum drbd_ret_code retcode; 1241 - struct drbd_conf *mdev; 1241 + struct drbd_device *mdev; 1242 1242 struct disk_conf *new_disk_conf, *old_disk_conf; 1243 1243 struct fifo_buffer *old_plan = NULL, *new_plan = NULL; 1244 1244 int err, fifo_size; ··· 1366 1366 1367 1367 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) 1368 1368 { 1369 - struct drbd_conf *mdev; 1369 + struct drbd_device *mdev; 1370 1370 int err; 1371 1371 enum drbd_ret_code retcode; 1372 1372 enum determine_dev_size dd; ··· 1800 1800 return 0; 1801 1801 } 1802 1802 1803 - static int adm_detach(struct drbd_conf *mdev, int force) 1803 + static int adm_detach(struct drbd_device *mdev, int force) 1804 1804 { 1805 1805 enum drbd_state_rv retcode; 1806 1806 int ret; ··· 1862 1862 1863 1863 static bool conn_resync_running(struct drbd_tconn *tconn) 1864 1864 { 1865 - struct drbd_conf *mdev; 1865 + struct drbd_device *mdev; 1866 1866 bool rv = false; 1867 1867 int vnr; 1868 1868 ··· 1883 1883 1884 1884 static bool conn_ov_running(struct drbd_tconn *tconn) 1885 1885 { 1886 - struct drbd_conf *mdev; 1886 + struct drbd_device *mdev; 1887 1887 bool rv = false; 1888 1888 int vnr; 1889 1889 ··· 1903 1903 static enum drbd_ret_code 1904 1904 _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf) 1905 1905 { 1906 - struct drbd_conf *mdev; 1906 + struct drbd_device *mdev; 1907 1907 int i; 1908 1908 1909 1909 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) { ··· 1947 1947 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf) 1948 1948 { 1949 1949 static enum drbd_ret_code rv; 1950 - struct drbd_conf *mdev; 1950 + struct drbd_device *mdev; 1951 1951 int i; 1952 1952 1953 1953 rcu_read_lock(); ··· 2139 2139 2140 2140 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) 2141 2141 { 2142 - struct drbd_conf *mdev; 2142 + struct drbd_device *mdev; 2143 2143 struct net_conf *old_conf, *new_conf = NULL; 2144 2144 struct crypto crypto = { }; 2145 2145 struct drbd_tconn *tconn; ··· 2349 2349 return 0; 2350 2350 } 2351 2351 2352 - void resync_after_online_grow(struct drbd_conf *mdev) 2352 + void resync_after_online_grow(struct drbd_device *mdev) 2353 2353 { 2354 2354 int iass; /* I am sync source */ 2355 2355 ··· 2369 2369 
{ 2370 2370 struct disk_conf *old_disk_conf, *new_disk_conf = NULL; 2371 2371 struct resize_parms rs; 2372 - struct drbd_conf *mdev; 2372 + struct drbd_device *mdev; 2373 2373 enum drbd_ret_code retcode; 2374 2374 enum determine_dev_size dd; 2375 2375 bool change_al_layout = false; ··· 2535 2535 2536 2536 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) 2537 2537 { 2538 - struct drbd_conf *mdev; 2538 + struct drbd_device *mdev; 2539 2539 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 2540 2540 2541 2541 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); ··· 2590 2590 return 0; 2591 2591 } 2592 2592 2593 - static int drbd_bmio_set_susp_al(struct drbd_conf *mdev) 2593 + static int drbd_bmio_set_susp_al(struct drbd_device *mdev) 2594 2594 { 2595 2595 int rv; 2596 2596 ··· 2602 2602 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) 2603 2603 { 2604 2604 int retcode; /* drbd_ret_code, drbd_state_rv */ 2605 - struct drbd_conf *mdev; 2605 + struct drbd_device *mdev; 2606 2606 2607 2607 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); 2608 2608 if (!adm_ctx.reply_skb) ··· 2692 2692 2693 2693 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) 2694 2694 { 2695 - struct drbd_conf *mdev; 2695 + struct drbd_device *mdev; 2696 2696 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ 2697 2697 2698 2698 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); ··· 2753 2753 return -EMSGSIZE; 2754 2754 } 2755 2755 2756 - static int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev, 2756 + static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *mdev, 2757 2757 const struct sib_info *sib) 2758 2758 { 2759 2759 struct state_info *si = NULL; /* for sizeof(si->member); */ ··· 2897 2897 2898 2898 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) 2899 2899 { 2900 - struct drbd_conf *mdev; 2900 + struct drbd_device *mdev; 2901 2901 struct drbd_genlmsghdr *dh; 2902 2902 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0]; 2903 2903 struct drbd_tconn *tconn = NULL; ··· 3097 3097 3098 3098 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) 3099 3099 { 3100 - struct drbd_conf *mdev; 3100 + struct drbd_device *mdev; 3101 3101 enum drbd_ret_code retcode; 3102 3102 struct start_ov_parms parms; 3103 3103 ··· 3138 3138 3139 3139 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) 3140 3140 { 3141 - struct drbd_conf *mdev; 3141 + struct drbd_device *mdev; 3142 3142 enum drbd_ret_code retcode; 3143 3143 int skip_initial_sync = 0; 3144 3144 int err; ··· 3302 3302 return 0; 3303 3303 } 3304 3304 3305 - static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev) 3305 + static enum drbd_ret_code adm_delete_minor(struct drbd_device *mdev) 3306 3306 { 3307 3307 if (mdev->state.disk == D_DISKLESS && 3308 3308 /* no need to be mdev->state.conn == C_STANDALONE && ··· 3341 3341 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) 3342 3342 { 3343 3343 int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ 3344 - struct drbd_conf *mdev; 3344 + struct drbd_device *mdev; 3345 3345 unsigned i; 3346 3346 3347 3347 retcode = drbd_adm_prepare(skb, info, 0); ··· 3441 3441 return 0; 3442 3442 } 3443 3443 3444 - void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib) 3444 + void drbd_bcast_event(struct drbd_device *mdev, const struct sib_info *sib) 3445 3445 { 3446 3446 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ 3447 3447 struct sk_buff *msg;
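
Among the drbd_nl.c changes above, drbd_suspend_io()/drbd_resume_io() keep their pairing of a flag bit with a drain of the in-flight application-I/O counter. A compact sketch of the core of that pairing (it omits the early-out the real function takes when the device is already suspended); the demo_* names are invented and make no claim to match the real field layout.

	#include <linux/atomic.h>
	#include <linux/bitops.h>
	#include <linux/wait.h>

	#define DEMO_SUSPEND_IO	0

	struct demo_vol {
		unsigned long	  flags;	/* DEMO_SUSPEND_IO */
		atomic_t	  ap_bio;	/* application bios in flight */
		wait_queue_head_t wait;
	};

	static void demo_suspend_io(struct demo_vol *vol)
	{
		/* stop accepting new I/O, then wait for what is already in flight */
		set_bit(DEMO_SUSPEND_IO, &vol->flags);
		wait_event(vol->wait, atomic_read(&vol->ap_bio) == 0);
	}

	static void demo_resume_io(struct demo_vol *vol)
	{
		clear_bit(DEMO_SUSPEND_IO, &vol->flags);
		wake_up(&vol->wait);
	}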
+2 -2
drivers/block/drbd/drbd_proc.c
··· 66 66 * [=====>..............] 33.5% (23456/123456) 67 67 * finish: 2:20:20 speed: 6,345 (6,456) K/sec 68 68 */ 69 - static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) 69 + static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq) 70 70 { 71 71 unsigned long db, dt, dbdt, rt, rs_left; 72 72 unsigned int res; ··· 202 202 { 203 203 int i, prev_i = -1; 204 204 const char *sn; 205 - struct drbd_conf *mdev; 205 + struct drbd_device *mdev; 206 206 struct net_conf *nc; 207 207 char wp; 208 208
+71 -71
drivers/block/drbd/drbd_receiver.c
··· 64 64 65 65 static int drbd_do_features(struct drbd_tconn *tconn); 66 66 static int drbd_do_auth(struct drbd_tconn *tconn); 67 - static int drbd_disconnected(struct drbd_conf *mdev); 67 + static int drbd_disconnected(struct drbd_device *mdev); 68 68 69 69 static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event); 70 70 static int e_end_block(struct drbd_work *, int); ··· 151 151 *head = chain_first; 152 152 } 153 153 154 - static struct page *__drbd_alloc_pages(struct drbd_conf *mdev, 154 + static struct page *__drbd_alloc_pages(struct drbd_device *mdev, 155 155 unsigned int number) 156 156 { 157 157 struct page *page = NULL; ··· 197 197 return NULL; 198 198 } 199 199 200 - static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev, 200 + static void reclaim_finished_net_peer_reqs(struct drbd_device *mdev, 201 201 struct list_head *to_be_freed) 202 202 { 203 203 struct drbd_peer_request *peer_req; ··· 216 216 } 217 217 } 218 218 219 - static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) 219 + static void drbd_kick_lo_and_reclaim_net(struct drbd_device *mdev) 220 220 { 221 221 LIST_HEAD(reclaimed); 222 222 struct drbd_peer_request *peer_req, *t; ··· 241 241 * 242 242 * Returns a page chain linked via page->private. 243 243 */ 244 - struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number, 244 + struct page *drbd_alloc_pages(struct drbd_device *mdev, unsigned int number, 245 245 bool retry) 246 246 { 247 247 struct page *page = NULL; ··· 291 291 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock); 292 292 * Either links the page chain back to the global pool, 293 293 * or returns all pages to the system. */ 294 - static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net) 294 + static void drbd_free_pages(struct drbd_device *mdev, struct page *page, int is_net) 295 295 { 296 296 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; 297 297 int i; ··· 331 331 */ 332 332 333 333 struct drbd_peer_request * 334 - drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector, 334 + drbd_alloc_peer_req(struct drbd_device *mdev, u64 id, sector_t sector, 335 335 unsigned int data_size, gfp_t gfp_mask) __must_hold(local) 336 336 { 337 337 struct drbd_peer_request *peer_req; ··· 378 378 return NULL; 379 379 } 380 380 381 - void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req, 381 + void __drbd_free_peer_req(struct drbd_device *mdev, struct drbd_peer_request *peer_req, 382 382 int is_net) 383 383 { 384 384 if (peer_req->flags & EE_HAS_DIGEST) ··· 389 389 mempool_free(peer_req, drbd_ee_mempool); 390 390 } 391 391 392 - int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list) 392 + int drbd_free_peer_reqs(struct drbd_device *mdev, struct list_head *list) 393 393 { 394 394 LIST_HEAD(work_list); 395 395 struct drbd_peer_request *peer_req, *t; ··· 410 410 /* 411 411 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier. 
412 412 */ 413 - static int drbd_finish_peer_reqs(struct drbd_conf *mdev) 413 + static int drbd_finish_peer_reqs(struct drbd_device *mdev) 414 414 { 415 415 LIST_HEAD(work_list); 416 416 LIST_HEAD(reclaimed); ··· 443 443 return err; 444 444 } 445 445 446 - static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, 446 + static void _drbd_wait_ee_list_empty(struct drbd_device *mdev, 447 447 struct list_head *head) 448 448 { 449 449 DEFINE_WAIT(wait); ··· 459 459 } 460 460 } 461 461 462 - static void drbd_wait_ee_list_empty(struct drbd_conf *mdev, 462 + static void drbd_wait_ee_list_empty(struct drbd_device *mdev, 463 463 struct list_head *head) 464 464 { 465 465 spin_lock_irq(&mdev->tconn->req_lock); ··· 831 831 } 832 832 /* Gets called if a connection is established, or if a new minor gets created 833 833 in a connection */ 834 - int drbd_connected(struct drbd_conf *mdev) 834 + int drbd_connected(struct drbd_device *mdev) 835 835 { 836 836 int err; 837 837 ··· 867 867 static int conn_connect(struct drbd_tconn *tconn) 868 868 { 869 869 struct drbd_socket sock, msock; 870 - struct drbd_conf *mdev; 870 + struct drbd_device *mdev; 871 871 struct net_conf *nc; 872 872 int vnr, timeout, h, ok; 873 873 bool discard_my_data; ··· 1145 1145 static void drbd_flush(struct drbd_tconn *tconn) 1146 1146 { 1147 1147 int rv; 1148 - struct drbd_conf *mdev; 1148 + struct drbd_device *mdev; 1149 1149 int vnr; 1150 1150 1151 1151 if (tconn->write_ordering >= WO_bdev_flush) { ··· 1260 1260 void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo) 1261 1261 { 1262 1262 struct disk_conf *dc; 1263 - struct drbd_conf *mdev; 1263 + struct drbd_device *mdev; 1264 1264 enum write_ordering_e pwo; 1265 1265 int vnr; 1266 1266 static char *write_ordering_str[] = { ··· 1306 1306 * on certain Xen deployments. 1307 1307 */ 1308 1308 /* TODO allocate from our own bio_set. */ 1309 - int drbd_submit_peer_request(struct drbd_conf *mdev, 1309 + int drbd_submit_peer_request(struct drbd_device *mdev, 1310 1310 struct drbd_peer_request *peer_req, 1311 1311 const unsigned rw, const int fault_type) 1312 1312 { ··· 1386 1386 return err; 1387 1387 } 1388 1388 1389 - static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev, 1389 + static void drbd_remove_epoch_entry_interval(struct drbd_device *mdev, 1390 1390 struct drbd_peer_request *peer_req) 1391 1391 { 1392 1392 struct drbd_interval *i = &peer_req->i; ··· 1401 1401 1402 1402 static void conn_wait_active_ee_empty(struct drbd_tconn *tconn) 1403 1403 { 1404 - struct drbd_conf *mdev; 1404 + struct drbd_device *mdev; 1405 1405 int vnr; 1406 1406 1407 1407 rcu_read_lock(); ··· 1485 1485 /* used from receive_RSDataReply (recv_resync_read) 1486 1486 * and from receive_Data */ 1487 1487 static struct drbd_peer_request * 1488 - read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, 1488 + read_in_block(struct drbd_device *mdev, u64 id, sector_t sector, 1489 1489 int data_size) __must_hold(local) 1490 1490 { 1491 1491 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); ··· 1568 1568 /* drbd_drain_block() just takes a data block 1569 1569 * out of the socket input buffer, and discards it. 
1570 1570 */ 1571 - static int drbd_drain_block(struct drbd_conf *mdev, int data_size) 1571 + static int drbd_drain_block(struct drbd_device *mdev, int data_size) 1572 1572 { 1573 1573 struct page *page; 1574 1574 int err = 0; ··· 1593 1593 return err; 1594 1594 } 1595 1595 1596 - static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1596 + static int recv_dless_read(struct drbd_device *mdev, struct drbd_request *req, 1597 1597 sector_t sector, int data_size) 1598 1598 { 1599 1599 struct bio_vec bvec; ··· 1649 1649 { 1650 1650 struct drbd_peer_request *peer_req = 1651 1651 container_of(w, struct drbd_peer_request, w); 1652 - struct drbd_conf *mdev = w->mdev; 1652 + struct drbd_device *mdev = w->mdev; 1653 1653 sector_t sector = peer_req->i.sector; 1654 1654 int err; 1655 1655 ··· 1669 1669 return err; 1670 1670 } 1671 1671 1672 - static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) 1672 + static int recv_resync_read(struct drbd_device *mdev, sector_t sector, int data_size) __releases(local) 1673 1673 { 1674 1674 struct drbd_peer_request *peer_req; 1675 1675 ··· 1706 1706 } 1707 1707 1708 1708 static struct drbd_request * 1709 - find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id, 1709 + find_request(struct drbd_device *mdev, struct rb_root *root, u64 id, 1710 1710 sector_t sector, bool missing_ok, const char *func) 1711 1711 { 1712 1712 struct drbd_request *req; ··· 1724 1724 1725 1725 static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi) 1726 1726 { 1727 - struct drbd_conf *mdev; 1727 + struct drbd_device *mdev; 1728 1728 struct drbd_request *req; 1729 1729 sector_t sector; 1730 1730 int err; ··· 1757 1757 1758 1758 static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi) 1759 1759 { 1760 - struct drbd_conf *mdev; 1760 + struct drbd_device *mdev; 1761 1761 sector_t sector; 1762 1762 int err; 1763 1763 struct p_data *p = pi->data; ··· 1788 1788 return err; 1789 1789 } 1790 1790 1791 - static void restart_conflicting_writes(struct drbd_conf *mdev, 1791 + static void restart_conflicting_writes(struct drbd_device *mdev, 1792 1792 sector_t sector, int size) 1793 1793 { 1794 1794 struct drbd_interval *i; ··· 1814 1814 { 1815 1815 struct drbd_peer_request *peer_req = 1816 1816 container_of(w, struct drbd_peer_request, w); 1817 - struct drbd_conf *mdev = w->mdev; 1817 + struct drbd_device *mdev = w->mdev; 1818 1818 sector_t sector = peer_req->i.sector; 1819 1819 int err = 0, pcmd; 1820 1820 ··· 1853 1853 1854 1854 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack) 1855 1855 { 1856 - struct drbd_conf *mdev = w->mdev; 1856 + struct drbd_device *mdev = w->mdev; 1857 1857 struct drbd_peer_request *peer_req = 1858 1858 container_of(w, struct drbd_peer_request, w); 1859 1859 int err; ··· 1892 1892 return seq_greater(a, b) ? a : b; 1893 1893 } 1894 1894 1895 - static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) 1895 + static void update_peer_seq(struct drbd_device *mdev, unsigned int peer_seq) 1896 1896 { 1897 1897 unsigned int newest_peer_seq; 1898 1898 ··· 1913 1913 } 1914 1914 1915 1915 /* maybe change sync_ee into interval trees as well? 
*/ 1916 - static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) 1916 + static bool overlapping_resync_write(struct drbd_device *mdev, struct drbd_peer_request *peer_req) 1917 1917 { 1918 1918 struct drbd_peer_request *rs_req; 1919 1919 bool rv = 0; ··· 1952 1952 * 1953 1953 * returns 0 if we may process the packet, 1954 1954 * -ERESTARTSYS if we were interrupted (by disconnect signal). */ 1955 - static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq) 1955 + static int wait_for_and_update_peer_seq(struct drbd_device *mdev, const u32 peer_seq) 1956 1956 { 1957 1957 DEFINE_WAIT(wait); 1958 1958 long timeout; ··· 2002 2002 /* see also bio_flags_to_wire() 2003 2003 * DRBD_REQ_*, because we need to semantically map the flags to data packet 2004 2004 * flags and back. We may replicate to other kernel versions. */ 2005 - static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) 2005 + static unsigned long wire_flags_to_bio(struct drbd_device *mdev, u32 dpf) 2006 2006 { 2007 2007 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 2008 2008 (dpf & DP_FUA ? REQ_FUA : 0) | ··· 2010 2010 (dpf & DP_DISCARD ? REQ_DISCARD : 0); 2011 2011 } 2012 2012 2013 - static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector, 2013 + static void fail_postponed_requests(struct drbd_device *mdev, sector_t sector, 2014 2014 unsigned int size) 2015 2015 { 2016 2016 struct drbd_interval *i; ··· 2035 2035 } 2036 2036 } 2037 2037 2038 - static int handle_write_conflicts(struct drbd_conf *mdev, 2038 + static int handle_write_conflicts(struct drbd_device *mdev, 2039 2039 struct drbd_peer_request *peer_req) 2040 2040 { 2041 2041 struct drbd_tconn *tconn = mdev->tconn; ··· 2147 2147 /* mirrored write */ 2148 2148 static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) 2149 2149 { 2150 - struct drbd_conf *mdev; 2150 + struct drbd_device *mdev; 2151 2151 sector_t sector; 2152 2152 struct drbd_peer_request *peer_req; 2153 2153 struct p_data *p = pi->data; ··· 2296 2296 * The current sync rate used here uses only the most recent two step marks, 2297 2297 * to have a short time average so we can react faster. 
2298 2298 */ 2299 - int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) 2299 + int drbd_rs_should_slow_down(struct drbd_device *mdev, sector_t sector) 2300 2300 { 2301 2301 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; 2302 2302 unsigned long db, dt, dbdt; ··· 2359 2359 2360 2360 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi) 2361 2361 { 2362 - struct drbd_conf *mdev; 2362 + struct drbd_device *mdev; 2363 2363 sector_t sector; 2364 2364 sector_t capacity; 2365 2365 struct drbd_peer_request *peer_req; ··· 2545 2545 return -EIO; 2546 2546 } 2547 2547 2548 - static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) 2548 + static int drbd_asb_recover_0p(struct drbd_device *mdev) __must_hold(local) 2549 2549 { 2550 2550 int self, peer, rv = -100; 2551 2551 unsigned long ch_self, ch_peer; ··· 2622 2622 return rv; 2623 2623 } 2624 2624 2625 - static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2625 + static int drbd_asb_recover_1p(struct drbd_device *mdev) __must_hold(local) 2626 2626 { 2627 2627 int hg, rv = -100; 2628 2628 enum drbd_after_sb_p after_sb_1p; ··· 2675 2675 return rv; 2676 2676 } 2677 2677 2678 - static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2678 + static int drbd_asb_recover_2p(struct drbd_device *mdev) __must_hold(local) 2679 2679 { 2680 2680 int hg, rv = -100; 2681 2681 enum drbd_after_sb_p after_sb_2p; ··· 2721 2721 return rv; 2722 2722 } 2723 2723 2724 - static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, 2724 + static void drbd_uuid_dump(struct drbd_device *mdev, char *text, u64 *uuid, 2725 2725 u64 bits, u64 flags) 2726 2726 { 2727 2727 if (!uuid) { ··· 2750 2750 -1091 requires proto 91 2751 2751 -1096 requires proto 96 2752 2752 */ 2753 - static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2753 + static int drbd_uuid_compare(struct drbd_device *mdev, int *rule_nr) __must_hold(local) 2754 2754 { 2755 2755 u64 self, peer; 2756 2756 int i, j; ··· 2935 2935 /* drbd_sync_handshake() returns the new conn state on success, or 2936 2936 CONN_MASK (-1) on failure. 2937 2937 */ 2938 - static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, 2938 + static enum drbd_conns drbd_sync_handshake(struct drbd_device *mdev, enum drbd_role peer_role, 2939 2939 enum drbd_disk_state peer_disk) __must_hold(local) 2940 2940 { 2941 2941 enum drbd_conns rv = C_MASK; ··· 3259 3259 * ERR_PTR(error) if something goes wrong 3260 3260 * or the crypto hash ptr, if it worked out ok. 
*/ 3261 3261 static 3262 - struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, 3262 + struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *mdev, 3263 3263 const char *alg, const char *name) 3264 3264 { 3265 3265 struct crypto_hash *tfm; ··· 3316 3316 3317 3317 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi) 3318 3318 { 3319 - struct drbd_conf *mdev; 3319 + struct drbd_device *mdev; 3320 3320 struct p_rs_param_95 *p; 3321 3321 unsigned int header_size, data_size, exp_max_sz; 3322 3322 struct crypto_hash *verify_tfm = NULL; ··· 3525 3525 } 3526 3526 3527 3527 /* warn if the arguments differ by more than 12.5% */ 3528 - static void warn_if_differ_considerably(struct drbd_conf *mdev, 3528 + static void warn_if_differ_considerably(struct drbd_device *mdev, 3529 3529 const char *s, sector_t a, sector_t b) 3530 3530 { 3531 3531 sector_t d; ··· 3539 3539 3540 3540 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) 3541 3541 { 3542 - struct drbd_conf *mdev; 3542 + struct drbd_device *mdev; 3543 3543 struct p_sizes *p = pi->data; 3544 3544 enum determine_dev_size dd = DS_UNCHANGED; 3545 3545 sector_t p_size, p_usize, my_usize; ··· 3660 3660 3661 3661 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi) 3662 3662 { 3663 - struct drbd_conf *mdev; 3663 + struct drbd_device *mdev; 3664 3664 struct p_uuids *p = pi->data; 3665 3665 u64 *p_uuid; 3666 3666 int i, updated_uuids = 0; ··· 3765 3765 3766 3766 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi) 3767 3767 { 3768 - struct drbd_conf *mdev; 3768 + struct drbd_device *mdev; 3769 3769 struct p_req_state *p = pi->data; 3770 3770 union drbd_state mask, val; 3771 3771 enum drbd_state_rv rv; ··· 3820 3820 3821 3821 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi) 3822 3822 { 3823 - struct drbd_conf *mdev; 3823 + struct drbd_device *mdev; 3824 3824 struct p_state *p = pi->data; 3825 3825 union drbd_state os, ns, peer_state; 3826 3826 enum drbd_disk_state real_peer_disk; ··· 3996 3996 3997 3997 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi) 3998 3998 { 3999 - struct drbd_conf *mdev; 3999 + struct drbd_device *mdev; 4000 4000 struct p_rs_uuid *p = pi->data; 4001 4001 4002 4002 mdev = vnr_to_mdev(tconn, pi->vnr); ··· 4034 4034 * code upon failure. 4035 4035 */ 4036 4036 static int 4037 - receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size, 4037 + receive_bitmap_plain(struct drbd_device *mdev, unsigned int size, 4038 4038 unsigned long *p, struct bm_xfer_ctx *c) 4039 4039 { 4040 4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - ··· 4086 4086 * code upon failure. 4087 4087 */ 4088 4088 static int 4089 - recv_bm_rle_bits(struct drbd_conf *mdev, 4089 + recv_bm_rle_bits(struct drbd_device *mdev, 4090 4090 struct p_compressed_bm *p, 4091 4091 struct bm_xfer_ctx *c, 4092 4092 unsigned int len) ··· 4155 4155 * code upon failure. 
4156 4156 */ 4157 4157 static int 4158 - decode_bitmap_c(struct drbd_conf *mdev, 4158 + decode_bitmap_c(struct drbd_device *mdev, 4159 4159 struct p_compressed_bm *p, 4160 4160 struct bm_xfer_ctx *c, 4161 4161 unsigned int len) ··· 4172 4172 return -EIO; 4173 4173 } 4174 4174 4175 - void INFO_bm_xfer_stats(struct drbd_conf *mdev, 4175 + void INFO_bm_xfer_stats(struct drbd_device *mdev, 4176 4176 const char *direction, struct bm_xfer_ctx *c) 4177 4177 { 4178 4178 /* what would it take to transfer it "plaintext" */ ··· 4218 4218 returns 0 on failure, 1 if we successfully received it. */ 4219 4219 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi) 4220 4220 { 4221 - struct drbd_conf *mdev; 4221 + struct drbd_device *mdev; 4222 4222 struct bm_xfer_ctx c; 4223 4223 int err; 4224 4224 ··· 4321 4321 4322 4322 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi) 4323 4323 { 4324 - struct drbd_conf *mdev; 4324 + struct drbd_device *mdev; 4325 4325 struct p_block_desc *p = pi->data; 4326 4326 4327 4327 mdev = vnr_to_mdev(tconn, pi->vnr); ··· 4436 4436 4437 4437 static void conn_disconnect(struct drbd_tconn *tconn) 4438 4438 { 4439 - struct drbd_conf *mdev; 4439 + struct drbd_device *mdev; 4440 4440 enum drbd_conns oc; 4441 4441 int vnr; 4442 4442 ··· 4486 4486 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); 4487 4487 } 4488 4488 4489 - static int drbd_disconnected(struct drbd_conf *mdev) 4489 + static int drbd_disconnected(struct drbd_device *mdev) 4490 4490 { 4491 4491 unsigned int i; 4492 4492 ··· 4885 4885 4886 4886 static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi) 4887 4887 { 4888 - struct drbd_conf *mdev; 4888 + struct drbd_device *mdev; 4889 4889 struct p_req_state_reply *p = pi->data; 4890 4890 int retcode = be32_to_cpu(p->retcode); 4891 4891 ··· 4928 4928 4929 4929 static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi) 4930 4930 { 4931 - struct drbd_conf *mdev; 4931 + struct drbd_device *mdev; 4932 4932 struct p_block_ack *p = pi->data; 4933 4933 sector_t sector = be64_to_cpu(p->sector); 4934 4934 int blksize = be32_to_cpu(p->blksize); ··· 4955 4955 } 4956 4956 4957 4957 static int 4958 - validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector, 4958 + validate_req_change_req_state(struct drbd_device *mdev, u64 id, sector_t sector, 4959 4959 struct rb_root *root, const char *func, 4960 4960 enum drbd_req_event what, bool missing_ok) 4961 4961 { ··· 4978 4978 4979 4979 static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi) 4980 4980 { 4981 - struct drbd_conf *mdev; 4981 + struct drbd_device *mdev; 4982 4982 struct p_block_ack *p = pi->data; 4983 4983 sector_t sector = be64_to_cpu(p->sector); 4984 4984 int blksize = be32_to_cpu(p->blksize); ··· 5022 5022 5023 5023 static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi) 5024 5024 { 5025 - struct drbd_conf *mdev; 5025 + struct drbd_device *mdev; 5026 5026 struct p_block_ack *p = pi->data; 5027 5027 sector_t sector = be64_to_cpu(p->sector); 5028 5028 int size = be32_to_cpu(p->blksize); ··· 5056 5056 5057 5057 static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi) 5058 5058 { 5059 - struct drbd_conf *mdev; 5059 + struct drbd_device *mdev; 5060 5060 struct p_block_ack *p = pi->data; 5061 5061 sector_t sector = be64_to_cpu(p->sector); 5062 5062 ··· 5076 5076 5077 5077 static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi) 5078 5078 { 
5079 - struct drbd_conf *mdev; 5079 + struct drbd_device *mdev; 5080 5080 sector_t sector; 5081 5081 int size; 5082 5082 struct p_block_ack *p = pi->data; ··· 5111 5111 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi) 5112 5112 { 5113 5113 struct p_barrier_ack *p = pi->data; 5114 - struct drbd_conf *mdev; 5114 + struct drbd_device *mdev; 5115 5115 int vnr; 5116 5116 5117 5117 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size)); ··· 5132 5132 5133 5133 static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi) 5134 5134 { 5135 - struct drbd_conf *mdev; 5135 + struct drbd_device *mdev; 5136 5136 struct p_block_ack *p = pi->data; 5137 5137 struct drbd_work *w; 5138 5138 sector_t sector; ··· 5187 5187 5188 5188 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn) 5189 5189 { 5190 - struct drbd_conf *mdev; 5190 + struct drbd_device *mdev; 5191 5191 int vnr, not_empty = 0; 5192 5192 5193 5193 do {
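
Each of the receive_*()/got_*() handlers above follows the same shape: resolve the per-volume device from the connection via the packet's volume number, bail out if it does not exist, then decode the big-endian payload fields. The userspace sketch below only mimics that shape with stand-in types; demo_vnr_to_device(), struct demo_conn and struct demo_packet_info are illustrative substitutes for drbd's vnr_to_mdev(), struct drbd_tconn and struct packet_info, not the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/htonl() as stand-ins for be32 conversion */

/* Simplified stand-ins for the kernel structures the handlers use. */
struct demo_device { int vnr; unsigned long recv_cnt; };

struct demo_conn {
	struct demo_device *volumes[4];   /* indexed by volume number */
};

struct demo_packet_info {
	int vnr;          /* volume number from the packet header */
	void *data;       /* payload, still in network byte order */
};

/* Mirrors the vnr_to_mdev() step: NULL if the volume does not exist. */
static struct demo_device *demo_vnr_to_device(struct demo_conn *conn, int vnr)
{
	if (vnr < 0 || vnr >= 4)
		return NULL;
	return conn->volumes[vnr];
}

/* Shape of a got_*() handler: look up the device, then decode fields. */
static int demo_got_block_ack(struct demo_conn *conn, struct demo_packet_info *pi)
{
	struct demo_device *device = demo_vnr_to_device(conn, pi->vnr);
	uint32_t blksize_be;

	if (!device)
		return -1;                            /* unknown volume */
	blksize_be = *(uint32_t *)pi->data;
	device->recv_cnt += ntohl(blksize_be) >> 9;   /* bytes -> sectors */
	return 0;
}

int main(void)
{
	struct demo_device vol0 = { .vnr = 0 };
	struct demo_conn conn = { .volumes = { &vol0 } };
	uint32_t payload = htonl(4096);
	struct demo_packet_info pi = { .vnr = 0, .data = &payload };

	demo_got_block_ack(&conn, &pi);
	printf("sectors received on vnr 0: %lu\n", vol0.recv_cnt);
	return 0;
}

Since only the pointer type of mdev changes, handlers of this shape are semantically untouched by the rename.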
+29 -29
drivers/block/drbd/drbd_req.c
··· 31 31 #include "drbd_req.h" 32 32 33 33 34 - static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size); 34 + static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size); 35 35 36 36 /* Update disk stats at start of I/O request */ 37 - static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req) 37 + static void _drbd_start_io_acct(struct drbd_device *mdev, struct drbd_request *req) 38 38 { 39 39 const int rw = bio_data_dir(req->master_bio); 40 40 int cpu; ··· 49 49 } 50 50 51 51 /* Update disk stats when completing request upwards */ 52 - static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) 52 + static void _drbd_end_io_acct(struct drbd_device *mdev, struct drbd_request *req) 53 53 { 54 54 int rw = bio_data_dir(req->master_bio); 55 55 unsigned long duration = jiffies - req->start_time; ··· 61 61 part_stat_unlock(); 62 62 } 63 63 64 - static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, 64 + static struct drbd_request *drbd_req_new(struct drbd_device *mdev, 65 65 struct bio *bio_src) 66 66 { 67 67 struct drbd_request *req; ··· 95 95 void drbd_req_destroy(struct kref *kref) 96 96 { 97 97 struct drbd_request *req = container_of(kref, struct drbd_request, kref); 98 - struct drbd_conf *mdev = req->w.mdev; 98 + struct drbd_device *mdev = req->w.mdev; 99 99 const unsigned s = req->rq_state; 100 100 101 101 if ((req->master_bio && !(s & RQ_POSTPONED)) || ··· 179 179 wake_all_senders(tconn); 180 180 } 181 181 182 - void complete_master_bio(struct drbd_conf *mdev, 182 + void complete_master_bio(struct drbd_device *mdev, 183 183 struct bio_and_error *m) 184 184 { 185 185 bio_endio(m->bio, m->error); ··· 190 190 static void drbd_remove_request_interval(struct rb_root *root, 191 191 struct drbd_request *req) 192 192 { 193 - struct drbd_conf *mdev = req->w.mdev; 193 + struct drbd_device *mdev = req->w.mdev; 194 194 struct drbd_interval *i = &req->i; 195 195 196 196 drbd_remove_interval(root, i); ··· 210 210 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) 211 211 { 212 212 const unsigned s = req->rq_state; 213 - struct drbd_conf *mdev = req->w.mdev; 213 + struct drbd_device *mdev = req->w.mdev; 214 214 int rw; 215 215 int error, ok; 216 216 ··· 305 305 306 306 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) 307 307 { 308 - struct drbd_conf *mdev = req->w.mdev; 308 + struct drbd_device *mdev = req->w.mdev; 309 309 D_ASSERT(m || (req->rq_state & RQ_POSTPONED)); 310 310 311 311 if (!atomic_sub_and_test(put, &req->completion_ref)) ··· 328 328 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, 329 329 int clear, int set) 330 330 { 331 - struct drbd_conf *mdev = req->w.mdev; 331 + struct drbd_device *mdev = req->w.mdev; 332 332 unsigned s = req->rq_state; 333 333 int c_put = 0; 334 334 int k_put = 0; ··· 424 424 kref_sub(&req->kref, k_put, drbd_req_destroy); 425 425 } 426 426 427 - static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req) 427 + static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *req) 428 428 { 429 429 char b[BDEVNAME_SIZE]; 430 430 ··· 453 453 int __req_mod(struct drbd_request *req, enum drbd_req_event what, 454 454 struct bio_and_error *m) 455 455 { 456 - struct drbd_conf *mdev = req->w.mdev; 456 + struct drbd_device *mdev = req->w.mdev; 457 457 struct net_conf *nc; 458 458 int p, rv = 0; 459 459 ··· 771 771 * since size 
may be bigger than BM_BLOCK_SIZE, 772 772 * we may need to check several bits. 773 773 */ 774 - static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size) 774 + static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size) 775 775 { 776 776 unsigned long sbnr, ebnr; 777 777 sector_t esector, nr_sectors; ··· 791 791 return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0; 792 792 } 793 793 794 - static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector, 794 + static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sector, 795 795 enum drbd_read_balancing rbm) 796 796 { 797 797 struct backing_dev_info *bdi; ··· 834 834 static void complete_conflicting_writes(struct drbd_request *req) 835 835 { 836 836 DEFINE_WAIT(wait); 837 - struct drbd_conf *mdev = req->w.mdev; 837 + struct drbd_device *mdev = req->w.mdev; 838 838 struct drbd_interval *i; 839 839 sector_t sector = req->i.sector; 840 840 int size = req->i.size; ··· 858 858 } 859 859 860 860 /* called within req_lock and rcu_read_lock() */ 861 - static void maybe_pull_ahead(struct drbd_conf *mdev) 861 + static void maybe_pull_ahead(struct drbd_device *mdev) 862 862 { 863 863 struct drbd_tconn *tconn = mdev->tconn; 864 864 struct net_conf *nc; ··· 914 914 */ 915 915 static bool do_remote_read(struct drbd_request *req) 916 916 { 917 - struct drbd_conf *mdev = req->w.mdev; 917 + struct drbd_device *mdev = req->w.mdev; 918 918 enum drbd_read_balancing rbm; 919 919 920 920 if (req->private_bio) { ··· 959 959 * which does NOT include those that we are L_AHEAD for. */ 960 960 static int drbd_process_write_request(struct drbd_request *req) 961 961 { 962 - struct drbd_conf *mdev = req->w.mdev; 962 + struct drbd_device *mdev = req->w.mdev; 963 963 int remote, send_oos; 964 964 965 965 remote = drbd_should_do_remote(mdev->state); ··· 996 996 static void 997 997 drbd_submit_req_private_bio(struct drbd_request *req) 998 998 { 999 - struct drbd_conf *mdev = req->w.mdev; 999 + struct drbd_device *mdev = req->w.mdev; 1000 1000 struct bio *bio = req->private_bio; 1001 1001 const int rw = bio_rw(bio); 1002 1002 ··· 1020 1020 bio_endio(bio, -EIO); 1021 1021 } 1022 1022 1023 - static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req) 1023 + static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req) 1024 1024 { 1025 1025 spin_lock(&mdev->submit.lock); 1026 1026 list_add_tail(&req->tl_requests, &mdev->submit.writes); ··· 1034 1034 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request. 
1035 1035 */ 1036 1036 static struct drbd_request * 1037 - drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) 1037 + drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long start_time) 1038 1038 { 1039 1039 const int rw = bio_data_dir(bio); 1040 1040 struct drbd_request *req; ··· 1071 1071 return req; 1072 1072 } 1073 1073 1074 - static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req) 1074 + static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *req) 1075 1075 { 1076 1076 const int rw = bio_rw(req->master_bio); 1077 1077 struct bio_and_error m = { NULL, }; ··· 1160 1160 complete_master_bio(mdev, &m); 1161 1161 } 1162 1162 1163 - void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) 1163 + void __drbd_make_request(struct drbd_device *mdev, struct bio *bio, unsigned long start_time) 1164 1164 { 1165 1165 struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time); 1166 1166 if (IS_ERR_OR_NULL(req)) ··· 1168 1168 drbd_send_and_submit(mdev, req); 1169 1169 } 1170 1170 1171 - static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming) 1171 + static void submit_fast_path(struct drbd_device *mdev, struct list_head *incoming) 1172 1172 { 1173 1173 struct drbd_request *req, *tmp; 1174 1174 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { ··· 1188 1188 } 1189 1189 } 1190 1190 1191 - static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev, 1191 + static bool prepare_al_transaction_nonblock(struct drbd_device *mdev, 1192 1192 struct list_head *incoming, 1193 1193 struct list_head *pending) 1194 1194 { ··· 1215 1215 1216 1216 void do_submit(struct work_struct *ws) 1217 1217 { 1218 - struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker); 1218 + struct drbd_device *mdev = container_of(ws, struct drbd_device, submit.worker); 1219 1219 LIST_HEAD(incoming); 1220 1220 LIST_HEAD(pending); 1221 1221 struct drbd_request *req, *tmp; ··· 1272 1272 1273 1273 void drbd_make_request(struct request_queue *q, struct bio *bio) 1274 1274 { 1275 - struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; 1275 + struct drbd_device *mdev = (struct drbd_device *) q->queuedata; 1276 1276 unsigned long start_time; 1277 1277 1278 1278 start_time = jiffies; ··· 1300 1300 */ 1301 1301 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) 1302 1302 { 1303 - struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; 1303 + struct drbd_device *mdev = (struct drbd_device *) q->queuedata; 1304 1304 unsigned int bio_size = bvm->bi_size; 1305 1305 int limit = DRBD_MAX_BIO_SIZE; 1306 1306 int backing_limit; ··· 1334 1334 1335 1335 void request_timer_fn(unsigned long data) 1336 1336 { 1337 - struct drbd_conf *mdev = (struct drbd_conf *) data; 1337 + struct drbd_device *mdev = (struct drbd_device *) data; 1338 1338 struct drbd_tconn *tconn = mdev->tconn; 1339 1339 struct drbd_request *req; /* oldest request */ 1340 1340 struct net_conf *nc;
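
In drbd_req.c the rename has to cover more than pointer declarations: the type name also appears as an operand of container_of() (do_submit() above recovers the device from its embedded submit.worker) and of casts (drbd_make_request() casts q->queuedata). A self-contained sketch of the container_of() recovery, using a local userspace macro and stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(); the kernel version
 * adds extra type checking, the pointer arithmetic is the same. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

/* Stand-in for struct drbd_device: the submit worker is embedded in it. */
struct demo_device {
	unsigned minor;
	struct { struct work_struct worker; } submit;
};

/* Shape of do_submit(): the callback only receives the embedded member
 * and must name the outer type explicitly to get back to the device. */
static void demo_do_submit(struct work_struct *ws)
{
	struct demo_device *device =
		container_of(ws, struct demo_device, submit.worker);

	printf("submitting for minor %u\n", device->minor);
}

int main(void)
{
	struct demo_device dev = { .minor = 3 };

	demo_do_submit(&dev.submit.worker);
	return 0;
}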
+3 -3
drivers/block/drbd/drbd_req.h
··· 281 281 struct bio_and_error *m); 282 282 extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, 283 283 struct bio_and_error *m); 284 - extern void complete_master_bio(struct drbd_conf *mdev, 284 + extern void complete_master_bio(struct drbd_device *mdev, 285 285 struct bio_and_error *m); 286 286 extern void request_timer_fn(unsigned long data); 287 287 extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what); ··· 294 294 * outside the spinlock, e.g. when walking some list on cleanup. */ 295 295 static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) 296 296 { 297 - struct drbd_conf *mdev = req->w.mdev; 297 + struct drbd_device *mdev = req->w.mdev; 298 298 struct bio_and_error m; 299 299 int rv; 300 300 ··· 314 314 enum drbd_req_event what) 315 315 { 316 316 unsigned long flags; 317 - struct drbd_conf *mdev = req->w.mdev; 317 + struct drbd_device *mdev = req->w.mdev; 318 318 struct bio_and_error m; 319 319 int rv; 320 320
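
drbd_req.h pairs _req_mod(), which expects the caller to already hold the request lock, with req_mod(), which takes and releases it around the same work. A minimal pthread-based sketch of that locked/unlocked wrapper pairing; the mutex and the demo names are illustrative, not drbd's spinlock API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static int req_state;

/* "_" variant: caller already holds req_lock. */
static int _demo_req_mod(int what)
{
	req_state |= what;          /* mutate state under the lock */
	return req_state;
}

/* Plain variant: take the lock, delegate, drop the lock. */
static int demo_req_mod(int what)
{
	int rv;

	pthread_mutex_lock(&req_lock);
	rv = _demo_req_mod(what);
	pthread_mutex_unlock(&req_lock);
	return rv;
}

int main(void)
{
	printf("state after mod: %#x\n", demo_req_mod(0x4));
	return 0;
}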
+36 -36
drivers/block/drbd/drbd_state.c
··· 48 48 }; 49 49 50 50 static int w_after_state_ch(struct drbd_work *w, int unused); 51 - static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, 51 + static void after_state_ch(struct drbd_device *mdev, union drbd_state os, 52 52 union drbd_state ns, enum chg_state_flags flags); 53 - static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state); 53 + static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state); 54 54 static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *); 55 55 static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns); 56 - static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns, 56 + static union drbd_state sanitize_state(struct drbd_device *mdev, union drbd_state ns, 57 57 enum sanitize_state_warnings *warn); 58 58 59 59 static inline bool is_susp(union drbd_state s) ··· 63 63 64 64 bool conn_all_vols_unconf(struct drbd_tconn *tconn) 65 65 { 66 - struct drbd_conf *mdev; 66 + struct drbd_device *mdev; 67 67 bool rv = true; 68 68 int vnr; 69 69 ··· 103 103 enum drbd_role conn_highest_role(struct drbd_tconn *tconn) 104 104 { 105 105 enum drbd_role role = R_UNKNOWN; 106 - struct drbd_conf *mdev; 106 + struct drbd_device *mdev; 107 107 int vnr; 108 108 109 109 rcu_read_lock(); ··· 117 117 enum drbd_role conn_highest_peer(struct drbd_tconn *tconn) 118 118 { 119 119 enum drbd_role peer = R_UNKNOWN; 120 - struct drbd_conf *mdev; 120 + struct drbd_device *mdev; 121 121 int vnr; 122 122 123 123 rcu_read_lock(); ··· 131 131 enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn) 132 132 { 133 133 enum drbd_disk_state ds = D_DISKLESS; 134 - struct drbd_conf *mdev; 134 + struct drbd_device *mdev; 135 135 int vnr; 136 136 137 137 rcu_read_lock(); ··· 145 145 enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn) 146 146 { 147 147 enum drbd_disk_state ds = D_MASK; 148 - struct drbd_conf *mdev; 148 + struct drbd_device *mdev; 149 149 int vnr; 150 150 151 151 rcu_read_lock(); ··· 159 159 enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn) 160 160 { 161 161 enum drbd_disk_state ds = D_DISKLESS; 162 - struct drbd_conf *mdev; 162 + struct drbd_device *mdev; 163 163 int vnr; 164 164 165 165 rcu_read_lock(); ··· 173 173 enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn) 174 174 { 175 175 enum drbd_conns conn = C_MASK; 176 - struct drbd_conf *mdev; 176 + struct drbd_device *mdev; 177 177 int vnr; 178 178 179 179 rcu_read_lock(); ··· 186 186 187 187 static bool no_peer_wf_report_params(struct drbd_tconn *tconn) 188 188 { 189 - struct drbd_conf *mdev; 189 + struct drbd_device *mdev; 190 190 int vnr; 191 191 bool rv = true; 192 192 ··· 208 208 * @os: old (current) state. 209 209 * @ns: new (wanted) state. 210 210 */ 211 - static int cl_wide_st_chg(struct drbd_conf *mdev, 211 + static int cl_wide_st_chg(struct drbd_device *mdev, 212 212 union drbd_state os, union drbd_state ns) 213 213 { 214 214 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED && ··· 230 230 } 231 231 232 232 enum drbd_state_rv 233 - drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, 233 + drbd_change_state(struct drbd_device *mdev, enum chg_state_flags f, 234 234 union drbd_state mask, union drbd_state val) 235 235 { 236 236 unsigned long flags; ··· 251 251 * @mask: mask of state bits to change. 252 252 * @val: value of new state bits. 
253 253 */ 254 - void drbd_force_state(struct drbd_conf *mdev, 254 + void drbd_force_state(struct drbd_device *mdev, 255 255 union drbd_state mask, union drbd_state val) 256 256 { 257 257 drbd_change_state(mdev, CS_HARD, mask, val); 258 258 } 259 259 260 260 static enum drbd_state_rv 261 - _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, 261 + _req_st_cond(struct drbd_device *mdev, union drbd_state mask, 262 262 union drbd_state val) 263 263 { 264 264 union drbd_state os, ns; ··· 304 304 * _drbd_request_state(). 305 305 */ 306 306 static enum drbd_state_rv 307 - drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, 307 + drbd_req_state(struct drbd_device *mdev, union drbd_state mask, 308 308 union drbd_state val, enum chg_state_flags f) 309 309 { 310 310 struct completion done; ··· 385 385 * flag, or when logging of failed state change requests is not desired. 386 386 */ 387 387 enum drbd_state_rv 388 - _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, 388 + _drbd_request_state(struct drbd_device *mdev, union drbd_state mask, 389 389 union drbd_state val, enum chg_state_flags f) 390 390 { 391 391 enum drbd_state_rv rv; ··· 396 396 return rv; 397 397 } 398 398 399 - static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) 399 + static void print_st(struct drbd_device *mdev, char *name, union drbd_state ns) 400 400 { 401 401 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", 402 402 name, ··· 414 414 ); 415 415 } 416 416 417 - void print_st_err(struct drbd_conf *mdev, union drbd_state os, 417 + void print_st_err(struct drbd_device *mdev, union drbd_state os, 418 418 union drbd_state ns, enum drbd_state_rv err) 419 419 { 420 420 if (err == SS_IN_TRANSIENT_STATE) ··· 455 455 return pbp - pb; 456 456 } 457 457 458 - static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns, 458 + static void drbd_pr_state_change(struct drbd_device *mdev, union drbd_state os, union drbd_state ns, 459 459 enum chg_state_flags flags) 460 460 { 461 461 char pb[300]; ··· 504 504 * @ns: State to consider. 505 505 */ 506 506 static enum drbd_state_rv 507 - is_valid_state(struct drbd_conf *mdev, union drbd_state ns) 507 + is_valid_state(struct drbd_device *mdev, union drbd_state ns) 508 508 { 509 509 /* See drbd_state_sw_errors in drbd_strings.c */ 510 510 ··· 701 701 return rv; 702 702 } 703 703 704 - static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn) 704 + static void print_sanitize_warnings(struct drbd_device *mdev, enum sanitize_state_warnings warn) 705 705 { 706 706 static const char *msg_table[] = { 707 707 [NO_WARNING] = "", ··· 726 726 * When we loose connection, we have to set the state of the peers disk (pdsk) 727 727 * to D_UNKNOWN. This rule and many more along those lines are in this function. 
728 728 */ 729 - static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns, 729 + static union drbd_state sanitize_state(struct drbd_device *mdev, union drbd_state ns, 730 730 enum sanitize_state_warnings *warn) 731 731 { 732 732 enum drbd_fencing_p fp; ··· 890 890 return ns; 891 891 } 892 892 893 - void drbd_resume_al(struct drbd_conf *mdev) 893 + void drbd_resume_al(struct drbd_device *mdev) 894 894 { 895 895 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags)) 896 896 dev_info(DEV, "Resumed AL updates\n"); 897 897 } 898 898 899 899 /* helper for __drbd_set_state */ 900 - static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) 900 + static void set_ov_position(struct drbd_device *mdev, enum drbd_conns cs) 901 901 { 902 902 if (mdev->tconn->agreed_pro_version < 90) 903 903 mdev->ov_start_sector = 0; ··· 933 933 * Caller needs to hold req_lock, and global_state_lock. Do not call directly. 934 934 */ 935 935 enum drbd_state_rv 936 - __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, 936 + __drbd_set_state(struct drbd_device *mdev, union drbd_state ns, 937 937 enum chg_state_flags flags, struct completion *done) 938 938 { 939 939 union drbd_state os; ··· 1145 1145 { 1146 1146 struct after_state_chg_work *ascw = 1147 1147 container_of(w, struct after_state_chg_work, w); 1148 - struct drbd_conf *mdev = w->mdev; 1148 + struct drbd_device *mdev = w->mdev; 1149 1149 1150 1150 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags); 1151 1151 if (ascw->flags & CS_WAIT_COMPLETE) { ··· 1157 1157 return 0; 1158 1158 } 1159 1159 1160 - static void abw_start_sync(struct drbd_conf *mdev, int rv) 1160 + static void abw_start_sync(struct drbd_device *mdev, int rv) 1161 1161 { 1162 1162 if (rv) { 1163 1163 dev_err(DEV, "Writing the bitmap failed not starting resync.\n"); ··· 1175 1175 } 1176 1176 } 1177 1177 1178 - int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, 1179 - int (*io_fn)(struct drbd_conf *), 1178 + int drbd_bitmap_io_from_worker(struct drbd_device *mdev, 1179 + int (*io_fn)(struct drbd_device *), 1180 1180 char *why, enum bm_flag flags) 1181 1181 { 1182 1182 int rv; ··· 1202 1202 * @ns: new state. 
1203 1203 * @flags: Flags 1204 1204 */ 1205 - static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, 1205 + static void after_state_ch(struct drbd_device *mdev, union drbd_state os, 1206 1206 union drbd_state ns, enum chg_state_flags flags) 1207 1207 { 1208 1208 struct sib_info sib; ··· 1255 1255 spin_lock_irq(&tconn->req_lock); 1256 1256 if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) { 1257 1257 /* case2: The connection was established again: */ 1258 - struct drbd_conf *odev; 1258 + struct drbd_device *odev; 1259 1259 int vnr; 1260 1260 1261 1261 rcu_read_lock(); ··· 1529 1529 struct drbd_tconn *tconn = w->tconn; 1530 1530 enum drbd_conns oc = acscw->oc; 1531 1531 union drbd_state ns_max = acscw->ns_max; 1532 - struct drbd_conf *mdev; 1532 + struct drbd_device *mdev; 1533 1533 int vnr; 1534 1534 1535 1535 kfree(acscw); ··· 1583 1583 void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf) 1584 1584 { 1585 1585 enum chg_state_flags flags = ~0; 1586 - struct drbd_conf *mdev; 1586 + struct drbd_device *mdev; 1587 1587 int vnr, first_vol = 1; 1588 1588 union drbd_dev_state os, cs = { 1589 1589 { .role = R_SECONDARY, ··· 1631 1631 { 1632 1632 enum drbd_state_rv rv = SS_SUCCESS; 1633 1633 union drbd_state ns, os; 1634 - struct drbd_conf *mdev; 1634 + struct drbd_device *mdev; 1635 1635 int vnr; 1636 1636 1637 1637 rcu_read_lock(); ··· 1680 1680 .disk = D_MASK, 1681 1681 .pdsk = D_MASK 1682 1682 } }; 1683 - struct drbd_conf *mdev; 1683 + struct drbd_device *mdev; 1684 1684 enum drbd_state_rv rv; 1685 1685 int vnr, number_of_volumes = 0; 1686 1686
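
The state code above operates on pairs of state words: a mask selecting which fields the caller wants changed and a val carrying the new values, which sanitize_state() and is_valid_state() then vet before __drbd_set_state() commits them. A toy model of the mask/val application step, with a much smaller state union than drbd's and none of the validation:

#include <stdio.h>

/* Toy stand-in for union drbd_state: named fields overlaid on one word. */
union demo_state {
	struct {
		unsigned role : 2;
		unsigned conn : 5;
		unsigned disk : 4;
	};
	unsigned int i;
};

/* Keep every field outside the mask, take masked fields from val. */
static union demo_state demo_apply(union demo_state os,
				   union demo_state mask,
				   union demo_state val)
{
	union demo_state ns;

	ns.i = (os.i & ~mask.i) | (val.i & mask.i);
	return ns;
}

int main(void)
{
	union demo_state os = { .role = 1, .conn = 10, .disk = 8 };
	union demo_state mask = { .i = 0 }, val = { .i = 0 };
	union demo_state ns;

	/* Request only a connection state change, leave role/disk alone. */
	mask.conn = ~0u;     /* select all bits of the conn field */
	val.conn = 2;
	ns = demo_apply(os, mask, val);

	printf("role %u conn %u disk %u\n",
	       (unsigned)ns.role, (unsigned)ns.conn, (unsigned)ns.disk);
	return 0;
}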
+8 -8
drivers/block/drbd/drbd_state.h
··· 1 1 #ifndef DRBD_STATE_H 2 2 #define DRBD_STATE_H 3 3 4 - struct drbd_conf; 4 + struct drbd_device; 5 5 struct drbd_tconn; 6 6 7 7 /** ··· 107 107 unsigned int i; 108 108 }; 109 109 110 - extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev, 110 + extern enum drbd_state_rv drbd_change_state(struct drbd_device *mdev, 111 111 enum chg_state_flags f, 112 112 union drbd_state mask, 113 113 union drbd_state val); 114 - extern void drbd_force_state(struct drbd_conf *, union drbd_state, 114 + extern void drbd_force_state(struct drbd_device *, union drbd_state, 115 115 union drbd_state); 116 - extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *, 116 + extern enum drbd_state_rv _drbd_request_state(struct drbd_device *, 117 117 union drbd_state, 118 118 union drbd_state, 119 119 enum chg_state_flags); 120 - extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state, 120 + extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state, 121 121 enum chg_state_flags, 122 122 struct completion *done); 123 - extern void print_st_err(struct drbd_conf *, union drbd_state, 123 + extern void print_st_err(struct drbd_device *, union drbd_state, 124 124 union drbd_state, int); 125 125 126 126 enum drbd_state_rv ··· 131 131 conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 132 132 enum chg_state_flags flags); 133 133 134 - extern void drbd_resume_al(struct drbd_conf *mdev); 134 + extern void drbd_resume_al(struct drbd_device *mdev); 135 135 extern bool conn_all_vols_unconf(struct drbd_tconn *tconn); 136 136 137 137 /** ··· 144 144 * quite verbose in case the state change is not possible, and all those 145 145 * state changes are globally serialized. 146 146 */ 147 - static inline int drbd_request_state(struct drbd_conf *mdev, 147 + static inline int drbd_request_state(struct drbd_device *mdev, 148 148 union drbd_state mask, 149 149 union drbd_state val) 150 150 {
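
drbd_state.h only ever handles struct drbd_device through pointers, so the forward declaration at the top is all the header needs and the rename stays a pure text substitution here; no layout knowledge leaks into the header. A minimal standalone illustration of that incomplete-type idiom, with the "header" and "implementation" halves folded into one file for brevity:

#include <stdio.h>
#include <stdlib.h>

/* --- what a header like drbd_state.h needs: only the name --- */
struct demo_device;                                         /* opaque type */
int demo_request_state(struct demo_device *dev, int val);   /* prototype   */

/* --- what only the .c files need: the full definition --- */
struct demo_device {
	int state;
};

int demo_request_state(struct demo_device *dev, int val)
{
	dev->state = val;       /* dereferencing needs the full definition */
	return dev->state;
}

int main(void)
{
	struct demo_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	printf("new state: %d\n", demo_request_state(dev, 7));
	free(dev);
	return 0;
}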
+48 -48
drivers/block/drbd/drbd_worker.c
··· 68 68 void drbd_md_io_complete(struct bio *bio, int error) 69 69 { 70 70 struct drbd_md_io *md_io; 71 - struct drbd_conf *mdev; 71 + struct drbd_device *mdev; 72 72 73 73 md_io = (struct drbd_md_io *)bio->bi_private; 74 - mdev = container_of(md_io, struct drbd_conf, md_io); 74 + mdev = container_of(md_io, struct drbd_device, md_io); 75 75 76 76 md_io->error = error; 77 77 ··· 100 100 static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local) 101 101 { 102 102 unsigned long flags = 0; 103 - struct drbd_conf *mdev = peer_req->w.mdev; 103 + struct drbd_device *mdev = peer_req->w.mdev; 104 104 105 105 spin_lock_irqsave(&mdev->tconn->req_lock, flags); 106 106 mdev->read_cnt += peer_req->i.size >> 9; ··· 120 120 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) 121 121 { 122 122 unsigned long flags = 0; 123 - struct drbd_conf *mdev = peer_req->w.mdev; 123 + struct drbd_device *mdev = peer_req->w.mdev; 124 124 struct drbd_interval i; 125 125 int do_wake; 126 126 u64 block_id; ··· 171 171 void drbd_peer_request_endio(struct bio *bio, int error) 172 172 { 173 173 struct drbd_peer_request *peer_req = bio->bi_private; 174 - struct drbd_conf *mdev = peer_req->w.mdev; 174 + struct drbd_device *mdev = peer_req->w.mdev; 175 175 int uptodate = bio_flagged(bio, BIO_UPTODATE); 176 176 int is_write = bio_data_dir(bio) == WRITE; 177 177 ··· 208 208 { 209 209 unsigned long flags; 210 210 struct drbd_request *req = bio->bi_private; 211 - struct drbd_conf *mdev = req->w.mdev; 211 + struct drbd_device *mdev = req->w.mdev; 212 212 struct bio_and_error m; 213 213 enum drbd_req_event what; 214 214 int uptodate = bio_flagged(bio, BIO_UPTODATE); ··· 282 282 complete_master_bio(mdev, &m); 283 283 } 284 284 285 - void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, 285 + void drbd_csum_ee(struct drbd_device *mdev, struct crypto_hash *tfm, 286 286 struct drbd_peer_request *peer_req, void *digest) 287 287 { 288 288 struct hash_desc desc; ··· 310 310 crypto_hash_final(&desc, digest); 311 311 } 312 312 313 - void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest) 313 + void drbd_csum_bio(struct drbd_device *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest) 314 314 { 315 315 struct hash_desc desc; 316 316 struct scatterlist sg; ··· 334 334 static int w_e_send_csum(struct drbd_work *w, int cancel) 335 335 { 336 336 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 337 - struct drbd_conf *mdev = w->mdev; 337 + struct drbd_device *mdev = w->mdev; 338 338 int digest_size; 339 339 void *digest; 340 340 int err = 0; ··· 379 379 380 380 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) 381 381 382 - static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) 382 + static int read_for_csum(struct drbd_device *mdev, sector_t sector, int size) 383 383 { 384 384 struct drbd_peer_request *peer_req; 385 385 ··· 421 421 422 422 int w_resync_timer(struct drbd_work *w, int cancel) 423 423 { 424 - struct drbd_conf *mdev = w->mdev; 424 + struct drbd_device *mdev = w->mdev; 425 425 switch (mdev->state.conn) { 426 426 case C_VERIFY_S: 427 427 w_make_ov_request(w, cancel); ··· 436 436 437 437 void resync_timer_fn(unsigned long data) 438 438 { 439 - struct drbd_conf *mdev = (struct drbd_conf *) data; 439 + struct drbd_device *mdev = (struct drbd_device *) data; 440 440 441 441 if (list_empty(&mdev->resync_work.list)) 442 442 
drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work); ··· 486 486 return fb; 487 487 } 488 488 489 - static int drbd_rs_controller(struct drbd_conf *mdev) 489 + static int drbd_rs_controller(struct drbd_device *mdev) 490 490 { 491 491 struct disk_conf *dc; 492 492 unsigned int sect_in; /* Number of sectors that came in since the last turn */ ··· 542 542 return req_sect; 543 543 } 544 544 545 - static int drbd_rs_number_requests(struct drbd_conf *mdev) 545 + static int drbd_rs_number_requests(struct drbd_device *mdev) 546 546 { 547 547 int number; 548 548 ··· 563 563 564 564 int w_make_resync_request(struct drbd_work *w, int cancel) 565 565 { 566 - struct drbd_conf *mdev = w->mdev; 566 + struct drbd_device *mdev = w->mdev; 567 567 unsigned long bit; 568 568 sector_t sector; 569 569 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); ··· 726 726 727 727 static int w_make_ov_request(struct drbd_work *w, int cancel) 728 728 { 729 - struct drbd_conf *mdev = w->mdev; 729 + struct drbd_device *mdev = w->mdev; 730 730 int number, i, size; 731 731 sector_t sector; 732 732 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); ··· 780 780 781 781 int w_ov_finished(struct drbd_work *w, int cancel) 782 782 { 783 - struct drbd_conf *mdev = w->mdev; 783 + struct drbd_device *mdev = w->mdev; 784 784 kfree(w); 785 785 ov_out_of_sync_print(mdev); 786 786 drbd_resync_finished(mdev); ··· 790 790 791 791 static int w_resync_finished(struct drbd_work *w, int cancel) 792 792 { 793 - struct drbd_conf *mdev = w->mdev; 793 + struct drbd_device *mdev = w->mdev; 794 794 kfree(w); 795 795 796 796 drbd_resync_finished(mdev); ··· 798 798 return 0; 799 799 } 800 800 801 - static void ping_peer(struct drbd_conf *mdev) 801 + static void ping_peer(struct drbd_device *mdev) 802 802 { 803 803 struct drbd_tconn *tconn = mdev->tconn; 804 804 ··· 808 808 test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED); 809 809 } 810 810 811 - int drbd_resync_finished(struct drbd_conf *mdev) 811 + int drbd_resync_finished(struct drbd_device *mdev) 812 812 { 813 813 unsigned long db, dt, dbdt; 814 814 unsigned long n_oos; ··· 963 963 } 964 964 965 965 /* helper */ 966 - static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) 966 + static void move_to_net_ee_or_free(struct drbd_device *mdev, struct drbd_peer_request *peer_req) 967 967 { 968 968 if (drbd_peer_req_has_active_page(peer_req)) { 969 969 /* This might happen if sendpage() has not finished */ ··· 987 987 int w_e_end_data_req(struct drbd_work *w, int cancel) 988 988 { 989 989 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 990 - struct drbd_conf *mdev = w->mdev; 990 + struct drbd_device *mdev = w->mdev; 991 991 int err; 992 992 993 993 if (unlikely(cancel)) { ··· 1024 1024 int w_e_end_rsdata_req(struct drbd_work *w, int cancel) 1025 1025 { 1026 1026 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1027 - struct drbd_conf *mdev = w->mdev; 1027 + struct drbd_device *mdev = w->mdev; 1028 1028 int err; 1029 1029 1030 1030 if (unlikely(cancel)) { ··· 1073 1073 int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) 1074 1074 { 1075 1075 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1076 - struct drbd_conf *mdev = w->mdev; 1076 + struct drbd_device *mdev = w->mdev; 1077 1077 struct digest_info *di; 1078 1078 int digest_size; 1079 1079 void *digest = NULL; ··· 1136 1136 int w_e_end_ov_req(struct 
drbd_work *w, int cancel) 1137 1137 { 1138 1138 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1139 - struct drbd_conf *mdev = w->mdev; 1139 + struct drbd_device *mdev = w->mdev; 1140 1140 sector_t sector = peer_req->i.sector; 1141 1141 unsigned int size = peer_req->i.size; 1142 1142 int digest_size; ··· 1178 1178 return err; 1179 1179 } 1180 1180 1181 - void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size) 1181 + void drbd_ov_out_of_sync_found(struct drbd_device *mdev, sector_t sector, int size) 1182 1182 { 1183 1183 if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) { 1184 1184 mdev->ov_last_oos_size += size>>9; ··· 1192 1192 int w_e_end_ov_reply(struct drbd_work *w, int cancel) 1193 1193 { 1194 1194 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); 1195 - struct drbd_conf *mdev = w->mdev; 1195 + struct drbd_device *mdev = w->mdev; 1196 1196 struct digest_info *di; 1197 1197 void *digest; 1198 1198 sector_t sector = peer_req->i.sector; ··· 1292 1292 1293 1293 int w_send_write_hint(struct drbd_work *w, int cancel) 1294 1294 { 1295 - struct drbd_conf *mdev = w->mdev; 1295 + struct drbd_device *mdev = w->mdev; 1296 1296 struct drbd_socket *sock; 1297 1297 1298 1298 if (cancel) ··· 1327 1327 int w_send_out_of_sync(struct drbd_work *w, int cancel) 1328 1328 { 1329 1329 struct drbd_request *req = container_of(w, struct drbd_request, w); 1330 - struct drbd_conf *mdev = w->mdev; 1330 + struct drbd_device *mdev = w->mdev; 1331 1331 struct drbd_tconn *tconn = mdev->tconn; 1332 1332 int err; 1333 1333 ··· 1357 1357 int w_send_dblock(struct drbd_work *w, int cancel) 1358 1358 { 1359 1359 struct drbd_request *req = container_of(w, struct drbd_request, w); 1360 - struct drbd_conf *mdev = w->mdev; 1360 + struct drbd_device *mdev = w->mdev; 1361 1361 struct drbd_tconn *tconn = mdev->tconn; 1362 1362 int err; 1363 1363 ··· 1385 1385 int w_send_read_req(struct drbd_work *w, int cancel) 1386 1386 { 1387 1387 struct drbd_request *req = container_of(w, struct drbd_request, w); 1388 - struct drbd_conf *mdev = w->mdev; 1388 + struct drbd_device *mdev = w->mdev; 1389 1389 struct drbd_tconn *tconn = mdev->tconn; 1390 1390 int err; 1391 1391 ··· 1409 1409 int w_restart_disk_io(struct drbd_work *w, int cancel) 1410 1410 { 1411 1411 struct drbd_request *req = container_of(w, struct drbd_request, w); 1412 - struct drbd_conf *mdev = w->mdev; 1412 + struct drbd_device *mdev = w->mdev; 1413 1413 1414 1414 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) 1415 1415 drbd_al_begin_io(mdev, &req->i, false); ··· 1421 1421 return 0; 1422 1422 } 1423 1423 1424 - static int _drbd_may_sync_now(struct drbd_conf *mdev) 1424 + static int _drbd_may_sync_now(struct drbd_device *mdev) 1425 1425 { 1426 - struct drbd_conf *odev = mdev; 1426 + struct drbd_device *odev = mdev; 1427 1427 int resync_after; 1428 1428 1429 1429 while (1) { ··· 1451 1451 * 1452 1452 * Called from process context only (admin command and after_state_ch). 1453 1453 */ 1454 - static int _drbd_pause_after(struct drbd_conf *mdev) 1454 + static int _drbd_pause_after(struct drbd_device *mdev) 1455 1455 { 1456 - struct drbd_conf *odev; 1456 + struct drbd_device *odev; 1457 1457 int i, rv = 0; 1458 1458 1459 1459 rcu_read_lock(); ··· 1475 1475 * 1476 1476 * Called from process context only (admin command and worker). 
1477 1477 */ 1478 - static int _drbd_resume_next(struct drbd_conf *mdev) 1478 + static int _drbd_resume_next(struct drbd_device *mdev) 1479 1479 { 1480 - struct drbd_conf *odev; 1480 + struct drbd_device *odev; 1481 1481 int i, rv = 0; 1482 1482 1483 1483 rcu_read_lock(); ··· 1495 1495 return rv; 1496 1496 } 1497 1497 1498 - void resume_next_sg(struct drbd_conf *mdev) 1498 + void resume_next_sg(struct drbd_device *mdev) 1499 1499 { 1500 1500 write_lock_irq(&global_state_lock); 1501 1501 _drbd_resume_next(mdev); 1502 1502 write_unlock_irq(&global_state_lock); 1503 1503 } 1504 1504 1505 - void suspend_other_sg(struct drbd_conf *mdev) 1505 + void suspend_other_sg(struct drbd_device *mdev) 1506 1506 { 1507 1507 write_lock_irq(&global_state_lock); 1508 1508 _drbd_pause_after(mdev); ··· 1510 1510 } 1511 1511 1512 1512 /* caller must hold global_state_lock */ 1513 - enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor) 1513 + enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *mdev, int o_minor) 1514 1514 { 1515 - struct drbd_conf *odev; 1515 + struct drbd_device *odev; 1516 1516 int resync_after; 1517 1517 1518 1518 if (o_minor == -1) ··· 1548 1548 } 1549 1549 1550 1550 /* caller must hold global_state_lock */ 1551 - void drbd_resync_after_changed(struct drbd_conf *mdev) 1551 + void drbd_resync_after_changed(struct drbd_device *mdev) 1552 1552 { 1553 1553 int changes; 1554 1554 ··· 1558 1558 } while (changes); 1559 1559 } 1560 1560 1561 - void drbd_rs_controller_reset(struct drbd_conf *mdev) 1561 + void drbd_rs_controller_reset(struct drbd_device *mdev) 1562 1562 { 1563 1563 struct fifo_buffer *plan; 1564 1564 ··· 1579 1579 1580 1580 void start_resync_timer_fn(unsigned long data) 1581 1581 { 1582 - struct drbd_conf *mdev = (struct drbd_conf *) data; 1582 + struct drbd_device *mdev = (struct drbd_device *) data; 1583 1583 1584 1584 drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work); 1585 1585 } 1586 1586 1587 1587 int w_start_resync(struct drbd_work *w, int cancel) 1588 1588 { 1589 - struct drbd_conf *mdev = w->mdev; 1589 + struct drbd_device *mdev = w->mdev; 1590 1590 1591 1591 if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { 1592 1592 dev_warn(DEV, "w_start_resync later...\n"); ··· 1608 1608 * This function might bring you directly into one of the 1609 1609 * C_PAUSED_SYNC_* states. 1610 1610 */ 1611 - void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) 1611 + void drbd_start_resync(struct drbd_device *mdev, enum drbd_conns side) 1612 1612 { 1613 1613 union drbd_state ns; 1614 1614 int r; ··· 1886 1886 { 1887 1887 struct drbd_tconn *tconn = thi->tconn; 1888 1888 struct drbd_work *w = NULL; 1889 - struct drbd_conf *mdev; 1889 + struct drbd_device *mdev; 1890 1890 LIST_HEAD(work_list); 1891 1891 int vnr; 1892 1892
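
Several of the timers above (resync_timer_fn(), start_resync_timer_fn(), and request_timer_fn() in drbd_req.c) recover the device by casting the timer's unsigned long data argument, another place the struct name shows up outside a declaration. A userspace sketch of that old-style timer-callback idiom, with a trivial dispatcher standing in for the kernel timer core:

#include <stdio.h>

struct demo_device {
	unsigned minor;
	unsigned resync_runs;
};

/* Old-style timer callback: context arrives as an unsigned long and is
 * cast back to the device, the way resync_timer_fn() does it. */
static void demo_resync_timer_fn(unsigned long data)
{
	struct demo_device *device = (struct demo_device *)data;

	device->resync_runs++;
	printf("resync timer fired for minor %u (%u runs)\n",
	       device->minor, device->resync_runs);
}

/* Stand-in for the timer core invoking an expired timer. */
static void demo_fire(void (*fn)(unsigned long), unsigned long data)
{
	fn(data);
}

int main(void)
{
	struct demo_device dev = { .minor = 0 };

	demo_fire(demo_resync_timer_fn, (unsigned long)&dev);
	return 0;
}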
+2 -2
drivers/block/drbd/drbd_wrappers.h
··· 9 9 extern char *drbd_sec_holder; 10 10 11 11 /* sets the number of 512 byte sectors of our virtual device */ 12 - static inline void drbd_set_my_capacity(struct drbd_conf *mdev, 12 + static inline void drbd_set_my_capacity(struct drbd_device *mdev, 13 13 sector_t size) 14 14 { 15 15 /* set_capacity(mdev->this_bdev->bd_disk, size); */ ··· 27 27 /* 28 28 * used to submit our private bio 29 29 */ 30 - static inline void drbd_generic_make_request(struct drbd_conf *mdev, 30 + static inline void drbd_generic_make_request(struct drbd_device *mdev, 31 31 int fault_type, struct bio *bio) 32 32 { 33 33 __release(local);
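
drbd_wrappers.h holds only small static inline helpers, so the change here is confined to two parameter types. The general pattern, a header-level inline wrapper that funnels one operation on the device through a single place, in a standalone sketch loosely modelled on drbd_set_my_capacity() (the demo types are not drbd's):

#include <stdio.h>

struct demo_disk { unsigned long long nr_sectors; };
struct demo_device { struct demo_disk disk; };

/* Header-style wrapper: one tiny static inline per operation, so every
 * caller goes through the same code path. */
static inline void demo_set_my_capacity(struct demo_device *dev,
					unsigned long long sectors)
{
	dev->disk.nr_sectors = sectors;          /* 512-byte sectors */
}

int main(void)
{
	struct demo_device dev = { { 0 } };

	demo_set_my_capacity(&dev, 1ULL << 20);  /* 512 MiB volume */
	printf("capacity: %llu sectors\n", dev.disk.nr_sectors);
	return 0;
}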