
drbd: Pass a peer device to the resync and online verify functions

Originally-from: Andreas Grünbacher <agruen@linbit.com>
Signed-off-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
Link: https://lore.kernel.org/r/20230330102744.2128122-2-christoph.boehmwalder@linbit.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Böhmwalder, committed by Jens Axboe · 0d11f3cf ad878a0d

6 files changed, 126 insertions(+), 108 deletions(-)
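The conversion follows one mechanical pattern: every resync and online-verify helper that used to take a struct drbd_device * now takes a struct drbd_peer_device * and derives the device from it, so the peer and connection context is explicit at each call site. A minimal compilable sketch of that pattern (the _old/_new names are illustrative only; the real struct drbd_peer_device in drbd_int.h carries many more members than the two used here):

/*
 * Sketch only, not part of the patch: the shape of the conversion.
 * Only the two members actually used by these hunks are shown.
 */
struct drbd_device;
struct drbd_connection;

struct drbd_peer_device {
	struct drbd_device *device;
	struct drbd_connection *connection;
};

/* Old shape: peer context had to be looked up internally,
 * typically via first_peer_device(device). */
void drbd_advance_rs_marks_old(struct drbd_device *device,
			       unsigned long still_to_go);

/* New shape: the caller hands in the peer device; the device is one
 * dereference away, and the peer/connection is available for logging
 * and sending. */
void drbd_advance_rs_marks_new(struct drbd_peer_device *peer_device,
			       unsigned long still_to_go)
{
	struct drbd_device *device = peer_device->device;

	/* ... update device->rs_mark_time[]/rs_last_mark as before ... */
	(void)device;
	(void)still_to_go;
}

Call sites that genuinely have no peer at hand fall back to first_peer_device(device), as the hunks below show.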
drivers/block/drbd/drbd_actlog.c | +8 -5

@@ -735 +735 @@
 	return false;
 }
 
-void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
+void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go)
 {
+	struct drbd_device *device = peer_device->device;
 	unsigned long now = jiffies;
 	unsigned long last = device->rs_mark_time[device->rs_last_mark];
 	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
@@ -820 +819 @@
 	if (mode == SET_IN_SYNC) {
 		unsigned long still_to_go = drbd_bm_total_weight(device);
 		bool rs_is_done = (still_to_go <= device->rs_failed);
-		drbd_advance_rs_marks(device, still_to_go);
+		drbd_advance_rs_marks(first_peer_device(device), still_to_go);
 		if (cleared || rs_is_done)
 			maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
 	} else if (mode == RECORD_RS_FAILED)
@@ -844 +843 @@
  * called by worker on C_SYNC_TARGET and receiver on SyncSource.
  *
  */
-int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
 		enum update_sync_bits_mode mode)
 {
 	/* Is called from worker and receiver context _only_ */
+	struct drbd_device *device = peer_device->device;
 	unsigned long sbnr, ebnr, lbnr;
 	unsigned long count = 0;
 	sector_t esector, nr_sectors;
@@ -1011 +1009 @@
  * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
  * if there is still application IO going on in this area.
  */
-int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
+int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
 {
+	struct drbd_device *device = peer_device->device;
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
 	struct lc_element *e;
 	struct bm_extent *bm_ext;
 	int i;
-	bool throttle = drbd_rs_should_slow_down(device, sector, true);
+	bool throttle = drbd_rs_should_slow_down(peer_device, sector, true);
 
 	/* If we need to throttle, a half-locked (only marked BME_NO_WRITES,
 	 * not yet BME_LOCKED) extent needs to be kicked out explicitly if we
drivers/block/drbd/drbd_int.h | +25 -21

@@ -1433 +1433 @@
 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
 extern void resume_next_sg(struct drbd_device *device);
 extern void suspend_other_sg(struct drbd_device *device);
-extern int drbd_resync_finished(struct drbd_device *device);
+extern int drbd_resync_finished(struct drbd_peer_device *peer_device);
 /* maybe rather drbd_main.c ? */
 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
 extern void drbd_md_put_buffer(struct drbd_device *device);
 extern int drbd_md_sync_page_io(struct drbd_device *device,
 		struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
-extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
+extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device,
+		sector_t sector, int size);
 extern void wait_until_done_or_force_detached(struct drbd_device *device,
 		struct drbd_backing_dev *bdev, unsigned int *done);
-extern void drbd_rs_controller_reset(struct drbd_device *device);
+extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device);
 
-static inline void ov_out_of_sync_print(struct drbd_device *device)
+static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
 {
+	struct drbd_device *device = peer_device->device;
+
 	if (device->ov_last_oos_size) {
-		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
+		drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n",
 		     (unsigned long long)device->ov_last_oos_start,
 		     (unsigned long)device->ov_last_oos_size);
 	}
@@ -1489 +1486 @@
 extern void drbd_send_ping_wf(struct work_struct *ws);
 extern void drbd_send_acks_wf(struct work_struct *ws);
 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
-extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
+extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
 		bool throttle_if_app_is_waiting);
 extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req);
 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
@@ -1545 +1542 @@
 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
-extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector);
 extern void drbd_rs_cancel_all(struct drbd_device *device);
 extern int drbd_rs_del_all(struct drbd_device *device);
-extern void drbd_rs_failed_io(struct drbd_device *device,
+extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device,
 		sector_t sector, int size);
-extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
+extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go);
 
 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
-extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
+extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
 		enum update_sync_bits_mode mode);
-#define drbd_set_in_sync(device, sector, size) \
-	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
-#define drbd_set_out_of_sync(device, sector, size) \
-	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
-#define drbd_rs_failed_io(device, sector, size) \
-	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
+#define drbd_set_in_sync(peer_device, sector, size) \
+	__drbd_change_sync(peer_device, sector, size, SET_IN_SYNC)
+#define drbd_set_out_of_sync(peer_device, sector, size) \
+	__drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC)
+#define drbd_rs_failed_io(peer_device, sector, size) \
+	__drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED)
 extern void drbd_al_shrink(struct drbd_device *device);
 extern int drbd_al_initialize(struct drbd_device *, void *);
@@ -1948 +1945 @@
  * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
  * (or P_NEG_ACK with ID_SYNCER)
  */
-static inline void inc_rs_pending(struct drbd_device *device)
+static inline void inc_rs_pending(struct drbd_peer_device *peer_device)
 {
-	atomic_inc(&device->rs_pending_cnt);
+	atomic_inc(&peer_device->device->rs_pending_cnt);
 }
 
-#define dec_rs_pending(device) ((void)expect((device), __dec_rs_pending(device) >= 0))
-static inline int __dec_rs_pending(struct drbd_device *device)
+#define dec_rs_pending(peer_device) \
+	((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0))
+static inline int __dec_rs_pending(struct drbd_peer_device *peer_device)
 {
-	return atomic_dec_return(&device->rs_pending_cnt);
+	return atomic_dec_return(&peer_device->device->rs_pending_cnt);
 }
 
 /* counts how many answers we still need to send to the peer.
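For callers, drbd_set_in_sync(), drbd_set_out_of_sync() and drbd_rs_failed_io() stay thin macro wrappers around __drbd_change_sync(); only the first argument changes. Likewise, drbd_err(peer_device, ...) in ov_out_of_sync_print() works because DRBD's printk helpers dispatch on the type of their first argument, so the message picks up peer context. As a sketch, the first wrapper written as an inline function (DRBD itself keeps it as a #define; the _sketch name is hypothetical):

/* Sketch only: inline equivalent of the drbd_set_in_sync() macro after
 * this patch. SET_IN_SYNC comes from enum update_sync_bits_mode above. */
static inline int drbd_set_in_sync_sketch(struct drbd_peer_device *peer_device,
					  sector_t sector, int size)
{
	return __drbd_change_sync(peer_device, sector, size, SET_IN_SYNC);
}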
drivers/block/drbd/drbd_receiver.c | +30 -29

@@ -2044 +2044 @@
 	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		drbd_set_in_sync(device, sector, peer_req->i.size);
+		drbd_set_in_sync(peer_device, sector, peer_req->i.size);
 		err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
 	} else {
 		/* Record failure to sync */
-		drbd_rs_failed_io(device, sector, peer_req->i.size);
+		drbd_rs_failed_io(peer_device, sector, peer_req->i.size);
 
 		err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
 	}
@@ -2067 +2067 @@
 	if (!peer_req)
 		goto fail;
 
-	dec_rs_pending(device);
+	dec_rs_pending(peer_device);
 
 	inc_unacked(device);
 	/* corresponding dec_unacked() in e_end_resync_block()
@@ -2220 +2220 @@
 			P_RS_WRITE_ACK : P_WRITE_ACK;
 		err = drbd_send_ack(peer_device, pcmd, peer_req);
 		if (pcmd == P_RS_WRITE_ACK)
-			drbd_set_in_sync(device, sector, peer_req->i.size);
+			drbd_set_in_sync(peer_device, sector, peer_req->i.size);
 	} else {
 		err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
 		/* we expect it to be marked out of sync anyways...
@@ -2691 +2691 @@
 
 	if (device->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
-		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+		drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
 		drbd_al_begin_io(device, &peer_req->i);
 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
@@ -2730 +2730 @@
  * The current sync rate used here uses only the most recent two step marks,
  * to have a short time average so we can react faster.
  */
-bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
+bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
 		bool throttle_if_app_is_waiting)
 {
+	struct drbd_device *device = peer_device->device;
 	struct lc_element *tmp;
 	bool throttle = drbd_rs_c_min_rate_throttle(device);
 
@@ -2845 +2844 @@
 		break;
 	case P_OV_REPLY:
 		verb = 0;
-		dec_rs_pending(device);
+		dec_rs_pending(peer_device);
 		drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
 		break;
 	default:
@@ -2916 +2915 @@
 		/* track progress, we may need to throttle */
 		atomic_add(size >> 9, &device->rs_sect_in);
 		peer_req->w.cb = w_e_end_ov_reply;
-		dec_rs_pending(device);
+		dec_rs_pending(peer_device);
 		/* drbd_rs_begin_io done when we sent this request,
 		 * but accounting still needs to be done. */
 		goto submit_for_resync;
@@ -2979 +2978 @@
 
 	update_receiver_timing_details(connection, drbd_rs_should_slow_down);
 	if (device->state.peer != R_PRIMARY
-	    && drbd_rs_should_slow_down(device, sector, false))
+	    && drbd_rs_should_slow_down(peer_device, sector, false))
 		schedule_timeout_uninterruptible(HZ/10);
 	update_receiver_timing_details(connection, drbd_rs_begin_io);
 	if (drbd_rs_begin_io(device, sector))
@@ -4451 +4450 @@
 		else if (os.conn >= C_SYNC_SOURCE &&
 			 peer_state.conn == C_CONNECTED) {
 			if (drbd_bm_total_weight(device) <= device->rs_failed)
-				drbd_resync_finished(device);
+				drbd_resync_finished(peer_device);
 			return 0;
 		}
 	}
@@ -4459 +4458 @@
 	/* explicit verify finished notification, stop sector reached. */
 	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
 	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
-		ov_out_of_sync_print(device);
-		drbd_resync_finished(device);
+		ov_out_of_sync_print(peer_device);
+		drbd_resync_finished(peer_device);
 		return 0;
 	}
@@ -4938 +4937 @@
 			drbd_conn_str(device->state.conn));
 	}
 
-	drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+	drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
 
 	return 0;
 }
@@ -4959 +4958 @@
 	sector = be64_to_cpu(p->sector);
 	size = be32_to_cpu(p->blksize);
 
-	dec_rs_pending(device);
+	dec_rs_pending(peer_device);
 
 	if (get_ldev(device)) {
 		struct drbd_peer_request *peer_req;
@@ -5651 +5650 @@
 
 	if (get_ldev(device)) {
 		drbd_rs_complete_io(device, sector);
-		drbd_set_in_sync(device, sector, blksize);
+		drbd_set_in_sync(peer_device, sector, blksize);
 		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
 		device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
 		put_ldev(device);
 	}
-	dec_rs_pending(device);
+	dec_rs_pending(peer_device);
 	atomic_add(blksize >> 9, &device->rs_sect_in);
 
 	return 0;
@@ -5702 +5701 @@
 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
 
 	if (p->block_id == ID_SYNCER) {
-		drbd_set_in_sync(device, sector, blksize);
-		dec_rs_pending(device);
+		drbd_set_in_sync(peer_device, sector, blksize);
+		dec_rs_pending(peer_device);
 		return 0;
 	}
 	switch (pi->cmd) {
@@ -5748 +5747 @@
 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
 
 	if (p->block_id == ID_SYNCER) {
-		dec_rs_pending(device);
-		drbd_rs_failed_io(device, sector, size);
+		dec_rs_pending(peer_device);
+		drbd_rs_failed_io(peer_device, sector, size);
 		return 0;
 	}
@@ -5762 +5761 @@
 		   request is no longer in the collision hash. */
 		/* In Protocol B we might already have got a P_RECV_ACK
 		   but then get a P_NEG_ACK afterwards. */
-		drbd_set_out_of_sync(device, sector, size);
+		drbd_set_out_of_sync(peer_device, sector, size);
 	}
 	return 0;
 }
@@ -5807 +5806 @@
 
 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
 
-	dec_rs_pending(device);
+	dec_rs_pending(peer_device);
 
 	if (get_ldev_if_state(device, D_FAILED)) {
 		drbd_rs_complete_io(device, sector);
 		switch (pi->cmd) {
 		case P_NEG_RS_DREPLY:
-			drbd_rs_failed_io(device, sector, size);
+			drbd_rs_failed_io(peer_device, sector, size);
 			break;
 		case P_RS_CANCEL:
 			break;
@@ -5870 +5869 @@
 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
 
 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
-		drbd_ov_out_of_sync_found(device, sector, size);
+		drbd_ov_out_of_sync_found(peer_device, sector, size);
 	else
-		ov_out_of_sync_print(device);
+		ov_out_of_sync_print(peer_device);
 
 	if (!get_ldev(device))
 		return 0;
 
 	drbd_rs_complete_io(device, sector);
-	dec_rs_pending(device);
+	dec_rs_pending(peer_device);
 
 	--device->ov_left;
 
 	/* let's advance progress step marks only for every other megabyte */
 	if ((device->ov_left & 0x200) == 0x200)
-		drbd_advance_rs_marks(device, device->ov_left);
+		drbd_advance_rs_marks(peer_device, device->ov_left);
 
 	if (device->ov_left == 0) {
 		dw = kmalloc(sizeof(*dw), GFP_NOIO);
@@ -5894 +5893 @@
 			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
 		} else {
 			drbd_err(device, "kmalloc(dw) failed.");
-			ov_out_of_sync_print(device);
-			drbd_resync_finished(device);
+			ov_out_of_sync_print(peer_device);
+			drbd_resync_finished(peer_device);
 		}
 	}
 	put_ldev(device);
drivers/block/drbd/drbd_req.c | +5 -4

@@ -122 +122 @@
 	 * before it even was submitted or sent.
 	 * In that case we do not want to touch the bitmap at all.
 	 */
+	struct drbd_peer_device *peer_device = first_peer_device(device);
 	if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
 		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
-			drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+			drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
 
 		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
-			drbd_set_in_sync(device, req->i.sector, req->i.size);
+			drbd_set_in_sync(peer_device, req->i.sector, req->i.size);
 	}
 
 	/* one might be tempted to move the drbd_al_complete_io
@@ -621 +620 @@
 		break;
 
 	case READ_COMPLETED_WITH_ERROR:
-		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
+		drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
 		drbd_report_io_error(device, req);
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 		fallthrough;
@@ -1132 +1131 @@
 	if (remote) {
 		_req_mod(req, TO_BE_SENT, peer_device);
 		_req_mod(req, QUEUE_FOR_NET_WRITE, peer_device);
-	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
+	} else if (drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size))
 		_req_mod(req, QUEUE_FOR_SEND_OOS, peer_device);
 
 	return remote;
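Where only a device is in scope, as in the request-completion path above and in the worker callbacks further down, the peer is resolved with first_peer_device(); the in-tree driver keeps exactly one peer device per device, so this is unambiguous. For orientation, a sketch of that helper under the list layout assumed from drbd_int.h:

/* Sketch of first_peer_device() as assumed here: return the first (and,
 * in the in-tree driver, only) entry on device->peer_devices. */
static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices,
					struct drbd_peer_device, peer_devices);
}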
drivers/block/drbd/drbd_state.c | +6 -4

@@ -1222 +1222 @@
 }
 
 /* helper for _drbd_set_state */
-static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
+static void set_ov_position(struct drbd_peer_device *peer_device, enum drbd_conns cs)
 {
-	if (first_peer_device(device)->connection->agreed_pro_version < 90)
+	struct drbd_device *device = peer_device->device;
+
+	if (peer_device->connection->agreed_pro_version < 90)
 		device->ov_start_sector = 0;
 	device->rs_total = drbd_bm_bits(device);
 	device->ov_position = 0;
@@ -1389 +1387 @@
 		unsigned long now = jiffies;
 		int i;
 
-		set_ov_position(device, ns.conn);
+		set_ov_position(peer_device, ns.conn);
 		device->rs_start = now;
 		device->rs_last_sect_ev = 0;
 		device->ov_last_oos_size = 0;
@@ -1400 +1398 @@
 			device->rs_mark_time[i] = now;
 		}
 
-		drbd_rs_controller_reset(device);
+		drbd_rs_controller_reset(peer_device);
 
 		if (ns.conn == C_VERIFY_S) {
 			drbd_info(device, "Starting Online Verify from sector %llu\n",
drivers/block/drbd/drbd_worker.c | +52 -45

@@ -28 +28 @@
 #include "drbd_protocol.h"
 #include "drbd_req.h"
 
-static int make_ov_request(struct drbd_device *, int);
-static int make_resync_request(struct drbd_device *, int);
+static int make_ov_request(struct drbd_peer_device *, int);
+static int make_resync_request(struct drbd_peer_device *, int);
 
 /* endio handlers:
 *   drbd_md_endio (defined here)
@@ -124 +124 @@
 	 * In case of a write error, send the neg ack anyways. */
 	if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
 		inc_unacked(device);
-	drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+	drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
 }
 
 spin_lock_irqsave(&device->resource->req_lock, flags);
@@ -363 +363 @@
 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
 	drbd_free_peer_req(device, peer_req);
 	peer_req = NULL;
-	inc_rs_pending(device);
+	inc_rs_pending(peer_device);
 	err = drbd_send_drequest_csum(peer_device, sector, size,
 				      digest, digest_size,
 				      P_CSUM_RS_REQUEST);
@@ -430 +430 @@
 
 	switch (device->state.conn) {
 	case C_VERIFY_S:
-		make_ov_request(device, cancel);
+		make_ov_request(first_peer_device(device), cancel);
 		break;
 	case C_SYNC_TARGET:
-		make_resync_request(device, cancel);
+		make_resync_request(first_peer_device(device), cancel);
 		break;
 	}
 
@@ -493 +493 @@
 	return fb;
 }
 
-static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
+static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
 {
+	struct drbd_device *device = peer_device->device;
 	struct disk_conf *dc;
 	unsigned int want;     /* The number of sectors we want in-flight */
 	int req_sect;          /* Number of sectors to request in this turn */
@@ -546 +545 @@
 	return req_sect;
 }
 
-static int drbd_rs_number_requests(struct drbd_device *device)
+static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
 {
+	struct drbd_device *device = peer_device->device;
 	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
 	int number, mxb;
 
@@ -558 +556 @@
 	rcu_read_lock();
 	mxb = drbd_get_max_buffers(device) / 2;
 	if (rcu_dereference(device->rs_plan_s)->size) {
-		number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9);
+		number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
 		device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
 	} else {
 		device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
@@ -582 +580 @@
 	return number;
 }
 
-static int make_resync_request(struct drbd_device *const device, int cancel)
+static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
 {
-	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_device *const device = peer_device->device;
 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	unsigned long bit;
 	sector_t sector;
@@ -600 +598 @@
 
 	if (device->rs_total == 0) {
 		/* empty resync? */
-		drbd_resync_finished(device);
+		drbd_resync_finished(peer_device);
 		return 0;
 	}
@@ -620 +618 @@
 	}
 
 	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
-	number = drbd_rs_number_requests(device);
+	number = drbd_rs_number_requests(peer_device);
 	if (number <= 0)
 		goto requeue;
@@ -655 +653 @@
 
 		sector = BM_BIT_TO_SECT(bit);
 
-		if (drbd_try_rs_begin_io(device, sector)) {
+		if (drbd_try_rs_begin_io(peer_device, sector)) {
 			device->bm_resync_fo = bit;
 			goto requeue;
 		}
@@ -731 +729 @@
 		} else {
 			int err;
 
-			inc_rs_pending(device);
+			inc_rs_pending(peer_device);
 			err = drbd_send_drequest(peer_device,
 				 size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
 				 sector, size, ID_SYNCER);
 			if (err) {
 				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
-				dec_rs_pending(device);
+				dec_rs_pending(peer_device);
 				put_ldev(device);
 				return err;
 			}
@@ -762 +760 @@
 	return 0;
 }
 
-static int make_ov_request(struct drbd_device *device, int cancel)
+static int make_ov_request(struct drbd_peer_device *peer_device, int cancel)
 {
+	struct drbd_device *device = peer_device->device;
 	int number, i, size;
 	sector_t sector;
 	const sector_t capacity = get_capacity(device->vdisk);
@@ -773 +770 @@
 	if (unlikely(cancel))
 		return 1;
 
-	number = drbd_rs_number_requests(device);
+	number = drbd_rs_number_requests(peer_device);
 
 	sector = device->ov_position;
 	for (i = 0; i < number; i++) {
@@ -791 +788 @@
 
 		size = BM_BLOCK_SIZE;
 
-		if (drbd_try_rs_begin_io(device, sector)) {
+		if (drbd_try_rs_begin_io(peer_device, sector)) {
 			device->ov_position = sector;
 			goto requeue;
 		}
@@ -799 +796 @@
 		if (sector + (size>>9) > capacity)
 			size = (capacity-sector)<<9;
 
-		inc_rs_pending(device);
+		inc_rs_pending(peer_device);
 		if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
-			dec_rs_pending(device);
+			dec_rs_pending(peer_device);
 			return 0;
 		}
 		sector += BM_SECT_PER_BIT;
@@ -821 +818 @@
 		container_of(w, struct drbd_device_work, w);
 	struct drbd_device *device = dw->device;
 	kfree(dw);
-	ov_out_of_sync_print(device);
-	drbd_resync_finished(device);
+	ov_out_of_sync_print(first_peer_device(device));
+	drbd_resync_finished(first_peer_device(device));
 
 	return 0;
 }
@@ -834 +831 @@
 	struct drbd_device *device = dw->device;
 	kfree(dw);
 
-	drbd_resync_finished(device);
+	drbd_resync_finished(first_peer_device(device));
 
 	return 0;
 }
@@ -849 +846 @@
 		  test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
 }
 
-int drbd_resync_finished(struct drbd_device *device)
+int drbd_resync_finished(struct drbd_peer_device *peer_device)
 {
-	struct drbd_connection *connection = first_peer_device(device)->connection;
+	struct drbd_device *device = peer_device->device;
+	struct drbd_connection *connection = peer_device->connection;
 	unsigned long db, dt, dbdt;
 	unsigned long n_oos;
 	union drbd_state os, ns;
@@ -1133 +1129 @@
 		err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
 	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
-			inc_rs_pending(device);
+			inc_rs_pending(peer_device);
 			if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
 				err = drbd_send_rs_deallocated(peer_device, peer_req);
 			else
@@ -1152 +1148 @@
 		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
 
 		/* update resync data with failure */
-		drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
+		drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
 	}
 
 	dec_unacked(device);
@@ -1203 +1199 @@
 	}
 
 	if (eq) {
-		drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
+		drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size);
 		/* rs_same_csums unit is BM_BLOCK_SIZE */
 		device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
 		err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
 	} else {
-		inc_rs_pending(device);
+		inc_rs_pending(peer_device);
 		peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
 		peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
 		kfree(di);
@@ -1261 +1257 @@
 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
 	drbd_free_peer_req(device, peer_req);
 	peer_req = NULL;
-	inc_rs_pending(device);
+	inc_rs_pending(peer_device);
 	err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
 	if (err)
-		dec_rs_pending(device);
+		dec_rs_pending(peer_device);
 	kfree(digest);
 
 out:
@@ -1274 +1270 @@
 	return err;
 }
 
-void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, sector_t sector, int size)
 {
+	struct drbd_device *device = peer_device->device;
 	if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
 		device->ov_last_oos_size += size>>9;
 	} else {
 		device->ov_last_oos_start = sector;
 		device->ov_last_oos_size = size>>9;
 	}
-	drbd_set_out_of_sync(device, sector, size);
+	drbd_set_out_of_sync(peer_device, sector, size);
 }
 
 int w_e_end_ov_reply(struct drbd_work *w, int cancel)
@@ -1333 +1328 @@
 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
 	drbd_free_peer_req(device, peer_req);
 	if (!eq)
-		drbd_ov_out_of_sync_found(device, sector, size);
+		drbd_ov_out_of_sync_found(peer_device, sector, size);
 	else
-		ov_out_of_sync_print(device);
+		ov_out_of_sync_print(peer_device);
 
 	err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
 			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
@@ -1346 +1341 @@
 
 	/* let's advance progress step marks only for every other megabyte */
 	if ((device->ov_left & 0x200) == 0x200)
-		drbd_advance_rs_marks(device, device->ov_left);
+		drbd_advance_rs_marks(peer_device, device->ov_left);
 
 	stop_sector_reached = verify_can_do_stop_sector(device) &&
 		(sector + (size>>9)) >= device->ov_stop_sector;
 
 	if (device->ov_left == 0 || stop_sector_reached) {
-		ov_out_of_sync_print(device);
-		drbd_resync_finished(device);
+		ov_out_of_sync_print(peer_device);
+		drbd_resync_finished(peer_device);
 	}
 
 	return err;
@@ -1673 +1668 @@
 	} while (changed);
 }
 
-void drbd_rs_controller_reset(struct drbd_device *device)
+void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
 {
+	struct drbd_device *device = peer_device->device;
 	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
 	struct fifo_buffer *plan;
 
@@ -1897 +1891 @@
 			rcu_read_unlock();
 			schedule_timeout_interruptible(timeo);
 		}
-		drbd_resync_finished(device);
+		drbd_resync_finished(peer_device);
 	}
 
-	drbd_rs_controller_reset(device);
+	drbd_rs_controller_reset(peer_device);
 	/* ns.conn may already be != device->state.conn,
 	 * we may have been paused in between, or become paused until
 	 * the timer triggers.
@@ -1915 +1909 @@
 	mutex_unlock(device->state_mutex);
 }
 
-static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done)
+static void update_on_disk_bitmap(struct drbd_peer_device *peer_device, bool resync_done)
 {
+	struct drbd_device *device = peer_device->device;
 	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
 	device->rs_last_bcast = jiffies;
 
@@ -1926 +1919 @@
 
 	drbd_bm_write_lazy(device, 0);
 	if (resync_done && is_sync_state(device->state.conn))
-		drbd_resync_finished(device);
+		drbd_resync_finished(peer_device);
 
 	drbd_bcast_event(device, &sib);
 	/* update timestamp, in case it took a while to write out stuff */
@@ -2025 +2018 @@
 		do_md_sync(device);
 	if (test_bit(RS_DONE, &todo) ||
 	    test_bit(RS_PROGRESS, &todo))
-		update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
+		update_on_disk_bitmap(first_peer_device(device), test_bit(RS_DONE, &todo));
 	if (test_bit(GO_DISKLESS, &todo))
 		go_diskless(device);
 	if (test_bit(DESTROY_DISK, &todo))