Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block: (122 commits)
cciss: fix lost command issue
drbd: need include for bitops functions declarations
Revert "cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation"
cciss: fix missed command status value CMD_UNABORTABLE
cciss: remove unnecessary casts
cciss: Mask off error bits of c->busaddr in cmd_special_free when calling pci_free_consistent
cciss: Inform controller we are using 32-bit tags.
cciss: hoist tag masking out of loop
cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation
cciss: export resettable host attribute
drbd: drop code present under #ifdef which is relevant to 2.6.28 and below
drbd: Fixed handling of read errors on a 'VerifyS' node
drbd: Fixed handling of read errors on a 'VerifyT' node
drbd: Implemented real timeout checking for request processing time
drbd: Remove unused function atodb_endio()
drbd: improve log message if received sector offset exceeds local capacity
drbd: kill dead code
drbd: don't BUG_ON, if bio_add_page of a single page to an empty bio fails
drbd: Removed left over, now wrong comments
drbd: serialize admin requests for new verify run with pending bitmap io
...

+2273 -1409
+12
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
···
 59  59  Contact:        iss_storagedev@hp.com
 60  60  Description:    Displays the usage count (number of opens) of logical drive Y
 61  61                  of controller X.
     62 +
     63 + Where:          /sys/bus/pci/devices/<dev>/ccissX/resettable
     64 + Date:           February 2011
     65 + Kernel Version: 2.6.38
     66 + Contact:        iss_storagedev@hp.com
     67 + Description:    Value of 1 indicates the controller can honor the reset_devices
     68 +                 kernel parameter. Value of 0 indicates reset_devices cannot be
     69 +                 honored. This is to allow, for example, kexec tools to be able
     70 +                 to warn the user if they designate an unresettable device as
     71 +                 a dump device, as kdump requires resetting the device in order
     72 +                 to work reliably.
     73 +
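The new resettable attribute is intended to be read from userspace. As a rough illustration (not part of this merge), a kdump or kexec setup helper could check it before accepting a cciss logical volume as a dump target; the program and the PCI address in the path below are hypothetical examples, and a real tool would resolve the path from the chosen block device.

/* Hypothetical userspace sketch: warn if the controller behind a chosen
 * dump device cannot honor reset_devices.  The path is an example only. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:06:00.0/cciss0/resettable";
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);

	if (val == 0)
		fprintf(stderr, "warning: controller cannot honor "
			"reset_devices; kdump to this device may be "
			"unreliable\n");
	return 0;
}

Since the attribute simply reports whether the board id appears in the driver's unresettable_controller[] table (see the cciss.c hunk below), the check is cheap enough to run every time a dump target is configured.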
+69 -17
drivers/block/cciss.c
··· 193 193 u64 *cfg_offset); 194 194 static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 195 195 unsigned long *memory_bar); 196 - 196 + static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); 197 197 198 198 /* performant mode helper functions */ 199 199 static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, ··· 231 231 */ 232 232 static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) 233 233 { 234 - if (likely(h->transMethod == CFGTBL_Trans_Performant)) 234 + if (likely(h->transMethod & CFGTBL_Trans_Performant)) 235 235 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 236 236 } 237 237 ··· 556 556 #define to_hba(n) container_of(n, struct ctlr_info, dev) 557 557 #define to_drv(n) container_of(n, drive_info_struct, dev) 558 558 559 + /* List of controllers which cannot be reset on kexec with reset_devices */ 560 + static u32 unresettable_controller[] = { 561 + 0x324a103C, /* Smart Array P712m */ 562 + 0x324b103C, /* SmartArray P711m */ 563 + 0x3223103C, /* Smart Array P800 */ 564 + 0x3234103C, /* Smart Array P400 */ 565 + 0x3235103C, /* Smart Array P400i */ 566 + 0x3211103C, /* Smart Array E200i */ 567 + 0x3212103C, /* Smart Array E200 */ 568 + 0x3213103C, /* Smart Array E200i */ 569 + 0x3214103C, /* Smart Array E200i */ 570 + 0x3215103C, /* Smart Array E200i */ 571 + 0x3237103C, /* Smart Array E500 */ 572 + 0x323D103C, /* Smart Array P700m */ 573 + 0x409C0E11, /* Smart Array 6400 */ 574 + 0x409D0E11, /* Smart Array 6400 EM */ 575 + }; 576 + 577 + static int ctlr_is_resettable(struct ctlr_info *h) 578 + { 579 + int i; 580 + 581 + for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 582 + if (unresettable_controller[i] == h->board_id) 583 + return 0; 584 + return 1; 585 + } 586 + 587 + static ssize_t host_show_resettable(struct device *dev, 588 + struct device_attribute *attr, 589 + char *buf) 590 + { 591 + struct ctlr_info *h = to_hba(dev); 592 + 593 + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 594 + } 595 + static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); 596 + 559 597 static ssize_t host_store_rescan(struct device *dev, 560 598 struct device_attribute *attr, 561 599 const char *buf, size_t count) ··· 779 741 780 742 static struct attribute *cciss_host_attrs[] = { 781 743 &dev_attr_rescan.attr, 744 + &dev_attr_resettable.attr, 782 745 NULL 783 746 }; 784 747 ··· 1012 973 temp64.val32.upper = c->ErrDesc.Addr.upper; 1013 974 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 1014 975 c->err_info, (dma_addr_t) temp64.val); 1015 - pci_free_consistent(h->pdev, sizeof(CommandList_struct), 1016 - c, (dma_addr_t) c->busaddr); 976 + pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, 977 + (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr)); 1017 978 } 1018 979 1019 980 static inline ctlr_info_t *get_host(struct gendisk *disk) ··· 1529 1490 return -EINVAL; 1530 1491 if (!capable(CAP_SYS_RAWIO)) 1531 1492 return -EPERM; 1532 - ioc = (BIG_IOCTL_Command_struct *) 1533 - kmalloc(sizeof(*ioc), GFP_KERNEL); 1493 + ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 1534 1494 if (!ioc) { 1535 1495 status = -ENOMEM; 1536 1496 goto cleanup1; ··· 2691 2653 c->Request.CDB[0]); 2692 2654 return_status = IO_NEEDS_RETRY; 2693 2655 break; 2656 + case CMD_UNABORTABLE: 2657 + dev_warn(&h->pdev->dev, "cmd unabortable\n"); 2658 + return_status = IO_ERROR; 2659 + break; 2694 2660 default: 2695 2661 dev_warn(&h->pdev->dev, "cmd 0x%02x returned " 2696 2662 "unknown status %x\n", 
c->Request.CDB[0], ··· 3145 3103 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3146 3104 DID_PASSTHROUGH : DID_ERROR); 3147 3105 break; 3106 + case CMD_UNABORTABLE: 3107 + dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); 3108 + rq->errors = make_status_bytes(SAM_STAT_GOOD, 3109 + cmd->err_info->CommandStatus, DRIVER_OK, 3110 + cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? 3111 + DID_PASSTHROUGH : DID_ERROR); 3112 + break; 3148 3113 default: 3149 3114 dev_warn(&h->pdev->dev, "cmd %p returned " 3150 3115 "unknown status %x\n", cmd, ··· 3185 3136 return tag >> DIRECT_LOOKUP_SHIFT; 3186 3137 } 3187 3138 3188 - static inline u32 cciss_tag_discard_error_bits(u32 tag) 3139 + static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) 3189 3140 { 3190 - #define CCISS_ERROR_BITS 0x03 3191 - return tag & ~CCISS_ERROR_BITS; 3141 + #define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 3142 + #define CCISS_SIMPLE_ERROR_BITS 0x03 3143 + if (likely(h->transMethod & CFGTBL_Trans_Performant)) 3144 + return tag & ~CCISS_PERF_ERROR_BITS; 3145 + return tag & ~CCISS_SIMPLE_ERROR_BITS; 3192 3146 } 3193 3147 3194 3148 static inline void cciss_mark_tag_indexed(u32 *tag) ··· 3411 3359 { 3412 3360 u32 a; 3413 3361 3414 - if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) 3362 + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3415 3363 return h->access.command_completed(h); 3416 3364 3417 3365 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { ··· 3446 3394 /* process completion of a non-indexed command */ 3447 3395 static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) 3448 3396 { 3449 - u32 tag; 3450 3397 CommandList_struct *c = NULL; 3451 3398 __u32 busaddr_masked, tag_masked; 3452 3399 3453 - tag = cciss_tag_discard_error_bits(raw_tag); 3400 + tag_masked = cciss_tag_discard_error_bits(h, raw_tag); 3454 3401 list_for_each_entry(c, &h->cmpQ, list) { 3455 - busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); 3456 - tag_masked = cciss_tag_discard_error_bits(tag); 3402 + busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); 3457 3403 if (busaddr_masked == tag_masked) { 3458 3404 finish_cmd(h, c, raw_tag); 3459 3405 return next_command(h); ··· 3803 3753 } 3804 3754 } 3805 3755 3806 - static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) 3756 + static __devinit void cciss_enter_performant_mode(ctlr_info_t *h, 3757 + u32 use_short_tags) 3807 3758 { 3808 3759 /* This is a bit complicated. There are 8 registers on 3809 3760 * the controller which we write to to tell it 8 different ··· 3859 3808 writel(0, &h->transtable->RepQCtrAddrHigh32); 3860 3809 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); 3861 3810 writel(0, &h->transtable->RepQAddr0High32); 3862 - writel(CFGTBL_Trans_Performant, 3811 + writel(CFGTBL_Trans_Performant | use_short_tags, 3863 3812 &(h->cfgtable->HostWrite.TransportRequest)); 3864 3813 3865 3814 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); ··· 3906 3855 if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) 3907 3856 goto clean_up; 3908 3857 3909 - cciss_enter_performant_mode(h); 3858 + cciss_enter_performant_mode(h, 3859 + trans_support & CFGTBL_Trans_use_short_tags); 3910 3860 3911 3861 /* Change the access methods to the performant access methods */ 3912 3862 h->access = SA5_performant_access;
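Several of the cciss.c hunks above turn on the same detail: once the controller is told to use 32-bit tags, the number of low tag bits reserved for status depends on the transport mode, so cciss_tag_discard_error_bits() now takes the controller and masks either the low DIRECT_LOOKUP_SHIFT bits (performant mode) or only the low two bits (simple mode). A standalone sketch of that masking, using an illustrative DIRECT_LOOKUP_SHIFT value rather than the driver's real definition:

/* Sketch only: mirrors the masking logic above, not the driver itself.
 * DIRECT_LOOKUP_SHIFT is given an illustrative value here. */
#include <stdio.h>
#include <stdint.h>

#define DIRECT_LOOKUP_SHIFT     4                                 /* assumed */
#define CCISS_PERF_ERROR_BITS   ((1u << DIRECT_LOOKUP_SHIFT) - 1)
#define CCISS_SIMPLE_ERROR_BITS 0x03u

static uint32_t discard_error_bits(int performant, uint32_t tag)
{
	/* performant mode reserves more low bits of a completed tag for
	 * status information than simple mode does */
	return performant ? tag & ~CCISS_PERF_ERROR_BITS
			  : tag & ~CCISS_SIMPLE_ERROR_BITS;
}

int main(void)
{
	uint32_t busaddr = 0x1000;        /* command's DMA handle */
	uint32_t raw_tag = busaddr | 0x3; /* controller set status bits */

	/* matching a completion back to its command, as in
	 * process_nonindexed_cmd(): mask both sides the same way */
	printf("match: %d\n",
	       discard_error_bits(1, raw_tag) ==
	       discard_error_bits(1, busaddr));
	return 0;
}

Masking both the completed tag and each command's busaddr through the same helper is what lets process_nonindexed_cmd() and cmd_special_free() work in either transport mode.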
+1
drivers/block/cciss.h
···
 222 222                  h->ctlr, c->busaddr);
 223 223  #endif /* CCISS_DEBUG */
 224 224          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
     225 +        readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
 225 226          h->commands_outstanding++;
 226 227          if ( h->commands_outstanding > h->max_outstanding)
 227 228                  h->max_outstanding = h->commands_outstanding;
+1
drivers/block/cciss_cmd.h
···
 56 56
 57 57  #define CFGTBL_Trans_Simple     0x00000002l
 58 58  #define CFGTBL_Trans_Performant 0x00000004l
    59 +#define CFGTBL_Trans_use_short_tags 0x20000000l
 59 60
 60 61  #define CFGTBL_BusType_Ultra2   0x00000001l
 61 62  #define CFGTBL_BusType_Ultra3   0x00000002l
+11 -2
drivers/block/cciss_scsi.c
··· 824 824 break; 825 825 case CMD_UNSOLICITED_ABORT: 826 826 cmd->result = DID_ABORT << 16; 827 - dev_warn(&h->pdev->dev, "%p aborted do to an " 827 + dev_warn(&h->pdev->dev, "%p aborted due to an " 828 828 "unsolicited abort\n", c); 829 829 break; 830 830 case CMD_TIMEOUT: 831 831 cmd->result = DID_TIME_OUT << 16; 832 832 dev_warn(&h->pdev->dev, "%p timedout\n", c); 833 + break; 834 + case CMD_UNABORTABLE: 835 + cmd->result = DID_ERROR << 16; 836 + dev_warn(&h->pdev->dev, "c %p command " 837 + "unabortable\n", c); 833 838 break; 834 839 default: 835 840 cmd->result = DID_ERROR << 16; ··· 1012 1007 break; 1013 1008 case CMD_UNSOLICITED_ABORT: 1014 1009 dev_warn(&h->pdev->dev, 1015 - "%p aborted do to an unsolicited abort\n", c); 1010 + "%p aborted due to an unsolicited abort\n", c); 1016 1011 break; 1017 1012 case CMD_TIMEOUT: 1018 1013 dev_warn(&h->pdev->dev, "%p timedout\n", c); 1014 + break; 1015 + case CMD_UNABORTABLE: 1016 + dev_warn(&h->pdev->dev, 1017 + "%p unabortable\n", c); 1019 1018 break; 1020 1019 default: 1021 1020 dev_warn(&h->pdev->dev,
+81 -254
drivers/block/drbd/drbd_actlog.c
··· 92 92 bio->bi_end_io = drbd_md_io_complete; 93 93 bio->bi_rw = rw; 94 94 95 - if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) 95 + if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) 96 96 bio_endio(bio, -EIO); 97 97 else 98 98 submit_bio(rw, bio); ··· 176 176 struct lc_element *al_ext; 177 177 struct lc_element *tmp; 178 178 unsigned long al_flags = 0; 179 + int wake; 179 180 180 181 spin_lock_irq(&mdev->al_lock); 181 182 tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT); 182 183 if (unlikely(tmp != NULL)) { 183 184 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); 184 185 if (test_bit(BME_NO_WRITES, &bm_ext->flags)) { 186 + wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags); 185 187 spin_unlock_irq(&mdev->al_lock); 188 + if (wake) 189 + wake_up(&mdev->al_wait); 186 190 return NULL; 187 191 } 188 192 } ··· 262 258 spin_unlock_irqrestore(&mdev->al_lock, flags); 263 259 } 264 260 261 + #if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT) 262 + /* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT 263 + * are still coupled, or assume too much about their relation. 264 + * Code below will not work if this is violated. 265 + * Will be cleaned up with some followup patch. 266 + */ 267 + # error FIXME 268 + #endif 269 + 270 + static unsigned int al_extent_to_bm_page(unsigned int al_enr) 271 + { 272 + return al_enr >> 273 + /* bit to page */ 274 + ((PAGE_SHIFT + 3) - 275 + /* al extent number to bit */ 276 + (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)); 277 + } 278 + 279 + static unsigned int rs_extent_to_bm_page(unsigned int rs_enr) 280 + { 281 + return rs_enr >> 282 + /* bit to page */ 283 + ((PAGE_SHIFT + 3) - 284 + /* al extent number to bit */ 285 + (BM_EXT_SHIFT - BM_BLOCK_SHIFT)); 286 + } 287 + 265 288 int 266 289 w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) 267 290 { ··· 316 285 * For now, we must not write the transaction, 317 286 * if we cannot write out the bitmap of the evicted extent. */ 318 287 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) 319 - drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); 288 + drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted)); 320 289 321 290 /* The bitmap write may have failed, causing a state change. */ 322 291 if (mdev->state.disk < D_INCONSISTENT) { ··· 365 334 + mdev->ldev->md.al_offset + mdev->al_tr_pos; 366 335 367 336 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) 368 - drbd_chk_io_error(mdev, 1, TRUE); 337 + drbd_chk_io_error(mdev, 1, true); 369 338 370 339 if (++mdev->al_tr_pos > 371 340 div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) ··· 542 511 return 1; 543 512 } 544 513 545 - static void atodb_endio(struct bio *bio, int error) 546 - { 547 - struct drbd_atodb_wait *wc = bio->bi_private; 548 - struct drbd_conf *mdev = wc->mdev; 549 - struct page *page; 550 - int uptodate = bio_flagged(bio, BIO_UPTODATE); 551 - 552 - /* strange behavior of some lower level drivers... 553 - * fail the request by clearing the uptodate flag, 554 - * but do not return any error?! 
*/ 555 - if (!error && !uptodate) 556 - error = -EIO; 557 - 558 - drbd_chk_io_error(mdev, error, TRUE); 559 - if (error && wc->error == 0) 560 - wc->error = error; 561 - 562 - if (atomic_dec_and_test(&wc->count)) 563 - complete(&wc->io_done); 564 - 565 - page = bio->bi_io_vec[0].bv_page; 566 - put_page(page); 567 - bio_put(bio); 568 - mdev->bm_writ_cnt++; 569 - put_ldev(mdev); 570 - } 571 - 572 - /* sector to word */ 573 - #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 574 - 575 - /* activity log to on disk bitmap -- prepare bio unless that sector 576 - * is already covered by previously prepared bios */ 577 - static int atodb_prepare_unless_covered(struct drbd_conf *mdev, 578 - struct bio **bios, 579 - unsigned int enr, 580 - struct drbd_atodb_wait *wc) __must_hold(local) 581 - { 582 - struct bio *bio; 583 - struct page *page; 584 - sector_t on_disk_sector; 585 - unsigned int page_offset = PAGE_SIZE; 586 - int offset; 587 - int i = 0; 588 - int err = -ENOMEM; 589 - 590 - /* We always write aligned, full 4k blocks, 591 - * so we can ignore the logical_block_size (for now) */ 592 - enr &= ~7U; 593 - on_disk_sector = enr + mdev->ldev->md.md_offset 594 - + mdev->ldev->md.bm_offset; 595 - 596 - D_ASSERT(!(on_disk_sector & 7U)); 597 - 598 - /* Check if that enr is already covered by an already created bio. 599 - * Caution, bios[] is not NULL terminated, 600 - * but only initialized to all NULL. 601 - * For completely scattered activity log, 602 - * the last invocation iterates over all bios, 603 - * and finds the last NULL entry. 604 - */ 605 - while ((bio = bios[i])) { 606 - if (bio->bi_sector == on_disk_sector) 607 - return 0; 608 - i++; 609 - } 610 - /* bios[i] == NULL, the next not yet used slot */ 611 - 612 - /* GFP_KERNEL, we are not in the write-out path */ 613 - bio = bio_alloc(GFP_KERNEL, 1); 614 - if (bio == NULL) 615 - return -ENOMEM; 616 - 617 - if (i > 0) { 618 - const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec; 619 - page_offset = prev_bv->bv_offset + prev_bv->bv_len; 620 - page = prev_bv->bv_page; 621 - } 622 - if (page_offset == PAGE_SIZE) { 623 - page = alloc_page(__GFP_HIGHMEM); 624 - if (page == NULL) 625 - goto out_bio_put; 626 - page_offset = 0; 627 - } else { 628 - get_page(page); 629 - } 630 - 631 - offset = S2W(enr); 632 - drbd_bm_get_lel(mdev, offset, 633 - min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset), 634 - kmap(page) + page_offset); 635 - kunmap(page); 636 - 637 - bio->bi_private = wc; 638 - bio->bi_end_io = atodb_endio; 639 - bio->bi_bdev = mdev->ldev->md_bdev; 640 - bio->bi_sector = on_disk_sector; 641 - 642 - if (bio_add_page(bio, page, 4096, page_offset) != 4096) 643 - goto out_put_page; 644 - 645 - atomic_inc(&wc->count); 646 - /* we already know that we may do this... 647 - * get_ldev_if_state(mdev,D_ATTACHING); 648 - * just get the extra reference, so that the local_cnt reflects 649 - * the number of pending IO requests DRBD at its backing device. 650 - */ 651 - atomic_inc(&mdev->local_cnt); 652 - 653 - bios[i] = bio; 654 - 655 - return 0; 656 - 657 - out_put_page: 658 - err = -EINVAL; 659 - put_page(page); 660 - out_bio_put: 661 - bio_put(bio); 662 - return err; 663 - } 664 - 665 - /** 666 - * drbd_al_to_on_disk_bm() - * Writes bitmap parts covered by active AL extents 667 - * @mdev: DRBD device. 668 - * 669 - * Called when we detach (unconfigure) local storage, 670 - * or when we go from R_PRIMARY to R_SECONDARY role. 
671 - */ 672 - void drbd_al_to_on_disk_bm(struct drbd_conf *mdev) 673 - { 674 - int i, nr_elements; 675 - unsigned int enr; 676 - struct bio **bios; 677 - struct drbd_atodb_wait wc; 678 - 679 - ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING)) 680 - return; /* sorry, I don't have any act_log etc... */ 681 - 682 - wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 683 - 684 - nr_elements = mdev->act_log->nr_elements; 685 - 686 - /* GFP_KERNEL, we are not in anyone's write-out path */ 687 - bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL); 688 - if (!bios) 689 - goto submit_one_by_one; 690 - 691 - atomic_set(&wc.count, 0); 692 - init_completion(&wc.io_done); 693 - wc.mdev = mdev; 694 - wc.error = 0; 695 - 696 - for (i = 0; i < nr_elements; i++) { 697 - enr = lc_element_by_index(mdev->act_log, i)->lc_number; 698 - if (enr == LC_FREE) 699 - continue; 700 - /* next statement also does atomic_inc wc.count and local_cnt */ 701 - if (atodb_prepare_unless_covered(mdev, bios, 702 - enr/AL_EXT_PER_BM_SECT, 703 - &wc)) 704 - goto free_bios_submit_one_by_one; 705 - } 706 - 707 - /* unnecessary optimization? */ 708 - lc_unlock(mdev->act_log); 709 - wake_up(&mdev->al_wait); 710 - 711 - /* all prepared, submit them */ 712 - for (i = 0; i < nr_elements; i++) { 713 - if (bios[i] == NULL) 714 - break; 715 - if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) { 716 - bios[i]->bi_rw = WRITE; 717 - bio_endio(bios[i], -EIO); 718 - } else { 719 - submit_bio(WRITE, bios[i]); 720 - } 721 - } 722 - 723 - /* always (try to) flush bitmap to stable storage */ 724 - drbd_md_flush(mdev); 725 - 726 - /* In case we did not submit a single IO do not wait for 727 - * them to complete. ( Because we would wait forever here. ) 728 - * 729 - * In case we had IOs and they are already complete, there 730 - * is not point in waiting anyways. 731 - * Therefore this if () ... */ 732 - if (atomic_read(&wc.count)) 733 - wait_for_completion(&wc.io_done); 734 - 735 - put_ldev(mdev); 736 - 737 - kfree(bios); 738 - return; 739 - 740 - free_bios_submit_one_by_one: 741 - /* free everything by calling the endio callback directly. */ 742 - for (i = 0; i < nr_elements && bios[i]; i++) 743 - bio_endio(bios[i], 0); 744 - 745 - kfree(bios); 746 - 747 - submit_one_by_one: 748 - dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n"); 749 - 750 - for (i = 0; i < mdev->act_log->nr_elements; i++) { 751 - enr = lc_element_by_index(mdev->act_log, i)->lc_number; 752 - if (enr == LC_FREE) 753 - continue; 754 - /* Really slow: if we have al-extents 16..19 active, 755 - * sector 4 will be written four times! Synchronous! */ 756 - drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT); 757 - } 758 - 759 - lc_unlock(mdev->act_log); 760 - wake_up(&mdev->al_wait); 761 - put_ldev(mdev); 762 - } 763 - 764 514 /** 765 515 * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents 766 516 * @mdev: DRBD device. ··· 621 809 return 1; 622 810 } 623 811 624 - drbd_bm_write_sect(mdev, udw->enr); 812 + drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr)); 625 813 put_ldev(mdev); 626 814 627 815 kfree(udw); ··· 701 889 dev_warn(DEV, "Kicking resync_lru element enr=%u " 702 890 "out with rs_failed=%d\n", 703 891 ext->lce.lc_number, ext->rs_failed); 704 - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); 705 892 } 706 893 ext->rs_left = rs_left; 707 894 ext->rs_failed = success ? 
0 : count; ··· 719 908 drbd_queue_work_front(&mdev->data.work, &udw->w); 720 909 } else { 721 910 dev_warn(DEV, "Could not kmalloc an udw\n"); 722 - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); 723 911 } 724 912 } 725 913 } else { ··· 726 916 mdev->resync_locked, 727 917 mdev->resync->nr_elements, 728 918 mdev->resync->flags); 919 + } 920 + } 921 + 922 + void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go) 923 + { 924 + unsigned long now = jiffies; 925 + unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; 926 + int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; 927 + if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { 928 + if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go && 929 + mdev->state.conn != C_PAUSED_SYNC_T && 930 + mdev->state.conn != C_PAUSED_SYNC_S) { 931 + mdev->rs_mark_time[next] = now; 932 + mdev->rs_mark_left[next] = still_to_go; 933 + mdev->rs_last_mark = next; 934 + } 729 935 } 730 936 } 731 937 ··· 762 936 int wake_up = 0; 763 937 unsigned long flags; 764 938 765 - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 939 + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 766 940 dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", 767 941 (unsigned long long)sector, size); 768 942 return; ··· 795 969 */ 796 970 count = drbd_bm_clear_bits(mdev, sbnr, ebnr); 797 971 if (count && get_ldev(mdev)) { 798 - unsigned long now = jiffies; 799 - unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; 800 - int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; 801 - if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { 802 - unsigned long tw = drbd_bm_total_weight(mdev); 803 - if (mdev->rs_mark_left[mdev->rs_last_mark] != tw && 804 - mdev->state.conn != C_PAUSED_SYNC_T && 805 - mdev->state.conn != C_PAUSED_SYNC_S) { 806 - mdev->rs_mark_time[next] = now; 807 - mdev->rs_mark_left[next] = tw; 808 - mdev->rs_last_mark = next; 809 - } 810 - } 972 + drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev)); 811 973 spin_lock_irqsave(&mdev->al_lock, flags); 812 - drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); 974 + drbd_try_clear_on_disk_bm(mdev, sector, count, true); 813 975 spin_unlock_irqrestore(&mdev->al_lock, flags); 814 976 815 977 /* just wake_up unconditional now, various lc_chaged(), ··· 812 998 /* 813 999 * this is intended to set one request worth of data out of sync. 814 1000 * affects at least 1 bit, 815 - * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits. 1001 + * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits. 816 1002 * 817 1003 * called by tl_clear and drbd_send_dblock (==drbd_make_request). 818 1004 * so this can be _any_ process. 
819 1005 */ 820 - void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, 1006 + int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, 821 1007 const char *file, const unsigned int line) 822 1008 { 823 1009 unsigned long sbnr, ebnr, lbnr, flags; 824 1010 sector_t esector, nr_sectors; 825 - unsigned int enr, count; 1011 + unsigned int enr, count = 0; 826 1012 struct lc_element *e; 827 1013 828 - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 1014 + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 829 1015 dev_err(DEV, "sector: %llus, size: %d\n", 830 1016 (unsigned long long)sector, size); 831 - return; 1017 + return 0; 832 1018 } 833 1019 834 1020 if (!get_ldev(mdev)) 835 - return; /* no disk, no metadata, no bitmap to set bits in */ 1021 + return 0; /* no disk, no metadata, no bitmap to set bits in */ 836 1022 837 1023 nr_sectors = drbd_get_capacity(mdev->this_bdev); 838 1024 esector = sector + (size >> 9) - 1; ··· 862 1048 863 1049 out: 864 1050 put_ldev(mdev); 1051 + 1052 + return count; 865 1053 } 866 1054 867 1055 static ··· 944 1128 unsigned int enr = BM_SECT_TO_EXT(sector); 945 1129 struct bm_extent *bm_ext; 946 1130 int i, sig; 1131 + int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait. 1132 + 200 times -> 20 seconds. */ 947 1133 1134 + retry: 948 1135 sig = wait_event_interruptible(mdev->al_wait, 949 1136 (bm_ext = _bme_get(mdev, enr))); 950 1137 if (sig) ··· 958 1139 959 1140 for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { 960 1141 sig = wait_event_interruptible(mdev->al_wait, 961 - !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i)); 962 - if (sig) { 1142 + !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) || 1143 + test_bit(BME_PRIORITY, &bm_ext->flags)); 1144 + 1145 + if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { 963 1146 spin_lock_irq(&mdev->al_lock); 964 1147 if (lc_put(mdev->resync, &bm_ext->lce) == 0) { 965 - clear_bit(BME_NO_WRITES, &bm_ext->flags); 1148 + bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */ 966 1149 mdev->resync_locked--; 967 1150 wake_up(&mdev->al_wait); 968 1151 } 969 1152 spin_unlock_irq(&mdev->al_lock); 970 - return -EINTR; 1153 + if (sig) 1154 + return -EINTR; 1155 + if (schedule_timeout_interruptible(HZ/10)) 1156 + return -EINTR; 1157 + if (sa && --sa == 0) 1158 + dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec." 1159 + "Resync stalled?\n"); 1160 + goto retry; 971 1161 } 972 1162 } 973 1163 set_bit(BME_LOCKED, &bm_ext->flags); ··· 1119 1291 } 1120 1292 1121 1293 if (lc_put(mdev->resync, &bm_ext->lce) == 0) { 1122 - clear_bit(BME_LOCKED, &bm_ext->flags); 1123 - clear_bit(BME_NO_WRITES, &bm_ext->flags); 1294 + bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */ 1124 1295 mdev->resync_locked--; 1125 1296 wake_up(&mdev->al_wait); 1126 1297 } ··· 1210 1383 sector_t esector, nr_sectors; 1211 1384 int wake_up = 0; 1212 1385 1213 - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 1386 + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 1214 1387 dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", 1215 1388 (unsigned long long)sector, size); 1216 1389 return; ··· 1247 1420 mdev->rs_failed += count; 1248 1421 1249 1422 if (get_ldev(mdev)) { 1250 - drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE); 1423 + drbd_try_clear_on_disk_bm(mdev, sector, count, false); 1251 1424 put_ldev(mdev); 1252 1425 } 1253 1426
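The new al_extent_to_bm_page() and rs_extent_to_bm_page() helpers above exist because the per-sector bitmap writeout (drbd_bm_write_sect) is replaced by per-page writeout (drbd_bm_write_page), so callers only need the bitmap page index that holds an extent's bits. A worked example of that shift arithmetic, assuming typical drbd constants (4 KiB pages and bitmap blocks, 4 MiB activity-log extents, 16 MiB resync extents); these values are assumptions for illustration, not taken from this diff:

/* Worked example only; the constants below are assumed, not quoted. */
#include <stdio.h>

#define PAGE_SHIFT      12   /* 4 KiB pages */
#define BM_BLOCK_SHIFT  12   /* 4 KiB of storage per bitmap bit */
#define AL_EXTENT_SHIFT 22   /* 4 MiB activity-log extent */
#define BM_EXT_SHIFT    24   /* 16 MiB resync extent */

int main(void)
{
	/* one bitmap page holds 1 << (PAGE_SHIFT + 3) bits; one AL extent
	 * needs 1 << (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT) of them */
	unsigned al_shift = (PAGE_SHIFT + 3) - (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT); /* 5 */
	unsigned rs_shift = (PAGE_SHIFT + 3) - (BM_EXT_SHIFT - BM_BLOCK_SHIFT);    /* 3 */

	printf("AL extent 100 -> bitmap page %u\n", 100u >> al_shift); /* 3 */
	printf("RS extent 100 -> bitmap page %u\n", 100u >> rs_shift); /* 12 */
	return 0;
}

With those assumed sizes one 4 KiB bitmap page covers 128 MiB of backing storage, i.e. 32 AL extents or 8 resync extents, which is where the shifts of 5 and 3 come from.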
+502 -254
drivers/block/drbd/drbd_bitmap.c
··· 28 28 #include <linux/drbd.h> 29 29 #include <linux/slab.h> 30 30 #include <asm/kmap_types.h> 31 + 32 + #include <asm-generic/bitops/le.h> 33 + 31 34 #include "drbd_int.h" 35 + 32 36 33 37 /* OPAQUE outside this file! 34 38 * interface defined in drbd_int.h ··· 40 36 * convention: 41 37 * function name drbd_bm_... => used elsewhere, "public". 42 38 * function name bm_... => internal to implementation, "private". 39 + */ 43 40 44 - * Note that since find_first_bit returns int, at the current granularity of 45 - * the bitmap (4KB per byte), this implementation "only" supports up to 46 - * 1<<(32+12) == 16 TB... 41 + 42 + /* 43 + * LIMITATIONS: 44 + * We want to support >= peta byte of backend storage, while for now still using 45 + * a granularity of one bit per 4KiB of storage. 46 + * 1 << 50 bytes backend storage (1 PiB) 47 + * 1 << (50 - 12) bits needed 48 + * 38 --> we need u64 to index and count bits 49 + * 1 << (38 - 3) bitmap bytes needed 50 + * 35 --> we still need u64 to index and count bytes 51 + * (that's 32 GiB of bitmap for 1 PiB storage) 52 + * 1 << (35 - 2) 32bit longs needed 53 + * 33 --> we'd even need u64 to index and count 32bit long words. 54 + * 1 << (35 - 3) 64bit longs needed 55 + * 32 --> we could get away with a 32bit unsigned int to index and count 56 + * 64bit long words, but I rather stay with unsigned long for now. 57 + * We probably should neither count nor point to bytes or long words 58 + * directly, but either by bitnumber, or by page index and offset. 59 + * 1 << (35 - 12) 60 + * 22 --> we need that much 4KiB pages of bitmap. 61 + * 1 << (22 + 3) --> on a 64bit arch, 62 + * we need 32 MiB to store the array of page pointers. 63 + * 64 + * Because I'm lazy, and because the resulting patch was too large, too ugly 65 + * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), 66 + * (1 << 32) bits * 4k storage. 67 + * 68 + 69 + * bitmap storage and IO: 70 + * Bitmap is stored little endian on disk, and is kept little endian in 71 + * core memory. Currently we still hold the full bitmap in core as long 72 + * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 73 + * seems excessive. 74 + * 75 + * We plan to reduce the amount of in-core bitmap pages by pageing them in 76 + * and out against their on-disk location as necessary, but need to make 77 + * sure we don't cause too much meta data IO, and must not deadlock in 78 + * tight memory situations. This needs some more work. 47 79 */ 48 80 49 81 /* ··· 95 55 struct drbd_bitmap { 96 56 struct page **bm_pages; 97 57 spinlock_t bm_lock; 98 - /* WARNING unsigned long bm_*: 99 - * 32bit number of bit offset is just enough for 512 MB bitmap. 100 - * it will blow up if we make the bitmap bigger... 101 - * not that it makes much sense to have a bitmap that large, 102 - * rather change the granularity to 16k or 64k or something. 103 - * (that implies other problems, however...) 104 - */ 58 + 59 + /* see LIMITATIONS: above */ 60 + 105 61 unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? 
*/ 106 62 unsigned long bm_bits; 107 63 size_t bm_words; ··· 105 69 sector_t bm_dev_capacity; 106 70 struct mutex bm_change; /* serializes resize operations */ 107 71 108 - atomic_t bm_async_io; 109 - wait_queue_head_t bm_io_wait; 72 + wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ 110 73 111 - unsigned long bm_flags; 74 + enum bm_flag bm_flags; 112 75 113 76 /* debugging aid, in case we are still racy somewhere */ 114 77 char *bm_why; 115 78 struct task_struct *bm_task; 116 79 }; 117 80 118 - /* definition of bits in bm_flags */ 119 - #define BM_LOCKED 0 120 - #define BM_MD_IO_ERROR 1 121 - #define BM_P_VMALLOCED 2 122 - 123 81 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 124 82 unsigned long e, int val, const enum km_type km); 125 - 126 - static int bm_is_locked(struct drbd_bitmap *b) 127 - { 128 - return test_bit(BM_LOCKED, &b->bm_flags); 129 - } 130 83 131 84 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 132 85 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) ··· 133 108 b->bm_task == mdev->worker.task ? "worker" : "?"); 134 109 } 135 110 136 - void drbd_bm_lock(struct drbd_conf *mdev, char *why) 111 + void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) 137 112 { 138 113 struct drbd_bitmap *b = mdev->bitmap; 139 114 int trylock_failed; ··· 156 131 b->bm_task == mdev->worker.task ? "worker" : "?"); 157 132 mutex_lock(&b->bm_change); 158 133 } 159 - if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) 134 + if (BM_LOCKED_MASK & b->bm_flags) 160 135 dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); 136 + b->bm_flags |= flags & BM_LOCKED_MASK; 161 137 162 138 b->bm_why = why; 163 139 b->bm_task = current; ··· 172 146 return; 173 147 } 174 148 175 - if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags)) 149 + if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) 176 150 dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); 177 151 152 + b->bm_flags &= ~BM_LOCKED_MASK; 178 153 b->bm_why = NULL; 179 154 b->bm_task = NULL; 180 155 mutex_unlock(&b->bm_change); 181 156 } 182 157 183 - /* word offset to long pointer */ 184 - static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km) 158 + /* we store some "meta" info about our pages in page->private */ 159 + /* at a granularity of 4k storage per bitmap bit: 160 + * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks 161 + * 1<<38 bits, 162 + * 1<<23 4k bitmap pages. 163 + * Use 24 bits as page index, covers 2 peta byte storage 164 + * at a granularity of 4k per bit. 165 + * Used to report the failed page idx on io error from the endio handlers. 166 + */ 167 + #define BM_PAGE_IDX_MASK ((1UL<<24)-1) 168 + /* this page is currently read in, or written back */ 169 + #define BM_PAGE_IO_LOCK 31 170 + /* if there has been an IO error for this page */ 171 + #define BM_PAGE_IO_ERROR 30 172 + /* this is to be able to intelligently skip disk IO, 173 + * set if bits have been set since last IO. */ 174 + #define BM_PAGE_NEED_WRITEOUT 29 175 + /* to mark for lazy writeout once syncer cleared all clearable bits, 176 + * we if bits have been cleared since last IO. */ 177 + #define BM_PAGE_LAZY_WRITEOUT 28 178 + 179 + /* store_page_idx uses non-atomic assingment. It is only used directly after 180 + * allocating the page. 
All other bm_set_page_* and bm_clear_page_* need to 181 + * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 182 + * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 183 + * requires it all to be atomic as well. */ 184 + static void bm_store_page_idx(struct page *page, unsigned long idx) 185 185 { 186 - struct page *page; 187 - unsigned long page_nr; 186 + BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); 187 + page_private(page) |= idx; 188 + } 188 189 190 + static unsigned long bm_page_to_idx(struct page *page) 191 + { 192 + return page_private(page) & BM_PAGE_IDX_MASK; 193 + } 194 + 195 + /* As is very unlikely that the same page is under IO from more than one 196 + * context, we can get away with a bit per page and one wait queue per bitmap. 197 + */ 198 + static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) 199 + { 200 + struct drbd_bitmap *b = mdev->bitmap; 201 + void *addr = &page_private(b->bm_pages[page_nr]); 202 + wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 203 + } 204 + 205 + static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) 206 + { 207 + struct drbd_bitmap *b = mdev->bitmap; 208 + void *addr = &page_private(b->bm_pages[page_nr]); 209 + clear_bit(BM_PAGE_IO_LOCK, addr); 210 + smp_mb__after_clear_bit(); 211 + wake_up(&mdev->bitmap->bm_io_wait); 212 + } 213 + 214 + /* set _before_ submit_io, so it may be reset due to being changed 215 + * while this page is in flight... will get submitted later again */ 216 + static void bm_set_page_unchanged(struct page *page) 217 + { 218 + /* use cmpxchg? */ 219 + clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 220 + clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 221 + } 222 + 223 + static void bm_set_page_need_writeout(struct page *page) 224 + { 225 + set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 226 + } 227 + 228 + static int bm_test_page_unchanged(struct page *page) 229 + { 230 + volatile const unsigned long *addr = &page_private(page); 231 + return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; 232 + } 233 + 234 + static void bm_set_page_io_err(struct page *page) 235 + { 236 + set_bit(BM_PAGE_IO_ERROR, &page_private(page)); 237 + } 238 + 239 + static void bm_clear_page_io_err(struct page *page) 240 + { 241 + clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); 242 + } 243 + 244 + static void bm_set_page_lazy_writeout(struct page *page) 245 + { 246 + set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 247 + } 248 + 249 + static int bm_test_page_lazy_writeout(struct page *page) 250 + { 251 + return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 252 + } 253 + 254 + /* on a 32bit box, this would allow for exactly (2<<38) bits. 
*/ 255 + static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) 256 + { 189 257 /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ 190 - page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); 258 + unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); 191 259 BUG_ON(page_nr >= b->bm_number_of_pages); 192 - page = b->bm_pages[page_nr]; 260 + return page_nr; 261 + } 193 262 263 + static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) 264 + { 265 + /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ 266 + unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); 267 + BUG_ON(page_nr >= b->bm_number_of_pages); 268 + return page_nr; 269 + } 270 + 271 + static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) 272 + { 273 + struct page *page = b->bm_pages[idx]; 194 274 return (unsigned long *) kmap_atomic(page, km); 195 275 } 196 276 197 - static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset) 277 + static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 198 278 { 199 - return __bm_map_paddr(b, offset, KM_IRQ1); 279 + return __bm_map_pidx(b, idx, KM_IRQ1); 200 280 } 201 281 202 282 static void __bm_unmap(unsigned long *p_addr, const enum km_type km) ··· 333 201 * struct drbd_conf*, but for the debug macros I like to have the mdev around 334 202 * to be able to report device specific. 335 203 */ 204 + 336 205 337 206 static void bm_free_pages(struct page **pages, unsigned long number) 338 207 { ··· 402 269 bm_vk_free(new_pages, vmalloced); 403 270 return NULL; 404 271 } 272 + /* we want to know which page it is 273 + * from the endio handlers */ 274 + bm_store_page_idx(page, i); 405 275 new_pages[i] = page; 406 276 } 407 277 } else { ··· 416 280 } 417 281 418 282 if (vmalloced) 419 - set_bit(BM_P_VMALLOCED, &b->bm_flags); 283 + b->bm_flags |= BM_P_VMALLOCED; 420 284 else 421 - clear_bit(BM_P_VMALLOCED, &b->bm_flags); 285 + b->bm_flags &= ~BM_P_VMALLOCED; 422 286 423 287 return new_pages; 424 288 } ··· 455 319 { 456 320 ERR_IF (!mdev->bitmap) return; 457 321 bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); 458 - bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags)); 322 + bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); 459 323 kfree(mdev->bitmap); 460 324 mdev->bitmap = NULL; 461 325 } ··· 465 329 * this masks out the remaining bits. 466 330 * Returns the number of bits cleared. 
467 331 */ 332 + #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) 333 + #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) 334 + #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) 468 335 static int bm_clear_surplus(struct drbd_bitmap *b) 469 336 { 470 - const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; 471 - size_t w = b->bm_bits >> LN2_BPL; 472 - int cleared = 0; 337 + unsigned long mask; 473 338 unsigned long *p_addr, *bm; 339 + int tmp; 340 + int cleared = 0; 474 341 475 - p_addr = bm_map_paddr(b, w); 476 - bm = p_addr + MLPP(w); 477 - if (w < b->bm_words) { 342 + /* number of bits modulo bits per page */ 343 + tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 344 + /* mask the used bits of the word containing the last bit */ 345 + mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 346 + /* bitmap is always stored little endian, 347 + * on disk and in core memory alike */ 348 + mask = cpu_to_lel(mask); 349 + 350 + p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 351 + bm = p_addr + (tmp/BITS_PER_LONG); 352 + if (mask) { 353 + /* If mask != 0, we are not exactly aligned, so bm now points 354 + * to the long containing the last bit. 355 + * If mask == 0, bm already points to the word immediately 356 + * after the last (long word aligned) bit. */ 478 357 cleared = hweight_long(*bm & ~mask); 479 358 *bm &= mask; 480 - w++; bm++; 359 + bm++; 481 360 } 482 361 483 - if (w < b->bm_words) { 362 + if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 363 + /* on a 32bit arch, we may need to zero out 364 + * a padding long to align with a 64bit remote */ 484 365 cleared += hweight_long(*bm); 485 366 *bm = 0; 486 367 } ··· 507 354 508 355 static void bm_set_surplus(struct drbd_bitmap *b) 509 356 { 510 - const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1; 511 - size_t w = b->bm_bits >> LN2_BPL; 357 + unsigned long mask; 512 358 unsigned long *p_addr, *bm; 359 + int tmp; 513 360 514 - p_addr = bm_map_paddr(b, w); 515 - bm = p_addr + MLPP(w); 516 - if (w < b->bm_words) { 361 + /* number of bits modulo bits per page */ 362 + tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 363 + /* mask the used bits of the word containing the last bit */ 364 + mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 365 + /* bitmap is always stored little endian, 366 + * on disk and in core memory alike */ 367 + mask = cpu_to_lel(mask); 368 + 369 + p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 370 + bm = p_addr + (tmp/BITS_PER_LONG); 371 + if (mask) { 372 + /* If mask != 0, we are not exactly aligned, so bm now points 373 + * to the long containing the last bit. 374 + * If mask == 0, bm already points to the word immediately 375 + * after the last (long word aligned) bit. 
*/ 517 376 *bm |= ~mask; 518 - bm++; w++; 377 + bm++; 519 378 } 520 379 521 - if (w < b->bm_words) { 522 - *bm = ~(0UL); 380 + if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 381 + /* on a 32bit arch, we may need to zero out 382 + * a padding long to align with a 64bit remote */ 383 + *bm = ~0UL; 523 384 } 524 385 bm_unmap(p_addr); 525 386 } 526 387 527 - static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian) 528 - { 529 - unsigned long *p_addr, *bm, offset = 0; 530 - unsigned long bits = 0; 531 - unsigned long i, do_now; 532 - 533 - while (offset < b->bm_words) { 534 - i = do_now = min_t(size_t, b->bm_words-offset, LWPP); 535 - p_addr = __bm_map_paddr(b, offset, KM_USER0); 536 - bm = p_addr + MLPP(offset); 537 - while (i--) { 538 - #ifndef __LITTLE_ENDIAN 539 - if (swap_endian) 540 - *bm = lel_to_cpu(*bm); 541 - #endif 542 - bits += hweight_long(*bm++); 543 - } 544 - __bm_unmap(p_addr, KM_USER0); 545 - offset += do_now; 546 - cond_resched(); 547 - } 548 - 549 - return bits; 550 - } 551 - 388 + /* you better not modify the bitmap while this is running, 389 + * or its results will be stale */ 552 390 static unsigned long bm_count_bits(struct drbd_bitmap *b) 553 391 { 554 - return __bm_count_bits(b, 0); 555 - } 392 + unsigned long *p_addr; 393 + unsigned long bits = 0; 394 + unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; 395 + int idx, i, last_word; 556 396 557 - static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b) 558 - { 559 - return __bm_count_bits(b, 1); 397 + /* all but last page */ 398 + for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { 399 + p_addr = __bm_map_pidx(b, idx, KM_USER0); 400 + for (i = 0; i < LWPP; i++) 401 + bits += hweight_long(p_addr[i]); 402 + __bm_unmap(p_addr, KM_USER0); 403 + cond_resched(); 404 + } 405 + /* last (or only) page */ 406 + last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; 407 + p_addr = __bm_map_pidx(b, idx, KM_USER0); 408 + for (i = 0; i < last_word; i++) 409 + bits += hweight_long(p_addr[i]); 410 + p_addr[last_word] &= cpu_to_lel(mask); 411 + bits += hweight_long(p_addr[last_word]); 412 + /* 32bit arch, may have an unused padding long */ 413 + if (BITS_PER_LONG == 32 && (last_word & 1) == 0) 414 + p_addr[last_word+1] = 0; 415 + __bm_unmap(p_addr, KM_USER0); 416 + return bits; 560 417 } 561 418 562 419 /* offset and len in long words.*/ 563 420 static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) 564 421 { 565 422 unsigned long *p_addr, *bm; 423 + unsigned int idx; 566 424 size_t do_now, end; 567 - 568 - #define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512) 569 425 570 426 end = offset + len; 571 427 ··· 585 423 586 424 while (offset < end) { 587 425 do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; 588 - p_addr = bm_map_paddr(b, offset); 426 + idx = bm_word_to_page_idx(b, offset); 427 + p_addr = bm_map_pidx(b, idx); 589 428 bm = p_addr + MLPP(offset); 590 429 if (bm+do_now > p_addr + LWPP) { 591 430 printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", 592 431 p_addr, bm, (int)do_now); 593 - break; /* breaks to after catch_oob_access_end() only! 
*/ 594 - } 595 - memset(bm, c, do_now * sizeof(long)); 432 + } else 433 + memset(bm, c, do_now * sizeof(long)); 596 434 bm_unmap(p_addr); 435 + bm_set_page_need_writeout(b->bm_pages[idx]); 597 436 offset += do_now; 598 437 } 599 438 } ··· 610 447 int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) 611 448 { 612 449 struct drbd_bitmap *b = mdev->bitmap; 613 - unsigned long bits, words, owords, obits, *p_addr, *bm; 450 + unsigned long bits, words, owords, obits; 614 451 unsigned long want, have, onpages; /* number of pages */ 615 452 struct page **npages, **opages = NULL; 616 453 int err = 0, growing; ··· 618 455 619 456 ERR_IF(!b) return -ENOMEM; 620 457 621 - drbd_bm_lock(mdev, "resize"); 458 + drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK); 622 459 623 460 dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", 624 461 (unsigned long long)capacity); ··· 626 463 if (capacity == b->bm_dev_capacity) 627 464 goto out; 628 465 629 - opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags); 466 + opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); 630 467 631 468 if (capacity == 0) { 632 469 spin_lock_irq(&b->bm_lock); ··· 654 491 words = ALIGN(bits, 64) >> LN2_BPL; 655 492 656 493 if (get_ldev(mdev)) { 657 - D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12)); 494 + u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12; 658 495 put_ldev(mdev); 496 + if (bits > bits_on_disk) { 497 + dev_info(DEV, "bits = %lu\n", bits); 498 + dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); 499 + err = -ENOSPC; 500 + goto out; 501 + } 659 502 } 660 503 661 - /* one extra long to catch off by one errors */ 662 - want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; 504 + want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; 663 505 have = b->bm_number_of_pages; 664 506 if (want == have) { 665 507 D_ASSERT(b->bm_pages != NULL); 666 508 npages = b->bm_pages; 667 509 } else { 668 - if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC)) 510 + if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC)) 669 511 npages = NULL; 670 512 else 671 513 npages = bm_realloc_pages(b, want); ··· 710 542 bm_free_pages(opages + want, have - want); 711 543 } 712 544 713 - p_addr = bm_map_paddr(b, words); 714 - bm = p_addr + MLPP(words); 715 - *bm = DRBD_MAGIC; 716 - bm_unmap(p_addr); 717 - 718 545 (void)bm_clear_surplus(b); 719 546 720 547 spin_unlock_irq(&b->bm_lock); ··· 717 554 bm_vk_free(opages, opages_vmalloced); 718 555 if (!growing) 719 556 b->bm_set = bm_count_bits(b); 720 - dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words); 557 + dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); 721 558 722 559 out: 723 560 drbd_bm_unlock(mdev); ··· 787 624 struct drbd_bitmap *b = mdev->bitmap; 788 625 unsigned long *p_addr, *bm; 789 626 unsigned long word, bits; 627 + unsigned int idx; 790 628 size_t end, do_now; 791 629 792 630 end = offset + number; ··· 802 638 spin_lock_irq(&b->bm_lock); 803 639 while (offset < end) { 804 640 do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; 805 - p_addr = bm_map_paddr(b, offset); 641 + idx = bm_word_to_page_idx(b, offset); 642 + p_addr = bm_map_pidx(b, idx); 806 643 bm = p_addr + MLPP(offset); 807 644 offset += do_now; 808 645 while (do_now--) { 809 646 bits = hweight_long(*bm); 810 - word = *bm | lel_to_cpu(*buffer++); 647 + word = *bm | *buffer++; 811 648 *bm++ = word; 812 649 b->bm_set += hweight_long(word) - bits; 813 650 } 814 651 bm_unmap(p_addr); 652 + 
bm_set_page_need_writeout(b->bm_pages[idx]); 815 653 } 816 654 /* with 32bit <-> 64bit cross-platform connect 817 655 * this is only correct for current usage, ··· 822 656 */ 823 657 if (end == b->bm_words) 824 658 b->bm_set -= bm_clear_surplus(b); 825 - 826 659 spin_unlock_irq(&b->bm_lock); 827 660 } 828 661 ··· 851 686 else { 852 687 while (offset < end) { 853 688 do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; 854 - p_addr = bm_map_paddr(b, offset); 689 + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); 855 690 bm = p_addr + MLPP(offset); 856 691 offset += do_now; 857 692 while (do_now--) 858 - *buffer++ = cpu_to_lel(*bm++); 693 + *buffer++ = *bm++; 859 694 bm_unmap(p_addr); 860 695 } 861 696 } ··· 889 724 spin_unlock_irq(&b->bm_lock); 890 725 } 891 726 727 + struct bm_aio_ctx { 728 + struct drbd_conf *mdev; 729 + atomic_t in_flight; 730 + struct completion done; 731 + unsigned flags; 732 + #define BM_AIO_COPY_PAGES 1 733 + int error; 734 + }; 735 + 736 + /* bv_page may be a copy, or may be the original */ 892 737 static void bm_async_io_complete(struct bio *bio, int error) 893 738 { 894 - struct drbd_bitmap *b = bio->bi_private; 739 + struct bm_aio_ctx *ctx = bio->bi_private; 740 + struct drbd_conf *mdev = ctx->mdev; 741 + struct drbd_bitmap *b = mdev->bitmap; 742 + unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); 895 743 int uptodate = bio_flagged(bio, BIO_UPTODATE); 896 744 897 745 ··· 915 737 if (!error && !uptodate) 916 738 error = -EIO; 917 739 740 + if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && 741 + !bm_test_page_unchanged(b->bm_pages[idx])) 742 + dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); 743 + 918 744 if (error) { 919 - /* doh. what now? 920 - * for now, set all bits, and flag MD_IO_ERROR */ 921 - __set_bit(BM_MD_IO_ERROR, &b->bm_flags); 745 + /* ctx error will hold the completed-last non-zero error code, 746 + * in case error codes differ. */ 747 + ctx->error = error; 748 + bm_set_page_io_err(b->bm_pages[idx]); 749 + /* Not identical to on disk version of it. 750 + * Is BM_PAGE_IO_ERROR enough? */ 751 + if (__ratelimit(&drbd_ratelimit_state)) 752 + dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", 753 + error, idx); 754 + } else { 755 + bm_clear_page_io_err(b->bm_pages[idx]); 756 + dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); 922 757 } 923 - if (atomic_dec_and_test(&b->bm_async_io)) 924 - wake_up(&b->bm_io_wait); 758 + 759 + bm_page_unlock_io(mdev, idx); 760 + 761 + /* FIXME give back to page pool */ 762 + if (ctx->flags & BM_AIO_COPY_PAGES) 763 + put_page(bio->bi_io_vec[0].bv_page); 925 764 926 765 bio_put(bio); 766 + 767 + if (atomic_dec_and_test(&ctx->in_flight)) 768 + complete(&ctx->done); 927 769 } 928 770 929 - static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local) 771 + static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) 930 772 { 931 773 /* we are process context. 
we always get a bio */ 932 774 struct bio *bio = bio_alloc(GFP_KERNEL, 1); 775 + struct drbd_conf *mdev = ctx->mdev; 776 + struct drbd_bitmap *b = mdev->bitmap; 777 + struct page *page; 933 778 unsigned int len; 779 + 934 780 sector_t on_disk_sector = 935 781 mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; 936 782 on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); 937 783 938 784 /* this might happen with very small 939 - * flexible external meta data device */ 785 + * flexible external meta data device, 786 + * or with PAGE_SIZE > 4k */ 940 787 len = min_t(unsigned int, PAGE_SIZE, 941 788 (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); 942 789 790 + /* serialize IO on this page */ 791 + bm_page_lock_io(mdev, page_nr); 792 + /* before memcpy and submit, 793 + * so it can be redirtied any time */ 794 + bm_set_page_unchanged(b->bm_pages[page_nr]); 795 + 796 + if (ctx->flags & BM_AIO_COPY_PAGES) { 797 + /* FIXME alloc_page is good enough for now, but actually needs 798 + * to use pre-allocated page pool */ 799 + void *src, *dest; 800 + page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); 801 + dest = kmap_atomic(page, KM_USER0); 802 + src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); 803 + memcpy(dest, src, PAGE_SIZE); 804 + kunmap_atomic(src, KM_USER1); 805 + kunmap_atomic(dest, KM_USER0); 806 + bm_store_page_idx(page, page_nr); 807 + } else 808 + page = b->bm_pages[page_nr]; 809 + 943 810 bio->bi_bdev = mdev->ldev->md_bdev; 944 811 bio->bi_sector = on_disk_sector; 945 - bio_add_page(bio, b->bm_pages[page_nr], len, 0); 946 - bio->bi_private = b; 812 + bio_add_page(bio, page, len, 0); 813 + bio->bi_private = ctx; 947 814 bio->bi_end_io = bm_async_io_complete; 948 815 949 - if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { 816 + if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { 950 817 bio->bi_rw |= rw; 951 818 bio_endio(bio, -EIO); 952 819 } else { ··· 999 776 } 1000 777 } 1001 778 1002 - # if defined(__LITTLE_ENDIAN) 1003 - /* nothing to do, on disk == in memory */ 1004 - # define bm_cpu_to_lel(x) ((void)0) 1005 - # else 1006 - static void bm_cpu_to_lel(struct drbd_bitmap *b) 1007 - { 1008 - /* need to cpu_to_lel all the pages ... 1009 - * this may be optimized by using 1010 - * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0; 1011 - * the following is still not optimal, but better than nothing */ 1012 - unsigned int i; 1013 - unsigned long *p_addr, *bm; 1014 - if (b->bm_set == 0) { 1015 - /* no page at all; avoid swap if all is 0 */ 1016 - i = b->bm_number_of_pages; 1017 - } else if (b->bm_set == b->bm_bits) { 1018 - /* only the last page */ 1019 - i = b->bm_number_of_pages - 1; 1020 - } else { 1021 - /* all pages */ 1022 - i = 0; 1023 - } 1024 - for (; i < b->bm_number_of_pages; i++) { 1025 - p_addr = kmap_atomic(b->bm_pages[i], KM_USER0); 1026 - for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++) 1027 - *bm = cpu_to_lel(*bm); 1028 - kunmap_atomic(p_addr, KM_USER0); 1029 - } 1030 - } 1031 - # endif 1032 - /* lel_to_cpu == cpu_to_lel */ 1033 - # define bm_lel_to_cpu(x) bm_cpu_to_lel(x) 1034 - 1035 779 /* 1036 780 * bm_rw: read/write the whole bitmap from/to its on disk location. 
1037 781 */ 1038 - static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) 782 + static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) 1039 783 { 784 + struct bm_aio_ctx ctx = { 785 + .mdev = mdev, 786 + .in_flight = ATOMIC_INIT(1), 787 + .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), 788 + .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0, 789 + }; 1040 790 struct drbd_bitmap *b = mdev->bitmap; 1041 - /* sector_t sector; */ 1042 - int bm_words, num_pages, i; 791 + int num_pages, i, count = 0; 1043 792 unsigned long now; 1044 793 char ppb[10]; 1045 794 int err = 0; 1046 795 1047 - WARN_ON(!bm_is_locked(b)); 796 + /* 797 + * We are protected against bitmap disappearing/resizing by holding an 798 + * ldev reference (caller must have called get_ldev()). 799 + * For read/write, we are protected against changes to the bitmap by 800 + * the bitmap lock (see drbd_bitmap_io). 801 + * For lazy writeout, we don't care for ongoing changes to the bitmap, 802 + * as we submit copies of pages anyways. 803 + */ 804 + if (!ctx.flags) 805 + WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); 1048 806 1049 - /* no spinlock here, the drbd_bm_lock should be enough! */ 1050 - 1051 - bm_words = drbd_bm_words(mdev); 1052 - num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT; 1053 - 1054 - /* on disk bitmap is little endian */ 1055 - if (rw == WRITE) 1056 - bm_cpu_to_lel(b); 807 + num_pages = b->bm_number_of_pages; 1057 808 1058 809 now = jiffies; 1059 - atomic_set(&b->bm_async_io, num_pages); 1060 - __clear_bit(BM_MD_IO_ERROR, &b->bm_flags); 1061 810 1062 811 /* let the layers below us try to merge these bios... */ 1063 - for (i = 0; i < num_pages; i++) 1064 - bm_page_io_async(mdev, b, i, rw); 812 + for (i = 0; i < num_pages; i++) { 813 + /* ignore completely unchanged pages */ 814 + if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) 815 + break; 816 + if (rw & WRITE) { 817 + if (bm_test_page_unchanged(b->bm_pages[i])) { 818 + dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 819 + continue; 820 + } 821 + /* during lazy writeout, 822 + * ignore those pages not marked for lazy writeout. */ 823 + if (lazy_writeout_upper_idx && 824 + !bm_test_page_lazy_writeout(b->bm_pages[i])) { 825 + dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); 826 + continue; 827 + } 828 + } 829 + atomic_inc(&ctx.in_flight); 830 + bm_page_io_async(&ctx, i, rw); 831 + ++count; 832 + cond_resched(); 833 + } 1065 834 1066 - wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); 835 + /* 836 + * We initialize ctx.in_flight to one to make sure bm_async_io_complete 837 + * will not complete() early, and decrement / test it here. If there 838 + * are still some bios in flight, we need to wait for them here. 839 + */ 840 + if (!atomic_dec_and_test(&ctx.in_flight)) 841 + wait_for_completion(&ctx.done); 842 + dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", 843 + rw == WRITE ? "WRITE" : "READ", 844 + count, jiffies - now); 1067 845 1068 - if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { 846 + if (ctx.error) { 1069 847 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); 1070 - drbd_chk_io_error(mdev, 1, TRUE); 1071 - err = -EIO; 848 + drbd_chk_io_error(mdev, 1, true); 849 + err = -EIO; /* ctx.error ? 
*/ 1072 850 } 1073 851 1074 852 now = jiffies; 1075 853 if (rw == WRITE) { 1076 - /* swap back endianness */ 1077 - bm_lel_to_cpu(b); 1078 - /* flush bitmap to stable storage */ 1079 854 drbd_md_flush(mdev); 1080 855 } else /* rw == READ */ { 1081 - /* just read, if necessary adjust endianness */ 1082 - b->bm_set = bm_count_bits_swap_endian(b); 856 + b->bm_set = bm_count_bits(b); 1083 857 dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", 1084 858 jiffies - now); 1085 859 } ··· 1094 874 */ 1095 875 int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) 1096 876 { 1097 - return bm_rw(mdev, READ); 877 + return bm_rw(mdev, READ, 0); 1098 878 } 1099 879 1100 880 /** 1101 881 * drbd_bm_write() - Write the whole bitmap to its on disk location. 1102 882 * @mdev: DRBD device. 883 + * 884 + * Will only write pages that have changed since last IO. 1103 885 */ 1104 886 int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) 1105 887 { 1106 - return bm_rw(mdev, WRITE); 888 + return bm_rw(mdev, WRITE, 0); 1107 889 } 1108 890 1109 891 /** 1110 - * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap 892 + * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1111 893 * @mdev: DRBD device. 1112 - * @enr: Extent number in the resync lru (happens to be sector offset) 1113 - * 1114 - * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered 1115 - * by a single sector write. Therefore enr == sector offset from the 1116 - * start of the bitmap. 894 + * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1117 895 */ 1118 - int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local) 896 + int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) 1119 897 { 1120 - sector_t on_disk_sector = enr + mdev->ldev->md.md_offset 1121 - + mdev->ldev->md.bm_offset; 1122 - int bm_words, num_words, offset; 1123 - int err = 0; 898 + return bm_rw(mdev, WRITE, upper_idx); 899 + } 1124 900 1125 - mutex_lock(&mdev->md_io_mutex); 1126 - bm_words = drbd_bm_words(mdev); 1127 - offset = S2W(enr); /* word offset into bitmap */ 1128 - num_words = min(S2W(1), bm_words - offset); 1129 - if (num_words < S2W(1)) 1130 - memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE); 1131 - drbd_bm_get_lel(mdev, offset, num_words, 1132 - page_address(mdev->md_io_page)); 1133 - if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) { 1134 - int i; 1135 - err = -EIO; 1136 - dev_err(DEV, "IO ERROR writing bitmap sector %lu " 1137 - "(meta-disk sector %llus)\n", 1138 - enr, (unsigned long long)on_disk_sector); 1139 - drbd_chk_io_error(mdev, 1, TRUE); 1140 - for (i = 0; i < AL_EXT_PER_BM_SECT; i++) 1141 - drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i); 901 + 902 + /** 903 + * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap 904 + * @mdev: DRBD device. 905 + * @idx: bitmap page index 906 + * 907 + * We don't want to special case on logical_block_size of the backend device, 908 + * so we submit PAGE_SIZE aligned pieces. 909 + * Note that on "most" systems, PAGE_SIZE is 4k. 910 + * 911 + * In case this becomes an issue on systems with larger PAGE_SIZE, 912 + * we may want to change this again to write 4k aligned 4k pieces. 
913 + */ 914 + int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) 915 + { 916 + struct bm_aio_ctx ctx = { 917 + .mdev = mdev, 918 + .in_flight = ATOMIC_INIT(1), 919 + .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), 920 + .flags = BM_AIO_COPY_PAGES, 921 + }; 922 + 923 + if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { 924 + dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); 925 + return 0; 1142 926 } 927 + 928 + bm_page_io_async(&ctx, idx, WRITE_SYNC); 929 + wait_for_completion(&ctx.done); 930 + 931 + if (ctx.error) 932 + drbd_chk_io_error(mdev, 1, true); 933 + /* that should force detach, so the in memory bitmap will be 934 + * gone in a moment as well. */ 935 + 1143 936 mdev->bm_writ_cnt++; 1144 - mutex_unlock(&mdev->md_io_mutex); 1145 - return err; 937 + return ctx.error; 1146 938 } 1147 939 1148 940 /* NOTE 1149 941 * find_first_bit returns int, we return unsigned long. 1150 - * should not make much difference anyways, but ... 942 + * For this to work on 32bit arch with bitnumbers > (1<<32), 943 + * we'd need to return u64, and get a whole lot of other places 944 + * fixed where we still use unsigned long. 1151 945 * 1152 946 * this returns a bit number, NOT a sector! 1153 947 */ 1154 - #define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1) 1155 948 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, 1156 949 const int find_zero_bit, const enum km_type km) 1157 950 { 1158 951 struct drbd_bitmap *b = mdev->bitmap; 1159 - unsigned long i = -1UL; 1160 952 unsigned long *p_addr; 1161 - unsigned long bit_offset; /* bit offset of the mapped page. */ 953 + unsigned long bit_offset; 954 + unsigned i; 955 + 1162 956 1163 957 if (bm_fo > b->bm_bits) { 1164 958 dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); 959 + bm_fo = DRBD_END_OF_BITMAP; 1165 960 } else { 1166 961 while (bm_fo < b->bm_bits) { 1167 - unsigned long offset; 1168 - bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */ 1169 - offset = bit_offset >> LN2_BPL; /* word offset of the page */ 1170 - p_addr = __bm_map_paddr(b, offset, km); 962 + /* bit offset of the first bit in the page */ 963 + bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; 964 + p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); 1171 965 1172 966 if (find_zero_bit) 1173 - i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); 967 + i = generic_find_next_zero_le_bit(p_addr, 968 + PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1174 969 else 1175 - i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK); 970 + i = generic_find_next_le_bit(p_addr, 971 + PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1176 972 1177 973 __bm_unmap(p_addr, km); 1178 974 if (i < PAGE_SIZE*8) { 1179 - i = bit_offset + i; 1180 - if (i >= b->bm_bits) 975 + bm_fo = bit_offset + i; 976 + if (bm_fo >= b->bm_bits) 1181 977 break; 1182 978 goto found; 1183 979 } 1184 980 bm_fo = bit_offset + PAGE_SIZE*8; 1185 981 } 1186 - i = -1UL; 982 + bm_fo = DRBD_END_OF_BITMAP; 1187 983 } 1188 984 found: 1189 - return i; 985 + return bm_fo; 1190 986 } 1191 987 1192 988 static unsigned long bm_find_next(struct drbd_conf *mdev, 1193 989 unsigned long bm_fo, const int find_zero_bit) 1194 990 { 1195 991 struct drbd_bitmap *b = mdev->bitmap; 1196 - unsigned long i = -1UL; 992 + unsigned long i = DRBD_END_OF_BITMAP; 1197 993 1198 994 ERR_IF(!b) return i; 1199 995 ERR_IF(!b->bm_pages) return i; 1200 996 1201 997 spin_lock_irq(&b->bm_lock); 1202 - if (bm_is_locked(b)) 998 + if (BM_DONT_TEST & b->bm_flags) 1203 999 
bm_print_lock_info(mdev); 1204 1000 1205 1001 i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); ··· 1241 1005 * you must take drbd_bm_lock() first */ 1242 1006 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) 1243 1007 { 1244 - /* WARN_ON(!bm_is_locked(mdev)); */ 1008 + /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1245 1009 return __bm_find_next(mdev, bm_fo, 0, KM_USER1); 1246 1010 } 1247 1011 1248 1012 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) 1249 1013 { 1250 - /* WARN_ON(!bm_is_locked(mdev)); */ 1014 + /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1251 1015 return __bm_find_next(mdev, bm_fo, 1, KM_USER1); 1252 1016 } 1253 1017 ··· 1263 1027 struct drbd_bitmap *b = mdev->bitmap; 1264 1028 unsigned long *p_addr = NULL; 1265 1029 unsigned long bitnr; 1266 - unsigned long last_page_nr = -1UL; 1030 + unsigned int last_page_nr = -1U; 1267 1031 int c = 0; 1032 + int changed_total = 0; 1268 1033 1269 1034 if (e >= b->bm_bits) { 1270 1035 dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", ··· 1273 1036 e = b->bm_bits ? b->bm_bits -1 : 0; 1274 1037 } 1275 1038 for (bitnr = s; bitnr <= e; bitnr++) { 1276 - unsigned long offset = bitnr>>LN2_BPL; 1277 - unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3); 1039 + unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); 1278 1040 if (page_nr != last_page_nr) { 1279 1041 if (p_addr) 1280 1042 __bm_unmap(p_addr, km); 1281 - p_addr = __bm_map_paddr(b, offset, km); 1043 + if (c < 0) 1044 + bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); 1045 + else if (c > 0) 1046 + bm_set_page_need_writeout(b->bm_pages[last_page_nr]); 1047 + changed_total += c; 1048 + c = 0; 1049 + p_addr = __bm_map_pidx(b, page_nr, km); 1282 1050 last_page_nr = page_nr; 1283 1051 } 1284 1052 if (val) 1285 - c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr)); 1053 + c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); 1286 1054 else 1287 - c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr)); 1055 + c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr)); 1288 1056 } 1289 1057 if (p_addr) 1290 1058 __bm_unmap(p_addr, km); 1291 - b->bm_set += c; 1292 - return c; 1059 + if (c < 0) 1060 + bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); 1061 + else if (c > 0) 1062 + bm_set_page_need_writeout(b->bm_pages[last_page_nr]); 1063 + changed_total += c; 1064 + b->bm_set += changed_total; 1065 + return changed_total; 1293 1066 } 1294 1067 1295 1068 /* returns number of bits actually changed. ··· 1317 1070 ERR_IF(!b->bm_pages) return 0; 1318 1071 1319 1072 spin_lock_irqsave(&b->bm_lock, flags); 1320 - if (bm_is_locked(b)) 1073 + if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) 1321 1074 bm_print_lock_info(mdev); 1322 1075 1323 1076 c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1); ··· 1434 1187 ERR_IF(!b->bm_pages) return 0; 1435 1188 1436 1189 spin_lock_irqsave(&b->bm_lock, flags); 1437 - if (bm_is_locked(b)) 1190 + if (BM_DONT_TEST & b->bm_flags) 1438 1191 bm_print_lock_info(mdev); 1439 1192 if (bitnr < b->bm_bits) { 1440 - unsigned long offset = bitnr>>LN2_BPL; 1441 - p_addr = bm_map_paddr(b, offset); 1442 - i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0; 1193 + p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); 1194 + i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 
1 : 0; 1443 1195 bm_unmap(p_addr); 1444 1196 } else if (bitnr == b->bm_bits) { 1445 1197 i = -1; ··· 1456 1210 { 1457 1211 unsigned long flags; 1458 1212 struct drbd_bitmap *b = mdev->bitmap; 1459 - unsigned long *p_addr = NULL, page_nr = -1; 1213 + unsigned long *p_addr = NULL; 1460 1214 unsigned long bitnr; 1215 + unsigned int page_nr = -1U; 1461 1216 int c = 0; 1462 - size_t w; 1463 1217 1464 1218 /* If this is called without a bitmap, that is a bug. But just to be 1465 1219 * robust in case we screwed up elsewhere, in that case pretend there ··· 1469 1223 ERR_IF(!b->bm_pages) return 1; 1470 1224 1471 1225 spin_lock_irqsave(&b->bm_lock, flags); 1472 - if (bm_is_locked(b)) 1226 + if (BM_DONT_TEST & b->bm_flags) 1473 1227 bm_print_lock_info(mdev); 1474 1228 for (bitnr = s; bitnr <= e; bitnr++) { 1475 - w = bitnr >> LN2_BPL; 1476 - if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) { 1477 - page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3); 1229 + unsigned int idx = bm_bit_to_page_idx(b, bitnr); 1230 + if (page_nr != idx) { 1231 + page_nr = idx; 1478 1232 if (p_addr) 1479 1233 bm_unmap(p_addr); 1480 - p_addr = bm_map_paddr(b, w); 1234 + p_addr = bm_map_pidx(b, idx); 1481 1235 } 1482 1236 ERR_IF (bitnr >= b->bm_bits) { 1483 1237 dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); 1484 1238 } else { 1485 - c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); 1239 + c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); 1486 1240 } 1487 1241 } 1488 1242 if (p_addr) ··· 1517 1271 ERR_IF(!b->bm_pages) return 0; 1518 1272 1519 1273 spin_lock_irqsave(&b->bm_lock, flags); 1520 - if (bm_is_locked(b)) 1274 + if (BM_DONT_TEST & b->bm_flags) 1521 1275 bm_print_lock_info(mdev); 1522 1276 1523 1277 s = S2W(enr); ··· 1525 1279 count = 0; 1526 1280 if (s < b->bm_words) { 1527 1281 int n = e-s; 1528 - p_addr = bm_map_paddr(b, s); 1282 + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); 1529 1283 bm = p_addr + MLPP(s); 1530 1284 while (n--) 1531 1285 count += hweight_long(*bm++); ··· 1537 1291 return count; 1538 1292 } 1539 1293 1540 - /* set all bits covered by the AL-extent al_enr */ 1294 + /* Set all bits covered by the AL-extent al_enr. 1295 + * Returns number of bits changed. */ 1541 1296 unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr) 1542 1297 { 1543 1298 struct drbd_bitmap *b = mdev->bitmap; 1544 1299 unsigned long *p_addr, *bm; 1545 1300 unsigned long weight; 1546 - int count, s, e, i, do_now; 1301 + unsigned long s, e; 1302 + int count, i, do_now; 1547 1303 ERR_IF(!b) return 0; 1548 1304 ERR_IF(!b->bm_pages) return 0; 1549 1305 1550 1306 spin_lock_irq(&b->bm_lock); 1551 - if (bm_is_locked(b)) 1307 + if (BM_DONT_SET & b->bm_flags) 1552 1308 bm_print_lock_info(mdev); 1553 1309 weight = b->bm_set; 1554 1310 ··· 1562 1314 count = 0; 1563 1315 if (s < b->bm_words) { 1564 1316 i = do_now = e-s; 1565 - p_addr = bm_map_paddr(b, s); 1317 + p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); 1566 1318 bm = p_addr + MLPP(s); 1567 1319 while (i--) { 1568 1320 count += hweight_long(*bm); ··· 1574 1326 if (e == b->bm_words) 1575 1327 b->bm_set -= bm_clear_surplus(b); 1576 1328 } else { 1577 - dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s); 1329 + dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s); 1578 1330 } 1579 1331 weight = b->bm_set - weight; 1580 1332 spin_unlock_irq(&b->bm_lock);
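The drbd_bitmap.c hunks above replace the old whole-bitmap endian swap and the global bm_async_io counter with a per-call bm_aio_ctx: in_flight starts at 1 so a completion can never signal "done" before submission has finished, every submitted page increments it, and the submitter drops its own reference at the end and waits only if I/O is still outstanding. The following stand-alone userspace sketch is not DRBD code (the names and the fake I/O thread are illustrative assumptions); it merely models that reference-counting pattern with C11 atomics and a POSIX semaphore, which may help when reviewing the atomic_dec_and_test / wait_for_completion pairing in bm_rw().

/* Userspace model of the bm_rw() in-flight accounting; illustrative only. */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct aio_ctx {
	atomic_int in_flight;	/* starts at 1: the submitter's own reference */
	sem_t done;		/* posted by whoever drops the last reference */
	int error;
};

static void page_done(struct aio_ctx *ctx, int err)
{
	if (err)
		ctx->error = err;
	if (atomic_fetch_sub(&ctx->in_flight, 1) == 1)
		sem_post(&ctx->done);	/* we were last, wake the submitter */
}

static void *fake_io(void *arg)
{
	page_done(arg, 0);		/* pretend the page write completed OK */
	return NULL;
}

int main(void)
{
	struct aio_ctx ctx = { .error = 0 };
	pthread_t tid[4];
	int i;

	atomic_init(&ctx.in_flight, 1);
	sem_init(&ctx.done, 0, 0);

	for (i = 0; i < 4; i++) {	/* "submit" four pages */
		atomic_fetch_add(&ctx.in_flight, 1);
		pthread_create(&tid[i], NULL, fake_io, &ctx);
	}

	/* drop the submitter's reference; wait only if I/O is still pending */
	if (atomic_fetch_sub(&ctx.in_flight, 1) != 1)
		sem_wait(&ctx.done);

	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	printf("all pages done, error=%d\n", ctx.error);
	sem_destroy(&ctx.done);
	return 0;
}

Build with "cc -pthread" on a POSIX system; the same start-at-one trick is what keeps bm_async_io_complete() from calling complete() while bm_rw() is still submitting pages.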
+164 -106
drivers/block/drbd/drbd_int.h
··· 72 72 extern char usermode_helper[]; 73 73 74 74 75 - #ifndef TRUE 76 - #define TRUE 1 77 - #endif 78 - #ifndef FALSE 79 - #define FALSE 0 80 - #endif 81 - 82 75 /* I don't remember why XCPU ... 83 76 * This is used to wake the asender, 84 77 * and to interrupt sending the sending task ··· 97 104 #define ID_SYNCER (-1ULL) 98 105 #define ID_VACANT 0 99 106 #define is_syncer_block_id(id) ((id) == ID_SYNCER) 107 + #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL) 100 108 101 109 struct drbd_conf; 102 110 ··· 131 137 DRBD_FAULT_MAX, 132 138 }; 133 139 134 - #ifdef CONFIG_DRBD_FAULT_INJECTION 135 140 extern unsigned int 136 141 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type); 142 + 137 143 static inline int 138 144 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) { 145 + #ifdef CONFIG_DRBD_FAULT_INJECTION 139 146 return fault_rate && 140 147 (enable_faults & (1<<type)) && 141 148 _drbd_insert_fault(mdev, type); 142 - } 143 - #define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t))) 144 - 145 149 #else 146 - #define FAULT_ACTIVE(_m, _t) (0) 150 + return 0; 147 151 #endif 152 + } 148 153 149 154 /* integer division, round _UP_ to the next integer */ 150 155 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0)) ··· 205 212 /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */ 206 213 /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */ 207 214 P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */ 215 + P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */ 216 + P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */ 208 217 209 - P_MAX_CMD = 0x28, 218 + P_MAX_CMD = 0x2A, 210 219 P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ 211 220 P_MAX_OPT_CMD = 0x101, 212 221 ··· 264 269 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", 265 270 [P_COMPRESSED_BITMAP] = "CBitmap", 266 271 [P_DELAY_PROBE] = "DelayProbe", 272 + [P_OUT_OF_SYNC] = "OutOfSync", 267 273 [P_MAX_CMD] = NULL, 268 274 }; 269 275 ··· 508 512 u64 d_size; /* size of disk */ 509 513 u64 u_size; /* user requested size */ 510 514 u64 c_size; /* current exported size */ 511 - u32 max_segment_size; /* Maximal size of a BIO */ 515 + u32 max_bio_size; /* Maximal size of a BIO */ 512 516 u16 queue_order_type; /* not yet implemented in DRBD*/ 513 517 u16 dds_flags; /* use enum dds_flags here. */ 514 518 } __packed; ··· 544 548 u64 block_id; 545 549 u32 seq_num; 546 550 u32 pad; 551 + } __packed; 552 + 553 + struct p_block_desc { 554 + struct p_header80 head; 555 + u64 sector; 556 + u32 blksize; 557 + u32 pad; /* to multiple of 8 Byte */ 547 558 } __packed; 548 559 549 560 /* Valid values for the encoding field. ··· 650 647 struct p_block_req block_req; 651 648 struct p_delay_probe93 delay_probe93; 652 649 struct p_rs_uuid rs_uuid; 650 + struct p_block_desc block_desc; 653 651 } __packed; 654 652 655 653 /**********************************************************************/ ··· 681 677 return thi->t_state; 682 678 } 683 679 684 - 685 - /* 686 - * Having this as the first member of a struct provides sort of "inheritance". 687 - * "derived" structs can be "drbd_queue_work()"ed. 688 - * The callback should know and cast back to the descendant struct. 689 - * drbd_request and drbd_epoch_entry are descendants of drbd_work. 
690 - */ 691 680 struct drbd_work; 692 681 typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel); 693 682 struct drbd_work { ··· 708 711 * the current epoch, and we therefore have to close it, 709 712 * starting a new epoch... 710 713 */ 711 - 712 - /* up to here, the struct layout is identical to drbd_epoch_entry; 713 - * we might be able to use that to our advantage... */ 714 714 715 715 struct list_head tl_requests; /* ring list in the transfer log */ 716 716 struct bio *master_bio; /* master bio pointer */ ··· 825 831 CRASHED_PRIMARY, /* This node was a crashed primary. 826 832 * Gets cleared when the state.conn 827 833 * goes into C_CONNECTED state. */ 828 - WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ 834 + NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ 829 835 CONSIDER_RESYNC, 830 836 831 837 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ ··· 850 856 GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ 851 857 NEW_CUR_UUID, /* Create new current UUID when thawing IO */ 852 858 AL_SUSPENDED, /* Activity logging is currently suspended. */ 859 + AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ 853 860 }; 854 861 855 862 struct drbd_bitmap; /* opaque for drbd_conf */ 863 + 864 + /* definition of bits in bm_flags to be used in drbd_bm_lock 865 + * and drbd_bitmap_io and friends. */ 866 + enum bm_flag { 867 + /* do we need to kfree, or vfree bm_pages? */ 868 + BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */ 869 + 870 + /* currently locked for bulk operation */ 871 + BM_LOCKED_MASK = 0x7, 872 + 873 + /* in detail, that is: */ 874 + BM_DONT_CLEAR = 0x1, 875 + BM_DONT_SET = 0x2, 876 + BM_DONT_TEST = 0x4, 877 + 878 + /* (test bit, count bit) allowed (common case) */ 879 + BM_LOCKED_TEST_ALLOWED = 0x3, 880 + 881 + /* testing bits, as well as setting new bits allowed, but clearing bits 882 + * would be unexpected. Used during bitmap receive. Setting new bits 883 + * requires sending of "out-of-sync" information, though. 
*/ 884 + BM_LOCKED_SET_ALLOWED = 0x1, 885 + 886 + /* clear is not expected while bitmap is locked for bulk operation */ 887 + }; 888 + 856 889 857 890 /* TODO sort members for performance 858 891 * MAYBE group them further */ ··· 946 925 struct bm_io_work { 947 926 struct drbd_work w; 948 927 char *why; 928 + enum bm_flag flags; 949 929 int (*io_fn)(struct drbd_conf *mdev); 950 930 void (*done)(struct drbd_conf *mdev, int rv); 951 931 }; ··· 985 963 struct drbd_work resync_work, 986 964 unplug_work, 987 965 go_diskless, 988 - md_sync_work; 966 + md_sync_work, 967 + start_resync_work; 989 968 struct timer_list resync_timer; 990 969 struct timer_list md_sync_timer; 970 + struct timer_list start_resync_timer; 971 + struct timer_list request_timer; 991 972 #ifdef DRBD_DEBUG_MD_SYNC 992 973 struct { 993 974 unsigned int line; ··· 1025 1000 struct hlist_head *tl_hash; 1026 1001 unsigned int tl_hash_s; 1027 1002 1028 - /* blocks to sync in this run [unit BM_BLOCK_SIZE] */ 1003 + /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ 1029 1004 unsigned long rs_total; 1030 - /* number of sync IOs that failed in this run */ 1005 + /* number of resync blocks that failed in this run */ 1031 1006 unsigned long rs_failed; 1032 1007 /* Syncer's start time [unit jiffies] */ 1033 1008 unsigned long rs_start; ··· 1127 1102 struct fifo_buffer rs_plan_s; /* correction values of resync planer */ 1128 1103 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 1129 1104 int rs_planed; /* resync sectors already planed */ 1105 + atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1130 1106 }; 1131 1107 1132 1108 static inline struct drbd_conf *minor_to_mdev(unsigned int minor) ··· 1189 1163 }; 1190 1164 1191 1165 extern void drbd_init_set_defaults(struct drbd_conf *mdev); 1192 - extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, 1193 - union drbd_state mask, union drbd_state val); 1166 + extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev, 1167 + enum chg_state_flags f, 1168 + union drbd_state mask, 1169 + union drbd_state val); 1194 1170 extern void drbd_force_state(struct drbd_conf *, union drbd_state, 1195 1171 union drbd_state); 1196 - extern int _drbd_request_state(struct drbd_conf *, union drbd_state, 1197 - union drbd_state, enum chg_state_flags); 1198 - extern int __drbd_set_state(struct drbd_conf *, union drbd_state, 1199 - enum chg_state_flags, struct completion *done); 1172 + extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *, 1173 + union drbd_state, 1174 + union drbd_state, 1175 + enum chg_state_flags); 1176 + extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state, 1177 + enum chg_state_flags, 1178 + struct completion *done); 1200 1179 extern void print_st_err(struct drbd_conf *, union drbd_state, 1201 1180 union drbd_state, int); 1202 1181 extern int drbd_thread_start(struct drbd_thread *thi); ··· 1226 1195 extern int drbd_send_protocol(struct drbd_conf *mdev); 1227 1196 extern int drbd_send_uuids(struct drbd_conf *mdev); 1228 1197 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev); 1229 - extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val); 1198 + extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev); 1230 1199 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags); 1231 1200 extern int _drbd_send_state(struct drbd_conf *mdev); 1232 1201 extern int drbd_send_state(struct drbd_conf *mdev); ··· 1251 
1220 struct p_data *dp, int data_size); 1252 1221 extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, 1253 1222 sector_t sector, int blksize, u64 block_id); 1223 + extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req); 1254 1224 extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, 1255 1225 struct drbd_epoch_entry *e); 1256 1226 extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req); 1257 - extern int _drbd_send_barrier(struct drbd_conf *mdev, 1258 - struct drbd_tl_epoch *barrier); 1259 1227 extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd, 1260 1228 sector_t sector, int size, u64 block_id); 1261 1229 extern int drbd_send_drequest_csum(struct drbd_conf *mdev, ··· 1265 1235 1266 1236 extern int drbd_send_bitmap(struct drbd_conf *mdev); 1267 1237 extern int _drbd_send_bitmap(struct drbd_conf *mdev); 1268 - extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode); 1238 + extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode); 1269 1239 extern void drbd_free_bc(struct drbd_backing_dev *ldev); 1270 1240 extern void drbd_mdev_cleanup(struct drbd_conf *mdev); 1241 + void drbd_print_uuids(struct drbd_conf *mdev, const char *text); 1271 1242 1272 - /* drbd_meta-data.c (still in drbd_main.c) */ 1273 1243 extern void drbd_md_sync(struct drbd_conf *mdev); 1274 1244 extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev); 1275 - /* maybe define them below as inline? */ 1276 1245 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1277 1246 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local); 1278 1247 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local); ··· 1290 1261 extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, 1291 1262 int (*io_fn)(struct drbd_conf *), 1292 1263 void (*done)(struct drbd_conf *, int), 1293 - char *why); 1264 + char *why, enum bm_flag flags); 1265 + extern int drbd_bitmap_io(struct drbd_conf *mdev, 1266 + int (*io_fn)(struct drbd_conf *), 1267 + char *why, enum bm_flag flags); 1294 1268 extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); 1295 1269 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 1296 - extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); 1297 1270 extern void drbd_go_diskless(struct drbd_conf *mdev); 1298 1271 extern void drbd_ldev_destroy(struct drbd_conf *mdev); 1299 1272 ··· 1344 1313 1345 1314 #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ 1346 1315 #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ 1316 + #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ 1347 1317 1348 1318 /* drbd_bitmap.c */ 1349 1319 /* ··· 1422 1390 * you should use 64bit OS for that much storage, anyways. */ 1423 1391 #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) 1424 1392 #else 1425 - #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32) 1393 + /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ 1394 + #define DRBD_MAX_SECTORS_FLEX (1UL << 51) 1395 + /* corresponds to (1UL << 38) bits right now. */ 1426 1396 #endif 1427 1397 #endif 1428 1398 ··· 1432 1398 * With a value of 8 all IO in one 128K block make it to the same slot of the 1433 1399 * hash table. 
*/ 1434 1400 #define HT_SHIFT 8 1435 - #define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) 1401 + #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) 1436 1402 1437 1403 #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ 1438 1404 ··· 1444 1410 extern void drbd_bm_cleanup(struct drbd_conf *mdev); 1445 1411 extern void drbd_bm_set_all(struct drbd_conf *mdev); 1446 1412 extern void drbd_bm_clear_all(struct drbd_conf *mdev); 1413 + /* set/clear/test only a few bits at a time */ 1447 1414 extern int drbd_bm_set_bits( 1448 1415 struct drbd_conf *mdev, unsigned long s, unsigned long e); 1449 1416 extern int drbd_bm_clear_bits( 1450 1417 struct drbd_conf *mdev, unsigned long s, unsigned long e); 1451 - /* bm_set_bits variant for use while holding drbd_bm_lock */ 1418 + extern int drbd_bm_count_bits( 1419 + struct drbd_conf *mdev, const unsigned long s, const unsigned long e); 1420 + /* bm_set_bits variant for use while holding drbd_bm_lock, 1421 + * may process the whole bitmap in one go */ 1452 1422 extern void _drbd_bm_set_bits(struct drbd_conf *mdev, 1453 1423 const unsigned long s, const unsigned long e); 1454 1424 extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); 1455 1425 extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); 1456 - extern int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local); 1426 + extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1457 1427 extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1458 1428 extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1459 1429 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, ··· 1465 1427 extern size_t drbd_bm_words(struct drbd_conf *mdev); 1466 1428 extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); 1467 1429 extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); 1430 + 1431 + #define DRBD_END_OF_BITMAP (~(unsigned long)0) 1468 1432 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); 1469 1433 /* bm_find_next variants for use while you hold drbd_bm_lock() */ 1470 1434 extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); ··· 1477 1437 /* for receive_bitmap */ 1478 1438 extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, 1479 1439 size_t number, unsigned long *buffer); 1480 - /* for _drbd_send_bitmap and drbd_bm_write_sect */ 1440 + /* for _drbd_send_bitmap */ 1481 1441 extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, 1482 1442 size_t number, unsigned long *buffer); 1483 1443 1484 - extern void drbd_bm_lock(struct drbd_conf *mdev, char *why); 1444 + extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); 1485 1445 extern void drbd_bm_unlock(struct drbd_conf *mdev); 1486 - 1487 - extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e); 1488 1446 /* drbd_main.c */ 1489 1447 1490 1448 extern struct kmem_cache *drbd_request_cache; ··· 1505 1467 extern int proc_details; 1506 1468 1507 1469 /* drbd_req */ 1508 - extern int drbd_make_request_26(struct request_queue *q, struct bio *bio); 1470 + extern int drbd_make_request(struct request_queue *q, struct bio *bio); 1509 1471 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); 1510 1472 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); 1511 
1473 extern int is_valid_ar_handle(struct drbd_request *, sector_t); ··· 1520 1482 extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1521 1483 extern void resync_after_online_grow(struct drbd_conf *); 1522 1484 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1523 - extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, 1524 - int force); 1485 + extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1486 + enum drbd_role new_role, 1487 + int force); 1525 1488 extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); 1526 1489 extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); 1527 1490 extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); ··· 1538 1499 extern int drbd_md_sync_page_io(struct drbd_conf *mdev, 1539 1500 struct drbd_backing_dev *bdev, sector_t sector, int rw); 1540 1501 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); 1502 + extern void drbd_rs_controller_reset(struct drbd_conf *mdev); 1541 1503 1542 1504 static inline void ov_oos_print(struct drbd_conf *mdev) 1543 1505 { ··· 1562 1522 extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); 1563 1523 extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); 1564 1524 extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); 1565 - extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); 1525 + extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int); 1566 1526 extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); 1567 1527 extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); 1568 - extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); 1569 1528 extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); 1570 1529 extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); 1571 1530 extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); 1572 1531 extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); 1573 1532 extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); 1574 1533 extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); 1534 + extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int); 1535 + extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int); 1575 1536 1576 1537 extern void resync_timer_fn(unsigned long data); 1538 + extern void start_resync_timer_fn(unsigned long data); 1577 1539 1578 1540 /* drbd_receiver.c */ 1579 - extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); 1541 + extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector); 1580 1542 extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, 1581 1543 const unsigned rw, const int fault_type); 1582 1544 extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); ··· 1661 1619 extern void drbd_rs_failed_io(struct drbd_conf *mdev, 1662 1620 sector_t sector, int size); 1663 1621 extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); 1622 + extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); 1664 1623 extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, 1665 1624 int size, const char *file, const unsigned int line); 1666 1625 #define drbd_set_in_sync(mdev, sector, size) \ 1667 1626 
__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) 1668 - extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, 1627 + extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, 1669 1628 int size, const char *file, const unsigned int line); 1670 1629 #define drbd_set_out_of_sync(mdev, sector, size) \ 1671 1630 __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) 1672 1631 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); 1673 - extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev); 1674 1632 extern void drbd_al_shrink(struct drbd_conf *mdev); 1675 1633 1676 1634 ··· 1789 1747 wake_up(&mdev->misc_wait); 1790 1748 } 1791 1749 1792 - static inline int _drbd_set_state(struct drbd_conf *mdev, 1793 - union drbd_state ns, enum chg_state_flags flags, 1794 - struct completion *done) 1750 + static inline enum drbd_state_rv 1751 + _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, 1752 + enum chg_state_flags flags, struct completion *done) 1795 1753 { 1796 - int rv; 1754 + enum drbd_state_rv rv; 1797 1755 1798 1756 read_lock(&global_state_lock); 1799 1757 rv = __drbd_set_state(mdev, ns, flags, done); ··· 2024 1982 2025 1983 static inline void drbd_thread_stop(struct drbd_thread *thi) 2026 1984 { 2027 - _drbd_thread_stop(thi, FALSE, TRUE); 1985 + _drbd_thread_stop(thi, false, true); 2028 1986 } 2029 1987 2030 1988 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) 2031 1989 { 2032 - _drbd_thread_stop(thi, FALSE, FALSE); 1990 + _drbd_thread_stop(thi, false, false); 2033 1991 } 2034 1992 2035 1993 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) 2036 1994 { 2037 - _drbd_thread_stop(thi, TRUE, FALSE); 1995 + _drbd_thread_stop(thi, true, false); 2038 1996 } 2039 1997 2040 1998 /* counts how many answer packets packets we expect from our peer, ··· 2188 2146 static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, 2189 2147 unsigned long *bits_left, unsigned int *per_mil_done) 2190 2148 { 2191 - /* 2192 - * this is to break it at compile time when we change that 2193 - * (we may feel 4TB maximum storage per drbd is not enough) 2194 - */ 2149 + /* this is to break it at compile time when we change that, in case we 2150 + * want to support more than (1<<32) bits on a 32bit arch. */ 2195 2151 typecheck(unsigned long, mdev->rs_total); 2196 2152 2197 2153 /* note: both rs_total and rs_left are in bits, i.e. in 2198 2154 * units of BM_BLOCK_SIZE. 2199 2155 * for the percentage, we don't care. */ 2200 2156 2201 - *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 2157 + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 2158 + *bits_left = mdev->ov_left; 2159 + else 2160 + *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 2202 2161 /* >> 10 to prevent overflow, 2203 2162 * +1 to prevent division by zero */ 2204 2163 if (*bits_left > mdev->rs_total) { ··· 2214 2171 *bits_left, mdev->rs_total, mdev->rs_failed); 2215 2172 *per_mil_done = 0; 2216 2173 } else { 2217 - /* make sure the calculation happens in long context */ 2218 - unsigned long tmp = 1000UL - 2219 - (*bits_left >> 10)*1000UL 2220 - / ((mdev->rs_total >> 10) + 1UL); 2174 + /* Make sure the division happens in long context. 2175 + * We allow up to one petabyte storage right now, 2176 + * at a granularity of 4k per bit that is 2**38 bits. 
2177 + * After shift right and multiplication by 1000, 2178 + * this should still fit easily into a 32bit long, 2179 + * so we don't need a 64bit division on 32bit arch. 2180 + * Note: currently we don't support such large bitmaps on 32bit 2181 + * arch anyways, but no harm done to be prepared for it here. 2182 + */ 2183 + unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10; 2184 + unsigned long left = *bits_left >> shift; 2185 + unsigned long total = 1UL + (mdev->rs_total >> shift); 2186 + unsigned long tmp = 1000UL - left * 1000UL/total; 2221 2187 *per_mil_done = tmp; 2222 2188 } 2223 2189 } ··· 2245 2193 return mxb; 2246 2194 } 2247 2195 2248 - static inline int drbd_state_is_stable(union drbd_state s) 2196 + static inline int drbd_state_is_stable(struct drbd_conf *mdev) 2249 2197 { 2198 + union drbd_state s = mdev->state; 2250 2199 2251 2200 /* DO NOT add a default clause, we want the compiler to warn us 2252 2201 * for any newly introduced state we may have forgotten to add here */ ··· 2264 2211 case C_VERIFY_T: 2265 2212 case C_PAUSED_SYNC_S: 2266 2213 case C_PAUSED_SYNC_T: 2267 - /* maybe stable, look at the disk state */ 2268 - break; 2269 - 2270 - /* no new io accepted during tansitional states 2271 - * like handshake or teardown */ 2214 + case C_AHEAD: 2215 + case C_BEHIND: 2216 + /* transitional states, IO allowed */ 2272 2217 case C_DISCONNECTING: 2273 2218 case C_UNCONNECTED: 2274 2219 case C_TIMEOUT: ··· 2277 2226 case C_WF_REPORT_PARAMS: 2278 2227 case C_STARTING_SYNC_S: 2279 2228 case C_STARTING_SYNC_T: 2229 + break; 2230 + 2231 + /* Allow IO in BM exchange states with new protocols */ 2280 2232 case C_WF_BITMAP_S: 2233 + if (mdev->agreed_pro_version < 96) 2234 + return 0; 2235 + break; 2236 + 2237 + /* no new io accepted in these states */ 2281 2238 case C_WF_BITMAP_T: 2282 2239 case C_WF_SYNC_UUID: 2283 2240 case C_MASK: ··· 2320 2261 return s.susp || s.susp_nod || s.susp_fen; 2321 2262 } 2322 2263 2323 - static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) 2264 + static inline bool may_inc_ap_bio(struct drbd_conf *mdev) 2324 2265 { 2325 2266 int mxb = drbd_get_max_buffers(mdev); 2326 2267 2327 2268 if (is_susp(mdev->state)) 2328 - return 0; 2269 + return false; 2329 2270 if (test_bit(SUSPEND_IO, &mdev->flags)) 2330 - return 0; 2271 + return false; 2331 2272 2332 2273 /* to avoid potential deadlock or bitmap corruption, 2333 2274 * in various places, we only allow new application io 2334 2275 * to start during "stable" states. */ 2335 2276 2336 2277 /* no new io accepted when attaching or detaching the disk */ 2337 - if (!drbd_state_is_stable(mdev->state)) 2338 - return 0; 2278 + if (!drbd_state_is_stable(mdev)) 2279 + return false; 2339 2280 2340 2281 /* since some older kernels don't have atomic_add_unless, 2341 2282 * and we are within the spinlock anyways, we have this workaround. 
*/ 2342 2283 if (atomic_read(&mdev->ap_bio_cnt) > mxb) 2343 - return 0; 2284 + return false; 2344 2285 if (test_bit(BITMAP_IO, &mdev->flags)) 2345 - return 0; 2346 - return 1; 2286 + return false; 2287 + return true; 2347 2288 } 2348 2289 2349 - /* I'd like to use wait_event_lock_irq, 2350 - * but I'm not sure when it got introduced, 2351 - * and not sure when it has 3 or 4 arguments */ 2290 + static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count) 2291 + { 2292 + bool rv = false; 2293 + 2294 + spin_lock_irq(&mdev->req_lock); 2295 + rv = may_inc_ap_bio(mdev); 2296 + if (rv) 2297 + atomic_add(count, &mdev->ap_bio_cnt); 2298 + spin_unlock_irq(&mdev->req_lock); 2299 + 2300 + return rv; 2301 + } 2302 + 2352 2303 static inline void inc_ap_bio(struct drbd_conf *mdev, int count) 2353 2304 { 2354 - /* compare with after_state_ch, 2355 - * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ 2356 - DEFINE_WAIT(wait); 2357 - 2358 2305 /* we wait here 2359 2306 * as long as the device is suspended 2360 2307 * until the bitmap is no longer on the fly during connection ··· 2369 2304 * to avoid races with the reconnect code, 2370 2305 * we need to atomic_inc within the spinlock. */ 2371 2306 2372 - spin_lock_irq(&mdev->req_lock); 2373 - while (!__inc_ap_bio_cond(mdev)) { 2374 - prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); 2375 - spin_unlock_irq(&mdev->req_lock); 2376 - schedule(); 2377 - finish_wait(&mdev->misc_wait, &wait); 2378 - spin_lock_irq(&mdev->req_lock); 2379 - } 2380 - atomic_add(count, &mdev->ap_bio_cnt); 2381 - spin_unlock_irq(&mdev->req_lock); 2307 + wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count)); 2382 2308 } 2383 2309 2384 2310 static inline void dec_ap_bio(struct drbd_conf *mdev) ··· 2389 2333 } 2390 2334 } 2391 2335 2392 - static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) 2336 + static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) 2393 2337 { 2338 + int changed = mdev->ed_uuid != val; 2394 2339 mdev->ed_uuid = val; 2340 + return changed; 2395 2341 } 2396 2342 2397 2343 static inline int seq_cmp(u32 a, u32 b)
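The drbd_get_syncer_progress() change above keeps the per-mil progress computation inside unsigned long by shifting both operands first (by 10, or by 16 once rs_total would exceed 2^32 bits), so no 64-bit division is needed on a 32-bit build. A minimal stand-alone check of that arithmetic, with input values chosen arbitrarily for illustration, looks like this:

/* Stand-alone check of the shifted per-mil progress arithmetic shown above. */
#include <stdio.h>

static unsigned int per_mil_done(unsigned long bits_left, unsigned long rs_total)
{
	/* shift by 16 for huge bitmaps, otherwise by 10, as in the patch */
	unsigned int shift = rs_total >= (1ULL << 32) ? 16 : 10;
	unsigned long left  = bits_left >> shift;
	unsigned long total = 1UL + (rs_total >> shift);

	return 1000UL - left * 1000UL / total;
}

int main(void)
{
	/* hypothetical example: 2^28 bitmap bits, one quarter still out of sync */
	unsigned long rs_total  = 1UL << 28;
	unsigned long bits_left = rs_total / 4;
	unsigned int pm = per_mil_done(bits_left, rs_total);

	printf("resync %u.%u%% done\n", pm / 10, pm % 10);	/* roughly 75% */
	return 0;
}

The +1 on the shifted total avoids a division by zero for tiny bitmaps, and the pre-shift keeps left * 1000 within 32 bits even for the 2^38-bit case the comment in the hunk mentions, which is exactly the constraint it spells out.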
+440 -233
drivers/block/drbd/drbd_main.c
··· 85 85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); 86 86 MODULE_VERSION(REL_VERSION); 87 87 MODULE_LICENSE("GPL"); 88 - MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)"); 88 + MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (" 89 + __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")"); 89 90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); 90 91 91 92 #include <linux/moduleparam.h> ··· 116 115 #endif 117 116 118 117 /* module parameter, defined */ 119 - unsigned int minor_count = 32; 118 + unsigned int minor_count = DRBD_MINOR_COUNT_DEF; 120 119 int disable_sendpage; 121 120 int allow_oos; 122 121 unsigned int cn_idx = CN_IDX_DRBD; ··· 336 335 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 337 336 } 338 337 338 + 339 339 /** 340 340 * _tl_restart() - Walks the transfer log, and applies an action to all requests 341 341 * @mdev: DRBD device. ··· 458 456 } 459 457 460 458 /** 461 - * cl_wide_st_chg() - TRUE if the state change is a cluster wide one 459 + * cl_wide_st_chg() - true if the state change is a cluster wide one 462 460 * @mdev: DRBD device. 463 461 * @os: old (current) state. 464 462 * @ns: new (wanted) state. ··· 475 473 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); 476 474 } 477 475 478 - int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, 479 - union drbd_state mask, union drbd_state val) 476 + enum drbd_state_rv 477 + drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, 478 + union drbd_state mask, union drbd_state val) 480 479 { 481 480 unsigned long flags; 482 481 union drbd_state os, ns; 483 - int rv; 482 + enum drbd_state_rv rv; 484 483 485 484 spin_lock_irqsave(&mdev->req_lock, flags); 486 485 os = mdev->state; ··· 505 502 drbd_change_state(mdev, CS_HARD, mask, val); 506 503 } 507 504 508 - static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); 509 - static int is_valid_state_transition(struct drbd_conf *, 510 - union drbd_state, union drbd_state); 505 + static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state); 506 + static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *, 507 + union drbd_state, 508 + union drbd_state); 511 509 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, 512 510 union drbd_state ns, const char **warn_sync_abort); 513 511 int drbd_send_state_req(struct drbd_conf *, 514 512 union drbd_state, union drbd_state); 515 513 516 - static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, 517 - union drbd_state mask, union drbd_state val) 514 + static enum drbd_state_rv 515 + _req_st_cond(struct drbd_conf *mdev, union drbd_state mask, 516 + union drbd_state val) 518 517 { 519 518 union drbd_state os, ns; 520 519 unsigned long flags; 521 - int rv; 520 + enum drbd_state_rv rv; 522 521 523 522 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) 524 523 return SS_CW_SUCCESS; ··· 541 536 if (rv == SS_SUCCESS) { 542 537 rv = is_valid_state_transition(mdev, ns, os); 543 538 if (rv == SS_SUCCESS) 544 - rv = 0; /* cont waiting, otherwise fail. */ 539 + rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ 545 540 } 546 541 } 547 542 spin_unlock_irqrestore(&mdev->req_lock, flags); ··· 559 554 * Should not be called directly, use drbd_request_state() or 560 555 * _drbd_request_state(). 
561 556 */ 562 - static int drbd_req_state(struct drbd_conf *mdev, 563 - union drbd_state mask, union drbd_state val, 564 - enum chg_state_flags f) 557 + static enum drbd_state_rv 558 + drbd_req_state(struct drbd_conf *mdev, union drbd_state mask, 559 + union drbd_state val, enum chg_state_flags f) 565 560 { 566 561 struct completion done; 567 562 unsigned long flags; 568 563 union drbd_state os, ns; 569 - int rv; 564 + enum drbd_state_rv rv; 570 565 571 566 init_completion(&done); 572 567 ··· 641 636 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE 642 637 * flag, or when logging of failed state change requests is not desired. 643 638 */ 644 - int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, 645 - union drbd_state val, enum chg_state_flags f) 639 + enum drbd_state_rv 640 + _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, 641 + union drbd_state val, enum chg_state_flags f) 646 642 { 647 - int rv; 643 + enum drbd_state_rv rv; 648 644 649 645 wait_event(mdev->state_wait, 650 646 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); ··· 669 663 ); 670 664 } 671 665 672 - void print_st_err(struct drbd_conf *mdev, 673 - union drbd_state os, union drbd_state ns, int err) 666 + void print_st_err(struct drbd_conf *mdev, union drbd_state os, 667 + union drbd_state ns, enum drbd_state_rv err) 674 668 { 675 669 if (err == SS_IN_TRANSIENT_STATE) 676 670 return; ··· 680 674 } 681 675 682 676 683 - #define drbd_peer_str drbd_role_str 684 - #define drbd_pdsk_str drbd_disk_str 685 - 686 - #define drbd_susp_str(A) ((A) ? "1" : "0") 687 - #define drbd_aftr_isp_str(A) ((A) ? "1" : "0") 688 - #define drbd_peer_isp_str(A) ((A) ? "1" : "0") 689 - #define drbd_user_isp_str(A) ((A) ? "1" : "0") 690 - 691 - #define PSC(A) \ 692 - ({ if (ns.A != os.A) { \ 693 - pbp += sprintf(pbp, #A "( %s -> %s ) ", \ 694 - drbd_##A##_str(os.A), \ 695 - drbd_##A##_str(ns.A)); \ 696 - } }) 697 - 698 677 /** 699 678 * is_valid_state() - Returns an SS_ error code if ns is not valid 700 679 * @mdev: DRBD device. 701 680 * @ns: State to consider. 702 681 */ 703 - static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) 682 + static enum drbd_state_rv 683 + is_valid_state(struct drbd_conf *mdev, union drbd_state ns) 704 684 { 705 685 /* See drbd_state_sw_errors in drbd_strings.c */ 706 686 707 687 enum drbd_fencing_p fp; 708 - int rv = SS_SUCCESS; 688 + enum drbd_state_rv rv = SS_SUCCESS; 709 689 710 690 fp = FP_DONT_CARE; 711 691 if (get_ldev(mdev)) { ··· 754 762 * @ns: new state. 755 763 * @os: old state. 756 764 */ 757 - static int is_valid_state_transition(struct drbd_conf *mdev, 758 - union drbd_state ns, union drbd_state os) 765 + static enum drbd_state_rv 766 + is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns, 767 + union drbd_state os) 759 768 { 760 - int rv = SS_SUCCESS; 769 + enum drbd_state_rv rv = SS_SUCCESS; 761 770 762 771 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && 763 772 os.conn > C_CONNECTED) ··· 793 800 os.conn < C_CONNECTED) 794 801 rv = SS_NEED_CONNECTION; 795 802 803 + if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE) 804 + && os.conn < C_WF_REPORT_PARAMS) 805 + rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... 
*/ 806 + 796 807 return rv; 797 808 } 798 809 ··· 814 817 union drbd_state ns, const char **warn_sync_abort) 815 818 { 816 819 enum drbd_fencing_p fp; 820 + enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; 817 821 818 822 fp = FP_DONT_CARE; 819 823 if (get_ldev(mdev)) { ··· 867 869 ns.conn = C_CONNECTED; 868 870 } 869 871 870 - if (ns.conn >= C_CONNECTED && 871 - ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || 872 - (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) { 873 - switch (ns.conn) { 874 - case C_WF_BITMAP_T: 875 - case C_PAUSED_SYNC_T: 876 - ns.disk = D_OUTDATED; 877 - break; 878 - case C_CONNECTED: 879 - case C_WF_BITMAP_S: 880 - case C_SYNC_SOURCE: 881 - case C_PAUSED_SYNC_S: 882 - ns.disk = D_UP_TO_DATE; 883 - break; 884 - case C_SYNC_TARGET: 885 - ns.disk = D_INCONSISTENT; 886 - dev_warn(DEV, "Implicitly set disk state Inconsistent!\n"); 887 - break; 888 - } 889 - if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE) 890 - dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n"); 891 - } 892 - 893 - if (ns.conn >= C_CONNECTED && 894 - (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) { 895 - switch (ns.conn) { 896 - case C_CONNECTED: 897 - case C_WF_BITMAP_T: 898 - case C_PAUSED_SYNC_T: 899 - case C_SYNC_TARGET: 900 - ns.pdsk = D_UP_TO_DATE; 901 - break; 902 - case C_WF_BITMAP_S: 903 - case C_PAUSED_SYNC_S: 904 - /* remap any consistent state to D_OUTDATED, 905 - * but disallow "upgrade" of not even consistent states. 906 - */ 907 - ns.pdsk = 908 - (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED) 909 - ? os.pdsk : D_OUTDATED; 910 - break; 911 - case C_SYNC_SOURCE: 912 - ns.pdsk = D_INCONSISTENT; 913 - dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n"); 914 - break; 915 - } 916 - if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE) 917 - dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n"); 918 - } 919 - 920 872 /* Connection breaks down before we finished "Negotiating" */ 921 873 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && 922 874 get_ldev_if_state(mdev, D_NEGOTIATING)) { ··· 879 931 ns.pdsk = D_UNKNOWN; 880 932 } 881 933 put_ldev(mdev); 934 + } 935 + 936 + /* D_CONSISTENT and D_OUTDATED vanish when we get connected */ 937 + if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) { 938 + if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) 939 + ns.disk = D_UP_TO_DATE; 940 + if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED) 941 + ns.pdsk = D_UP_TO_DATE; 942 + } 943 + 944 + /* Implications of the connection stat on the disk states */ 945 + disk_min = D_DISKLESS; 946 + disk_max = D_UP_TO_DATE; 947 + pdsk_min = D_INCONSISTENT; 948 + pdsk_max = D_UNKNOWN; 949 + switch ((enum drbd_conns)ns.conn) { 950 + case C_WF_BITMAP_T: 951 + case C_PAUSED_SYNC_T: 952 + case C_STARTING_SYNC_T: 953 + case C_WF_SYNC_UUID: 954 + case C_BEHIND: 955 + disk_min = D_INCONSISTENT; 956 + disk_max = D_OUTDATED; 957 + pdsk_min = D_UP_TO_DATE; 958 + pdsk_max = D_UP_TO_DATE; 959 + break; 960 + case C_VERIFY_S: 961 + case C_VERIFY_T: 962 + disk_min = D_UP_TO_DATE; 963 + disk_max = D_UP_TO_DATE; 964 + pdsk_min = D_UP_TO_DATE; 965 + pdsk_max = D_UP_TO_DATE; 966 + break; 967 + case C_CONNECTED: 968 + disk_min = D_DISKLESS; 969 + disk_max = D_UP_TO_DATE; 970 + pdsk_min = D_DISKLESS; 971 + pdsk_max = D_UP_TO_DATE; 972 + break; 973 + case C_WF_BITMAP_S: 974 + case C_PAUSED_SYNC_S: 975 + case C_STARTING_SYNC_S: 976 + case C_AHEAD: 977 + disk_min = D_UP_TO_DATE; 978 + disk_max = D_UP_TO_DATE; 979 + pdsk_min = D_INCONSISTENT; 980 + pdsk_max = D_CONSISTENT; /* 
D_OUTDATED would be nice. But explicit outdate necessary*/ 981 + break; 982 + case C_SYNC_TARGET: 983 + disk_min = D_INCONSISTENT; 984 + disk_max = D_INCONSISTENT; 985 + pdsk_min = D_UP_TO_DATE; 986 + pdsk_max = D_UP_TO_DATE; 987 + break; 988 + case C_SYNC_SOURCE: 989 + disk_min = D_UP_TO_DATE; 990 + disk_max = D_UP_TO_DATE; 991 + pdsk_min = D_INCONSISTENT; 992 + pdsk_max = D_INCONSISTENT; 993 + break; 994 + case C_STANDALONE: 995 + case C_DISCONNECTING: 996 + case C_UNCONNECTED: 997 + case C_TIMEOUT: 998 + case C_BROKEN_PIPE: 999 + case C_NETWORK_FAILURE: 1000 + case C_PROTOCOL_ERROR: 1001 + case C_TEAR_DOWN: 1002 + case C_WF_CONNECTION: 1003 + case C_WF_REPORT_PARAMS: 1004 + case C_MASK: 1005 + break; 1006 + } 1007 + if (ns.disk > disk_max) 1008 + ns.disk = disk_max; 1009 + 1010 + if (ns.disk < disk_min) { 1011 + dev_warn(DEV, "Implicitly set disk from %s to %s\n", 1012 + drbd_disk_str(ns.disk), drbd_disk_str(disk_min)); 1013 + ns.disk = disk_min; 1014 + } 1015 + if (ns.pdsk > pdsk_max) 1016 + ns.pdsk = pdsk_max; 1017 + 1018 + if (ns.pdsk < pdsk_min) { 1019 + dev_warn(DEV, "Implicitly set pdsk from %s to %s\n", 1020 + drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min)); 1021 + ns.pdsk = pdsk_min; 882 1022 } 883 1023 884 1024 if (fp == FP_STONITH && ··· 997 961 /* helper for __drbd_set_state */ 998 962 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) 999 963 { 964 + if (mdev->agreed_pro_version < 90) 965 + mdev->ov_start_sector = 0; 966 + mdev->rs_total = drbd_bm_bits(mdev); 967 + mdev->ov_position = 0; 1000 968 if (cs == C_VERIFY_T) { 1001 969 /* starting online verify from an arbitrary position 1002 970 * does not fit well into the existing protocol. ··· 1010 970 mdev->ov_start_sector = ~(sector_t)0; 1011 971 } else { 1012 972 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); 1013 - if (bit >= mdev->rs_total) 973 + if (bit >= mdev->rs_total) { 1014 974 mdev->ov_start_sector = 1015 975 BM_BIT_TO_SECT(mdev->rs_total - 1); 976 + mdev->rs_total = 1; 977 + } else 978 + mdev->rs_total -= bit; 1016 979 mdev->ov_position = mdev->ov_start_sector; 1017 980 } 981 + mdev->ov_left = mdev->rs_total; 1018 982 } 1019 983 1020 984 static void drbd_resume_al(struct drbd_conf *mdev) ··· 1036 992 * 1037 993 * Caller needs to hold req_lock, and global_state_lock. Do not call directly. 
1038 994 */ 1039 - int __drbd_set_state(struct drbd_conf *mdev, 1040 - union drbd_state ns, enum chg_state_flags flags, 1041 - struct completion *done) 995 + enum drbd_state_rv 996 + __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, 997 + enum chg_state_flags flags, struct completion *done) 1042 998 { 1043 999 union drbd_state os; 1044 - int rv = SS_SUCCESS; 1000 + enum drbd_state_rv rv = SS_SUCCESS; 1045 1001 const char *warn_sync_abort = NULL; 1046 1002 struct after_state_chg_work *ascw; 1047 1003 ··· 1077 1033 dev_warn(DEV, "%s aborted.\n", warn_sync_abort); 1078 1034 1079 1035 { 1080 - char *pbp, pb[300]; 1081 - pbp = pb; 1082 - *pbp = 0; 1083 - PSC(role); 1084 - PSC(peer); 1085 - PSC(conn); 1086 - PSC(disk); 1087 - PSC(pdsk); 1088 - if (is_susp(ns) != is_susp(os)) 1089 - pbp += sprintf(pbp, "susp( %s -> %s ) ", 1090 - drbd_susp_str(is_susp(os)), 1091 - drbd_susp_str(is_susp(ns))); 1092 - PSC(aftr_isp); 1093 - PSC(peer_isp); 1094 - PSC(user_isp); 1095 - dev_info(DEV, "%s\n", pb); 1036 + char *pbp, pb[300]; 1037 + pbp = pb; 1038 + *pbp = 0; 1039 + if (ns.role != os.role) 1040 + pbp += sprintf(pbp, "role( %s -> %s ) ", 1041 + drbd_role_str(os.role), 1042 + drbd_role_str(ns.role)); 1043 + if (ns.peer != os.peer) 1044 + pbp += sprintf(pbp, "peer( %s -> %s ) ", 1045 + drbd_role_str(os.peer), 1046 + drbd_role_str(ns.peer)); 1047 + if (ns.conn != os.conn) 1048 + pbp += sprintf(pbp, "conn( %s -> %s ) ", 1049 + drbd_conn_str(os.conn), 1050 + drbd_conn_str(ns.conn)); 1051 + if (ns.disk != os.disk) 1052 + pbp += sprintf(pbp, "disk( %s -> %s ) ", 1053 + drbd_disk_str(os.disk), 1054 + drbd_disk_str(ns.disk)); 1055 + if (ns.pdsk != os.pdsk) 1056 + pbp += sprintf(pbp, "pdsk( %s -> %s ) ", 1057 + drbd_disk_str(os.pdsk), 1058 + drbd_disk_str(ns.pdsk)); 1059 + if (is_susp(ns) != is_susp(os)) 1060 + pbp += sprintf(pbp, "susp( %d -> %d ) ", 1061 + is_susp(os), 1062 + is_susp(ns)); 1063 + if (ns.aftr_isp != os.aftr_isp) 1064 + pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ", 1065 + os.aftr_isp, 1066 + ns.aftr_isp); 1067 + if (ns.peer_isp != os.peer_isp) 1068 + pbp += sprintf(pbp, "peer_isp( %d -> %d ) ", 1069 + os.peer_isp, 1070 + ns.peer_isp); 1071 + if (ns.user_isp != os.user_isp) 1072 + pbp += sprintf(pbp, "user_isp( %d -> %d ) ", 1073 + os.user_isp, 1074 + ns.user_isp); 1075 + dev_info(DEV, "%s\n", pb); 1096 1076 } 1097 1077 1098 1078 /* solve the race between becoming unconfigured, ··· 1142 1074 atomic_inc(&mdev->local_cnt); 1143 1075 1144 1076 mdev->state = ns; 1077 + 1078 + if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 1079 + drbd_print_uuids(mdev, "attached to UUIDs"); 1080 + 1145 1081 wake_up(&mdev->misc_wait); 1146 1082 wake_up(&mdev->state_wait); 1147 1083 ··· 1153 1081 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && 1154 1082 ns.conn < C_CONNECTED) { 1155 1083 mdev->ov_start_sector = 1156 - BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left); 1084 + BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left); 1157 1085 dev_info(DEV, "Online Verify reached sector %llu\n", 1158 1086 (unsigned long long)mdev->ov_start_sector); 1159 1087 } ··· 1178 1106 unsigned long now = jiffies; 1179 1107 int i; 1180 1108 1181 - mdev->ov_position = 0; 1182 - mdev->rs_total = drbd_bm_bits(mdev); 1183 - if (mdev->agreed_pro_version >= 90) 1184 - set_ov_position(mdev, ns.conn); 1185 - else 1186 - mdev->ov_start_sector = 0; 1187 - mdev->ov_left = mdev->rs_total 1188 - - BM_SECT_TO_BIT(mdev->ov_position); 1109 + set_ov_position(mdev, ns.conn); 1189 1110 mdev->rs_start = now; 1190 1111 mdev->rs_last_events 
= 0; 1191 1112 mdev->rs_last_sect_ev = 0; ··· 1186 1121 mdev->ov_last_oos_start = 0; 1187 1122 1188 1123 for (i = 0; i < DRBD_SYNC_MARKS; i++) { 1189 - mdev->rs_mark_left[i] = mdev->rs_total; 1124 + mdev->rs_mark_left[i] = mdev->ov_left; 1190 1125 mdev->rs_mark_time[i] = now; 1191 1126 } 1127 + 1128 + drbd_rs_controller_reset(mdev); 1192 1129 1193 1130 if (ns.conn == C_VERIFY_S) { 1194 1131 dev_info(DEV, "Starting Online Verify from sector %llu\n", ··· 1295 1228 } 1296 1229 } 1297 1230 1231 + int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, 1232 + int (*io_fn)(struct drbd_conf *), 1233 + char *why, enum bm_flag flags) 1234 + { 1235 + int rv; 1236 + 1237 + D_ASSERT(current == mdev->worker.task); 1238 + 1239 + /* open coded non-blocking drbd_suspend_io(mdev); */ 1240 + set_bit(SUSPEND_IO, &mdev->flags); 1241 + 1242 + drbd_bm_lock(mdev, why, flags); 1243 + rv = io_fn(mdev); 1244 + drbd_bm_unlock(mdev); 1245 + 1246 + drbd_resume_io(mdev); 1247 + 1248 + return rv; 1249 + } 1250 + 1298 1251 /** 1299 1252 * after_state_ch() - Perform after state change actions that may sleep 1300 1253 * @mdev: DRBD device. ··· 1353 1266 1354 1267 nsm.i = -1; 1355 1268 if (ns.susp_nod) { 1356 - if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { 1357 - if (ns.conn == C_CONNECTED) 1358 - what = resend, nsm.susp_nod = 0; 1359 - else /* ns.conn > C_CONNECTED */ 1360 - dev_err(DEV, "Unexpected Resynd going on!\n"); 1361 - } 1269 + if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) 1270 + what = resend; 1362 1271 1363 1272 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) 1364 - what = restart_frozen_disk_io, nsm.susp_nod = 0; 1273 + what = restart_frozen_disk_io; 1365 1274 1275 + if (what != nothing) 1276 + nsm.susp_nod = 0; 1366 1277 } 1367 1278 1368 1279 if (ns.susp_fen) { ··· 1391 1306 spin_unlock_irq(&mdev->req_lock); 1392 1307 } 1393 1308 1309 + /* Became sync source. With protocol >= 96, we still need to send out 1310 + * the sync uuid now. Need to do that before any drbd_send_state, or 1311 + * the other side may go "paused sync" before receiving the sync uuids, 1312 + * which is unexpected. */ 1313 + if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && 1314 + (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && 1315 + mdev->agreed_pro_version >= 96 && get_ldev(mdev)) { 1316 + drbd_gen_and_send_sync_uuid(mdev); 1317 + put_ldev(mdev); 1318 + } 1319 + 1394 1320 /* Do not change the order of the if above and the two below... */ 1395 1321 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ 1396 1322 drbd_send_uuids(mdev); 1397 1323 drbd_send_state(mdev); 1398 1324 } 1399 - if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S) 1400 - drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)"); 1325 + /* No point in queuing send_bitmap if we don't have a connection 1326 + * anymore, so check also the _current_ state, not only the new state 1327 + * at the time this work was queued. */ 1328 + if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && 1329 + mdev->state.conn == C_WF_BITMAP_S) 1330 + drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, 1331 + "send_bitmap (WFBitMapS)", 1332 + BM_LOCKED_TEST_ALLOWED); 1401 1333 1402 1334 /* Lost contact to peer's copy of the data */ 1403 1335 if ((os.pdsk >= D_INCONSISTENT && ··· 1445 1343 1446 1344 /* D_DISKLESS Peer becomes secondary */ 1447 1345 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) 1448 - drbd_al_to_on_disk_bm(mdev); 1346 + /* We may still be Primary ourselves. 
1347 + * No harm done if the bitmap still changes, 1348 + * redirtied pages will follow later. */ 1349 + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, 1350 + "demote diskless peer", BM_LOCKED_SET_ALLOWED); 1351 + put_ldev(mdev); 1352 + } 1353 + 1354 + /* Write out all changed bits on demote. 1355 + * Though, no need to da that just yet 1356 + * if there is a resync going on still */ 1357 + if (os.role == R_PRIMARY && ns.role == R_SECONDARY && 1358 + mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) { 1359 + /* No changes to the bitmap expected this time, so assert that, 1360 + * even though no harm was done if it did change. */ 1361 + drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, 1362 + "demote", BM_LOCKED_TEST_ALLOWED); 1449 1363 put_ldev(mdev); 1450 1364 } 1451 1365 ··· 1489 1371 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) 1490 1372 drbd_send_state(mdev); 1491 1373 1374 + if (os.conn != C_AHEAD && ns.conn == C_AHEAD) 1375 + drbd_send_state(mdev); 1376 + 1492 1377 /* We are in the progress to start a full sync... */ 1493 1378 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || 1494 1379 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) 1495 - drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync"); 1380 + /* no other bitmap changes expected during this phase */ 1381 + drbd_queue_bitmap_io(mdev, 1382 + &drbd_bmio_set_n_write, &abw_start_sync, 1383 + "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED); 1496 1384 1497 1385 /* We are invalidating our self... */ 1498 1386 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && 1499 1387 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) 1500 - drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); 1388 + /* other bitmap operation expected during this phase */ 1389 + drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, 1390 + "set_n_write from invalidate", BM_LOCKED_MASK); 1501 1391 1502 1392 /* first half of local IO error, failure to attach, 1503 1393 * or administrative detach */ ··· 1560 1434 1561 1435 if (drbd_send_state(mdev)) 1562 1436 dev_warn(DEV, "Notified peer that I'm now diskless.\n"); 1563 - else 1564 - dev_err(DEV, "Sending state for being diskless failed\n"); 1565 1437 /* corresponding get_ldev in __drbd_set_state 1566 1438 * this may finaly trigger drbd_ldev_destroy. */ 1567 1439 put_ldev(mdev); ··· 1582 1458 * it should (at least for non-empty resyncs) already know itself. */ 1583 1459 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) 1584 1460 drbd_send_state(mdev); 1461 + 1462 + /* This triggers bitmap writeout of potentially still unwritten pages 1463 + * if the resync finished cleanly, or aborted because of peer disk 1464 + * failure, or because of connection loss. 1465 + * For resync aborted because of local disk failure, we cannot do 1466 + * any bitmap writeout anymore. 1467 + * No harm done if some bits change during this phase. 
1468 + */ 1469 + if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) { 1470 + drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, 1471 + "write from resync_finished", BM_LOCKED_SET_ALLOWED); 1472 + put_ldev(mdev); 1473 + } 1585 1474 1586 1475 /* free tl_hash if we Got thawed and are C_STANDALONE */ 1587 1476 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) ··· 1696 1559 if (!try_module_get(THIS_MODULE)) { 1697 1560 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); 1698 1561 spin_unlock_irqrestore(&thi->t_lock, flags); 1699 - return FALSE; 1562 + return false; 1700 1563 } 1701 1564 1702 1565 init_completion(&thi->stop); ··· 1713 1576 dev_err(DEV, "Couldn't start thread\n"); 1714 1577 1715 1578 module_put(THIS_MODULE); 1716 - return FALSE; 1579 + return false; 1717 1580 } 1718 1581 spin_lock_irqsave(&thi->t_lock, flags); 1719 1582 thi->task = nt; ··· 1733 1596 break; 1734 1597 } 1735 1598 1736 - return TRUE; 1599 + return true; 1737 1600 } 1738 1601 1739 1602 ··· 1831 1694 { 1832 1695 int sent, ok; 1833 1696 1834 - ERR_IF(!h) return FALSE; 1835 - ERR_IF(!size) return FALSE; 1697 + ERR_IF(!h) return false; 1698 + ERR_IF(!size) return false; 1836 1699 1837 1700 h->magic = BE_DRBD_MAGIC; 1838 1701 h->command = cpu_to_be16(cmd); ··· 1841 1704 sent = drbd_send(mdev, sock, h, size, msg_flags); 1842 1705 1843 1706 ok = (sent == size); 1844 - if (!ok) 1845 - dev_err(DEV, "short sent %s size=%d sent=%d\n", 1707 + if (!ok && !signal_pending(current)) 1708 + dev_warn(DEV, "short sent %s size=%d sent=%d\n", 1846 1709 cmdname(cmd), (int)size, sent); 1847 1710 return ok; 1848 1711 } ··· 1977 1840 else { 1978 1841 dev_err(DEV, "--dry-run is not supported by peer"); 1979 1842 kfree(p); 1980 - return 0; 1843 + return -1; 1981 1844 } 1982 1845 } 1983 1846 p->conn_flags = cpu_to_be32(cf); ··· 2025 1888 return _drbd_send_uuids(mdev, 8); 2026 1889 } 2027 1890 1891 + void drbd_print_uuids(struct drbd_conf *mdev, const char *text) 1892 + { 1893 + if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 1894 + u64 *uuid = mdev->ldev->md.uuid; 1895 + dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n", 1896 + text, 1897 + (unsigned long long)uuid[UI_CURRENT], 1898 + (unsigned long long)uuid[UI_BITMAP], 1899 + (unsigned long long)uuid[UI_HISTORY_START], 1900 + (unsigned long long)uuid[UI_HISTORY_END]); 1901 + put_ldev(mdev); 1902 + } else { 1903 + dev_info(DEV, "%s effective data uuid: %016llX\n", 1904 + text, 1905 + (unsigned long long)mdev->ed_uuid); 1906 + } 1907 + } 2028 1908 2029 - int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) 1909 + int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev) 2030 1910 { 2031 1911 struct p_rs_uuid p; 1912 + u64 uuid; 2032 1913 2033 - p.uuid = cpu_to_be64(val); 1914 + D_ASSERT(mdev->state.disk == D_UP_TO_DATE); 1915 + 1916 + uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET; 1917 + drbd_uuid_set(mdev, UI_BITMAP, uuid); 1918 + drbd_print_uuids(mdev, "updated sync UUID"); 1919 + drbd_md_sync(mdev); 1920 + p.uuid = cpu_to_be64(uuid); 2034 1921 2035 1922 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, 2036 1923 (struct p_header80 *)&p, sizeof(p)); ··· 2082 1921 p.d_size = cpu_to_be64(d_size); 2083 1922 p.u_size = cpu_to_be64(u_size); 2084 1923 p.c_size = cpu_to_be64(trigger_reply ? 
0 : drbd_get_capacity(mdev->this_bdev)); 2085 - p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); 1924 + p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); 2086 1925 p.queue_order_type = cpu_to_be16(q_order_type); 2087 1926 p.dds_flags = cpu_to_be16(flags); 2088 1927 ··· 2133 1972 (struct p_header80 *)&p, sizeof(p)); 2134 1973 } 2135 1974 2136 - int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) 1975 + int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode) 2137 1976 { 2138 1977 struct p_req_state_reply p; 2139 1978 ··· 2237 2076 return len; 2238 2077 } 2239 2078 2240 - enum { OK, FAILED, DONE } 2079 + /** 2080 + * send_bitmap_rle_or_plain 2081 + * 2082 + * Return 0 when done, 1 when another iteration is needed, and a negative error 2083 + * code upon failure. 2084 + */ 2085 + static int 2241 2086 send_bitmap_rle_or_plain(struct drbd_conf *mdev, 2242 - struct p_header80 *h, struct bm_xfer_ctx *c) 2087 + struct p_header80 *h, struct bm_xfer_ctx *c) 2243 2088 { 2244 2089 struct p_compressed_bm *p = (void*)h; 2245 2090 unsigned long num_words; ··· 2255 2088 len = fill_bitmap_rle_bits(mdev, p, c); 2256 2089 2257 2090 if (len < 0) 2258 - return FAILED; 2091 + return -EIO; 2259 2092 2260 2093 if (len) { 2261 2094 DCBP_set_code(p, RLE_VLI_Bits); ··· 2285 2118 if (c->bit_offset > c->bm_bits) 2286 2119 c->bit_offset = c->bm_bits; 2287 2120 } 2288 - ok = ok ? ((len == 0) ? DONE : OK) : FAILED; 2289 - 2290 - if (ok == DONE) 2291 - INFO_bm_xfer_stats(mdev, "send", c); 2292 - return ok; 2121 + if (ok) { 2122 + if (len == 0) { 2123 + INFO_bm_xfer_stats(mdev, "send", c); 2124 + return 0; 2125 + } else 2126 + return 1; 2127 + } 2128 + return -EIO; 2293 2129 } 2294 2130 2295 2131 /* See the comment at receive_bitmap() */ ··· 2300 2130 { 2301 2131 struct bm_xfer_ctx c; 2302 2132 struct p_header80 *p; 2303 - int ret; 2133 + int err; 2304 2134 2305 - ERR_IF(!mdev->bitmap) return FALSE; 2135 + ERR_IF(!mdev->bitmap) return false; 2306 2136 2307 2137 /* maybe we should use some per thread scratch page, 2308 2138 * and allocate that during initial device creation? 
*/ 2309 2139 p = (struct p_header80 *) __get_free_page(GFP_NOIO); 2310 2140 if (!p) { 2311 2141 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); 2312 - return FALSE; 2142 + return false; 2313 2143 } 2314 2144 2315 2145 if (get_ldev(mdev)) { ··· 2335 2165 }; 2336 2166 2337 2167 do { 2338 - ret = send_bitmap_rle_or_plain(mdev, p, &c); 2339 - } while (ret == OK); 2168 + err = send_bitmap_rle_or_plain(mdev, p, &c); 2169 + } while (err > 0); 2340 2170 2341 2171 free_page((unsigned long) p); 2342 - return (ret == DONE); 2172 + return err == 0; 2343 2173 } 2344 2174 2345 2175 int drbd_send_bitmap(struct drbd_conf *mdev) ··· 2362 2192 p.set_size = cpu_to_be32(set_size); 2363 2193 2364 2194 if (mdev->state.conn < C_CONNECTED) 2365 - return FALSE; 2195 + return false; 2366 2196 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, 2367 2197 (struct p_header80 *)&p, sizeof(p)); 2368 2198 return ok; ··· 2390 2220 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); 2391 2221 2392 2222 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) 2393 - return FALSE; 2223 + return false; 2394 2224 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, 2395 2225 (struct p_header80 *)&p, sizeof(p)); 2396 2226 return ok; ··· 2496 2326 } 2497 2327 2498 2328 /* called on sndtimeo 2499 - * returns FALSE if we should retry, 2500 - * TRUE if we think connection is dead 2329 + * returns false if we should retry, 2330 + * true if we think connection is dead 2501 2331 */ 2502 2332 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) 2503 2333 { ··· 2510 2340 || mdev->state.conn < C_CONNECTED; 2511 2341 2512 2342 if (drop_it) 2513 - return TRUE; 2343 + return true; 2514 2344 2515 2345 drop_it = !--mdev->ko_count; 2516 2346 if (!drop_it) { ··· 2701 2531 if (ok && dgs) { 2702 2532 dgb = mdev->int_dig_out; 2703 2533 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); 2704 - ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); 2534 + ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); 2705 2535 } 2706 2536 if (ok) { 2707 - if (mdev->net_conf->wire_protocol == DRBD_PROT_A) 2537 + /* For protocol A, we have to memcpy the payload into 2538 + * socket buffers, as we may complete right away 2539 + * as soon as we handed it over to tcp, at which point the data 2540 + * pages may become invalid. 2541 + * 2542 + * For data-integrity enabled, we copy it as well, so we can be 2543 + * sure that even if the bio pages may still be modified, it 2544 + * won't change the data on the wire, thus if the digest checks 2545 + * out ok after sending on this side, but does not fit on the 2546 + * receiving side, we sure have detected corruption elsewhere. 2547 + */ 2548 + if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs) 2708 2549 ok = _drbd_send_bio(mdev, req->master_bio); 2709 2550 else 2710 2551 ok = _drbd_send_zc_bio(mdev, req->master_bio); 2552 + 2553 + /* double check digest, sometimes buffers have been modified in flight. */ 2554 + if (dgs > 0 && dgs <= 64) { 2555 + /* 64 byte, 512 bit, is the larges digest size 2556 + * currently supported in kernel crypto. */ 2557 + unsigned char digest[64]; 2558 + drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); 2559 + if (memcmp(mdev->int_dig_out, digest, dgs)) { 2560 + dev_warn(DEV, 2561 + "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", 2562 + (unsigned long long)req->sector, req->size); 2563 + } 2564 + } /* else if (dgs > 64) { 2565 + ... 
Be noisy about digest too large ... 2566 + } */ 2711 2567 } 2712 2568 2713 2569 drbd_put_data_sock(mdev); ··· 2783 2587 if (ok && dgs) { 2784 2588 dgb = mdev->int_dig_out; 2785 2589 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); 2786 - ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); 2590 + ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0); 2787 2591 } 2788 2592 if (ok) 2789 2593 ok = _drbd_send_zc_ee(mdev, e); ··· 2791 2595 drbd_put_data_sock(mdev); 2792 2596 2793 2597 return ok; 2598 + } 2599 + 2600 + int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) 2601 + { 2602 + struct p_block_desc p; 2603 + 2604 + p.sector = cpu_to_be64(req->sector); 2605 + p.blksize = cpu_to_be32(req->size); 2606 + 2607 + return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); 2794 2608 } 2795 2609 2796 2610 /* ··· 2976 2770 atomic_set(&mdev->pp_in_use_by_net, 0); 2977 2771 atomic_set(&mdev->rs_sect_in, 0); 2978 2772 atomic_set(&mdev->rs_sect_ev, 0); 2773 + atomic_set(&mdev->ap_in_flight, 0); 2979 2774 2980 2775 mutex_init(&mdev->md_io_mutex); 2981 2776 mutex_init(&mdev->data.mutex); ··· 3005 2798 INIT_LIST_HEAD(&mdev->unplug_work.list); 3006 2799 INIT_LIST_HEAD(&mdev->go_diskless.list); 3007 2800 INIT_LIST_HEAD(&mdev->md_sync_work.list); 2801 + INIT_LIST_HEAD(&mdev->start_resync_work.list); 3008 2802 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 3009 2803 3010 - mdev->resync_work.cb = w_resync_inactive; 2804 + mdev->resync_work.cb = w_resync_timer; 3011 2805 mdev->unplug_work.cb = w_send_write_hint; 3012 2806 mdev->go_diskless.cb = w_go_diskless; 3013 2807 mdev->md_sync_work.cb = w_md_sync; 3014 2808 mdev->bm_io_work.w.cb = w_bitmap_io; 2809 + mdev->start_resync_work.cb = w_start_resync; 3015 2810 init_timer(&mdev->resync_timer); 3016 2811 init_timer(&mdev->md_sync_timer); 2812 + init_timer(&mdev->start_resync_timer); 2813 + init_timer(&mdev->request_timer); 3017 2814 mdev->resync_timer.function = resync_timer_fn; 3018 2815 mdev->resync_timer.data = (unsigned long) mdev; 3019 2816 mdev->md_sync_timer.function = md_sync_timer_fn; 3020 2817 mdev->md_sync_timer.data = (unsigned long) mdev; 2818 + mdev->start_resync_timer.function = start_resync_timer_fn; 2819 + mdev->start_resync_timer.data = (unsigned long) mdev; 2820 + mdev->request_timer.function = request_timer_fn; 2821 + mdev->request_timer.data = (unsigned long) mdev; 3021 2822 3022 2823 init_waitqueue_head(&mdev->misc_wait); 3023 2824 init_waitqueue_head(&mdev->state_wait); ··· 3096 2881 D_ASSERT(list_empty(&mdev->resync_work.list)); 3097 2882 D_ASSERT(list_empty(&mdev->unplug_work.list)); 3098 2883 D_ASSERT(list_empty(&mdev->go_diskless.list)); 2884 + 2885 + drbd_set_defaults(mdev); 3099 2886 } 3100 2887 3101 2888 ··· 3140 2923 static int drbd_create_mempools(void) 3141 2924 { 3142 2925 struct page *page; 3143 - const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count; 2926 + const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; 3144 2927 int i; 3145 2928 3146 2929 /* prepare our caches and mempools */ ··· 3304 3087 3305 3088 unregister_reboot_notifier(&drbd_notifier); 3306 3089 3090 + /* first remove proc, 3091 + * drbdsetup uses it's presence to detect 3092 + * whether DRBD is loaded. 3093 + * If we would get stuck in proc removal, 3094 + * but have netlink already deregistered, 3095 + * some drbdsetup commands may wait forever 3096 + * for an answer. 
3097 + */ 3098 + if (drbd_proc) 3099 + remove_proc_entry("drbd", NULL); 3100 + 3307 3101 drbd_nl_cleanup(); 3308 3102 3309 3103 if (minor_table) { 3310 - if (drbd_proc) 3311 - remove_proc_entry("drbd", NULL); 3312 3104 i = minor_count; 3313 3105 while (i--) 3314 3106 drbd_delete_device(i); ··· 3345 3119 char reason = '-'; 3346 3120 int r = 0; 3347 3121 3348 - if (!__inc_ap_bio_cond(mdev)) { 3122 + if (!may_inc_ap_bio(mdev)) { 3349 3123 /* DRBD has frozen IO */ 3350 3124 r = bdi_bits; 3351 3125 reason = 'd'; ··· 3398 3172 goto out_no_disk; 3399 3173 mdev->vdisk = disk; 3400 3174 3401 - set_disk_ro(disk, TRUE); 3175 + set_disk_ro(disk, true); 3402 3176 3403 3177 disk->queue = q; 3404 3178 disk->major = DRBD_MAJOR; ··· 3414 3188 q->backing_dev_info.congested_fn = drbd_congested; 3415 3189 q->backing_dev_info.congested_data = mdev; 3416 3190 3417 - blk_queue_make_request(q, drbd_make_request_26); 3418 - blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); 3191 + blk_queue_make_request(q, drbd_make_request); 3192 + blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); 3419 3193 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 3420 3194 blk_queue_merge_bvec(q, drbd_merge_bvec); 3421 3195 q->queue_lock = &mdev->req_lock; ··· 3477 3251 put_disk(mdev->vdisk); 3478 3252 blk_cleanup_queue(mdev->rq_queue); 3479 3253 free_cpumask_var(mdev->cpu_mask); 3254 + drbd_free_tl_hash(mdev); 3480 3255 kfree(mdev); 3481 3256 } 3482 3257 ··· 3493 3266 return -EINVAL; 3494 3267 } 3495 3268 3496 - if (1 > minor_count || minor_count > 255) { 3269 + if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { 3497 3270 printk(KERN_ERR 3498 3271 "drbd: invalid minor_count (%d)\n", minor_count); 3499 3272 #ifdef MODULE ··· 3675 3448 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { 3676 3449 /* this was a try anyways ... */ 3677 3450 dev_err(DEV, "meta data update failed!\n"); 3678 - drbd_chk_io_error(mdev, 1, TRUE); 3451 + drbd_chk_io_error(mdev, 1, true); 3679 3452 } 3680 3453 3681 3454 /* Update mdev->ldev->md.la_size_sect, ··· 3691 3464 * @mdev: DRBD device. 3692 3465 * @bdev: Device from which the meta data should be read in. 3693 3466 * 3694 - * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case 3467 + * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case 3695 3468 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. 3696 3469 */ 3697 3470 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) ··· 3761 3534 return rv; 3762 3535 } 3763 3536 3764 - static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index) 3765 - { 3766 - static char *uuid_str[UI_EXTENDED_SIZE] = { 3767 - [UI_CURRENT] = "CURRENT", 3768 - [UI_BITMAP] = "BITMAP", 3769 - [UI_HISTORY_START] = "HISTORY_START", 3770 - [UI_HISTORY_END] = "HISTORY_END", 3771 - [UI_SIZE] = "SIZE", 3772 - [UI_FLAGS] = "FLAGS", 3773 - }; 3774 - 3775 - if (index >= UI_EXTENDED_SIZE) { 3776 - dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n"); 3777 - return; 3778 - } 3779 - 3780 - dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n", 3781 - uuid_str[index], 3782 - (unsigned long long)mdev->ldev->md.uuid[index]); 3783 - } 3784 - 3785 - 3786 3537 /** 3787 3538 * drbd_md_mark_dirty() - Mark meta data super block as dirty 3788 3539 * @mdev: DRBD device. 
··· 3790 3585 { 3791 3586 int i; 3792 3587 3793 - for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { 3588 + for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) 3794 3589 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; 3795 - debug_drbd_uuid(mdev, i+1); 3796 - } 3797 3590 } 3798 3591 3799 3592 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) ··· 3806 3603 } 3807 3604 3808 3605 mdev->ldev->md.uuid[idx] = val; 3809 - debug_drbd_uuid(mdev, idx); 3810 3606 drbd_md_mark_dirty(mdev); 3811 3607 } 3812 3608 ··· 3815 3613 if (mdev->ldev->md.uuid[idx]) { 3816 3614 drbd_uuid_move_history(mdev); 3817 3615 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; 3818 - debug_drbd_uuid(mdev, UI_HISTORY_START); 3819 3616 } 3820 3617 _drbd_uuid_set(mdev, idx, val); 3821 3618 } ··· 3829 3628 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) 3830 3629 { 3831 3630 u64 val; 3631 + unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; 3832 3632 3833 - dev_info(DEV, "Creating new current UUID\n"); 3834 - D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); 3633 + if (bm_uuid) 3634 + dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3635 + 3835 3636 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; 3836 - debug_drbd_uuid(mdev, UI_BITMAP); 3837 3637 3838 3638 get_random_bytes(&val, sizeof(u64)); 3839 3639 _drbd_uuid_set(mdev, UI_CURRENT, val); 3640 + drbd_print_uuids(mdev, "new current UUID"); 3840 3641 /* get it to stable storage _now_ */ 3841 3642 drbd_md_sync(mdev); 3842 3643 } ··· 3852 3649 drbd_uuid_move_history(mdev); 3853 3650 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; 3854 3651 mdev->ldev->md.uuid[UI_BITMAP] = 0; 3855 - debug_drbd_uuid(mdev, UI_HISTORY_START); 3856 - debug_drbd_uuid(mdev, UI_BITMAP); 3857 3652 } else { 3858 - if (mdev->ldev->md.uuid[UI_BITMAP]) 3859 - dev_warn(DEV, "bm UUID already set"); 3653 + unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; 3654 + if (bm_uuid) 3655 + dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3860 3656 3861 - mdev->ldev->md.uuid[UI_BITMAP] = val; 3862 - mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); 3863 - 3864 - debug_drbd_uuid(mdev, UI_BITMAP); 3657 + mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); 3865 3658 } 3866 3659 drbd_md_mark_dirty(mdev); 3867 3660 } ··· 3913 3714 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) 3914 3715 { 3915 3716 struct bm_io_work *work = container_of(w, struct bm_io_work, w); 3916 - int rv; 3717 + int rv = -EIO; 3917 3718 3918 3719 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); 3919 3720 3920 - drbd_bm_lock(mdev, work->why); 3921 - rv = work->io_fn(mdev); 3922 - drbd_bm_unlock(mdev); 3721 + if (get_ldev(mdev)) { 3722 + drbd_bm_lock(mdev, work->why, work->flags); 3723 + rv = work->io_fn(mdev); 3724 + drbd_bm_unlock(mdev); 3725 + put_ldev(mdev); 3726 + } 3923 3727 3924 3728 clear_bit(BITMAP_IO, &mdev->flags); 3729 + smp_mb__after_clear_bit(); 3925 3730 wake_up(&mdev->misc_wait); 3926 3731 3927 3732 if (work->done) ··· 3933 3730 3934 3731 clear_bit(BITMAP_IO_QUEUED, &mdev->flags); 3935 3732 work->why = NULL; 3733 + work->flags = 0; 3936 3734 3937 3735 return 1; 3938 3736 } ··· 3988 3784 void drbd_queue_bitmap_io(struct drbd_conf *mdev, 3989 3785 int (*io_fn)(struct drbd_conf *), 3990 3786 void (*done)(struct drbd_conf *, int), 3991 - char *why) 3787 + char *why, enum bm_flag flags) 3992 3788 { 3993 3789 D_ASSERT(current == mdev->worker.task); 3994 3790 ··· 4002 
3798 mdev->bm_io_work.io_fn = io_fn; 4003 3799 mdev->bm_io_work.done = done; 4004 3800 mdev->bm_io_work.why = why; 3801 + mdev->bm_io_work.flags = flags; 4005 3802 3803 + spin_lock_irq(&mdev->req_lock); 4006 3804 set_bit(BITMAP_IO, &mdev->flags); 4007 3805 if (atomic_read(&mdev->ap_bio_cnt) == 0) { 4008 - if (list_empty(&mdev->bm_io_work.w.list)) { 4009 - set_bit(BITMAP_IO_QUEUED, &mdev->flags); 3806 + if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) 4010 3807 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); 4011 - } else 4012 - dev_err(DEV, "FIXME avoided double queuing bm_io_work\n"); 4013 3808 } 3809 + spin_unlock_irq(&mdev->req_lock); 4014 3810 } 4015 3811 4016 3812 /** ··· 4022 3818 * freezes application IO while that the actual IO operations runs. This 4023 3819 * functions MAY NOT be called from worker context. 4024 3820 */ 4025 - int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) 3821 + int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), 3822 + char *why, enum bm_flag flags) 4026 3823 { 4027 3824 int rv; 4028 3825 4029 3826 D_ASSERT(current != mdev->worker.task); 4030 3827 4031 - drbd_suspend_io(mdev); 3828 + if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3829 + drbd_suspend_io(mdev); 4032 3830 4033 - drbd_bm_lock(mdev, why); 3831 + drbd_bm_lock(mdev, why, flags); 4034 3832 rv = io_fn(mdev); 4035 3833 drbd_bm_unlock(mdev); 4036 3834 4037 - drbd_resume_io(mdev); 3835 + if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3836 + drbd_resume_io(mdev); 4038 3837 4039 3838 return rv; 4040 3839 }
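Editor's note on the drbd_main.c hunks above: send_bitmap_rle_or_plain() drops its private enum { OK, FAILED, DONE } in favour of the usual kernel convention of 0 for "done", a positive value for "another iteration is needed", and a negative error code on failure, so _drbd_send_bitmap() can simply loop while the result is positive. A minimal userspace sketch of that calling convention, assuming an illustrative buffer, chunk size, and function name (none of these are DRBD symbols):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Send one chunk; return 0 when everything has been sent, 1 when another
 * iteration is needed, and a negative error code on failure -- the same
 * convention the reworked send_bitmap_rle_or_plain() uses. */
static int send_chunk(const char *buf, size_t len, size_t *off, size_t chunk)
{
        size_t n;

        if (*off > len)
                return -EINVAL;         /* corrupted offset */
        if (*off == len)
                return 0;               /* nothing left to send */
        n = len - *off < chunk ? len - *off : chunk;
        printf("sending %zu bytes at offset %zu\n", n, *off);
        *off += n;
        return *off == len ? 0 : 1;     /* 0 = done, 1 = call again */
}

int main(void)
{
        const char buf[] = "example payload for the chunked send loop";
        size_t off = 0;
        int err;

        do {
                err = send_chunk(buf, strlen(buf), &off, 16);
        } while (err > 0);              /* same loop shape as _drbd_send_bitmap() */

        return err == 0 ? 0 : 1;
}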
+124 -59
drivers/block/drbd/drbd_nl.c
··· 288 288 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); 289 289 } 290 290 291 - int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 291 + enum drbd_state_rv 292 + drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 292 293 { 293 294 const int max_tries = 4; 294 - int r = 0; 295 + enum drbd_state_rv rv = SS_UNKNOWN_ERROR; 295 296 int try = 0; 296 297 int forced = 0; 297 298 union drbd_state mask, val; ··· 307 306 val.i = 0; val.role = new_role; 308 307 309 308 while (try++ < max_tries) { 310 - r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 309 + rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 311 310 312 311 /* in case we first succeeded to outdate, 313 312 * but now suddenly could establish a connection */ 314 - if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 313 + if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 315 314 val.pdsk = 0; 316 315 mask.pdsk = 0; 317 316 continue; 318 317 } 319 318 320 - if (r == SS_NO_UP_TO_DATE_DISK && force && 319 + if (rv == SS_NO_UP_TO_DATE_DISK && force && 321 320 (mdev->state.disk < D_UP_TO_DATE && 322 321 mdev->state.disk >= D_INCONSISTENT)) { 323 322 mask.disk = D_MASK; ··· 326 325 continue; 327 326 } 328 327 329 - if (r == SS_NO_UP_TO_DATE_DISK && 328 + if (rv == SS_NO_UP_TO_DATE_DISK && 330 329 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 331 330 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 332 331 nps = drbd_try_outdate_peer(mdev); ··· 342 341 continue; 343 342 } 344 343 345 - if (r == SS_NOTHING_TO_DO) 344 + if (rv == SS_NOTHING_TO_DO) 346 345 goto fail; 347 - if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { 346 + if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { 348 347 nps = drbd_try_outdate_peer(mdev); 349 348 350 349 if (force && nps > D_OUTDATED) { ··· 357 356 358 357 continue; 359 358 } 360 - if (r == SS_TWO_PRIMARIES) { 359 + if (rv == SS_TWO_PRIMARIES) { 361 360 /* Maybe the peer is detected as dead very soon... 362 361 retry at most once more in this case. 
*/ 363 - __set_current_state(TASK_INTERRUPTIBLE); 364 - schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); 362 + schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10); 365 363 if (try < max_tries) 366 364 try = max_tries - 1; 367 365 continue; 368 366 } 369 - if (r < SS_SUCCESS) { 370 - r = _drbd_request_state(mdev, mask, val, 367 + if (rv < SS_SUCCESS) { 368 + rv = _drbd_request_state(mdev, mask, val, 371 369 CS_VERBOSE + CS_WAIT_COMPLETE); 372 - if (r < SS_SUCCESS) 370 + if (rv < SS_SUCCESS) 373 371 goto fail; 374 372 } 375 373 break; 376 374 } 377 375 378 - if (r < SS_SUCCESS) 376 + if (rv < SS_SUCCESS) 379 377 goto fail; 380 378 381 379 if (forced) ··· 384 384 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); 385 385 386 386 if (new_role == R_SECONDARY) { 387 - set_disk_ro(mdev->vdisk, TRUE); 387 + set_disk_ro(mdev->vdisk, true); 388 388 if (get_ldev(mdev)) { 389 389 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 390 390 put_ldev(mdev); ··· 394 394 mdev->net_conf->want_lose = 0; 395 395 put_net_conf(mdev); 396 396 } 397 - set_disk_ro(mdev->vdisk, FALSE); 397 + set_disk_ro(mdev->vdisk, false); 398 398 if (get_ldev(mdev)) { 399 399 if (((mdev->state.conn < C_CONNECTED || 400 400 mdev->state.pdsk <= D_FAILED) ··· 406 406 } 407 407 } 408 408 409 - if ((new_role == R_SECONDARY) && get_ldev(mdev)) { 410 - drbd_al_to_on_disk_bm(mdev); 411 - put_ldev(mdev); 412 - } 409 + /* writeout of activity log covered areas of the bitmap 410 + * to stable storage done in after state change already */ 413 411 414 412 if (mdev->state.conn >= C_WF_REPORT_PARAMS) { 415 413 /* if this was forced, we should consider sync */ ··· 421 423 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 422 424 fail: 423 425 mutex_unlock(&mdev->state_mutex); 424 - return r; 426 + return rv; 425 427 } 426 428 427 429 static struct drbd_conf *ensure_mdev(int minor, int create) ··· 526 528 } 527 529 } 528 530 531 + /* input size is expected to be in KB */ 529 532 char *ppsize(char *buf, unsigned long long size) 530 533 { 531 - /* Needs 9 bytes at max. */ 534 + /* Needs 9 bytes at max including trailing NUL: 535 + * -1ULL ==> "16384 EB" */ 532 536 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 533 537 int base = 0; 534 - while (size >= 10000) { 538 + while (size >= 10000 && base < sizeof(units)-1) { 535 539 /* shift + round */ 536 540 size = (size >> 10) + !!(size & (1<<9)); 537 541 base++; 538 542 } 539 - sprintf(buf, "%lu %cB", (long)size, units[base]); 543 + sprintf(buf, "%u %cB", (unsigned)size, units[base]); 540 544 541 545 return buf; 542 546 } ··· 642 642 || prev_size != mdev->ldev->md.md_size_sect; 643 643 644 644 if (la_size_changed || md_moved) { 645 + int err; 646 + 645 647 drbd_al_shrink(mdev); /* All extents inactive. */ 646 648 dev_info(DEV, "Writing the whole bitmap, %s\n", 647 649 la_size_changed && md_moved ? "size changed and md moved" : 648 650 la_size_changed ? "size changed" : "md moved"); 649 - rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! 
*/ 651 + /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 652 + err = drbd_bitmap_io(mdev, &drbd_bm_write, 653 + "size changed", BM_LOCKED_MASK); 654 + if (err) { 655 + rv = dev_size_error; 656 + goto out; 657 + } 650 658 drbd_md_mark_dirty(mdev); 651 659 } 652 660 ··· 773 765 return 0; 774 766 } 775 767 776 - void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) 768 + void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) 777 769 { 778 770 struct request_queue * const q = mdev->rq_queue; 779 771 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 780 772 int max_segments = mdev->ldev->dc.max_bio_bvecs; 773 + int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); 781 774 782 - max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); 783 - 784 - blk_queue_max_hw_sectors(q, max_seg_s >> 9); 785 - blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 786 - blk_queue_max_segment_size(q, max_seg_s); 787 775 blk_queue_logical_block_size(q, 512); 788 - blk_queue_segment_boundary(q, PAGE_SIZE-1); 789 - blk_stack_limits(&q->limits, &b->limits, 0); 776 + blk_queue_max_hw_sectors(q, max_hw_sectors); 777 + /* This is the workaround for "bio would need to, but cannot, be split" */ 778 + blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 779 + blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 780 + blk_queue_stack_limits(q, b); 790 781 791 - dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); 782 + dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); 792 783 793 784 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 794 785 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", ··· 857 850 static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 858 851 struct drbd_nl_cfg_reply *reply) 859 852 { 860 - enum drbd_ret_codes retcode; 853 + enum drbd_ret_code retcode; 861 854 enum determine_dev_size dd; 862 855 sector_t max_possible_sectors; 863 856 sector_t min_md_device_sectors; ··· 865 858 struct block_device *bdev; 866 859 struct lru_cache *resync_lru = NULL; 867 860 union drbd_state ns, os; 868 - unsigned int max_seg_s; 869 - int rv; 861 + unsigned int max_bio_size; 862 + enum drbd_state_rv rv; 870 863 int cp_discovered = 0; 871 864 int logical_block_size; 872 865 ··· 1012 1005 /* and for any other previously queued work */ 1013 1006 drbd_flush_workqueue(mdev); 1014 1007 1015 - retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 1008 + rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 1009 + retcode = rv; /* FIXME: Type mismatch. */ 1016 1010 drbd_resume_io(mdev); 1017 - if (retcode < SS_SUCCESS) 1011 + if (rv < SS_SUCCESS) 1018 1012 goto fail; 1019 1013 1020 1014 if (!get_ldev_if_state(mdev, D_ATTACHING)) ··· 1117 1109 mdev->read_cnt = 0; 1118 1110 mdev->writ_cnt = 0; 1119 1111 1120 - max_seg_s = DRBD_MAX_SEGMENT_SIZE; 1112 + max_bio_size = DRBD_MAX_BIO_SIZE; 1121 1113 if (mdev->state.conn == C_CONNECTED) { 1122 1114 /* We are Primary, Connected, and now attach a new local 1123 1115 * backing store. We must not increase the user visible maximum 1124 1116 * bio size on this device to something the peer may not be 1125 1117 * able to handle. 
*/ 1126 1118 if (mdev->agreed_pro_version < 94) 1127 - max_seg_s = queue_max_segment_size(mdev->rq_queue); 1119 + max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; 1128 1120 else if (mdev->agreed_pro_version == 94) 1129 - max_seg_s = DRBD_MAX_SIZE_H80_PACKET; 1121 + max_bio_size = DRBD_MAX_SIZE_H80_PACKET; 1130 1122 /* else: drbd 8.3.9 and later, stay with default */ 1131 1123 } 1132 1124 1133 - drbd_setup_queue_param(mdev, max_seg_s); 1125 + drbd_setup_queue_param(mdev, max_bio_size); 1134 1126 1135 1127 /* If I am currently not R_PRIMARY, 1136 1128 * but meta data primary indicator is set, ··· 1162 1154 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { 1163 1155 dev_info(DEV, "Assuming that all blocks are out of sync " 1164 1156 "(aka FullSync)\n"); 1165 - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { 1157 + if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, 1158 + "set_n_write from attaching", BM_LOCKED_MASK)) { 1166 1159 retcode = ERR_IO_MD_DISK; 1167 1160 goto force_diskless_dec; 1168 1161 } 1169 1162 } else { 1170 - if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { 1163 + if (drbd_bitmap_io(mdev, &drbd_bm_read, 1164 + "read from attaching", BM_LOCKED_MASK) < 0) { 1171 1165 retcode = ERR_IO_MD_DISK; 1172 1166 goto force_diskless_dec; 1173 1167 } ··· 1177 1167 1178 1168 if (cp_discovered) { 1179 1169 drbd_al_apply_to_bm(mdev); 1180 - drbd_al_to_on_disk_bm(mdev); 1170 + if (drbd_bitmap_io(mdev, &drbd_bm_write, 1171 + "crashed primary apply AL", BM_LOCKED_MASK)) { 1172 + retcode = ERR_IO_MD_DISK; 1173 + goto force_diskless_dec; 1174 + } 1181 1175 } 1182 1176 1183 1177 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) ··· 1293 1279 struct drbd_nl_cfg_reply *reply) 1294 1280 { 1295 1281 int i, ns; 1296 - enum drbd_ret_codes retcode; 1282 + enum drbd_ret_code retcode; 1297 1283 struct net_conf *new_conf = NULL; 1298 1284 struct crypto_hash *tfm = NULL; 1299 1285 struct crypto_hash *integrity_w_tfm = NULL; ··· 1338 1324 new_conf->wire_protocol = DRBD_PROT_C; 1339 1325 new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; 1340 1326 new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; 1327 + new_conf->on_congestion = DRBD_ON_CONGESTION_DEF; 1328 + new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF; 1341 1329 1342 1330 if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { 1343 1331 retcode = ERR_MANDATORY_TAG; ··· 1359 1343 retcode = ERR_STONITH_AND_PROT_A; 1360 1344 goto fail; 1361 1345 } 1346 + } 1347 + 1348 + if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) { 1349 + retcode = ERR_CONG_NOT_PROTO_A; 1350 + goto fail; 1362 1351 } 1363 1352 1364 1353 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { ··· 1546 1525 struct drbd_nl_cfg_reply *reply) 1547 1526 { 1548 1527 int retcode; 1528 + struct disconnect dc; 1529 + 1530 + memset(&dc, 0, sizeof(struct disconnect)); 1531 + if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) { 1532 + retcode = ERR_MANDATORY_TAG; 1533 + goto fail; 1534 + } 1535 + 1536 + if (dc.force) { 1537 + spin_lock_irq(&mdev->req_lock); 1538 + if (mdev->state.conn >= C_WF_CONNECTION) 1539 + _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL); 1540 + spin_unlock_irq(&mdev->req_lock); 1541 + goto done; 1542 + } 1549 1543 1550 1544 retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); 1551 1545 ··· 1878 1842 { 1879 1843 int retcode; 1880 1844 1845 + /* If there is still bitmap IO pending, probably because of a previous 1846 + * resync just being 
finished, wait for it before requesting a new resync. */ 1847 + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 1848 + 1881 1849 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1882 1850 1883 1851 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) ··· 1917 1877 { 1918 1878 int retcode; 1919 1879 1880 + /* If there is still bitmap IO pending, probably because of a previous 1881 + * resync just being finished, wait for it before requesting a new resync. */ 1882 + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 1883 + 1920 1884 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); 1921 1885 1922 1886 if (retcode < SS_SUCCESS) { ··· 1929 1885 into a full resync. */ 1930 1886 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); 1931 1887 if (retcode >= SS_SUCCESS) { 1932 - /* open coded drbd_bitmap_io() */ 1933 1888 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, 1934 - "set_n_write from invalidate_peer")) 1889 + "set_n_write from invalidate_peer", 1890 + BM_LOCKED_SET_ALLOWED)) 1935 1891 retcode = ERR_IO_MD_DISK; 1936 1892 } 1937 1893 } else ··· 1958 1914 struct drbd_nl_cfg_reply *reply) 1959 1915 { 1960 1916 int retcode = NO_ERROR; 1917 + union drbd_state s; 1961 1918 1962 - if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) 1963 - retcode = ERR_PAUSE_IS_CLEAR; 1919 + if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { 1920 + s = mdev->state; 1921 + if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { 1922 + retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : 1923 + s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; 1924 + } else { 1925 + retcode = ERR_PAUSE_IS_CLEAR; 1926 + } 1927 + } 1964 1928 1965 1929 reply->ret_code = retcode; 1966 1930 return 0; ··· 2106 2054 reply->ret_code = ERR_MANDATORY_TAG; 2107 2055 return 0; 2108 2056 } 2057 + 2058 + /* If there is still bitmap IO pending, e.g. previous resync or verify 2059 + * just being finished, wait for it before requesting a new resync. 
*/ 2060 + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 2061 + 2109 2062 /* w_make_ov_request expects position to be aligned */ 2110 2063 mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; 2111 2064 reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); ··· 2154 2097 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ 2155 2098 2156 2099 if (args.clear_bm) { 2157 - err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); 2100 + err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 2101 + "clear_n_write from new_c_uuid", BM_LOCKED_MASK); 2158 2102 if (err) { 2159 2103 dev_err(DEV, "Writing bitmap failed with %d\n",err); 2160 2104 retcode = ERR_IO_MD_DISK; ··· 2163 2105 if (skip_initial_sync) { 2164 2106 drbd_send_uuids_skip_initial_sync(mdev); 2165 2107 _drbd_uuid_set(mdev, UI_BITMAP, 0); 2108 + drbd_print_uuids(mdev, "cleared bitmap UUID"); 2166 2109 spin_lock_irq(&mdev->req_lock); 2167 2110 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 2168 2111 CS_VERBOSE, NULL); ··· 2248 2189 goto fail; 2249 2190 } 2250 2191 2251 - if (nlp->packet_type >= P_nl_after_last_packet) { 2192 + if (nlp->packet_type >= P_nl_after_last_packet || 2193 + nlp->packet_type == P_return_code_only) { 2252 2194 retcode = ERR_PACKET_NR; 2253 2195 goto fail; 2254 2196 } ··· 2265 2205 reply_size += cm->reply_body_size; 2266 2206 2267 2207 /* allocation not in the IO path, cqueue thread context */ 2268 - cn_reply = kmalloc(reply_size, GFP_KERNEL); 2208 + cn_reply = kzalloc(reply_size, GFP_KERNEL); 2269 2209 if (!cn_reply) { 2270 2210 retcode = ERR_NOMEM; 2271 2211 goto fail; ··· 2273 2213 reply = (struct drbd_nl_cfg_reply *) cn_reply->data; 2274 2214 2275 2215 reply->packet_type = 2276 - cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; 2216 + cm->reply_body_size ? nlp->packet_type : P_return_code_only; 2277 2217 reply->minor = nlp->drbd_minor; 2278 2218 reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ 2279 2219 /* reply->tag_list; might be modified by cm->function. */ ··· 2436 2376 /* receiver thread context, which is not in the writeout path (of this node), 2437 2377 * but may be in the writeout path of the _other_ node. 2438 2378 * GFP_NOIO to avoid potential "distributed deadlock". 
*/ 2439 - cn_reply = kmalloc( 2379 + cn_reply = kzalloc( 2440 2380 sizeof(struct cn_msg)+ 2441 2381 sizeof(struct drbd_nl_cfg_reply)+ 2442 2382 sizeof(struct dump_ee_tag_len_struct)+ ··· 2458 2398 tl = tl_add_int(tl, T_ee_sector, &e->sector); 2459 2399 tl = tl_add_int(tl, T_ee_block_id, &e->block_id); 2460 2400 2401 + /* dump the first 32k */ 2402 + len = min_t(unsigned, e->size, 32 << 10); 2461 2403 put_unaligned(T_ee_data, tl++); 2462 - put_unaligned(e->size, tl++); 2404 + put_unaligned(len, tl++); 2463 2405 2464 - len = e->size; 2465 2406 page = e->pages; 2466 2407 page_chain_for_each(page) { 2467 2408 void *d = kmap_atomic(page, KM_USER0); ··· 2471 2410 kunmap_atomic(d, KM_USER0); 2472 2411 tl = (unsigned short*)((char*)tl + l); 2473 2412 len -= l; 2413 + if (len == 0) 2414 + break; 2474 2415 } 2475 2416 put_unaligned(TT_END, tl++); /* Close the tag list */ 2476 2417 ··· 2571 2508 (struct drbd_nl_cfg_reply *)cn_reply->data; 2572 2509 int rr; 2573 2510 2511 + memset(buffer, 0, sizeof(buffer)); 2574 2512 cn_reply->id = req->id; 2575 2513 2576 2514 cn_reply->seq = req->seq; ··· 2579 2515 cn_reply->len = sizeof(struct drbd_nl_cfg_reply); 2580 2516 cn_reply->flags = 0; 2581 2517 2518 + reply->packet_type = P_return_code_only; 2582 2519 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2583 2520 reply->ret_code = ret_code; 2584 2521
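A note on the ppsize() hunk in the drbd_nl.c diff above: the helper now documents that its input is in KB, bounds the unit index so the 64-bit worst case prints as "16384 EB" in at most 9 bytes including the trailing NUL, and keeps the shift-and-round step. A standalone sketch of the same conversion, runnable in userspace; the sample values in main() are illustrative:

#include <stdio.h>

/* Pretty-print a size given in KiB: shift by 10 and round while the value
 * is still >= 10000 and a larger unit exists, then append the unit letter. */
static char *ppsize(char *buf, unsigned long long size /* KB */)
{
        static const char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;

        while (size >= 10000 && base < (int)sizeof(units) - 1) {
                size = (size >> 10) + !!(size & (1 << 9)); /* shift + round */
                base++;
        }
        sprintf(buf, "%u %cB", (unsigned)size, units[base]);
        return buf;
}

int main(void)
{
        char buf[10];   /* "16384 EB" plus NUL fits in 9 bytes */

        printf("%s\n", ppsize(buf, 4096));                  /* "4096 KB"  */
        printf("%s\n", ppsize(buf, 1048576));               /* "1024 MB"  */
        printf("%s\n", ppsize(buf, 0xFFFFFFFFFFFFFFFFULL)); /* "16384 EB" */
        return 0;
}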
+81 -33
drivers/block/drbd/drbd_proc.c
··· 34 34 #include "drbd_int.h" 35 35 36 36 static int drbd_proc_open(struct inode *inode, struct file *file); 37 + static int drbd_proc_release(struct inode *inode, struct file *file); 37 38 38 39 39 40 struct proc_dir_entry *drbd_proc; ··· 43 42 .open = drbd_proc_open, 44 43 .read = seq_read, 45 44 .llseek = seq_lseek, 46 - .release = single_release, 45 + .release = drbd_proc_release, 47 46 }; 48 47 48 + void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) 49 + { 50 + /* v is in kB/sec. We don't expect TiByte/sec yet. */ 51 + if (unlikely(v >= 1000000)) { 52 + /* cool: > GiByte/s */ 53 + seq_printf(seq, "%ld,", v / 1000000); 54 + v /= 1000000; 55 + seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000); 56 + } else if (likely(v >= 1000)) 57 + seq_printf(seq, "%ld,%03ld", v/1000, v % 1000); 58 + else 59 + seq_printf(seq, "%ld", v); 60 + } 49 61 50 62 /*lge 51 63 * progress bars shamelessly adapted from driver/md/md.c ··· 85 71 seq_printf(seq, "."); 86 72 seq_printf(seq, "] "); 87 73 88 - seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); 89 - /* if more than 1 GB display in MB */ 90 - if (mdev->rs_total > 0x100000L) 91 - seq_printf(seq, "(%lu/%lu)M\n\t", 74 + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 75 + seq_printf(seq, "verified:"); 76 + else 77 + seq_printf(seq, "sync'ed:"); 78 + seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); 79 + 80 + /* if more than a few GB, display in MB */ 81 + if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) 82 + seq_printf(seq, "(%lu/%lu)M", 92 83 (unsigned long) Bit2KB(rs_left >> 10), 93 84 (unsigned long) Bit2KB(mdev->rs_total >> 10)); 94 85 else ··· 113 94 /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is 114 95 * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at 115 96 * least DRBD_SYNC_MARK_STEP time before it will be modified. 
*/ 97 + /* ------------------------ ~18s average ------------------------ */ 116 98 i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; 117 99 dt = (jiffies - mdev->rs_mark_time[i]) / HZ; 118 100 if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) ··· 127 107 seq_printf(seq, "finish: %lu:%02lu:%02lu", 128 108 rt / 3600, (rt % 3600) / 60, rt % 60); 129 109 130 - /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */ 131 110 dbdt = Bit2KB(db/dt); 132 - if (dbdt > 1000) 133 - seq_printf(seq, " speed: %ld,%03ld", 134 - dbdt/1000, dbdt % 1000); 135 - else 136 - seq_printf(seq, " speed: %ld", dbdt); 111 + seq_printf(seq, " speed: "); 112 + seq_printf_with_thousands_grouping(seq, dbdt); 113 + seq_printf(seq, " ("); 114 + /* ------------------------- ~3s average ------------------------ */ 115 + if (proc_details >= 1) { 116 + /* this is what drbd_rs_should_slow_down() uses */ 117 + i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 118 + dt = (jiffies - mdev->rs_mark_time[i]) / HZ; 119 + if (!dt) 120 + dt++; 121 + db = mdev->rs_mark_left[i] - rs_left; 122 + dbdt = Bit2KB(db/dt); 123 + seq_printf_with_thousands_grouping(seq, dbdt); 124 + seq_printf(seq, " -- "); 125 + } 137 126 127 + /* --------------------- long term average ---------------------- */ 138 128 /* mean speed since syncer started 139 129 * we do account for PausedSync periods */ 140 130 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; ··· 152 122 dt = 1; 153 123 db = mdev->rs_total - rs_left; 154 124 dbdt = Bit2KB(db/dt); 155 - if (dbdt > 1000) 156 - seq_printf(seq, " (%ld,%03ld)", 157 - dbdt/1000, dbdt % 1000); 158 - else 159 - seq_printf(seq, " (%ld)", dbdt); 125 + seq_printf_with_thousands_grouping(seq, dbdt); 126 + seq_printf(seq, ")"); 160 127 161 - if (mdev->state.conn == C_SYNC_TARGET) { 162 - if (mdev->c_sync_rate > 1000) 163 - seq_printf(seq, " want: %d,%03d", 164 - mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); 165 - else 166 - seq_printf(seq, " want: %d", mdev->c_sync_rate); 128 + if (mdev->state.conn == C_SYNC_TARGET || 129 + mdev->state.conn == C_VERIFY_S) { 130 + seq_printf(seq, " want: "); 131 + seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate); 167 132 } 168 133 seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); 134 + 135 + if (proc_details >= 1) { 136 + /* 64 bit: 137 + * we convert to sectors in the display below. */ 138 + unsigned long bm_bits = drbd_bm_bits(mdev); 139 + unsigned long bit_pos; 140 + if (mdev->state.conn == C_VERIFY_S || 141 + mdev->state.conn == C_VERIFY_T) 142 + bit_pos = bm_bits - mdev->ov_left; 143 + else 144 + bit_pos = mdev->bm_resync_fo; 145 + /* Total sectors may be slightly off for oddly 146 + * sized devices. So what. 
*/ 147 + seq_printf(seq, 148 + "\t%3d%% sector pos: %llu/%llu\n", 149 + (int)(bit_pos / (bm_bits/100+1)), 150 + (unsigned long long)bit_pos * BM_SECT_PER_BIT, 151 + (unsigned long long)bm_bits * BM_SECT_PER_BIT); 152 + } 169 153 } 170 154 171 155 static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) ··· 276 232 mdev->epochs, 277 233 write_ordering_chars[mdev->write_ordering] 278 234 ); 279 - seq_printf(seq, " oos:%lu\n", 280 - Bit2KB(drbd_bm_total_weight(mdev))); 235 + seq_printf(seq, " oos:%llu\n", 236 + Bit2KB((unsigned long long) 237 + drbd_bm_total_weight(mdev))); 281 238 } 282 239 if (mdev->state.conn == C_SYNC_SOURCE || 283 - mdev->state.conn == C_SYNC_TARGET) 240 + mdev->state.conn == C_SYNC_TARGET || 241 + mdev->state.conn == C_VERIFY_S || 242 + mdev->state.conn == C_VERIFY_T) 284 243 drbd_syncer_progress(mdev, seq); 285 - 286 - if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 287 - seq_printf(seq, "\t%3d%% %lu/%lu\n", 288 - (int)((mdev->rs_total-mdev->ov_left) / 289 - (mdev->rs_total/100+1)), 290 - mdev->rs_total - mdev->ov_left, 291 - mdev->rs_total); 292 244 293 245 if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) { 294 246 lc_seq_printf_stats(seq, mdev->resync); ··· 305 265 306 266 static int drbd_proc_open(struct inode *inode, struct file *file) 307 267 { 308 - return single_open(file, drbd_seq_show, PDE(inode)->data); 268 + if (try_module_get(THIS_MODULE)) 269 + return single_open(file, drbd_seq_show, PDE(inode)->data); 270 + return -ENODEV; 271 + } 272 + 273 + static int drbd_proc_release(struct inode *inode, struct file *file) 274 + { 275 + module_put(THIS_MODULE); 276 + return single_release(inode, file); 309 277 } 310 278 311 279 /* PROC FS stuff end */
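A note on the drbd_proc.c hunks above: the syncer speed reporting is factored into seq_printf_with_thousands_grouping() and reused for the ~18s average, the ~3s average, the long-term average, and the wanted rate. A userspace sketch of the same grouping idea, assuming the value is a rate in kB/s; it keeps the low-order digits with the modulo operator, and the function name and output stream here are illustrative:

#include <stdio.h>

/* Print v (a rate in kB/s) with thousands grouping, mirroring the intent of
 * the new seq_printf_with_thousands_grouping() helper in /proc/drbd. */
static void print_with_thousands_grouping(FILE *out, long v)
{
        if (v >= 1000000)
                fprintf(out, "%ld,%03ld,%03ld",
                        v / 1000000, (v / 1000) % 1000, v % 1000);
        else if (v >= 1000)
                fprintf(out, "%ld,%03ld", v / 1000, v % 1000);
        else
                fprintf(out, "%ld", v);
}

int main(void)
{
        long samples[] = { 512, 48231, 1234567 };
        unsigned i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                print_with_thousands_grouping(stdout, samples[i]);
                printf(" K/sec\n");     /* unit string used in /proc/drbd */
        }
        return 0;
}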
+383 -225
drivers/block/drbd/drbd_receiver.c
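A note ahead of the drbd_receiver.c hunks below: write_flags_to_bio() becomes wire_flags_to_bio(), the agreed_pro_version < 95 special case is dropped, the added comment points at bio_flags_to_wire() as its counterpart, and DP_FLUSH now maps to REQ_FLUSH instead of REQ_FUA. A standalone sketch of that bit-by-bit mapping; the flag values below are illustrative stand-ins, not the real DRBD or block-layer constants:

#include <stdio.h>

/* Illustrative stand-ins for the on-the-wire DP_* flags and the in-kernel
 * REQ_* bio flags; the real values live in the DRBD and block headers. */
#define DP_RW_SYNC  (1u << 0)
#define DP_FUA      (1u << 1)
#define DP_FLUSH    (1u << 2)
#define DP_DISCARD  (1u << 3)

#define REQ_SYNC    (1ul << 0)
#define REQ_FUA     (1ul << 1)
#define REQ_FLUSH   (1ul << 2)
#define REQ_DISCARD (1ul << 3)

/* Map wire flags to bio flags bit by bit, as wire_flags_to_bio() now does;
 * the point of the hunk is DP_FLUSH -> REQ_FLUSH, not REQ_FUA. */
static unsigned long wire_flags_to_bio(unsigned int dpf)
{
        return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
               (dpf & DP_FUA ? REQ_FUA : 0) |
               (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
               (dpf & DP_DISCARD ? REQ_DISCARD : 0);
}

int main(void)
{
        printf("0x%lx\n", wire_flags_to_bio(DP_RW_SYNC | DP_FLUSH)); /* prints 0x5 */
        return 0;
}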
··· 277 277 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; 278 278 int i; 279 279 280 - if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) 280 + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) 281 281 i = page_chain_free(page); 282 282 else { 283 283 struct page *tmp; ··· 319 319 struct page *page; 320 320 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; 321 321 322 - if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE)) 322 + if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) 323 323 return NULL; 324 324 325 325 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); ··· 725 725 char tb[4]; 726 726 727 727 if (!*sock) 728 - return FALSE; 728 + return false; 729 729 730 730 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); 731 731 732 732 if (rr > 0 || rr == -EAGAIN) { 733 - return TRUE; 733 + return true; 734 734 } else { 735 735 sock_release(*sock); 736 736 *sock = NULL; 737 - return FALSE; 737 + return false; 738 738 } 739 739 } 740 740 ··· 768 768 if (s || ++try >= 3) 769 769 break; 770 770 /* give the other side time to call bind() & listen() */ 771 - __set_current_state(TASK_INTERRUPTIBLE); 772 - schedule_timeout(HZ / 10); 771 + schedule_timeout_interruptible(HZ / 10); 773 772 } 774 773 775 774 if (s) { ··· 787 788 } 788 789 789 790 if (sock && msock) { 790 - __set_current_state(TASK_INTERRUPTIBLE); 791 - schedule_timeout(HZ / 10); 791 + schedule_timeout_interruptible(HZ / 10); 792 792 ok = drbd_socket_okay(mdev, &sock); 793 793 ok = drbd_socket_okay(mdev, &msock) && ok; 794 794 if (ok) ··· 904 906 put_ldev(mdev); 905 907 } 906 908 907 - if (!drbd_send_protocol(mdev)) 909 + if (drbd_send_protocol(mdev) == -1) 908 910 return -1; 909 911 drbd_send_sync_param(mdev, &mdev->sync_conf); 910 912 drbd_send_sizes(mdev, 0, 0); ··· 912 914 drbd_send_state(mdev); 913 915 clear_bit(USE_DEGR_WFC_T, &mdev->flags); 914 916 clear_bit(RESIZE_PENDING, &mdev->flags); 917 + mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */ 915 918 916 919 return 1; 917 920 ··· 931 932 932 933 r = drbd_recv(mdev, h, sizeof(*h)); 933 934 if (unlikely(r != sizeof(*h))) { 934 - dev_err(DEV, "short read expecting header on sock: r=%d\n", r); 935 - return FALSE; 935 + if (!signal_pending(current)) 936 + dev_warn(DEV, "short read expecting header on sock: r=%d\n", r); 937 + return false; 936 938 } 937 939 938 940 if (likely(h->h80.magic == BE_DRBD_MAGIC)) { ··· 947 947 be32_to_cpu(h->h80.magic), 948 948 be16_to_cpu(h->h80.command), 949 949 be16_to_cpu(h->h80.length)); 950 - return FALSE; 950 + return false; 951 951 } 952 952 mdev->last_received = jiffies; 953 953 954 - return TRUE; 954 + return true; 955 955 } 956 956 957 957 static void drbd_flush(struct drbd_conf *mdev) ··· 1074 1074 * @mdev: DRBD device. 1075 1075 * @e: epoch entry 1076 1076 * @rw: flag field, see bio->bi_rw 1077 + * 1078 + * May spread the pages to multiple bios, 1079 + * depending on bio_add_page restrictions. 1080 + * 1081 + * Returns 0 if all bios have been submitted, 1082 + * -ENOMEM if we could not allocate enough bios, 1083 + * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a 1084 + * single page to an empty bio (which should never happen and likely indicates 1085 + * that the lower level IO stack is in some way broken). This has been observed 1086 + * on certain Xen deployments. 1077 1087 */ 1078 1088 /* TODO allocate from our own bio_set. 
*/ 1079 1089 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, ··· 1096 1086 unsigned ds = e->size; 1097 1087 unsigned n_bios = 0; 1098 1088 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; 1089 + int err = -ENOMEM; 1099 1090 1100 1091 /* In most cases, we will only need one bio. But in case the lower 1101 1092 * level restrictions happen to be different at this offset on this ··· 1122 1111 page_chain_for_each(page) { 1123 1112 unsigned len = min_t(unsigned, ds, PAGE_SIZE); 1124 1113 if (!bio_add_page(bio, page, len, 0)) { 1125 - /* a single page must always be possible! */ 1126 - BUG_ON(bio->bi_vcnt == 0); 1114 + /* A single page must always be possible! 1115 + * But in case it fails anyways, 1116 + * we deal with it, and complain (below). */ 1117 + if (bio->bi_vcnt == 0) { 1118 + dev_err(DEV, 1119 + "bio_add_page failed for len=%u, " 1120 + "bi_vcnt=0 (bi_sector=%llu)\n", 1121 + len, (unsigned long long)bio->bi_sector); 1122 + err = -ENOSPC; 1123 + goto fail; 1124 + } 1127 1125 goto next_bio; 1128 1126 } 1129 1127 ds -= len; ··· 1158 1138 bios = bios->bi_next; 1159 1139 bio_put(bio); 1160 1140 } 1161 - return -ENOMEM; 1141 + return err; 1162 1142 } 1163 1143 1164 1144 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) ··· 1180 1160 switch (mdev->write_ordering) { 1181 1161 case WO_none: 1182 1162 if (rv == FE_RECYCLED) 1183 - return TRUE; 1163 + return true; 1184 1164 1185 1165 /* receiver context, in the writeout path of the other node. 1186 1166 * avoid potential distributed deadlock */ ··· 1208 1188 D_ASSERT(atomic_read(&epoch->active) == 0); 1209 1189 D_ASSERT(epoch->flags == 0); 1210 1190 1211 - return TRUE; 1191 + return true; 1212 1192 default: 1213 1193 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); 1214 - return FALSE; 1194 + return false; 1215 1195 } 1216 1196 1217 1197 epoch->flags = 0; ··· 1229 1209 } 1230 1210 spin_unlock(&mdev->epoch_lock); 1231 1211 1232 - return TRUE; 1212 + return true; 1233 1213 } 1234 1214 1235 1215 /* used from receive_RSDataReply (recv_resync_read) ··· 1251 1231 if (dgs) { 1252 1232 rr = drbd_recv(mdev, dig_in, dgs); 1253 1233 if (rr != dgs) { 1254 - dev_warn(DEV, "short read receiving data digest: read %d expected %d\n", 1255 - rr, dgs); 1234 + if (!signal_pending(current)) 1235 + dev_warn(DEV, 1236 + "short read receiving data digest: read %d expected %d\n", 1237 + rr, dgs); 1256 1238 return NULL; 1257 1239 } 1258 1240 } 1259 1241 1260 1242 data_size -= dgs; 1261 1243 1244 + ERR_IF(data_size == 0) return NULL; 1262 1245 ERR_IF(data_size & 0x1ff) return NULL; 1263 - ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; 1246 + ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; 1264 1247 1265 1248 /* even though we trust out peer, 1266 1249 * we sometimes have to double check. 
*/ 1267 1250 if (sector + (data_size>>9) > capacity) { 1268 - dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", 1251 + dev_err(DEV, "request from peer beyond end of local disk: " 1252 + "capacity: %llus < sector: %llus + size: %u\n", 1269 1253 (unsigned long long)capacity, 1270 1254 (unsigned long long)sector, data_size); 1271 1255 return NULL; ··· 1288 1264 unsigned len = min_t(int, ds, PAGE_SIZE); 1289 1265 data = kmap(page); 1290 1266 rr = drbd_recv(mdev, data, len); 1291 - if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { 1267 + if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { 1292 1268 dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 1293 1269 data[0] = data[0] ^ (unsigned long)-1; 1294 1270 } 1295 1271 kunmap(page); 1296 1272 if (rr != len) { 1297 1273 drbd_free_ee(mdev, e); 1298 - dev_warn(DEV, "short read receiving data: read %d expected %d\n", 1299 - rr, len); 1274 + if (!signal_pending(current)) 1275 + dev_warn(DEV, "short read receiving data: read %d expected %d\n", 1276 + rr, len); 1300 1277 return NULL; 1301 1278 } 1302 1279 ds -= rr; ··· 1306 1281 if (dgs) { 1307 1282 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); 1308 1283 if (memcmp(dig_in, dig_vv, dgs)) { 1309 - dev_err(DEV, "Digest integrity check FAILED.\n"); 1284 + dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1285 + (unsigned long long)sector, data_size); 1310 1286 drbd_bcast_ee(mdev, "digest failed", 1311 1287 dgs, dig_in, dig_vv, e); 1312 1288 drbd_free_ee(mdev, e); ··· 1328 1302 void *data; 1329 1303 1330 1304 if (!data_size) 1331 - return TRUE; 1305 + return true; 1332 1306 1333 1307 page = drbd_pp_alloc(mdev, 1, 1); 1334 1308 ··· 1337 1311 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); 1338 1312 if (rr != min_t(int, data_size, PAGE_SIZE)) { 1339 1313 rv = 0; 1340 - dev_warn(DEV, "short read receiving data: read %d expected %d\n", 1341 - rr, min_t(int, data_size, PAGE_SIZE)); 1314 + if (!signal_pending(current)) 1315 + dev_warn(DEV, 1316 + "short read receiving data: read %d expected %d\n", 1317 + rr, min_t(int, data_size, PAGE_SIZE)); 1342 1318 break; 1343 1319 } 1344 1320 data_size -= rr; ··· 1365 1337 if (dgs) { 1366 1338 rr = drbd_recv(mdev, dig_in, dgs); 1367 1339 if (rr != dgs) { 1368 - dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", 1369 - rr, dgs); 1340 + if (!signal_pending(current)) 1341 + dev_warn(DEV, 1342 + "short read receiving data reply digest: read %d expected %d\n", 1343 + rr, dgs); 1370 1344 return 0; 1371 1345 } 1372 1346 } ··· 1389 1359 expect); 1390 1360 kunmap(bvec->bv_page); 1391 1361 if (rr != expect) { 1392 - dev_warn(DEV, "short read receiving data reply: " 1393 - "read %d expected %d\n", 1394 - rr, expect); 1362 + if (!signal_pending(current)) 1363 + dev_warn(DEV, "short read receiving data reply: " 1364 + "read %d expected %d\n", 1365 + rr, expect); 1395 1366 return 0; 1396 1367 } 1397 1368 data_size -= rr; ··· 1456 1425 1457 1426 atomic_add(data_size >> 9, &mdev->rs_sect_ev); 1458 1427 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 1459 - return TRUE; 1428 + return true; 1460 1429 1461 - /* drbd_submit_ee currently fails for one reason only: 1462 - * not being able to allocate enough bios. 1463 - * Is dropping the connection going to help? 
*/ 1430 + /* don't care for the reason here */ 1431 + dev_err(DEV, "submit failed, triggering re-connect\n"); 1464 1432 spin_lock_irq(&mdev->req_lock); 1465 1433 list_del(&e->w.list); 1466 1434 spin_unlock_irq(&mdev->req_lock); ··· 1467 1437 drbd_free_ee(mdev, e); 1468 1438 fail: 1469 1439 put_ldev(mdev); 1470 - return FALSE; 1440 + return false; 1471 1441 } 1472 1442 1473 1443 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) ··· 1484 1454 spin_unlock_irq(&mdev->req_lock); 1485 1455 if (unlikely(!req)) { 1486 1456 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); 1487 - return FALSE; 1457 + return false; 1488 1458 } 1489 1459 1490 1460 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid ··· 1641 1611 return ret; 1642 1612 } 1643 1613 1644 - static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) 1614 + /* see also bio_flags_to_wire() 1615 + * DRBD_REQ_*, because we need to semantically map the flags to data packet 1616 + * flags and back. We may replicate to other kernel versions. */ 1617 + static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) 1645 1618 { 1646 - if (mdev->agreed_pro_version >= 95) 1647 - return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 1648 - (dpf & DP_FUA ? REQ_FUA : 0) | 1649 - (dpf & DP_FLUSH ? REQ_FUA : 0) | 1650 - (dpf & DP_DISCARD ? REQ_DISCARD : 0); 1651 - else 1652 - return dpf & DP_RW_SYNC ? REQ_SYNC : 0; 1619 + return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 1620 + (dpf & DP_FUA ? REQ_FUA : 0) | 1621 + (dpf & DP_FLUSH ? REQ_FLUSH : 0) | 1622 + (dpf & DP_DISCARD ? REQ_DISCARD : 0); 1653 1623 } 1654 1624 1655 1625 /* mirrored write */ ··· 1662 1632 u32 dp_flags; 1663 1633 1664 1634 if (!get_ldev(mdev)) { 1665 - if (__ratelimit(&drbd_ratelimit_state)) 1666 - dev_err(DEV, "Can not write mirrored data block " 1667 - "to local disk.\n"); 1668 1635 spin_lock(&mdev->peer_seq_lock); 1669 1636 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) 1670 1637 mdev->peer_seq++; ··· 1681 1654 e = read_in_block(mdev, p->block_id, sector, data_size); 1682 1655 if (!e) { 1683 1656 put_ldev(mdev); 1684 - return FALSE; 1657 + return false; 1685 1658 } 1686 1659 1687 1660 e->w.cb = e_end_block; 1661 + 1662 + dp_flags = be32_to_cpu(p->dp_flags); 1663 + rw |= wire_flags_to_bio(mdev, dp_flags); 1664 + 1665 + if (dp_flags & DP_MAY_SET_IN_SYNC) 1666 + e->flags |= EE_MAY_SET_IN_SYNC; 1688 1667 1689 1668 spin_lock(&mdev->epoch_lock); 1690 1669 e->epoch = mdev->current_epoch; 1691 1670 atomic_inc(&e->epoch->epoch_size); 1692 1671 atomic_inc(&e->epoch->active); 1693 1672 spin_unlock(&mdev->epoch_lock); 1694 - 1695 - dp_flags = be32_to_cpu(p->dp_flags); 1696 - rw |= write_flags_to_bio(mdev, dp_flags); 1697 - 1698 - if (dp_flags & DP_MAY_SET_IN_SYNC) 1699 - e->flags |= EE_MAY_SET_IN_SYNC; 1700 1673 1701 1674 /* I'm the receiver, I do hold a net_cnt reference. */ 1702 1675 if (!mdev->net_conf->two_primaries) { ··· 1800 1773 put_ldev(mdev); 1801 1774 wake_asender(mdev); 1802 1775 finish_wait(&mdev->misc_wait, &wait); 1803 - return TRUE; 1776 + return true; 1804 1777 } 1805 1778 1806 1779 if (signal_pending(current)) { ··· 1856 1829 } 1857 1830 1858 1831 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) 1859 - return TRUE; 1832 + return true; 1860 1833 1861 - /* drbd_submit_ee currently fails for one reason only: 1862 - * not being able to allocate enough bios. 1863 - * Is dropping the connection going to help? 
*/ 1834 + /* don't care for the reason here */ 1835 + dev_err(DEV, "submit failed, triggering re-connect\n"); 1864 1836 spin_lock_irq(&mdev->req_lock); 1865 1837 list_del(&e->w.list); 1866 1838 hlist_del_init(&e->colision); ··· 1868 1842 drbd_al_complete_io(mdev, e->sector); 1869 1843 1870 1844 out_interrupted: 1871 - /* yes, the epoch_size now is imbalanced. 1872 - * but we drop the connection anyways, so we don't have a chance to 1873 - * receive a barrier... atomic_inc(&mdev->epoch_size); */ 1845 + drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); 1874 1846 put_ldev(mdev); 1875 1847 drbd_free_ee(mdev, e); 1876 - return FALSE; 1848 + return false; 1877 1849 } 1878 1850 1879 1851 /* We may throttle resync, if the lower device seems to be busy, ··· 1885 1861 * The current sync rate used here uses only the most recent two step marks, 1886 1862 * to have a short time average so we can react faster. 1887 1863 */ 1888 - int drbd_rs_should_slow_down(struct drbd_conf *mdev) 1864 + int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) 1889 1865 { 1890 1866 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; 1891 1867 unsigned long db, dt, dbdt; 1868 + struct lc_element *tmp; 1892 1869 int curr_events; 1893 1870 int throttle = 0; 1894 1871 ··· 1897 1872 if (mdev->sync_conf.c_min_rate == 0) 1898 1873 return 0; 1899 1874 1875 + spin_lock_irq(&mdev->al_lock); 1876 + tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); 1877 + if (tmp) { 1878 + struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); 1879 + if (test_bit(BME_PRIORITY, &bm_ext->flags)) { 1880 + spin_unlock_irq(&mdev->al_lock); 1881 + return 0; 1882 + } 1883 + /* Do not slow down if app IO is already waiting for this extent */ 1884 + } 1885 + spin_unlock_irq(&mdev->al_lock); 1886 + 1900 1887 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 1901 1888 (int)part_stat_read(&disk->part0, sectors[1]) - 1902 1889 atomic_read(&mdev->rs_sect_ev); 1890 + 1903 1891 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { 1904 1892 unsigned long rs_left; 1905 1893 int i; ··· 1921 1883 1922 1884 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, 1923 1885 * approx. 
*/ 1924 - i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS; 1925 - rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 1886 + i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 1887 + 1888 + if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 1889 + rs_left = mdev->ov_left; 1890 + else 1891 + rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 1926 1892 1927 1893 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; 1928 1894 if (!dt) ··· 1954 1912 sector = be64_to_cpu(p->sector); 1955 1913 size = be32_to_cpu(p->blksize); 1956 1914 1957 - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 1915 + if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 1958 1916 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1959 1917 (unsigned long long)sector, size); 1960 - return FALSE; 1918 + return false; 1961 1919 } 1962 1920 if (sector + (size>>9) > capacity) { 1963 1921 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1964 1922 (unsigned long long)sector, size); 1965 - return FALSE; 1923 + return false; 1966 1924 } 1967 1925 1968 1926 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { ··· 1999 1957 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); 2000 1958 if (!e) { 2001 1959 put_ldev(mdev); 2002 - return FALSE; 1960 + return false; 2003 1961 } 2004 1962 2005 1963 switch (cmd) { ··· 2012 1970 case P_RS_DATA_REQUEST: 2013 1971 e->w.cb = w_e_end_rsdata_req; 2014 1972 fault_type = DRBD_FAULT_RS_RD; 1973 + /* used in the sector offset progress display */ 1974 + mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); 2015 1975 break; 2016 1976 2017 1977 case P_OV_REPLY: ··· 2035 1991 if (cmd == P_CSUM_RS_REQUEST) { 2036 1992 D_ASSERT(mdev->agreed_pro_version >= 89); 2037 1993 e->w.cb = w_e_end_csum_rs_req; 1994 + /* used in the sector offset progress display */ 1995 + mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); 2038 1996 } else if (cmd == P_OV_REPLY) { 1997 + /* track progress, we may need to throttle */ 1998 + atomic_add(size >> 9, &mdev->rs_sect_in); 2039 1999 e->w.cb = w_e_end_ov_reply; 2040 2000 dec_rs_pending(mdev); 2041 2001 /* drbd_rs_begin_io done when we sent this request, ··· 2051 2003 case P_OV_REQUEST: 2052 2004 if (mdev->ov_start_sector == ~(sector_t)0 && 2053 2005 mdev->agreed_pro_version >= 90) { 2006 + unsigned long now = jiffies; 2007 + int i; 2054 2008 mdev->ov_start_sector = sector; 2055 2009 mdev->ov_position = sector; 2056 - mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector); 2010 + mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); 2011 + mdev->rs_total = mdev->ov_left; 2012 + for (i = 0; i < DRBD_SYNC_MARKS; i++) { 2013 + mdev->rs_mark_left[i] = mdev->ov_left; 2014 + mdev->rs_mark_time[i] = now; 2015 + } 2057 2016 dev_info(DEV, "Online Verify start sector: %llu\n", 2058 2017 (unsigned long long)sector); 2059 2018 } ··· 2097 2042 * we would also throttle its application reads. 2098 2043 * In that case, throttling is done on the SyncTarget only. 
2099 2044 */ 2100 - if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev)) 2101 - msleep(100); 2102 - if (drbd_rs_begin_io(mdev, e->sector)) 2045 + if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) 2046 + schedule_timeout_uninterruptible(HZ/10); 2047 + if (drbd_rs_begin_io(mdev, sector)) 2103 2048 goto out_free_e; 2104 2049 2105 2050 submit_for_resync: ··· 2112 2057 spin_unlock_irq(&mdev->req_lock); 2113 2058 2114 2059 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 2115 - return TRUE; 2060 + return true; 2116 2061 2117 - /* drbd_submit_ee currently fails for one reason only: 2118 - * not being able to allocate enough bios. 2119 - * Is dropping the connection going to help? */ 2062 + /* don't care for the reason here */ 2063 + dev_err(DEV, "submit failed, triggering re-connect\n"); 2120 2064 spin_lock_irq(&mdev->req_lock); 2121 2065 list_del(&e->w.list); 2122 2066 spin_unlock_irq(&mdev->req_lock); ··· 2124 2070 out_free_e: 2125 2071 put_ldev(mdev); 2126 2072 drbd_free_ee(mdev, e); 2127 - return FALSE; 2073 + return false; 2128 2074 } 2129 2075 2130 2076 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) ··· 2201 2147 2202 2148 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2203 2149 { 2204 - int self, peer, hg, rv = -100; 2205 - 2206 - self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2207 - peer = mdev->p_uuid[UI_BITMAP] & 1; 2150 + int hg, rv = -100; 2208 2151 2209 2152 switch (mdev->net_conf->after_sb_1p) { 2210 2153 case ASB_DISCARD_YOUNGER_PRI: ··· 2228 2177 case ASB_CALL_HELPER: 2229 2178 hg = drbd_asb_recover_0p(mdev); 2230 2179 if (hg == -1 && mdev->state.role == R_PRIMARY) { 2231 - self = drbd_set_role(mdev, R_SECONDARY, 0); 2180 + enum drbd_state_rv rv2; 2181 + 2182 + drbd_set_role(mdev, R_SECONDARY, 0); 2232 2183 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2233 2184 * we might be here in C_WF_REPORT_PARAMS which is transient. 2234 2185 * we do not need to wait for the after state change work either. */ 2235 - self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2236 - if (self != SS_SUCCESS) { 2186 + rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2187 + if (rv2 != SS_SUCCESS) { 2237 2188 drbd_khelper(mdev, "pri-lost-after-sb"); 2238 2189 } else { 2239 2190 dev_warn(DEV, "Successfully gave up primary role.\n"); ··· 2250 2197 2251 2198 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2252 2199 { 2253 - int self, peer, hg, rv = -100; 2254 - 2255 - self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2256 - peer = mdev->p_uuid[UI_BITMAP] & 1; 2200 + int hg, rv = -100; 2257 2201 2258 2202 switch (mdev->net_conf->after_sb_2p) { 2259 2203 case ASB_DISCARD_YOUNGER_PRI: ··· 2270 2220 case ASB_CALL_HELPER: 2271 2221 hg = drbd_asb_recover_0p(mdev); 2272 2222 if (hg == -1) { 2223 + enum drbd_state_rv rv2; 2224 + 2273 2225 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2274 2226 * we might be here in C_WF_REPORT_PARAMS which is transient. 2275 2227 * we do not need to wait for the after state change work either. 
*/ 2276 - self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2277 - if (self != SS_SUCCESS) { 2228 + rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2229 + if (rv2 != SS_SUCCESS) { 2278 2230 drbd_khelper(mdev, "pri-lost-after-sb"); 2279 2231 } else { 2280 2232 dev_warn(DEV, "Successfully gave up primary role.\n"); ··· 2315 2263 -2 C_SYNC_TARGET set BitMap 2316 2264 -100 after split brain, disconnect 2317 2265 -1000 unrelated data 2266 + -1091 requires proto 91 2267 + -1096 requires proto 96 2318 2268 */ 2319 2269 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2320 2270 { ··· 2346 2292 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { 2347 2293 2348 2294 if (mdev->agreed_pro_version < 91) 2349 - return -1001; 2295 + return -1091; 2350 2296 2351 2297 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2352 2298 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { ··· 2367 2313 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { 2368 2314 2369 2315 if (mdev->agreed_pro_version < 91) 2370 - return -1001; 2316 + return -1091; 2371 2317 2372 2318 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && 2373 2319 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { ··· 2412 2358 *rule_nr = 51; 2413 2359 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2414 2360 if (self == peer) { 2415 - self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2416 - peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); 2417 - if (self == peer) { 2361 + if (mdev->agreed_pro_version < 96 ? 2362 + (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == 2363 + (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : 2364 + peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { 2418 2365 /* The last P_SYNC_UUID did not get though. Undo the last start of 2419 2366 resync as sync source modifications of the peer's UUIDs. */ 2420 2367 2421 2368 if (mdev->agreed_pro_version < 91) 2422 - return -1001; 2369 + return -1091; 2423 2370 2424 2371 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; 2425 2372 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; 2373 + 2374 + dev_info(DEV, "Did not got last syncUUID packet, corrected:\n"); 2375 + drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2376 + 2426 2377 return -1; 2427 2378 } 2428 2379 } ··· 2449 2390 *rule_nr = 71; 2450 2391 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2451 2392 if (self == peer) { 2452 - self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); 2453 - peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2454 - if (self == peer) { 2393 + if (mdev->agreed_pro_version < 96 ? 2394 + (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == 2395 + (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : 2396 + self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { 2455 2397 /* The last P_SYNC_UUID did not get though. Undo the last start of 2456 2398 resync as sync source modifications of our UUIDs. 
*/ 2457 2399 2458 2400 if (mdev->agreed_pro_version < 91) 2459 - return -1001; 2401 + return -1091; 2460 2402 2461 2403 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); 2462 2404 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); 2463 2405 2464 - dev_info(DEV, "Undid last start of resync:\n"); 2465 - 2406 + dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); 2466 2407 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2467 2408 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2468 2409 ··· 2525 2466 dev_alert(DEV, "Unrelated data, aborting!\n"); 2526 2467 return C_MASK; 2527 2468 } 2528 - if (hg == -1001) { 2529 - dev_alert(DEV, "To resolve this both sides have to support at least protocol\n"); 2469 + if (hg < -1000) { 2470 + dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); 2530 2471 return C_MASK; 2531 2472 } 2532 2473 ··· 2625 2566 2626 2567 if (abs(hg) >= 2) { 2627 2568 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 2628 - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) 2569 + if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", 2570 + BM_LOCKED_SET_ALLOWED)) 2629 2571 return C_MASK; 2630 2572 } 2631 2573 ··· 2720 2660 unsigned char *my_alg = mdev->net_conf->integrity_alg; 2721 2661 2722 2662 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) 2723 - return FALSE; 2663 + return false; 2724 2664 2725 2665 p_integrity_alg[SHARED_SECRET_MAX-1] = 0; 2726 2666 if (strcmp(p_integrity_alg, my_alg)) { ··· 2731 2671 my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); 2732 2672 } 2733 2673 2734 - return TRUE; 2674 + return true; 2735 2675 2736 2676 disconnect: 2737 2677 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2738 - return FALSE; 2678 + return false; 2739 2679 } 2740 2680 2741 2681 /* helper function ··· 2767 2707 2768 2708 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) 2769 2709 { 2770 - int ok = TRUE; 2710 + int ok = true; 2771 2711 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; 2772 2712 unsigned int header_size, data_size, exp_max_sz; 2773 2713 struct crypto_hash *verify_tfm = NULL; ··· 2785 2725 if (packet_size > exp_max_sz) { 2786 2726 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 2787 2727 packet_size, exp_max_sz); 2788 - return FALSE; 2728 + return false; 2789 2729 } 2790 2730 2791 2731 if (apv <= 88) { ··· 2805 2745 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2806 2746 2807 2747 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) 2808 - return FALSE; 2748 + return false; 2809 2749 2810 2750 mdev->sync_conf.rate = be32_to_cpu(p->rate); 2811 2751 ··· 2815 2755 dev_err(DEV, "verify-alg too long, " 2816 2756 "peer wants %u, accepting only %u byte\n", 2817 2757 data_size, SHARED_SECRET_MAX); 2818 - return FALSE; 2758 + return false; 2819 2759 } 2820 2760 2821 2761 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) 2822 - return FALSE; 2762 + return false; 2823 2763 2824 2764 /* we expect NUL terminated string */ 2825 2765 /* but just in case someone tries to be evil */ ··· 2913 2853 /* but free the verify_tfm again, if csums_tfm did not work out */ 2914 2854 crypto_free_hash(verify_tfm); 2915 2855 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2916 - return FALSE; 2856 + return false; 2917 2857 } 
2918 2858 2919 2859 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) ··· 2939 2879 { 2940 2880 struct p_sizes *p = &mdev->data.rbuf.sizes; 2941 2881 enum determine_dev_size dd = unchanged; 2942 - unsigned int max_seg_s; 2882 + unsigned int max_bio_size; 2943 2883 sector_t p_size, p_usize, my_usize; 2944 2884 int ldsc = 0; /* local disk size changed */ 2945 2885 enum dds_flags ddsf; ··· 2950 2890 if (p_size == 0 && mdev->state.disk == D_DISKLESS) { 2951 2891 dev_err(DEV, "some backing storage is needed\n"); 2952 2892 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2953 - return FALSE; 2893 + return false; 2954 2894 } 2955 2895 2956 2896 /* just store the peer's disk size for now. ··· 2987 2927 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2988 2928 mdev->ldev->dc.disk_size = my_usize; 2989 2929 put_ldev(mdev); 2990 - return FALSE; 2930 + return false; 2991 2931 } 2992 2932 put_ldev(mdev); 2993 2933 } 2994 - #undef min_not_zero 2995 2934 2996 2935 ddsf = be16_to_cpu(p->dds_flags); 2997 2936 if (get_ldev(mdev)) { 2998 2937 dd = drbd_determin_dev_size(mdev, ddsf); 2999 2938 put_ldev(mdev); 3000 2939 if (dd == dev_size_error) 3001 - return FALSE; 2940 + return false; 3002 2941 drbd_md_sync(mdev); 3003 2942 } else { 3004 2943 /* I am diskless, need to accept the peer's size. */ ··· 3011 2952 } 3012 2953 3013 2954 if (mdev->agreed_pro_version < 94) 3014 - max_seg_s = be32_to_cpu(p->max_segment_size); 2955 + max_bio_size = be32_to_cpu(p->max_bio_size); 3015 2956 else if (mdev->agreed_pro_version == 94) 3016 - max_seg_s = DRBD_MAX_SIZE_H80_PACKET; 2957 + max_bio_size = DRBD_MAX_SIZE_H80_PACKET; 3017 2958 else /* drbd 8.3.8 onwards */ 3018 - max_seg_s = DRBD_MAX_SEGMENT_SIZE; 2959 + max_bio_size = DRBD_MAX_BIO_SIZE; 3019 2960 3020 - if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) 3021 - drbd_setup_queue_param(mdev, max_seg_s); 2961 + if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) 2962 + drbd_setup_queue_param(mdev, max_bio_size); 3022 2963 3023 2964 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); 3024 2965 put_ldev(mdev); ··· 3044 2985 } 3045 2986 } 3046 2987 3047 - return TRUE; 2988 + return true; 3048 2989 } 3049 2990 3050 2991 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3051 2992 { 3052 2993 struct p_uuids *p = &mdev->data.rbuf.uuids; 3053 2994 u64 *p_uuid; 3054 - int i; 2995 + int i, updated_uuids = 0; 3055 2996 3056 2997 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3057 2998 ··· 3068 3009 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3069 3010 (unsigned long long)mdev->ed_uuid); 3070 3011 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3071 - return FALSE; 3012 + return false; 3072 3013 } 3073 3014 3074 3015 if (get_ldev(mdev)) { ··· 3080 3021 if (skip_initial_sync) { 3081 3022 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3082 3023 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 3083 - "clear_n_write from receive_uuids"); 3024 + "clear_n_write from receive_uuids", 3025 + BM_LOCKED_TEST_ALLOWED); 3084 3026 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); 3085 3027 _drbd_uuid_set(mdev, UI_BITMAP, 0); 3086 3028 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3087 3029 CS_VERBOSE, NULL); 3088 3030 drbd_md_sync(mdev); 3031 + updated_uuids = 1; 3089 3032 } 3090 3033 put_ldev(mdev); 3091 3034 } else if (mdev->state.disk < D_INCONSISTENT && 3092 3035 mdev->state.role == R_PRIMARY) { 3093 
3036 /* I am a diskless primary, the peer just created a new current UUID 3094 3037 for me. */ 3095 - drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3038 + updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3096 3039 } 3097 3040 3098 3041 /* Before we test for the disk state, we should wait until an eventually ··· 3103 3042 new disk state... */ 3104 3043 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); 3105 3044 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) 3106 - drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3045 + updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3107 3046 3108 - return TRUE; 3047 + if (updated_uuids) 3048 + drbd_print_uuids(mdev, "receiver updated UUIDs to"); 3049 + 3050 + return true; 3109 3051 } 3110 3052 3111 3053 /** ··· 3145 3081 { 3146 3082 struct p_req_state *p = &mdev->data.rbuf.req_state; 3147 3083 union drbd_state mask, val; 3148 - int rv; 3084 + enum drbd_state_rv rv; 3149 3085 3150 3086 mask.i = be32_to_cpu(p->mask); 3151 3087 val.i = be32_to_cpu(p->val); ··· 3153 3089 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && 3154 3090 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { 3155 3091 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); 3156 - return TRUE; 3092 + return true; 3157 3093 } 3158 3094 3159 3095 mask = convert_state(mask); ··· 3164 3100 drbd_send_sr_reply(mdev, rv); 3165 3101 drbd_md_sync(mdev); 3166 3102 3167 - return TRUE; 3103 + return true; 3168 3104 } 3169 3105 3170 3106 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) ··· 3209 3145 peer_state.conn == C_CONNECTED) { 3210 3146 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) 3211 3147 drbd_resync_finished(mdev); 3212 - return TRUE; 3148 + return true; 3213 3149 } 3214 3150 } 3215 3151 ··· 3224 3160 3225 3161 if (ns.conn == C_WF_REPORT_PARAMS) 3226 3162 ns.conn = C_CONNECTED; 3163 + 3164 + if (peer_state.conn == C_AHEAD) 3165 + ns.conn = C_BEHIND; 3227 3166 3228 3167 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3229 3168 get_ldev_if_state(mdev, D_NEGOTIATING)) { ··· 3262 3195 real_peer_disk = D_DISKLESS; 3263 3196 } else { 3264 3197 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) 3265 - return FALSE; 3198 + return false; 3266 3199 D_ASSERT(os.conn == C_WF_REPORT_PARAMS); 3267 3200 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3268 - return FALSE; 3201 + return false; 3269 3202 } 3270 3203 } 3271 3204 } ··· 3290 3223 drbd_uuid_new_current(mdev); 3291 3224 clear_bit(NEW_CUR_UUID, &mdev->flags); 3292 3225 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); 3293 - return FALSE; 3226 + return false; 3294 3227 } 3295 3228 rv = _drbd_set_state(mdev, ns, cs_flags, NULL); 3296 3229 ns = mdev->state; ··· 3298 3231 3299 3232 if (rv < SS_SUCCESS) { 3300 3233 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3301 - return FALSE; 3234 + return false; 3302 3235 } 3303 3236 3304 3237 if (os.conn > C_WF_REPORT_PARAMS) { ··· 3316 3249 3317 3250 drbd_md_sync(mdev); /* update connected indicator, la_size, ... 
*/ 3318 3251 3319 - return TRUE; 3252 + return true; 3320 3253 } 3321 3254 3322 3255 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) ··· 3325 3258 3326 3259 wait_event(mdev->misc_wait, 3327 3260 mdev->state.conn == C_WF_SYNC_UUID || 3261 + mdev->state.conn == C_BEHIND || 3328 3262 mdev->state.conn < C_CONNECTED || 3329 3263 mdev->state.disk < D_NEGOTIATING); 3330 3264 ··· 3337 3269 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); 3338 3270 _drbd_uuid_set(mdev, UI_BITMAP, 0UL); 3339 3271 3272 + drbd_print_uuids(mdev, "updated sync uuid"); 3340 3273 drbd_start_resync(mdev, C_SYNC_TARGET); 3341 3274 3342 3275 put_ldev(mdev); 3343 3276 } else 3344 3277 dev_err(DEV, "Ignoring SyncUUID packet!\n"); 3345 3278 3346 - return TRUE; 3279 + return true; 3347 3280 } 3348 3281 3349 - enum receive_bitmap_ret { OK, DONE, FAILED }; 3350 - 3351 - static enum receive_bitmap_ret 3282 + /** 3283 + * receive_bitmap_plain 3284 + * 3285 + * Return 0 when done, 1 when another iteration is needed, and a negative error 3286 + * code upon failure. 3287 + */ 3288 + static int 3352 3289 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, 3353 3290 unsigned long *buffer, struct bm_xfer_ctx *c) 3354 3291 { 3355 3292 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3356 3293 unsigned want = num_words * sizeof(long); 3294 + int err; 3357 3295 3358 3296 if (want != data_size) { 3359 3297 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); 3360 - return FAILED; 3298 + return -EIO; 3361 3299 } 3362 3300 if (want == 0) 3363 - return DONE; 3364 - if (drbd_recv(mdev, buffer, want) != want) 3365 - return FAILED; 3301 + return 0; 3302 + err = drbd_recv(mdev, buffer, want); 3303 + if (err != want) { 3304 + if (err >= 0) 3305 + err = -EIO; 3306 + return err; 3307 + } 3366 3308 3367 3309 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); 3368 3310 ··· 3381 3303 if (c->bit_offset > c->bm_bits) 3382 3304 c->bit_offset = c->bm_bits; 3383 3305 3384 - return OK; 3306 + return 1; 3385 3307 } 3386 3308 3387 - static enum receive_bitmap_ret 3309 + /** 3310 + * recv_bm_rle_bits 3311 + * 3312 + * Return 0 when done, 1 when another iteration is needed, and a negative error 3313 + * code upon failure. 
3314 + */ 3315 + static int 3388 3316 recv_bm_rle_bits(struct drbd_conf *mdev, 3389 3317 struct p_compressed_bm *p, 3390 3318 struct bm_xfer_ctx *c) ··· 3410 3326 3411 3327 bits = bitstream_get_bits(&bs, &look_ahead, 64); 3412 3328 if (bits < 0) 3413 - return FAILED; 3329 + return -EIO; 3414 3330 3415 3331 for (have = bits; have > 0; s += rl, toggle = !toggle) { 3416 3332 bits = vli_decode_bits(&rl, look_ahead); 3417 3333 if (bits <= 0) 3418 - return FAILED; 3334 + return -EIO; 3419 3335 3420 3336 if (toggle) { 3421 3337 e = s + rl -1; 3422 3338 if (e >= c->bm_bits) { 3423 3339 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 3424 - return FAILED; 3340 + return -EIO; 3425 3341 } 3426 3342 _drbd_bm_set_bits(mdev, s, e); 3427 3343 } ··· 3431 3347 have, bits, look_ahead, 3432 3348 (unsigned int)(bs.cur.b - p->code), 3433 3349 (unsigned int)bs.buf_len); 3434 - return FAILED; 3350 + return -EIO; 3435 3351 } 3436 3352 look_ahead >>= bits; 3437 3353 have -= bits; 3438 3354 3439 3355 bits = bitstream_get_bits(&bs, &tmp, 64 - have); 3440 3356 if (bits < 0) 3441 - return FAILED; 3357 + return -EIO; 3442 3358 look_ahead |= tmp << have; 3443 3359 have += bits; 3444 3360 } ··· 3446 3362 c->bit_offset = s; 3447 3363 bm_xfer_ctx_bit_to_word_offset(c); 3448 3364 3449 - return (s == c->bm_bits) ? DONE : OK; 3365 + return (s != c->bm_bits); 3450 3366 } 3451 3367 3452 - static enum receive_bitmap_ret 3368 + /** 3369 + * decode_bitmap_c 3370 + * 3371 + * Return 0 when done, 1 when another iteration is needed, and a negative error 3372 + * code upon failure. 3373 + */ 3374 + static int 3453 3375 decode_bitmap_c(struct drbd_conf *mdev, 3454 3376 struct p_compressed_bm *p, 3455 3377 struct bm_xfer_ctx *c) ··· 3469 3379 3470 3380 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 3471 3381 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3472 - return FAILED; 3382 + return -EIO; 3473 3383 } 3474 3384 3475 3385 void INFO_bm_xfer_stats(struct drbd_conf *mdev, ··· 3518 3428 { 3519 3429 struct bm_xfer_ctx c; 3520 3430 void *buffer; 3521 - enum receive_bitmap_ret ret; 3522 - int ok = FALSE; 3431 + int err; 3432 + int ok = false; 3523 3433 struct p_header80 *h = &mdev->data.rbuf.header.h80; 3524 3434 3525 - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 3526 - 3527 - drbd_bm_lock(mdev, "receive bitmap"); 3435 + drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); 3436 + /* you are supposed to send additional out-of-sync information 3437 + * if you actually set bits during this phase */ 3528 3438 3529 3439 /* maybe we should use some per thread scratch page, 3530 3440 * and allocate that during initial device creation? */ ··· 3539 3449 .bm_words = drbd_bm_words(mdev), 3540 3450 }; 3541 3451 3542 - do { 3452 + for(;;) { 3543 3453 if (cmd == P_BITMAP) { 3544 - ret = receive_bitmap_plain(mdev, data_size, buffer, &c); 3454 + err = receive_bitmap_plain(mdev, data_size, buffer, &c); 3545 3455 } else if (cmd == P_COMPRESSED_BITMAP) { 3546 3456 /* MAYBE: sanity check that we speak proto >= 90, 3547 3457 * and the feature is enabled! 
*/ ··· 3558 3468 goto out; 3559 3469 if (data_size <= (sizeof(*p) - sizeof(p->head))) { 3560 3470 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); 3561 - return FAILED; 3471 + goto out; 3562 3472 } 3563 - ret = decode_bitmap_c(mdev, p, &c); 3473 + err = decode_bitmap_c(mdev, p, &c); 3564 3474 } else { 3565 3475 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); 3566 3476 goto out; ··· 3569 3479 c.packets[cmd == P_BITMAP]++; 3570 3480 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; 3571 3481 3572 - if (ret != OK) 3482 + if (err <= 0) { 3483 + if (err < 0) 3484 + goto out; 3573 3485 break; 3574 - 3486 + } 3575 3487 if (!drbd_recv_header(mdev, &cmd, &data_size)) 3576 3488 goto out; 3577 - } while (ret == OK); 3578 - if (ret == FAILED) 3579 - goto out; 3489 + } 3580 3490 3581 3491 INFO_bm_xfer_stats(mdev, "receive", &c); 3582 3492 3583 3493 if (mdev->state.conn == C_WF_BITMAP_T) { 3494 + enum drbd_state_rv rv; 3495 + 3584 3496 ok = !drbd_send_bitmap(mdev); 3585 3497 if (!ok) 3586 3498 goto out; 3587 3499 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ 3588 - ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3589 - D_ASSERT(ok == SS_SUCCESS); 3500 + rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3501 + D_ASSERT(rv == SS_SUCCESS); 3590 3502 } else if (mdev->state.conn != C_WF_BITMAP_S) { 3591 3503 /* admin may have requested C_DISCONNECTING, 3592 3504 * other threads may have noticed network errors */ ··· 3596 3504 drbd_conn_str(mdev->state.conn)); 3597 3505 } 3598 3506 3599 - ok = TRUE; 3507 + ok = true; 3600 3508 out: 3601 3509 drbd_bm_unlock(mdev); 3602 3510 if (ok && mdev->state.conn == C_WF_BITMAP_S) ··· 3630 3538 * with the data requests being unplugged */ 3631 3539 drbd_tcp_quickack(mdev->data.socket); 3632 3540 3633 - return TRUE; 3541 + return true; 3542 + } 3543 + 3544 + static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3545 + { 3546 + struct p_block_desc *p = &mdev->data.rbuf.block_desc; 3547 + 3548 + switch (mdev->state.conn) { 3549 + case C_WF_SYNC_UUID: 3550 + case C_WF_BITMAP_T: 3551 + case C_BEHIND: 3552 + break; 3553 + default: 3554 + dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 3555 + drbd_conn_str(mdev->state.conn)); 3556 + } 3557 + 3558 + drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 3559 + 3560 + return true; 3634 3561 } 3635 3562 3636 3563 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); ··· 3682 3571 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 3683 3572 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 3684 3573 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, 3574 + [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, 3685 3575 /* anything missing from this table is in 3686 3576 * the asender_tbl, see get_asender_cmd */ 3687 3577 [P_MAX_CMD] = { 0, 0, NULL }, ··· 3722 3610 if (shs) { 3723 3611 rv = drbd_recv(mdev, &header->h80.payload, shs); 3724 3612 if (unlikely(rv != shs)) { 3725 - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 3613 + if (!signal_pending(current)) 3614 + dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); 3726 3615 goto err_out; 3727 3616 } 3728 3617 } ··· 3795 3682 3796 3683 if (mdev->state.conn == 
C_STANDALONE) 3797 3684 return; 3798 - if (mdev->state.conn >= C_WF_CONNECTION) 3799 - dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", 3800 - drbd_conn_str(mdev->state.conn)); 3801 3685 3802 3686 /* asender does not clean up anything. it must not interfere, either */ 3803 3687 drbd_thread_stop(&mdev->asender); ··· 3822 3712 mdev->rs_failed = 0; 3823 3713 atomic_set(&mdev->rs_pending_cnt, 0); 3824 3714 wake_up(&mdev->misc_wait); 3715 + 3716 + del_timer(&mdev->request_timer); 3825 3717 3826 3718 /* make sure syncer is stopped and w_resume_next_sg queued */ 3827 3719 del_timer_sync(&mdev->resync_timer); ··· 3870 3758 if (os.conn == C_DISCONNECTING) { 3871 3759 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); 3872 3760 3873 - if (!is_susp(mdev->state)) { 3874 - /* we must not free the tl_hash 3875 - * while application io is still on the fly */ 3876 - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 3877 - drbd_free_tl_hash(mdev); 3878 - } 3879 - 3880 3761 crypto_free_hash(mdev->cram_hmac_tfm); 3881 3762 mdev->cram_hmac_tfm = NULL; 3882 3763 ··· 3877 3772 mdev->net_conf = NULL; 3878 3773 drbd_request_state(mdev, NS(conn, C_STANDALONE)); 3879 3774 } 3775 + 3776 + /* serialize with bitmap writeout triggered by the state change, 3777 + * if any. */ 3778 + wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 3880 3779 3881 3780 /* tcp_close and release of sendpage pages can be deferred. I don't 3882 3781 * want to use SO_LINGER, because apparently it can be deferred for ··· 3982 3873 rv = drbd_recv(mdev, &p->head.payload, expect); 3983 3874 3984 3875 if (rv != expect) { 3985 - dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); 3876 + if (!signal_pending(current)) 3877 + dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); 3986 3878 return 0; 3987 3879 } 3988 3880 ··· 4085 3975 rv = drbd_recv(mdev, peers_ch, length); 4086 3976 4087 3977 if (rv != length) { 4088 - dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); 3978 + if (!signal_pending(current)) 3979 + dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); 4089 3980 rv = 0; 4090 3981 goto fail; 4091 3982 } ··· 4133 4022 rv = drbd_recv(mdev, response , resp_size); 4134 4023 4135 4024 if (rv != resp_size) { 4136 - dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4025 + if (!signal_pending(current)) 4026 + dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4137 4027 rv = 0; 4138 4028 goto fail; 4139 4029 } ··· 4186 4074 h = drbd_connect(mdev); 4187 4075 if (h == 0) { 4188 4076 drbd_disconnect(mdev); 4189 - __set_current_state(TASK_INTERRUPTIBLE); 4190 - schedule_timeout(HZ); 4077 + schedule_timeout_interruptible(HZ); 4191 4078 } 4192 4079 if (h == -1) { 4193 4080 dev_warn(DEV, "Discarding network configuration.\n"); ··· 4224 4113 } 4225 4114 wake_up(&mdev->state_wait); 4226 4115 4227 - return TRUE; 4116 + return true; 4228 4117 } 4229 4118 4230 4119 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) ··· 4240 4129 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) 4241 4130 wake_up(&mdev->misc_wait); 4242 4131 4243 - return TRUE; 4132 + return true; 4244 4133 } 4245 4134 4246 4135 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) ··· 4263 4152 dec_rs_pending(mdev); 4264 4153 atomic_add(blksize >> 9, &mdev->rs_sect_in); 4265 4154 4266 - return TRUE; 4155 + return true; 4267 4156 } 4268 4157 4269 4158 /* when we receive the ACK for a write request, ··· 4287 4176 return req; 4288 4177 } 4289 
4178 } 4290 - dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", 4291 - (void *)(unsigned long)id, (unsigned long long)sector); 4292 4179 return NULL; 4293 4180 } 4294 4181 ··· 4304 4195 req = validator(mdev, id, sector); 4305 4196 if (unlikely(!req)) { 4306 4197 spin_unlock_irq(&mdev->req_lock); 4307 - dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); 4308 - return FALSE; 4198 + 4199 + dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func, 4200 + (void *)(unsigned long)id, (unsigned long long)sector); 4201 + return false; 4309 4202 } 4310 4203 __req_mod(req, what, &m); 4311 4204 spin_unlock_irq(&mdev->req_lock); 4312 4205 4313 4206 if (m.bio) 4314 4207 complete_master_bio(mdev, &m); 4315 - return TRUE; 4208 + return true; 4316 4209 } 4317 4210 4318 4211 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) ··· 4329 4218 if (is_syncer_block_id(p->block_id)) { 4330 4219 drbd_set_in_sync(mdev, sector, blksize); 4331 4220 dec_rs_pending(mdev); 4332 - return TRUE; 4221 + return true; 4333 4222 } 4334 4223 switch (be16_to_cpu(h->command)) { 4335 4224 case P_RS_WRITE_ACK: ··· 4350 4239 break; 4351 4240 default: 4352 4241 D_ASSERT(0); 4353 - return FALSE; 4242 + return false; 4354 4243 } 4355 4244 4356 4245 return validate_req_change_req_state(mdev, p->block_id, sector, ··· 4361 4250 { 4362 4251 struct p_block_ack *p = (struct p_block_ack *)h; 4363 4252 sector_t sector = be64_to_cpu(p->sector); 4364 - 4365 - if (__ratelimit(&drbd_ratelimit_state)) 4366 - dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n"); 4253 + int size = be32_to_cpu(p->blksize); 4254 + struct drbd_request *req; 4255 + struct bio_and_error m; 4367 4256 4368 4257 update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4369 4258 4370 4259 if (is_syncer_block_id(p->block_id)) { 4371 - int size = be32_to_cpu(p->blksize); 4372 4260 dec_rs_pending(mdev); 4373 4261 drbd_rs_failed_io(mdev, sector, size); 4374 - return TRUE; 4262 + return true; 4375 4263 } 4376 - return validate_req_change_req_state(mdev, p->block_id, sector, 4377 - _ack_id_to_req, __func__ , neg_acked); 4264 + 4265 + spin_lock_irq(&mdev->req_lock); 4266 + req = _ack_id_to_req(mdev, p->block_id, sector); 4267 + if (!req) { 4268 + spin_unlock_irq(&mdev->req_lock); 4269 + if (mdev->net_conf->wire_protocol == DRBD_PROT_A || 4270 + mdev->net_conf->wire_protocol == DRBD_PROT_B) { 4271 + /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. 4272 + The master bio might already be completed, therefore the 4273 + request is no longer in the collision hash. 4274 + => Do not try to validate block_id as request. */ 4275 + /* In Protocol B we might already have got a P_RECV_ACK 4276 + but then get a P_NEG_ACK after wards. 
*/ 4277 + drbd_set_out_of_sync(mdev, sector, size); 4278 + return true; 4279 + } else { 4280 + dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__, 4281 + (void *)(unsigned long)p->block_id, (unsigned long long)sector); 4282 + return false; 4283 + } 4284 + } 4285 + __req_mod(req, neg_acked, &m); 4286 + spin_unlock_irq(&mdev->req_lock); 4287 + 4288 + if (m.bio) 4289 + complete_master_bio(mdev, &m); 4290 + return true; 4378 4291 } 4379 4292 4380 4293 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) ··· 4429 4294 4430 4295 if (get_ldev_if_state(mdev, D_FAILED)) { 4431 4296 drbd_rs_complete_io(mdev, sector); 4432 - drbd_rs_failed_io(mdev, sector, size); 4297 + switch (be16_to_cpu(h->command)) { 4298 + case P_NEG_RS_DREPLY: 4299 + drbd_rs_failed_io(mdev, sector, size); 4300 + case P_RS_CANCEL: 4301 + break; 4302 + default: 4303 + D_ASSERT(0); 4304 + put_ldev(mdev); 4305 + return false; 4306 + } 4433 4307 put_ldev(mdev); 4434 4308 } 4435 4309 4436 - return TRUE; 4310 + return true; 4437 4311 } 4438 4312 4439 4313 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) ··· 4451 4307 4452 4308 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); 4453 4309 4454 - return TRUE; 4310 + if (mdev->state.conn == C_AHEAD && 4311 + atomic_read(&mdev->ap_in_flight) == 0 && 4312 + !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { 4313 + mdev->start_resync_timer.expires = jiffies + HZ; 4314 + add_timer(&mdev->start_resync_timer); 4315 + } 4316 + 4317 + return true; 4455 4318 } 4456 4319 4457 4320 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) ··· 4479 4328 ov_oos_print(mdev); 4480 4329 4481 4330 if (!get_ldev(mdev)) 4482 - return TRUE; 4331 + return true; 4483 4332 4484 4333 drbd_rs_complete_io(mdev, sector); 4485 4334 dec_rs_pending(mdev); 4486 4335 4487 - if (--mdev->ov_left == 0) { 4336 + --mdev->ov_left; 4337 + 4338 + /* let's advance progress step marks only for every other megabyte */ 4339 + if ((mdev->ov_left & 0x200) == 0x200) 4340 + drbd_advance_rs_marks(mdev, mdev->ov_left); 4341 + 4342 + if (mdev->ov_left == 0) { 4488 4343 w = kmalloc(sizeof(*w), GFP_NOIO); 4489 4344 if (w) { 4490 4345 w->cb = w_ov_finished; ··· 4502 4345 } 4503 4346 } 4504 4347 put_ldev(mdev); 4505 - return TRUE; 4348 + return true; 4506 4349 } 4507 4350 4508 4351 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) 4509 4352 { 4510 - return TRUE; 4353 + return true; 4511 4354 } 4512 4355 4513 4356 struct asender_cmd { ··· 4535 4378 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 4536 4379 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 4537 4380 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, 4381 + [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, 4538 4382 [P_MAX_CMD] = { 0, NULL }, 4539 4383 }; 4540 4384 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
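A minimal standalone sketch, for illustration of the drbd_receiver.c hunks above: the old enum receive_bitmap_ret { OK, DONE, FAILED } is replaced by a plain int convention, spelled out in the new comments, where a step returns 0 when done, 1 when another iteration is needed, and a negative error code on failure, and the caller loop branches on err <= 0. The helpers below (bm_step, bm_receive_loop) are invented userspace stand-ins, not the kernel functions.

#include <errno.h>
#include <stdio.h>

/* Pretend "step": consume one chunk per call.
 * Returns 0 when done, 1 to request another iteration,
 * or a negative error code on failure. */
static int bm_step(int *remaining)
{
	if (*remaining < 0)
		return -EIO;	/* simulated receive failure */
	if (*remaining == 0)
		return 0;	/* done */
	(*remaining)--;
	return 1;		/* more packets expected */
}

/* Driving loop shaped like the reworked receive_bitmap(): stop on 0,
 * bail out on a negative error, keep iterating on 1. */
static int bm_receive_loop(int chunks)
{
	int err;

	for (;;) {
		err = bm_step(&chunks);
		if (err <= 0) {
			if (err < 0)
				return err;	/* propagate the error */
			break;			/* 0: transfer complete */
		}
		/* 1: would read the next packet header here */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", bm_receive_loop(3));	/* prints 0 */
	return 0;
}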
+131 -38
drivers/block/drbd/drbd_req.c
··· 140 140 struct hlist_node *n; 141 141 struct hlist_head *slot; 142 142 143 - /* before we can signal completion to the upper layers, 144 - * we may need to close the current epoch */ 143 + /* Before we can signal completion to the upper layers, 144 + * we may need to close the current epoch. 145 + * We can skip this, if this request has not even been sent, because we 146 + * did not have a fully established connection yet/anymore, during 147 + * bitmap exchange, or while we are C_AHEAD due to congestion policy. 148 + */ 145 149 if (mdev->state.conn >= C_CONNECTED && 150 + (s & RQ_NET_SENT) != 0 && 146 151 req->epoch == mdev->newest_tle->br_number) 147 152 queue_barrier(mdev); 148 153 ··· 445 440 req->rq_state |= RQ_LOCAL_COMPLETED; 446 441 req->rq_state &= ~RQ_LOCAL_PENDING; 447 442 448 - __drbd_chk_io_error(mdev, FALSE); 443 + __drbd_chk_io_error(mdev, false); 449 444 _req_may_be_done_not_susp(req, m); 450 445 put_ldev(mdev); 451 446 break; ··· 466 461 467 462 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); 468 463 469 - __drbd_chk_io_error(mdev, FALSE); 464 + __drbd_chk_io_error(mdev, false); 470 465 put_ldev(mdev); 471 466 472 467 /* no point in retrying if there is no good remote data, ··· 550 545 551 546 break; 552 547 548 + case queue_for_send_oos: 549 + req->rq_state |= RQ_NET_QUEUED; 550 + req->w.cb = w_send_oos; 551 + drbd_queue_work(&mdev->data.work, &req->w); 552 + break; 553 + 554 + case oos_handed_to_network: 555 + /* actually the same */ 553 556 case send_canceled: 554 557 /* treat it the same */ 555 558 case send_failed: ··· 571 558 572 559 case handed_over_to_network: 573 560 /* assert something? */ 561 + if (bio_data_dir(req->master_bio) == WRITE) 562 + atomic_add(req->size>>9, &mdev->ap_in_flight); 563 + 574 564 if (bio_data_dir(req->master_bio) == WRITE && 575 565 mdev->net_conf->wire_protocol == DRBD_PROT_A) { 576 566 /* this is what is dangerous about protocol A: ··· 607 591 dec_ap_pending(mdev); 608 592 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); 609 593 req->rq_state |= RQ_NET_DONE; 594 + if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE) 595 + atomic_sub(req->size>>9, &mdev->ap_in_flight); 596 + 610 597 /* if it is still queued, we may not complete it here. 611 598 * it will be canceled soon. */ 612 599 if (!(req->rq_state & RQ_NET_QUEUED)) ··· 647 628 req->rq_state |= RQ_NET_OK; 648 629 D_ASSERT(req->rq_state & RQ_NET_PENDING); 649 630 dec_ap_pending(mdev); 631 + atomic_sub(req->size>>9, &mdev->ap_in_flight); 650 632 req->rq_state &= ~RQ_NET_PENDING; 651 633 _req_may_be_done_not_susp(req, m); 652 634 break; 653 635 654 636 case neg_acked: 655 637 /* assert something? 
*/ 656 - if (req->rq_state & RQ_NET_PENDING) 638 + if (req->rq_state & RQ_NET_PENDING) { 657 639 dec_ap_pending(mdev); 640 + atomic_sub(req->size>>9, &mdev->ap_in_flight); 641 + } 658 642 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); 659 643 660 644 req->rq_state |= RQ_NET_DONE; ··· 712 690 dev_err(DEV, "FIXME (barrier_acked but pending)\n"); 713 691 list_move(&req->tl_requests, &mdev->out_of_sequence_requests); 714 692 } 715 - D_ASSERT(req->rq_state & RQ_NET_SENT); 716 - req->rq_state |= RQ_NET_DONE; 693 + if ((req->rq_state & RQ_NET_MASK) != 0) { 694 + req->rq_state |= RQ_NET_DONE; 695 + if (mdev->net_conf->wire_protocol == DRBD_PROT_A) 696 + atomic_sub(req->size>>9, &mdev->ap_in_flight); 697 + } 717 698 _req_may_be_done(req, m); /* Allowed while state.susp */ 718 699 break; 719 700 ··· 763 738 return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); 764 739 } 765 740 766 - static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) 741 + static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) 767 742 { 768 743 const int rw = bio_rw(bio); 769 744 const int size = bio->bi_size; 770 745 const sector_t sector = bio->bi_sector; 771 746 struct drbd_tl_epoch *b = NULL; 772 747 struct drbd_request *req; 773 - int local, remote; 748 + int local, remote, send_oos = 0; 774 749 int err = -EIO; 775 750 int ret = 0; 776 751 ··· 784 759 bio_endio(bio, -ENOMEM); 785 760 return 0; 786 761 } 762 + req->start_time = start_time; 787 763 788 764 local = get_ldev(mdev); 789 765 if (!local) { ··· 834 808 drbd_al_begin_io(mdev, sector); 835 809 } 836 810 837 - remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || 838 - (mdev->state.pdsk == D_INCONSISTENT && 839 - mdev->state.conn >= C_CONNECTED)); 811 + remote = remote && drbd_should_do_remote(mdev->state); 812 + send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); 813 + D_ASSERT(!(remote && send_oos)); 840 814 841 815 if (!(local || remote) && !is_susp(mdev->state)) { 842 816 if (__ratelimit(&drbd_ratelimit_state)) ··· 850 824 * but there is a race between testing the bit and pointer outside the 851 825 * spinlock, and grabbing the spinlock. 852 826 * if we lost that race, we retry. */ 853 - if (rw == WRITE && remote && 827 + if (rw == WRITE && (remote || send_oos) && 854 828 mdev->unused_spare_tle == NULL && 855 829 test_bit(CREATE_BARRIER, &mdev->flags)) { 856 830 allocate_barrier: ··· 868 842 if (is_susp(mdev->state)) { 869 843 /* If we got suspended, use the retry mechanism of 870 844 generic_make_request() to restart processing of this 871 - bio. In the next call to drbd_make_request_26 845 + bio. 
In the next call to drbd_make_request 872 846 we sleep in inc_ap_bio() */ 873 847 ret = 1; 874 848 spin_unlock_irq(&mdev->req_lock); 875 849 goto fail_free_complete; 876 850 } 877 851 878 - if (remote) { 879 - remote = (mdev->state.pdsk == D_UP_TO_DATE || 880 - (mdev->state.pdsk == D_INCONSISTENT && 881 - mdev->state.conn >= C_CONNECTED)); 882 - if (!remote) 852 + if (remote || send_oos) { 853 + remote = drbd_should_do_remote(mdev->state); 854 + send_oos = rw == WRITE && drbd_should_send_oos(mdev->state); 855 + D_ASSERT(!(remote && send_oos)); 856 + 857 + if (!(remote || send_oos)) 883 858 dev_warn(DEV, "lost connection while grabbing the req_lock!\n"); 884 859 if (!(local || remote)) { 885 860 dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); ··· 893 866 mdev->unused_spare_tle = b; 894 867 b = NULL; 895 868 } 896 - if (rw == WRITE && remote && 869 + if (rw == WRITE && (remote || send_oos) && 897 870 mdev->unused_spare_tle == NULL && 898 871 test_bit(CREATE_BARRIER, &mdev->flags)) { 899 872 /* someone closed the current epoch ··· 916 889 * barrier packet. To get the write ordering right, we only have to 917 890 * make sure that, if this is a write request and it triggered a 918 891 * barrier packet, this request is queued within the same spinlock. */ 919 - if (remote && mdev->unused_spare_tle && 892 + if ((remote || send_oos) && mdev->unused_spare_tle && 920 893 test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { 921 894 _tl_add_barrier(mdev, mdev->unused_spare_tle); 922 895 mdev->unused_spare_tle = NULL; ··· 964 937 ? queue_for_net_write 965 938 : queue_for_net_read); 966 939 } 940 + if (send_oos && drbd_set_out_of_sync(mdev, sector, size)) 941 + _req_mod(req, queue_for_send_oos); 942 + 943 + if (remote && 944 + mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { 945 + int congested = 0; 946 + 947 + if (mdev->net_conf->cong_fill && 948 + atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { 949 + dev_info(DEV, "Congestion-fill threshold reached\n"); 950 + congested = 1; 951 + } 952 + 953 + if (mdev->act_log->used >= mdev->net_conf->cong_extents) { 954 + dev_info(DEV, "Congestion-extents threshold reached\n"); 955 + congested = 1; 956 + } 957 + 958 + if (congested) { 959 + queue_barrier(mdev); /* last barrier, after mirrored writes */ 960 + 961 + if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) 962 + _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); 963 + else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ 964 + _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); 965 + } 966 + } 967 + 967 968 spin_unlock_irq(&mdev->req_lock); 968 969 kfree(b); /* if someone else has beaten us to it... */ 969 970 ··· 1004 949 * stable storage, and this is a WRITE, we may not even submit 1005 950 * this bio. */ 1006 951 if (get_ldev(mdev)) { 1007 - if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR 1008 - : rw == READ ? DRBD_FAULT_DT_RD 1009 - : DRBD_FAULT_DT_RA)) 952 + if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR 953 + : rw == READ ? 
DRBD_FAULT_DT_RD 954 + : DRBD_FAULT_DT_RA)) 1010 955 bio_endio(req->private_bio, -EIO); 1011 956 else 1012 957 generic_make_request(req->private_bio); ··· 1073 1018 return 0; 1074 1019 } 1075 1020 1076 - int drbd_make_request_26(struct request_queue *q, struct bio *bio) 1021 + int drbd_make_request(struct request_queue *q, struct bio *bio) 1077 1022 { 1078 1023 unsigned int s_enr, e_enr; 1079 1024 struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; 1025 + unsigned long start_time; 1080 1026 1081 1027 if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { 1082 1028 bio_endio(bio, -EPERM); 1083 1029 return 0; 1084 1030 } 1031 + 1032 + start_time = jiffies; 1085 1033 1086 1034 /* 1087 1035 * what we "blindly" assume: ··· 1100 1042 1101 1043 if (likely(s_enr == e_enr)) { 1102 1044 inc_ap_bio(mdev, 1); 1103 - return drbd_make_request_common(mdev, bio); 1045 + return drbd_make_request_common(mdev, bio, start_time); 1104 1046 } 1105 1047 1106 1048 /* can this bio be split generically? 1107 1049 * Maybe add our own split-arbitrary-bios function. */ 1108 - if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) { 1050 + if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) { 1109 1051 /* rather error out here than BUG in bio_split */ 1110 1052 dev_err(DEV, "bio would need to, but cannot, be split: " 1111 1053 "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n", ··· 1127 1069 const int sps = 1 << HT_SHIFT; /* sectors per slot */ 1128 1070 const int mask = sps - 1; 1129 1071 const sector_t first_sectors = sps - (sect & mask); 1130 - bp = bio_split(bio, 1131 - #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) 1132 - bio_split_pool, 1133 - #endif 1134 - first_sectors); 1072 + bp = bio_split(bio, first_sectors); 1135 1073 1136 1074 /* we need to get a "reference count" (ap_bio_cnt) 1137 1075 * to avoid races with the disconnect/reconnect/suspend code. ··· 1138 1084 1139 1085 D_ASSERT(e_enr == s_enr + 1); 1140 1086 1141 - while (drbd_make_request_common(mdev, &bp->bio1)) 1087 + while (drbd_make_request_common(mdev, &bp->bio1, start_time)) 1142 1088 inc_ap_bio(mdev, 1); 1143 1089 1144 - while (drbd_make_request_common(mdev, &bp->bio2)) 1090 + while (drbd_make_request_common(mdev, &bp->bio2, start_time)) 1145 1091 inc_ap_bio(mdev, 1); 1146 1092 1147 1093 dec_ap_bio(mdev); ··· 1152 1098 } 1153 1099 1154 1100 /* This is called by bio_add_page(). With this function we reduce 1155 - * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs 1101 + * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs 1156 1102 * units (was AL_EXTENTs). 1157 1103 * 1158 1104 * we do the calculation within the lower 32bit of the byte offsets, ··· 1162 1108 * As long as the BIO is empty we have to allow at least one bvec, 1163 1109 * regardless of size and offset. so the resulting bio may still 1164 1110 * cross extent boundaries. those are dealt with (bio_split) in 1165 - * drbd_make_request_26. 1111 + * drbd_make_request. 
1166 1112 */ 1167 1113 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) 1168 1114 { ··· 1172 1118 unsigned int bio_size = bvm->bi_size; 1173 1119 int limit, backing_limit; 1174 1120 1175 - limit = DRBD_MAX_SEGMENT_SIZE 1176 - - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size); 1121 + limit = DRBD_MAX_BIO_SIZE 1122 + - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size); 1177 1123 if (limit < 0) 1178 1124 limit = 0; 1179 1125 if (bio_size == 0) { ··· 1189 1135 put_ldev(mdev); 1190 1136 } 1191 1137 return limit; 1138 + } 1139 + 1140 + void request_timer_fn(unsigned long data) 1141 + { 1142 + struct drbd_conf *mdev = (struct drbd_conf *) data; 1143 + struct drbd_request *req; /* oldest request */ 1144 + struct list_head *le; 1145 + unsigned long et = 0; /* effective timeout = ko_count * timeout */ 1146 + 1147 + if (get_net_conf(mdev)) { 1148 + et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count; 1149 + put_net_conf(mdev); 1150 + } 1151 + if (!et || mdev->state.conn < C_WF_REPORT_PARAMS) 1152 + return; /* Recurring timer stopped */ 1153 + 1154 + spin_lock_irq(&mdev->req_lock); 1155 + le = &mdev->oldest_tle->requests; 1156 + if (list_empty(le)) { 1157 + spin_unlock_irq(&mdev->req_lock); 1158 + mod_timer(&mdev->request_timer, jiffies + et); 1159 + return; 1160 + } 1161 + 1162 + le = le->prev; 1163 + req = list_entry(le, struct drbd_request, tl_requests); 1164 + if (time_is_before_eq_jiffies(req->start_time + et)) { 1165 + if (req->rq_state & RQ_NET_PENDING) { 1166 + dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); 1167 + _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL); 1168 + } else { 1169 + dev_warn(DEV, "Local backing block device frozen?\n"); 1170 + mod_timer(&mdev->request_timer, jiffies + et); 1171 + } 1172 + } else { 1173 + mod_timer(&mdev->request_timer, req->start_time + et); 1174 + } 1175 + 1176 + spin_unlock_irq(&mdev->req_lock); 1192 1177 }
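The request_timer_fn() added in the drbd_req.c hunks above treats the oldest request on the transfer log as timed out once it has been pending for longer than ko_count * timeout; timeout comes from net_conf in centiseconds, so the effective timeout in jiffies is timeout*HZ/10 * ko_count. A minimal userspace sketch of that arithmetic (HZ, timeout and ko_count below are example values chosen for illustration, not taken from the patch):

/* sketch only, not kernel code */
#include <stdio.h>

#define HZ 250                      /* assumed tick rate for the example */

int main(void)
{
    unsigned int timeout  = 60;     /* net_conf->timeout, centiseconds (6 s) */
    unsigned int ko_count = 7;      /* net_conf->ko_count */

    /* same expression as in the diff: timeout*HZ/10 * ko_count */
    unsigned long et = timeout * HZ / 10 * ko_count;

    printf("effective timeout: %lu jiffies (%lu s)\n", et, et / HZ);
    return 0;
}

With these example values the oldest request may stay pending for 42 seconds before the connection is forced into C_TIMEOUT (or the local disk is reported as frozen if the request is no longer network-pending).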
+29 -7
drivers/block/drbd/drbd_req.h
··· 82 82 to_be_submitted, 83 83 84 84 /* XXX yes, now I am inconsistent... 85 - * these two are not "events" but "actions" 85 + * these are not "events" but "actions" 86 86 * oh, well... */ 87 87 queue_for_net_write, 88 88 queue_for_net_read, 89 + queue_for_send_oos, 89 90 90 91 send_canceled, 91 92 send_failed, 92 93 handed_over_to_network, 94 + oos_handed_to_network, 93 95 connection_lost_while_pending, 94 96 read_retry_remote_canceled, 95 97 recv_acked_by_peer, ··· 291 289 req->epoch = 0; 292 290 req->sector = bio_src->bi_sector; 293 291 req->size = bio_src->bi_size; 294 - req->start_time = jiffies; 295 292 INIT_HLIST_NODE(&req->colision); 296 293 INIT_LIST_HEAD(&req->tl_requests); 297 294 INIT_LIST_HEAD(&req->w.list); ··· 322 321 struct bio_and_error *m); 323 322 extern void complete_master_bio(struct drbd_conf *mdev, 324 323 struct bio_and_error *m); 324 + extern void request_timer_fn(unsigned long data); 325 325 326 326 /* use this if you don't want to deal with calling complete_master_bio() 327 327 * outside the spinlock, e.g. when walking some list on cleanup. */ ··· 340 338 return rv; 341 339 } 342 340 343 - /* completion of master bio is outside of spinlock. 344 - * If you need it irqsave, do it your self! 345 - * Which means: don't use from bio endio callback. */ 341 + /* completion of master bio is outside of our spinlock. 342 + * We still may or may not be inside some irqs disabled section 343 + * of the lower level driver completion callback, so we need to 344 + * spin_lock_irqsave here. */ 346 345 static inline int req_mod(struct drbd_request *req, 347 346 enum drbd_req_event what) 348 347 { 348 + unsigned long flags; 349 349 struct drbd_conf *mdev = req->mdev; 350 350 struct bio_and_error m; 351 351 int rv; 352 352 353 - spin_lock_irq(&mdev->req_lock); 353 + spin_lock_irqsave(&mdev->req_lock, flags); 354 354 rv = __req_mod(req, what, &m); 355 - spin_unlock_irq(&mdev->req_lock); 355 + spin_unlock_irqrestore(&mdev->req_lock, flags); 356 356 357 357 if (m.bio) 358 358 complete_master_bio(mdev, &m); 359 359 360 360 return rv; 361 361 } 362 + 363 + static inline bool drbd_should_do_remote(union drbd_state s) 364 + { 365 + return s.pdsk == D_UP_TO_DATE || 366 + (s.pdsk >= D_INCONSISTENT && 367 + s.conn >= C_WF_BITMAP_T && 368 + s.conn < C_AHEAD); 369 + /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. 370 + That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* 371 + states. */ 372 + } 373 + static inline bool drbd_should_send_oos(union drbd_state s) 374 + { 375 + return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; 376 + /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary 377 + since we enter state C_AHEAD only if proto >= 96 */ 378 + } 379 + 362 380 #endif
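The two inline predicates added to drbd_req.h split the write path in drbd_make_request_common(): drbd_should_do_remote() decides whether a write is actually mirrored to the peer, while drbd_should_send_oos() decides whether it is only announced as out of sync (Ahead mode, or while the sync-source bitmap exchange is still pending). A self-contained sketch of that split, using a reduced state model; the enum ordering is an assumption based on the include/linux/drbd.h hunk further down and is not itself part of this patch:

/* sketch only, userspace, not kernel code */
#include <stdbool.h>
#include <stdio.h>

enum conns { C_CONNECTED, C_STARTING_SYNC_S, C_STARTING_SYNC_T,
             C_WF_BITMAP_S, C_WF_BITMAP_T, C_WF_SYNC_UUID,
             C_SYNC_SOURCE, C_SYNC_TARGET, C_VERIFY_S, C_VERIFY_T,
             C_PAUSED_SYNC_S, C_PAUSED_SYNC_T, C_AHEAD, C_BEHIND };
enum pdsk  { D_DISKLESS, D_INCONSISTENT, D_UP_TO_DATE };  /* reduced set */

static bool should_do_remote(enum pdsk pdsk, enum conns conn)
{
    return pdsk == D_UP_TO_DATE ||
           (pdsk >= D_INCONSISTENT && conn >= C_WF_BITMAP_T && conn < C_AHEAD);
}

static bool should_send_oos(enum conns conn)
{
    return conn == C_AHEAD || conn == C_WF_BITMAP_S;
}

int main(void)
{
    /* Congested ("Ahead") peer: the write is not mirrored, it is only
     * recorded as out of sync and shipped by a later resync. */
    printf("Ahead:     remote=%d oos=%d\n",
           should_do_remote(D_INCONSISTENT, C_AHEAD), should_send_oos(C_AHEAD));
    /* Healthy connection: mirrored normally, no out-of-sync notice. */
    printf("Connected: remote=%d oos=%d\n",
           should_do_remote(D_UP_TO_DATE, C_CONNECTED), should_send_oos(C_CONNECTED));
    return 0;
}

Note that the two predicates are mutually exclusive for any given state, which is what the D_ASSERT(!(remote && send_oos)) in drbd_make_request_common() relies on.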
+4 -2
drivers/block/drbd/drbd_strings.c
··· 48 48 [C_PAUSED_SYNC_T] = "PausedSyncT", 49 49 [C_VERIFY_S] = "VerifyS", 50 50 [C_VERIFY_T] = "VerifyT", 51 + [C_AHEAD] = "Ahead", 52 + [C_BEHIND] = "Behind", 51 53 }; 52 54 53 55 static const char *drbd_role_s_names[] = { ··· 94 92 const char *drbd_conn_str(enum drbd_conns s) 95 93 { 96 94 /* enums are unsigned... */ 97 - return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s]; 95 + return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s]; 98 96 } 99 97 100 98 const char *drbd_role_str(enum drbd_role s) ··· 107 105 return s > D_UP_TO_DATE ? "TOO_LARGE" : drbd_disk_s_names[s]; 108 106 } 109 107 110 - const char *drbd_set_st_err_str(enum drbd_state_ret_codes err) 108 + const char *drbd_set_st_err_str(enum drbd_state_rv err) 111 109 { 112 110 return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" : 113 111 err > SS_TWO_PRIMARIES ? "TOO_LARGE"
+198 -170
drivers/block/drbd/drbd_worker.c
··· 39 39 #include "drbd_req.h" 40 40 41 41 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); 42 + static int w_make_resync_request(struct drbd_conf *mdev, 43 + struct drbd_work *w, int cancel); 42 44 43 45 44 46 45 - /* defined here: 46 - drbd_md_io_complete 47 - drbd_endio_sec 48 - drbd_endio_pri 49 - 50 - * more endio handlers: 51 - atodb_endio in drbd_actlog.c 52 - drbd_bm_async_io_complete in drbd_bitmap.c 53 - 47 + /* endio handlers: 48 + * drbd_md_io_complete (defined here) 49 + * drbd_endio_pri (defined here) 50 + * drbd_endio_sec (defined here) 51 + * bm_async_io_complete (defined in drbd_bitmap.c) 52 + * 54 53 * For all these callbacks, note the following: 55 54 * The callbacks will be called in irq context by the IDE drivers, 56 55 * and in Softirqs/Tasklets/BH context by the SCSI drivers. ··· 93 94 if (list_empty(&mdev->read_ee)) 94 95 wake_up(&mdev->ee_wait); 95 96 if (test_bit(__EE_WAS_ERROR, &e->flags)) 96 - __drbd_chk_io_error(mdev, FALSE); 97 + __drbd_chk_io_error(mdev, false); 97 98 spin_unlock_irqrestore(&mdev->req_lock, flags); 98 99 99 100 drbd_queue_work(&mdev->data.work, &e->w); ··· 136 137 : list_empty(&mdev->active_ee); 137 138 138 139 if (test_bit(__EE_WAS_ERROR, &e->flags)) 139 - __drbd_chk_io_error(mdev, FALSE); 140 + __drbd_chk_io_error(mdev, false); 140 141 spin_unlock_irqrestore(&mdev->req_lock, flags); 141 142 142 143 if (is_syncer_req) ··· 162 163 int uptodate = bio_flagged(bio, BIO_UPTODATE); 163 164 int is_write = bio_data_dir(bio) == WRITE; 164 165 165 - if (error) 166 + if (error && __ratelimit(&drbd_ratelimit_state)) 166 167 dev_warn(DEV, "%s: error=%d s=%llus\n", 167 168 is_write ? "write" : "read", error, 168 169 (unsigned long long)e->sector); 169 170 if (!error && !uptodate) { 170 - dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", 171 - is_write ? "write" : "read", 172 - (unsigned long long)e->sector); 171 + if (__ratelimit(&drbd_ratelimit_state)) 172 + dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", 173 + is_write ? "write" : "read", 174 + (unsigned long long)e->sector); 173 175 /* strange behavior of some lower level drivers... 174 176 * fail the request by clearing the uptodate flag, 175 177 * but do not return any error?! */ ··· 248 248 spin_unlock_irq(&mdev->req_lock); 249 249 250 250 return w_send_read_req(mdev, w, 0); 251 - } 252 - 253 - int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 254 - { 255 - ERR_IF(cancel) return 1; 256 - dev_err(DEV, "resync inactive, but callback triggered??\n"); 257 - return 1; /* Simply ignore this! */ 258 251 } 259 252 260 253 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest) ··· 348 355 if (!get_ldev(mdev)) 349 356 return -EIO; 350 357 351 - if (drbd_rs_should_slow_down(mdev)) 358 + if (drbd_rs_should_slow_down(mdev, sector)) 352 359 goto defer; 353 360 354 361 /* GFP_TRY, because if there is no memory available right now, this may ··· 366 373 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) 367 374 return 0; 368 375 369 - /* drbd_submit_ee currently fails for one reason only: 370 - * not being able to allocate enough bios. 371 - * Is dropping the connection going to help? */ 376 + /* If it failed because of ENOMEM, retry should help. If it failed 377 + * because bio_add_page failed (probably broken lower level driver), 378 + * retry may or may not help. 379 + * If it does not, you may need to force disconnect. 
*/ 372 380 spin_lock_irq(&mdev->req_lock); 373 381 list_del(&e->w.list); 374 382 spin_unlock_irq(&mdev->req_lock); ··· 380 386 return -EAGAIN; 381 387 } 382 388 389 + int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 390 + { 391 + switch (mdev->state.conn) { 392 + case C_VERIFY_S: 393 + w_make_ov_request(mdev, w, cancel); 394 + break; 395 + case C_SYNC_TARGET: 396 + w_make_resync_request(mdev, w, cancel); 397 + break; 398 + } 399 + 400 + return 1; 401 + } 402 + 383 403 void resync_timer_fn(unsigned long data) 384 404 { 385 405 struct drbd_conf *mdev = (struct drbd_conf *) data; 386 - int queue; 387 406 388 - queue = 1; 389 - switch (mdev->state.conn) { 390 - case C_VERIFY_S: 391 - mdev->resync_work.cb = w_make_ov_request; 392 - break; 393 - case C_SYNC_TARGET: 394 - mdev->resync_work.cb = w_make_resync_request; 395 - break; 396 - default: 397 - queue = 0; 398 - mdev->resync_work.cb = w_resync_inactive; 399 - } 400 - 401 - /* harmless race: list_empty outside data.work.q_lock */ 402 - if (list_empty(&mdev->resync_work.list) && queue) 407 + if (list_empty(&mdev->resync_work.list)) 403 408 drbd_queue_work(&mdev->data.work, &mdev->resync_work); 404 409 } 405 410 ··· 431 438 fb->values[i] += value; 432 439 } 433 440 434 - int drbd_rs_controller(struct drbd_conf *mdev) 441 + static int drbd_rs_controller(struct drbd_conf *mdev) 435 442 { 436 443 unsigned int sect_in; /* Number of sectors that came in since the last turn */ 437 444 unsigned int want; /* The number of sectors we want in the proxy */ ··· 485 492 return req_sect; 486 493 } 487 494 488 - int w_make_resync_request(struct drbd_conf *mdev, 489 - struct drbd_work *w, int cancel) 495 + static int drbd_rs_number_requests(struct drbd_conf *mdev) 496 + { 497 + int number; 498 + if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ 499 + number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); 500 + mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; 501 + } else { 502 + mdev->c_sync_rate = mdev->sync_conf.rate; 503 + number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); 504 + } 505 + 506 + /* ignore the amount of pending requests, the resync controller should 507 + * throttle down to incoming reply rate soon enough anyways. */ 508 + return number; 509 + } 510 + 511 + static int w_make_resync_request(struct drbd_conf *mdev, 512 + struct drbd_work *w, int cancel) 490 513 { 491 514 unsigned long bit; 492 515 sector_t sector; 493 516 const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 494 - int max_segment_size; 495 - int number, rollback_i, size, pe, mx; 517 + int max_bio_size; 518 + int number, rollback_i, size; 496 519 int align, queued, sndbuf; 497 520 int i = 0; 498 521 499 522 if (unlikely(cancel)) 500 523 return 1; 501 - 502 - if (unlikely(mdev->state.conn < C_CONNECTED)) { 503 - dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected"); 504 - return 0; 505 - } 506 - 507 - if (mdev->state.conn != C_SYNC_TARGET) 508 - dev_err(DEV, "%s in w_make_resync_request\n", 509 - drbd_conn_str(mdev->state.conn)); 510 524 511 525 if (mdev->rs_total == 0) { 512 526 /* empty resync? 
*/ ··· 527 527 to continue resync with a broken disk makes no sense at 528 528 all */ 529 529 dev_err(DEV, "Disk broke down during resync!\n"); 530 - mdev->resync_work.cb = w_resync_inactive; 531 530 return 1; 532 531 } 533 532 534 533 /* starting with drbd 8.3.8, we can handle multi-bio EEs, 535 534 * if it should be necessary */ 536 - max_segment_size = 537 - mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : 538 - mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; 535 + max_bio_size = 536 + mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 : 537 + mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE; 539 538 540 - if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ 541 - number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); 542 - mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; 543 - } else { 544 - mdev->c_sync_rate = mdev->sync_conf.rate; 545 - number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); 546 - } 547 - 548 - /* Throttle resync on lower level disk activity, which may also be 549 - * caused by application IO on Primary/SyncTarget. 550 - * Keep this after the call to drbd_rs_controller, as that assumes 551 - * to be called as precisely as possible every SLEEP_TIME, 552 - * and would be confused otherwise. */ 553 - if (drbd_rs_should_slow_down(mdev)) 539 + number = drbd_rs_number_requests(mdev); 540 + if (number == 0) 554 541 goto requeue; 555 - 556 - mutex_lock(&mdev->data.mutex); 557 - if (mdev->data.socket) 558 - mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req); 559 - else 560 - mx = 1; 561 - mutex_unlock(&mdev->data.mutex); 562 - 563 - /* For resync rates >160MB/sec, allow more pending RS requests */ 564 - if (number > mx) 565 - mx = number; 566 - 567 - /* Limit the number of pending RS requests to no more than the peer's receive buffer */ 568 - pe = atomic_read(&mdev->rs_pending_cnt); 569 - if ((pe + number) > mx) { 570 - number = mx - pe; 571 - } 572 542 573 543 for (i = 0; i < number; i++) { 574 544 /* Stop generating RS requests, when half of the send buffer is filled */ ··· 558 588 size = BM_BLOCK_SIZE; 559 589 bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); 560 590 561 - if (bit == -1UL) { 591 + if (bit == DRBD_END_OF_BITMAP) { 562 592 mdev->bm_resync_fo = drbd_bm_bits(mdev); 563 - mdev->resync_work.cb = w_resync_inactive; 564 593 put_ldev(mdev); 565 594 return 1; 566 595 } 567 596 568 597 sector = BM_BIT_TO_SECT(bit); 569 598 570 - if (drbd_try_rs_begin_io(mdev, sector)) { 599 + if (drbd_rs_should_slow_down(mdev, sector) || 600 + drbd_try_rs_begin_io(mdev, sector)) { 571 601 mdev->bm_resync_fo = bit; 572 602 goto requeue; 573 603 } ··· 578 608 goto next_sector; 579 609 } 580 610 581 - #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE 611 + #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE 582 612 /* try to find some adjacent bits. 583 613 * we stop if we have already the maximum req size. 584 614 * ··· 588 618 align = 1; 589 619 rollback_i = i; 590 620 for (;;) { 591 - if (size + BM_BLOCK_SIZE > max_segment_size) 621 + if (size + BM_BLOCK_SIZE > max_bio_size) 592 622 break; 593 623 594 624 /* Be always aligned */ ··· 655 685 * resync data block, and the last bit is cleared. 656 686 * until then resync "work" is "inactive" ... 
657 687 */ 658 - mdev->resync_work.cb = w_resync_inactive; 659 688 put_ldev(mdev); 660 689 return 1; 661 690 } ··· 675 706 if (unlikely(cancel)) 676 707 return 1; 677 708 678 - if (unlikely(mdev->state.conn < C_CONNECTED)) { 679 - dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected"); 680 - return 0; 681 - } 682 - 683 - number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ); 684 - if (atomic_read(&mdev->rs_pending_cnt) > number) 685 - goto requeue; 686 - 687 - number -= atomic_read(&mdev->rs_pending_cnt); 709 + number = drbd_rs_number_requests(mdev); 688 710 689 711 sector = mdev->ov_position; 690 712 for (i = 0; i < number; i++) { 691 713 if (sector >= capacity) { 692 - mdev->resync_work.cb = w_resync_inactive; 693 714 return 1; 694 715 } 695 716 696 717 size = BM_BLOCK_SIZE; 697 718 698 - if (drbd_try_rs_begin_io(mdev, sector)) { 719 + if (drbd_rs_should_slow_down(mdev, sector) || 720 + drbd_try_rs_begin_io(mdev, sector)) { 699 721 mdev->ov_position = sector; 700 722 goto requeue; 701 723 } ··· 704 744 mdev->ov_position = sector; 705 745 706 746 requeue: 747 + mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); 707 748 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); 708 749 return 1; 709 750 } 710 751 752 + 753 + void start_resync_timer_fn(unsigned long data) 754 + { 755 + struct drbd_conf *mdev = (struct drbd_conf *) data; 756 + 757 + drbd_queue_work(&mdev->data.work, &mdev->start_resync_work); 758 + } 759 + 760 + int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 761 + { 762 + if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) { 763 + dev_warn(DEV, "w_start_resync later...\n"); 764 + mdev->start_resync_timer.expires = jiffies + HZ/10; 765 + add_timer(&mdev->start_resync_timer); 766 + return 1; 767 + } 768 + 769 + drbd_start_resync(mdev, C_SYNC_SOURCE); 770 + clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags); 771 + return 1; 772 + } 711 773 712 774 int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 713 775 { ··· 764 782 union drbd_state os, ns; 765 783 struct drbd_work *w; 766 784 char *khelper_cmd = NULL; 785 + int verify_done = 0; 767 786 768 787 /* Remove all elements from the resync LRU. Since future actions 769 788 * might set bits in the (main) bitmap, then the entries in the ··· 775 792 * queue (or even the read operations for those packets 776 793 * is not finished by now). Retry in 100ms. */ 777 794 778 - __set_current_state(TASK_INTERRUPTIBLE); 779 - schedule_timeout(HZ / 10); 795 + schedule_timeout_interruptible(HZ / 10); 780 796 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); 781 797 if (w) { 782 798 w->cb = w_resync_finished; ··· 800 818 spin_lock_irq(&mdev->req_lock); 801 819 os = mdev->state; 802 820 821 + verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); 822 + 803 823 /* This protects us against multiple calls (that can happen in the presence 804 824 of application IO), and against connectivity loss just before we arrive here. */ 805 825 if (os.conn <= C_CONNECTED) ··· 811 827 ns.conn = C_CONNECTED; 812 828 813 829 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", 814 - (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ? 815 - "Online verify " : "Resync", 830 + verify_done ? 
"Online verify " : "Resync", 816 831 dt + mdev->rs_paused, mdev->rs_paused, dbdt); 817 832 818 833 n_oos = drbd_bm_total_weight(mdev); ··· 869 886 } 870 887 } 871 888 872 - drbd_uuid_set_bm(mdev, 0UL); 873 - 874 - if (mdev->p_uuid) { 875 - /* Now the two UUID sets are equal, update what we 876 - * know of the peer. */ 877 - int i; 878 - for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) 879 - mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; 889 + if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) { 890 + /* for verify runs, we don't update uuids here, 891 + * so there would be nothing to report. */ 892 + drbd_uuid_set_bm(mdev, 0UL); 893 + drbd_print_uuids(mdev, "updated UUIDs"); 894 + if (mdev->p_uuid) { 895 + /* Now the two UUID sets are equal, update what we 896 + * know of the peer. */ 897 + int i; 898 + for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++) 899 + mdev->p_uuid[i] = mdev->ldev->md.uuid[i]; 900 + } 880 901 } 881 902 } 882 903 ··· 892 905 mdev->rs_total = 0; 893 906 mdev->rs_failed = 0; 894 907 mdev->rs_paused = 0; 895 - mdev->ov_start_sector = 0; 908 + if (verify_done) 909 + mdev->ov_start_sector = 0; 896 910 897 911 drbd_md_sync(mdev); 898 - 899 - if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { 900 - dev_info(DEV, "Writing the whole bitmap\n"); 901 - drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); 902 - } 903 912 904 913 if (khelper_cmd) 905 914 drbd_khelper(mdev, khelper_cmd); ··· 977 994 put_ldev(mdev); 978 995 } 979 996 980 - if (likely((e->flags & EE_WAS_ERROR) == 0)) { 997 + if (mdev->state.conn == C_AHEAD) { 998 + ok = drbd_send_ack(mdev, P_RS_CANCEL, e); 999 + } else if (likely((e->flags & EE_WAS_ERROR) == 0)) { 981 1000 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) { 982 1001 inc_rs_pending(mdev); 983 1002 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); ··· 1081 1096 if (unlikely(cancel)) 1082 1097 goto out; 1083 1098 1084 - if (unlikely((e->flags & EE_WAS_ERROR) != 0)) 1085 - goto out; 1086 - 1087 1099 digest_size = crypto_hash_digestsize(mdev->verify_tfm); 1088 - /* FIXME if this allocation fails, online verify will not terminate! 
*/ 1089 1100 digest = kmalloc(digest_size, GFP_NOIO); 1090 - if (digest) { 1091 - drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); 1092 - inc_rs_pending(mdev); 1093 - ok = drbd_send_drequest_csum(mdev, e->sector, e->size, 1094 - digest, digest_size, P_OV_REPLY); 1095 - if (!ok) 1096 - dec_rs_pending(mdev); 1097 - kfree(digest); 1101 + if (!digest) { 1102 + ok = 0; /* terminate the connection in case the allocation failed */ 1103 + goto out; 1098 1104 } 1105 + 1106 + if (likely(!(e->flags & EE_WAS_ERROR))) 1107 + drbd_csum_ee(mdev, mdev->verify_tfm, e, digest); 1108 + else 1109 + memset(digest, 0, digest_size); 1110 + 1111 + inc_rs_pending(mdev); 1112 + ok = drbd_send_drequest_csum(mdev, e->sector, e->size, 1113 + digest, digest_size, P_OV_REPLY); 1114 + if (!ok) 1115 + dec_rs_pending(mdev); 1116 + kfree(digest); 1099 1117 1100 1118 out: 1101 1119 drbd_free_ee(mdev, e); 1102 - 1103 1120 dec_unacked(mdev); 1104 1121 1105 1122 return ok; ··· 1116 1129 mdev->ov_last_oos_size = size>>9; 1117 1130 } 1118 1131 drbd_set_out_of_sync(mdev, sector, size); 1119 - set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags); 1120 1132 } 1121 1133 1122 1134 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) ··· 1151 1165 eq = !memcmp(digest, di->digest, digest_size); 1152 1166 kfree(digest); 1153 1167 } 1154 - } else { 1155 - ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e); 1156 - if (__ratelimit(&drbd_ratelimit_state)) 1157 - dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); 1158 1168 } 1159 1169 1160 1170 dec_unacked(mdev); ··· 1164 1182 1165 1183 drbd_free_ee(mdev, e); 1166 1184 1167 - if (--mdev->ov_left == 0) { 1185 + --mdev->ov_left; 1186 + 1187 + /* let's advance progress step marks only for every other megabyte */ 1188 + if ((mdev->ov_left & 0x200) == 0x200) 1189 + drbd_advance_rs_marks(mdev, mdev->ov_left); 1190 + 1191 + if (mdev->ov_left == 0) { 1168 1192 ov_oos_print(mdev); 1169 1193 drbd_resync_finished(mdev); 1170 1194 } ··· 1221 1233 if (cancel) 1222 1234 return 1; 1223 1235 return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE); 1236 + } 1237 + 1238 + int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1239 + { 1240 + struct drbd_request *req = container_of(w, struct drbd_request, w); 1241 + int ok; 1242 + 1243 + if (unlikely(cancel)) { 1244 + req_mod(req, send_canceled); 1245 + return 1; 1246 + } 1247 + 1248 + ok = drbd_send_oos(mdev, req); 1249 + req_mod(req, oos_handed_to_network); 1250 + 1251 + return ok; 1224 1252 } 1225 1253 1226 1254 /** ··· 1434 1430 return retcode; 1435 1431 } 1436 1432 1433 + void drbd_rs_controller_reset(struct drbd_conf *mdev) 1434 + { 1435 + atomic_set(&mdev->rs_sect_in, 0); 1436 + atomic_set(&mdev->rs_sect_ev, 0); 1437 + mdev->rs_in_flight = 0; 1438 + mdev->rs_planed = 0; 1439 + spin_lock(&mdev->peer_seq_lock); 1440 + fifo_set(&mdev->rs_plan_s, 0); 1441 + spin_unlock(&mdev->peer_seq_lock); 1442 + } 1443 + 1437 1444 /** 1438 1445 * drbd_start_resync() - Start the resync process 1439 1446 * @mdev: DRBD device. ··· 1458 1443 union drbd_state ns; 1459 1444 int r; 1460 1445 1461 - if (mdev->state.conn >= C_SYNC_SOURCE) { 1446 + if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { 1462 1447 dev_err(DEV, "Resync already running!\n"); 1463 1448 return; 1464 1449 } 1465 1450 1466 - /* In case a previous resync run was aborted by an IO error/detach on the peer. 
*/ 1467 - drbd_rs_cancel_all(mdev); 1451 + if (mdev->state.conn < C_AHEAD) { 1452 + /* In case a previous resync run was aborted by an IO error/detach on the peer. */ 1453 + drbd_rs_cancel_all(mdev); 1454 + /* This should be done when we abort the resync. We definitely do not 1455 + want to have this for connections going back and forth between 1456 + Ahead/Behind and SyncSource/SyncTarget */ 1457 + } 1468 1458 1469 1459 if (side == C_SYNC_TARGET) { 1470 1460 /* Since application IO was locked out during C_WF_BITMAP_T and ··· 1483 1463 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1484 1464 return; 1485 1465 } 1466 + } else /* C_SYNC_SOURCE */ { 1467 + r = drbd_khelper(mdev, "before-resync-source"); 1468 + r = (r >> 8) & 0xff; 1469 + if (r > 0) { 1470 + if (r == 3) { 1471 + dev_info(DEV, "before-resync-source handler returned %d, " 1472 + "ignoring. Old userland tools?", r); 1473 + } else { 1474 + dev_info(DEV, "before-resync-source handler returned %d, " 1475 + "dropping connection.\n", r); 1476 + drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1477 + return; 1478 + } 1479 + } 1486 1480 } 1487 1481 1488 1482 drbd_state_lock(mdev); ··· 1504 1470 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) { 1505 1471 drbd_state_unlock(mdev); 1506 1472 return; 1507 - } 1508 - 1509 - if (side == C_SYNC_TARGET) { 1510 - mdev->bm_resync_fo = 0; 1511 - } else /* side == C_SYNC_SOURCE */ { 1512 - u64 uuid; 1513 - 1514 - get_random_bytes(&uuid, sizeof(u64)); 1515 - drbd_uuid_set(mdev, UI_BITMAP, uuid); 1516 - drbd_send_sync_uuid(mdev, uuid); 1517 - 1518 - D_ASSERT(mdev->state.disk == D_UP_TO_DATE); 1519 1473 } 1520 1474 1521 1475 write_lock_irq(&global_state_lock); ··· 1543 1521 _drbd_pause_after(mdev); 1544 1522 } 1545 1523 write_unlock_irq(&global_state_lock); 1546 - put_ldev(mdev); 1547 1524 1548 1525 if (r == SS_SUCCESS) { 1549 1526 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", 1550 1527 drbd_conn_str(ns.conn), 1551 1528 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), 1552 1529 (unsigned long) mdev->rs_total); 1530 + if (side == C_SYNC_TARGET) 1531 + mdev->bm_resync_fo = 0; 1532 + 1533 + /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid 1534 + * with w_send_oos, or the sync target will get confused as to 1535 + * how much bits to resync. We cannot do that always, because for an 1536 + * empty resync and protocol < 95, we need to do it here, as we call 1537 + * drbd_resync_finished from here in that case. 1538 + * We drbd_gen_and_send_sync_uuid here for protocol < 96, 1539 + * and from after_state_ch otherwise. */ 1540 + if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96) 1541 + drbd_gen_and_send_sync_uuid(mdev); 1553 1542 1554 1543 if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { 1555 1544 /* This still has a race (about when exactly the peers ··· 1580 1547 drbd_resync_finished(mdev); 1581 1548 } 1582 1549 1583 - atomic_set(&mdev->rs_sect_in, 0); 1584 - atomic_set(&mdev->rs_sect_ev, 0); 1585 - mdev->rs_in_flight = 0; 1586 - mdev->rs_planed = 0; 1587 - spin_lock(&mdev->peer_seq_lock); 1588 - fifo_set(&mdev->rs_plan_s, 0); 1589 - spin_unlock(&mdev->peer_seq_lock); 1550 + drbd_rs_controller_reset(mdev); 1590 1551 /* ns.conn may already be != mdev->state.conn, 1591 1552 * we may have been paused in between, or become paused until 1592 1553 * the timer triggers. ··· 1590 1563 1591 1564 drbd_md_sync(mdev); 1592 1565 } 1566 + put_ldev(mdev); 1593 1567 drbd_state_unlock(mdev); 1594 1568 } 1595 1569
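The new drbd_rs_number_requests() in the drbd_worker.c hunks above paces both resync and online verify: without a plan-ahead controller it issues SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE/1024) * HZ) read requests per resync-timer tick. A worked example, assuming SLEEP_TIME = HZ/10 (100 ms) and BM_BLOCK_SIZE = 4 KiB; both constants are defined elsewhere in drbd, not in this hunk:

/* sketch only, not kernel code */
#include <stdio.h>

#define HZ            250
#define SLEEP_TIME    (HZ / 10)     /* assumed: one resync timer tick */
#define BM_BLOCK_SIZE 4096          /* assumed: one bitmap bit covers 4 KiB */

int main(void)
{
    int c_sync_rate = 10240;        /* sync_conf.rate in KiB/s */

    /* same expression as the fixed-rate branch in the diff */
    int number = SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);

    printf("%d requests per tick = %d KiB per 100 ms = %d KiB/s\n",
           number, number * (BM_BLOCK_SIZE / 1024),
           number * (BM_BLOCK_SIZE / 1024) * 10);
    return 0;
}

With a 10240 KiB/s configured rate this yields 256 4-KiB requests per 100 ms tick, i.e. the configured rate round-trips exactly, which is why the per-peer receive-buffer clamping removed by this patch is no longer needed.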
+1 -1
drivers/block/drbd/drbd_wrappers.h
··· 39 39 return;
40 40 }
41 41 
42 - if (FAULT_ACTIVE(mdev, fault_type))
42 + if (drbd_insert_fault(mdev, fault_type))
43 43 bio_endio(bio, -EIO);
44 44 else
45 45 generic_make_request(bio);
+18 -5
include/linux/drbd.h
··· 53 53 54 54 55 55 extern const char *drbd_buildtag(void); 56 - #define REL_VERSION "8.3.9" 56 + #define REL_VERSION "8.3.10" 57 57 #define API_VERSION 88 58 58 #define PRO_VERSION_MIN 86 59 - #define PRO_VERSION_MAX 95 59 + #define PRO_VERSION_MAX 96 60 60 61 61 62 62 enum drbd_io_error_p { ··· 96 96 OND_SUSPEND_IO 97 97 }; 98 98 99 + enum drbd_on_congestion { 100 + OC_BLOCK, 101 + OC_PULL_AHEAD, 102 + OC_DISCONNECT, 103 + }; 104 + 99 105 /* KEEP the order, do not delete or insert. Only append. */ 100 - enum drbd_ret_codes { 106 + enum drbd_ret_code { 101 107 ERR_CODE_BASE = 100, 102 108 NO_ERROR = 101, 103 109 ERR_LOCAL_ADDR = 102, ··· 152 146 ERR_PERM = 152, 153 147 ERR_NEED_APV_93 = 153, 154 148 ERR_STONITH_AND_PROT_A = 154, 149 + ERR_CONG_NOT_PROTO_A = 155, 150 + ERR_PIC_AFTER_DEP = 156, 151 + ERR_PIC_PEER_DEP = 157, 155 152 156 153 /* insert new ones above this line */ 157 154 AFTER_LAST_ERR_CODE ··· 208 199 C_VERIFY_T, 209 200 C_PAUSED_SYNC_S, 210 201 C_PAUSED_SYNC_T, 202 + 203 + C_AHEAD, 204 + C_BEHIND, 205 + 211 206 C_MASK = 31 212 207 }; 213 208 ··· 272 259 unsigned int i; 273 260 }; 274 261 275 - enum drbd_state_ret_codes { 262 + enum drbd_state_rv { 276 263 SS_CW_NO_NEED = 4, 277 264 SS_CW_SUCCESS = 3, 278 265 SS_NOTHING_TO_DO = 2, ··· 303 290 extern const char *drbd_conn_str(enum drbd_conns); 304 291 extern const char *drbd_role_str(enum drbd_role); 305 292 extern const char *drbd_disk_str(enum drbd_disk_state); 306 - extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes); 293 + extern const char *drbd_set_st_err_str(enum drbd_state_rv); 307 294 308 295 #define SHARED_SECRET_MAX 64 309 296
+11 -1
include/linux/drbd_limits.h
··· 16 16 #define DEBUG_RANGE_CHECK 0 17 17 18 18 #define DRBD_MINOR_COUNT_MIN 1 19 - #define DRBD_MINOR_COUNT_MAX 255 19 + #define DRBD_MINOR_COUNT_MAX 256 20 + #define DRBD_MINOR_COUNT_DEF 32 20 21 21 22 #define DRBD_DIALOG_REFRESH_MIN 0 22 23 #define DRBD_DIALOG_REFRESH_MAX 600 ··· 130 129 #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT 131 130 #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT 132 131 #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR 132 + #define DRBD_ON_CONGESTION_DEF OC_BLOCK 133 133 134 134 #define DRBD_MAX_BIO_BVECS_MIN 0 135 135 #define DRBD_MAX_BIO_BVECS_MAX 128 ··· 155 153 #define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */ 156 154 #define DRBD_C_MIN_RATE_MAX (4 << 20) 157 155 #define DRBD_C_MIN_RATE_DEF 4096 156 + 157 + #define DRBD_CONG_FILL_MIN 0 158 + #define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ 159 + #define DRBD_CONG_FILL_DEF 0 160 + 161 + #define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN 162 + #define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX 163 + #define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF 158 164 159 165 #undef RANGE 160 166 #endif
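Per the comment in the hunk above, cong_fill is counted in 512-byte sectors, so the new DRBD_CONG_FILL_MAX of (10<<21) corresponds to 10 GiB of data allowed in flight before the congestion policy triggers. A quick check of that arithmetic:

/* sketch only, not kernel code */
#include <stdio.h>

int main(void)
{
    unsigned long long max_sectors = 10ULL << 21;   /* DRBD_CONG_FILL_MAX */

    printf("%llu sectors = %llu GiB\n",
           max_sectors, max_sectors * 512 / (1024ULL * 1024 * 1024));
    return 0;
}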
+11 -2
include/linux/drbd_nl.h
··· 56 56 NL_INTEGER( 39, T_MAY_IGNORE, rr_conflict) 57 57 NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo) 58 58 NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size) 59 + NL_INTEGER( 81, T_MAY_IGNORE, on_congestion) 60 + NL_INTEGER( 82, T_MAY_IGNORE, cong_fill) 61 + NL_INTEGER( 83, T_MAY_IGNORE, cong_extents) 59 62 /* 59 addr_family was available in GIT, never released */ 60 63 NL_BIT( 60, T_MANDATORY, mind_af) 61 64 NL_BIT( 27, T_MAY_IGNORE, want_lose) ··· 69 66 NL_BIT( 70, T_MANDATORY, dry_run) 70 67 ) 71 68 72 - NL_PACKET(disconnect, 6, ) 69 + NL_PACKET(disconnect, 6, 70 + NL_BIT( 84, T_MAY_IGNORE, force) 71 + ) 73 72 74 73 NL_PACKET(resize, 7, 75 74 NL_INT64( 29, T_MAY_IGNORE, resize_size) ··· 148 143 NL_BIT( 63, T_MANDATORY, clear_bm) 149 144 ) 150 145 146 + #ifdef NL_RESPONSE 147 + NL_RESPONSE(return_code_only, 27) 148 + #endif 149 + 151 150 #undef NL_PACKET 152 151 #undef NL_INTEGER 153 152 #undef NL_INT64 154 153 #undef NL_BIT 155 154 #undef NL_STRING 156 - 155 + #undef NL_RESPONSE
+1
include/linux/drbd_tag_magic.h
··· 7 7 /* declare packet_type enums */
8 8 enum packet_types {
9 9 #define NL_PACKET(name, number, fields) P_ ## name = number,
10 + #define NL_RESPONSE(name, number) P_ ## name = number,
10 11 #define NL_INTEGER(pn, pr, member)
11 12 #define NL_INT64(pn, pr, member)
12 13 #define NL_BIT(pn, pr, member)
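drbd_nl.h is an X-macro include: each NL_PACKET()/NL_RESPONSE() line expands differently depending on which macro definitions are in effect at the include site, and with the definitions shown in this drbd_tag_magic.h hunk the new NL_RESPONSE entry becomes an ordinary member of enum packet_types. A stand-alone sketch of that expansion; the two entries are copied from the drbd_nl.h hunk above, everything else is illustrative:

/* sketch only, not kernel code */
#include <stdio.h>

enum packet_types {
#define NL_PACKET(name, number, fields) P_ ## name = number,
#define NL_RESPONSE(name, number)       P_ ## name = number,
#define NL_INTEGER(pn, pr, member)
#define NL_BIT(pn, pr, member)
    NL_PACKET(disconnect, 6, NL_BIT(84, T_MAY_IGNORE, force))
    NL_RESPONSE(return_code_only, 27)
#undef NL_PACKET
#undef NL_RESPONSE
#undef NL_INTEGER
#undef NL_BIT
};

int main(void)
{
    /* the field macros expand to nothing here, so only the enum
     * members P_disconnect = 6 and P_return_code_only = 27 remain */
    printf("P_disconnect=%d P_return_code_only=%d\n",
           P_disconnect, P_return_code_only);
    return 0;
}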