Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
fs/bio.c: fix shadows sparse warning
drbd: The kernel code is now equivalent to out of tree release 8.3.7
drbd: Allow online resizing of DRBD devices while the peer is not reachable (needs to be explicitly forced)
drbd: Don't go into StandAlone mode when authentication fails because of a network error
drivers/block/drbd/drbd_receiver.c: correct NULL test
cfq-iosched: Respect ioprio_class when preempting
genhd: overlapping variable definition
block: removed unused as_io_context
DM: Fix device mapper topology stacking
block: bdev_stack_limits wrapper
block: Fix discard alignment calculation and printing
block: Correct handling of bottom device misalignment
drbd: check on CONFIG_LBDAF, not LBD
drivers/block/drbd: Correct NULL test
drbd: Silenced an assert that could be triggered after changing the write ordering method
drbd: Kconfig fix
drbd: Fix for a race between IO and a detach operation [Bugz 262]
drbd: Use drbd_crypto_is_hash() instead of an open coded check

+107 -87
-5
block/blk-ioc.c
··· 39 40 if (atomic_long_dec_and_test(&ioc->refcount)) { 41 rcu_read_lock(); 42 - if (ioc->aic && ioc->aic->dtor) 43 - ioc->aic->dtor(ioc->aic); 44 cfq_dtor(ioc); 45 rcu_read_unlock(); 46 ··· 74 task_unlock(task); 75 76 if (atomic_dec_and_test(&ioc->nr_tasks)) { 77 - if (ioc->aic && ioc->aic->exit) 78 - ioc->aic->exit(ioc->aic); 79 cfq_exit(ioc); 80 81 } ··· 93 ret->ioprio = 0; 94 ret->last_waited = jiffies; /* doesn't matter... */ 95 ret->nr_batch_requests = 0; /* because this is 0 */ 96 - ret->aic = NULL; 97 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); 98 INIT_HLIST_HEAD(&ret->cic_list); 99 ret->ioc_data = NULL;
··· 39 40 if (atomic_long_dec_and_test(&ioc->refcount)) { 41 rcu_read_lock(); 42 cfq_dtor(ioc); 43 rcu_read_unlock(); 44 ··· 76 task_unlock(task); 77 78 if (atomic_dec_and_test(&ioc->nr_tasks)) { 79 cfq_exit(ioc); 80 81 } ··· 97 ret->ioprio = 0; 98 ret->last_waited = jiffies; /* doesn't matter... */ 99 ret->nr_batch_requests = 0; /* because this is 0 */ 100 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); 101 INIT_HLIST_HEAD(&ret->cic_list); 102 ret->ioc_data = NULL;
+35 -4
block/blk-settings.c
··· 528 sector_t offset) 529 { 530 sector_t alignment; 531 - unsigned int top, bottom; 532 533 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 534 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); ··· 546 t->max_segment_size = min_not_zero(t->max_segment_size, 547 b->max_segment_size); 548 549 alignment = queue_limit_alignment_offset(b, offset); 550 551 /* Bottom device has different alignment. Check that it is ··· 560 bottom = max(b->physical_block_size, b->io_min) + alignment; 561 562 /* Verify that top and bottom intervals line up */ 563 - if (max(top, bottom) & (min(top, bottom) - 1)) 564 t->misaligned = 1; 565 } 566 567 t->logical_block_size = max(t->logical_block_size, ··· 582 if (t->physical_block_size & (t->logical_block_size - 1)) { 583 t->physical_block_size = t->logical_block_size; 584 t->misaligned = 1; 585 } 586 587 /* Minimum I/O a multiple of the physical block size? */ 588 if (t->io_min & (t->physical_block_size - 1)) { 589 t->io_min = t->physical_block_size; 590 t->misaligned = 1; 591 } 592 593 /* Optimal I/O a multiple of the physical block size? */ 594 if (t->io_opt & (t->physical_block_size - 1)) { 595 t->io_opt = 0; 596 t->misaligned = 1; 597 } 598 599 /* Find lowest common alignment_offset */ ··· 604 & (max(t->physical_block_size, t->io_min) - 1); 605 606 /* Verify that new alignment_offset is on a logical block boundary */ 607 - if (t->alignment_offset & (t->logical_block_size - 1)) 608 t->misaligned = 1; 609 610 /* Discard alignment and granularity */ 611 if (b->discard_granularity) { ··· 635 (t->discard_granularity - 1); 636 } 637 638 - return t->misaligned ? -1 : 0; 639 } 640 EXPORT_SYMBOL(blk_stack_limits); 641 642 /** 643 * disk_stack_limits - adjust queue limits for stacked drivers
··· 528 sector_t offset) 529 { 530 sector_t alignment; 531 + unsigned int top, bottom, ret = 0; 532 533 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 534 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); ··· 546 t->max_segment_size = min_not_zero(t->max_segment_size, 547 b->max_segment_size); 548 549 + t->misaligned |= b->misaligned; 550 + 551 alignment = queue_limit_alignment_offset(b, offset); 552 553 /* Bottom device has different alignment. Check that it is ··· 558 bottom = max(b->physical_block_size, b->io_min) + alignment; 559 560 /* Verify that top and bottom intervals line up */ 561 + if (max(top, bottom) & (min(top, bottom) - 1)) { 562 t->misaligned = 1; 563 + ret = -1; 564 + } 565 } 566 567 t->logical_block_size = max(t->logical_block_size, ··· 578 if (t->physical_block_size & (t->logical_block_size - 1)) { 579 t->physical_block_size = t->logical_block_size; 580 t->misaligned = 1; 581 + ret = -1; 582 } 583 584 /* Minimum I/O a multiple of the physical block size? */ 585 if (t->io_min & (t->physical_block_size - 1)) { 586 t->io_min = t->physical_block_size; 587 t->misaligned = 1; 588 + ret = -1; 589 } 590 591 /* Optimal I/O a multiple of the physical block size? */ 592 if (t->io_opt & (t->physical_block_size - 1)) { 593 t->io_opt = 0; 594 t->misaligned = 1; 595 + ret = -1; 596 } 597 598 /* Find lowest common alignment_offset */ ··· 597 & (max(t->physical_block_size, t->io_min) - 1); 598 599 /* Verify that new alignment_offset is on a logical block boundary */ 600 + if (t->alignment_offset & (t->logical_block_size - 1)) { 601 t->misaligned = 1; 602 + ret = -1; 603 + } 604 605 /* Discard alignment and granularity */ 606 if (b->discard_granularity) { ··· 626 (t->discard_granularity - 1); 627 } 628 629 + return ret; 630 } 631 EXPORT_SYMBOL(blk_stack_limits); 632 + 633 + /** 634 + * bdev_stack_limits - adjust queue limits for stacked drivers 635 + * @t: the stacking driver limits (top device) 636 + * @bdev: the component block_device (bottom) 637 + * @start: first data sector within component device 638 + * 639 + * Description: 640 + * Merges queue limits for a top device and a block_device. Returns 641 + * 0 if alignment didn't change. Returns -1 if adding the bottom 642 + * device caused misalignment. 643 + */ 644 + int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 645 + sector_t start) 646 + { 647 + struct request_queue *bq = bdev_get_queue(bdev); 648 + 649 + start += get_start_sect(bdev); 650 + 651 + return blk_stack_limits(t, &bq->limits, start << 9); 652 + } 653 + EXPORT_SYMBOL(bdev_stack_limits); 654 655 /** 656 * disk_stack_limits - adjust queue limits for stacked drivers
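The bdev_stack_limits() wrapper added above folds the component device's partition start into the offset (get_start_sect() plus the caller's start sector, shifted to bytes) before deferring to blk_stack_limits(), and it returns -1 only when adding that bottom device caused misalignment. Below is a minimal sketch, not taken from the patch, of how a hypothetical stacking driver might call it; bdev_stack_limits(), blk_stack_limits(), get_start_sect() and bdevname() are the real interfaces, while the helper name and warning text are illustrative.

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical helper: merge one component partition into the top limits. */
static int my_stack_component(struct queue_limits *t,
			      struct block_device *bdev,
			      sector_t start_in_bdev)
{
	char b[BDEVNAME_SIZE];

	/*
	 * bdev_stack_limits() adds get_start_sect(bdev), so alignment is
	 * judged against the start of the whole disk rather than the
	 * partition, and the byte offset handed to blk_stack_limits()
	 * is (start << 9).
	 */
	if (bdev_stack_limits(t, bdev, start_in_bdev) < 0) {
		printk(KERN_WARNING "%s: adding device caused misalignment "
		       "at byte offset %llu\n", bdevname(bdev, b),
		       (unsigned long long)(start_in_bdev +
					    get_start_sect(bdev)) << 9);
		return -1;
	}
	return 0;
}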
+6
block/cfq-iosched.c
··· 3077 return true; 3078 3079 /* 3080 * if the new request is sync, but the currently running queue is 3081 * not, let the sync request have priority. 3082 */
··· 3077 return true; 3078 3079 /* 3080 + * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. 3081 + */ 3082 + if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) 3083 + return false; 3084 + 3085 + /* 3086 * if the new request is sync, but the currently running queue is 3087 * not, let the sync request have priority. 3088 */
+1 -1
block/genhd.c
··· 867 { 868 struct gendisk *disk = dev_to_disk(dev); 869 870 - return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue)); 871 } 872 873 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
··· 867 { 868 struct gendisk *disk = dev_to_disk(dev); 869 870 + return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); 871 } 872 873 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
+1 -1
drivers/block/drbd/Kconfig
··· 3 # 4 5 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected" 6 - depends on !PROC_FS || !INET || !CONNECTOR 7 8 config BLK_DEV_DRBD 9 tristate "DRBD Distributed Replicated Block Device support"
··· 3 # 4 5 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected" 6 + depends on PROC_FS='n' || INET='n' || CONNECTOR='n' 7 8 config BLK_DEV_DRBD 9 tristate "DRBD Distributed Replicated Block Device support"
+3 -4
drivers/block/drbd/drbd_int.h
··· 1275 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32 1276 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM 1277 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM 1278 - #elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32 1279 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32 1280 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32 1281 #else ··· 1371 extern void drbd_suspend_io(struct drbd_conf *mdev); 1372 extern void drbd_resume_io(struct drbd_conf *mdev); 1373 extern char *ppsize(char *buf, unsigned long long size); 1374 - extern sector_t drbd_new_dev_size(struct drbd_conf *, 1375 - struct drbd_backing_dev *); 1376 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1377 - extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local); 1378 extern void resync_after_online_grow(struct drbd_conf *); 1379 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1380 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
··· 1275 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32 1276 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM 1277 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM 1278 + #elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32 1279 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32 1280 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32 1281 #else ··· 1371 extern void drbd_suspend_io(struct drbd_conf *mdev); 1372 extern void drbd_resume_io(struct drbd_conf *mdev); 1373 extern char *ppsize(char *buf, unsigned long long size); 1374 + extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); 1375 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1376 + extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local); 1377 extern void resync_after_online_grow(struct drbd_conf *); 1378 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1379 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
+1
drivers/block/drbd/drbd_main.c
··· 1298 dev_err(DEV, "Sending state in drbd_io_error() failed\n"); 1299 } 1300 1301 lc_destroy(mdev->resync); 1302 mdev->resync = NULL; 1303 lc_destroy(mdev->act_log);
··· 1298 dev_err(DEV, "Sending state in drbd_io_error() failed\n"); 1299 } 1300 1301 + wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 1302 lc_destroy(mdev->resync); 1303 mdev->resync = NULL; 1304 lc_destroy(mdev->act_log);
+12 -7
drivers/block/drbd/drbd_nl.c
··· 510 * Returns 0 on success, negative return values indicate errors. 511 * You should call drbd_md_sync() after calling this function. 512 */ 513 - enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local) 514 { 515 sector_t prev_first_sect, prev_size; /* previous meta location */ 516 sector_t la_size; ··· 541 /* TODO: should only be some assert here, not (re)init... */ 542 drbd_md_set_sector_offsets(mdev, mdev->ldev); 543 544 - size = drbd_new_dev_size(mdev, mdev->ldev); 545 546 if (drbd_get_capacity(mdev->this_bdev) != size || 547 drbd_bm_capacity(mdev) != size) { ··· 596 } 597 598 sector_t 599 - drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 600 { 601 sector_t p_size = mdev->p_size; /* partner's disk size. */ 602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ ··· 605 sector_t size = 0; 606 607 m_size = drbd_get_max_capacity(bdev); 608 609 if (p_size && m_size) { 610 size = min_t(sector_t, p_size, m_size); ··· 970 971 /* Prevent shrinking of consistent devices ! */ 972 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 973 - drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) { 974 dev_warn(DEV, "refusing to truncate a consistent device\n"); 975 retcode = ERR_DISK_TO_SMALL; 976 goto force_diskless_dec; ··· 1057 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1058 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1059 1060 - dd = drbd_determin_dev_size(mdev); 1061 if (dd == dev_size_error) { 1062 retcode = ERR_NOMEM_BITMAP; 1063 goto force_diskless_dec; ··· 1276 goto fail; 1277 } 1278 1279 - if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) { 1280 retcode = ERR_AUTH_ALG_ND; 1281 goto fail; 1282 } ··· 1509 } 1510 1511 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1512 - dd = drbd_determin_dev_size(mdev); 1513 drbd_md_sync(mdev); 1514 put_ldev(mdev); 1515 if (dd == dev_size_error) {
··· 510 * Returns 0 on success, negative return values indicate errors. 511 * You should call drbd_md_sync() after calling this function. 512 */ 513 + enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local) 514 { 515 sector_t prev_first_sect, prev_size; /* previous meta location */ 516 sector_t la_size; ··· 541 /* TODO: should only be some assert here, not (re)init... */ 542 drbd_md_set_sector_offsets(mdev, mdev->ldev); 543 544 + size = drbd_new_dev_size(mdev, mdev->ldev, force); 545 546 if (drbd_get_capacity(mdev->this_bdev) != size || 547 drbd_bm_capacity(mdev) != size) { ··· 596 } 597 598 sector_t 599 + drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space) 600 { 601 sector_t p_size = mdev->p_size; /* partner's disk size. */ 602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ ··· 605 sector_t size = 0; 606 607 m_size = drbd_get_max_capacity(bdev); 608 + 609 + if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { 610 + dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 611 + p_size = m_size; 612 + } 613 614 if (p_size && m_size) { 615 size = min_t(sector_t, p_size, m_size); ··· 965 966 /* Prevent shrinking of consistent devices ! */ 967 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 968 + drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) { 969 dev_warn(DEV, "refusing to truncate a consistent device\n"); 970 retcode = ERR_DISK_TO_SMALL; 971 goto force_diskless_dec; ··· 1052 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1053 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1054 1055 + dd = drbd_determin_dev_size(mdev, 0); 1056 if (dd == dev_size_error) { 1057 retcode = ERR_NOMEM_BITMAP; 1058 goto force_diskless_dec; ··· 1271 goto fail; 1272 } 1273 1274 + if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 1275 retcode = ERR_AUTH_ALG_ND; 1276 goto fail; 1277 } ··· 1504 } 1505 1506 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1507 + dd = drbd_determin_dev_size(mdev, rs.resize_force); 1508 drbd_md_sync(mdev); 1509 put_ldev(mdev); 1510 if (dd == dev_size_error) {
+30 -16
drivers/block/drbd/drbd_receiver.c
··· 878 879 if (mdev->cram_hmac_tfm) { 880 /* drbd_request_state(mdev, NS(conn, WFAuth)); */ 881 - if (!drbd_do_auth(mdev)) { 882 dev_err(DEV, "Authentication of peer failed\n"); 883 return -1; 884 } 885 } 886 ··· 1205 1206 case WO_bdev_flush: 1207 case WO_drain_io: 1208 - D_ASSERT(rv == FE_STILL_LIVE); 1209 - set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); 1210 - drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1211 - rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1212 if (rv == FE_RECYCLED) 1213 return TRUE; 1214 ··· 2870 2871 /* Never shrink a device with usable data during connect. 2872 But allow online shrinking if we are connected. */ 2873 - if (drbd_new_dev_size(mdev, mdev->ldev) < 2874 drbd_get_capacity(mdev->this_bdev) && 2875 mdev->state.disk >= D_OUTDATED && 2876 mdev->state.conn < C_CONNECTED) { ··· 2885 #undef min_not_zero 2886 2887 if (get_ldev(mdev)) { 2888 - dd = drbd_determin_dev_size(mdev); 2889 put_ldev(mdev); 2890 if (dd == dev_size_error) 2891 return FALSE; ··· 3835 { 3836 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 3837 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 3838 - return 0; 3839 } 3840 #else 3841 #define CHALLENGE_LEN 64 3842 static int drbd_do_auth(struct drbd_conf *mdev) 3843 { 3844 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ ··· 3866 (u8 *)mdev->net_conf->shared_secret, key_len); 3867 if (rv) { 3868 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 3869 - rv = 0; 3870 goto fail; 3871 } 3872 ··· 3889 3890 if (p.length > CHALLENGE_LEN*2) { 3891 dev_err(DEV, "expected AuthChallenge payload too big.\n"); 3892 - rv = 0; 3893 goto fail; 3894 } 3895 3896 peers_ch = kmalloc(p.length, GFP_NOIO); 3897 if (peers_ch == NULL) { 3898 dev_err(DEV, "kmalloc of peers_ch failed\n"); 3899 - rv = 0; 3900 goto fail; 3901 } 3902 ··· 3912 response = kmalloc(resp_size, GFP_NOIO); 3913 if (response == NULL) { 3914 dev_err(DEV, "kmalloc of response failed\n"); 3915 - rv = 0; 3916 goto fail; 3917 } 3918 ··· 3922 rv = crypto_hash_digest(&desc, &sg, sg.length, response); 3923 if (rv) { 3924 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3925 - rv = 0; 3926 goto fail; 3927 } 3928 ··· 3956 } 3957 3958 right_response = kmalloc(resp_size, GFP_NOIO); 3959 - if (response == NULL) { 3960 dev_err(DEV, "kmalloc of right_response failed\n"); 3961 - rv = 0; 3962 goto fail; 3963 } 3964 ··· 3967 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 3968 if (rv) { 3969 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3970 - rv = 0; 3971 goto fail; 3972 } 3973 ··· 3976 if (rv) 3977 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 3978 resp_size, mdev->net_conf->cram_hmac_alg); 3979 3980 fail: 3981 kfree(peers_ch);
··· 878 879 if (mdev->cram_hmac_tfm) { 880 /* drbd_request_state(mdev, NS(conn, WFAuth)); */ 881 + switch (drbd_do_auth(mdev)) { 882 + case -1: 883 dev_err(DEV, "Authentication of peer failed\n"); 884 return -1; 885 + case 0: 886 + dev_err(DEV, "Authentication of peer failed, trying again.\n"); 887 + return 0; 888 } 889 } 890 ··· 1201 1202 case WO_bdev_flush: 1203 case WO_drain_io: 1204 + if (rv == FE_STILL_LIVE) { 1205 + set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); 1206 + drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1207 + rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1208 + } 1209 if (rv == FE_RECYCLED) 1210 return TRUE; 1211 ··· 2865 2866 /* Never shrink a device with usable data during connect. 2867 But allow online shrinking if we are connected. */ 2868 + if (drbd_new_dev_size(mdev, mdev->ldev, 0) < 2869 drbd_get_capacity(mdev->this_bdev) && 2870 mdev->state.disk >= D_OUTDATED && 2871 mdev->state.conn < C_CONNECTED) { ··· 2880 #undef min_not_zero 2881 2882 if (get_ldev(mdev)) { 2883 + dd = drbd_determin_dev_size(mdev, 0); 2884 put_ldev(mdev); 2885 if (dd == dev_size_error) 2886 return FALSE; ··· 3830 { 3831 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 3832 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 3833 + return -1; 3834 } 3835 #else 3836 #define CHALLENGE_LEN 64 3837 + 3838 + /* Return value: 3839 + 1 - auth succeeded, 3840 + 0 - failed, try again (network error), 3841 + -1 - auth failed, don't try again. 3842 + */ 3843 + 3844 static int drbd_do_auth(struct drbd_conf *mdev) 3845 { 3846 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ ··· 3854 (u8 *)mdev->net_conf->shared_secret, key_len); 3855 if (rv) { 3856 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 3857 + rv = -1; 3858 goto fail; 3859 } 3860 ··· 3877 3878 if (p.length > CHALLENGE_LEN*2) { 3879 dev_err(DEV, "expected AuthChallenge payload too big.\n"); 3880 + rv = -1; 3881 goto fail; 3882 } 3883 3884 peers_ch = kmalloc(p.length, GFP_NOIO); 3885 if (peers_ch == NULL) { 3886 dev_err(DEV, "kmalloc of peers_ch failed\n"); 3887 + rv = -1; 3888 goto fail; 3889 } 3890 ··· 3900 response = kmalloc(resp_size, GFP_NOIO); 3901 if (response == NULL) { 3902 dev_err(DEV, "kmalloc of response failed\n"); 3903 + rv = -1; 3904 goto fail; 3905 } 3906 ··· 3910 rv = crypto_hash_digest(&desc, &sg, sg.length, response); 3911 if (rv) { 3912 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3913 + rv = -1; 3914 goto fail; 3915 } 3916 ··· 3944 } 3945 3946 right_response = kmalloc(resp_size, GFP_NOIO); 3947 + if (right_response == NULL) { 3948 dev_err(DEV, "kmalloc of right_response failed\n"); 3949 + rv = -1; 3950 goto fail; 3951 } 3952 ··· 3955 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 3956 if (rv) { 3957 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 3958 + rv = -1; 3959 goto fail; 3960 } 3961 ··· 3964 if (rv) 3965 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 3966 resp_size, mdev->net_conf->cram_hmac_alg); 3967 + else 3968 + rv = -1; 3969 3970 fail: 3971 kfree(peers_ch);
+5 -15
drivers/md/dm-table.c
··· 503 return 0; 504 } 505 506 - if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 507 - DMWARN("%s: target device %s is misaligned: " 508 "physical_block_size=%u, logical_block_size=%u, " 509 "alignment_offset=%u, start=%llu", 510 dm_device_name(ti->table->md), bdevname(bdev, b), 511 q->limits.physical_block_size, 512 q->limits.logical_block_size, 513 q->limits.alignment_offset, 514 - (unsigned long long) start << 9); 515 - 516 517 /* 518 * Check if merge fn is supported. ··· 1025 * for the table. 1026 */ 1027 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1028 - DMWARN("%s: target device " 1029 "(start sect %llu len %llu) " 1030 - "is misaligned", 1031 dm_device_name(table->md), 1032 (unsigned long long) ti->begin, 1033 (unsigned long long) ti->len); ··· 1078 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1079 struct queue_limits *limits) 1080 { 1081 - /* 1082 - * Each target device in the table has a data area that should normally 1083 - * be aligned such that the DM device's alignment_offset is 0. 1084 - * FIXME: Propagate alignment_offsets up the stack and warn of 1085 - * sub-optimal or inconsistent settings. 1086 - */ 1087 - limits->alignment_offset = 0; 1088 - limits->misaligned = 0; 1089 - 1090 /* 1091 * Copy table's limits to the DM device's request_queue 1092 */
··· 503 return 0; 504 } 505 506 + if (bdev_stack_limits(limits, bdev, start) < 0) 507 + DMWARN("%s: adding target device %s caused an alignment inconsistency: " 508 "physical_block_size=%u, logical_block_size=%u, " 509 "alignment_offset=%u, start=%llu", 510 dm_device_name(ti->table->md), bdevname(bdev, b), 511 q->limits.physical_block_size, 512 q->limits.logical_block_size, 513 q->limits.alignment_offset, 514 + (unsigned long long) start << SECTOR_SHIFT); 515 516 /* 517 * Check if merge fn is supported. ··· 1026 * for the table. 1027 */ 1028 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1029 + DMWARN("%s: adding target device " 1030 "(start sect %llu len %llu) " 1031 + "caused an alignment inconsistency", 1032 dm_device_name(table->md), 1033 (unsigned long long) ti->begin, 1034 (unsigned long long) ti->len); ··· 1079 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1080 struct queue_limits *limits) 1081 { 1082 /* 1083 * Copy table's limits to the DM device's request_queue 1084 */
+1 -1
fs/bio.c
··· 78 79 i = 0; 80 while (i < bio_slab_nr) { 81 - struct bio_slab *bslab = &bio_slabs[i]; 82 83 if (!bslab->slab && entry == -1) 84 entry = i;
··· 78 79 i = 0; 80 while (i < bio_slab_nr) { 81 + bslab = &bio_slabs[i]; 82 83 if (!bslab->slab && entry == -1) 84 entry = i;
+7 -2
include/linux/blkdev.h
··· 938 extern void blk_set_default_limits(struct queue_limits *lim); 939 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 940 sector_t offset); 941 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 942 sector_t offset); 943 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); ··· 1150 static inline int queue_sector_discard_alignment(struct request_queue *q, 1151 sector_t sector) 1152 { 1153 - return ((sector << 9) - q->limits.discard_alignment) 1154 - & (q->limits.discard_granularity - 1); 1155 } 1156 1157 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
··· 938 extern void blk_set_default_limits(struct queue_limits *lim); 939 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 940 sector_t offset); 941 + extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, 942 + sector_t offset); 943 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 944 sector_t offset); 945 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); ··· 1148 static inline int queue_sector_discard_alignment(struct request_queue *q, 1149 sector_t sector) 1150 { 1151 + struct queue_limits *lim = &q->limits; 1152 + unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); 1153 + 1154 + return (lim->discard_granularity + lim->discard_alignment - alignment) 1155 + & (lim->discard_granularity - 1); 1156 } 1157 1158 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
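The reworked queue_sector_discard_alignment() above takes the sector's byte offset modulo the discard granularity and, in effect, returns how far that position sits from the next discard-aligned boundary. The stand-alone snippet below simply replays that arithmetic with made-up numbers (4 KiB granularity, 512-byte discard alignment) to show the values it produces; it is an illustration of the formula, not kernel code.

#include <assert.h>
#include <stdio.h>

/* Mirror of the new formula; all values here are invented examples. */
static unsigned int sector_discard_alignment(unsigned long long byte_offset,
					     unsigned int granularity,
					     unsigned int discard_alignment)
{
	unsigned int alignment = byte_offset & (granularity - 1);

	return (granularity + discard_alignment - alignment)
		& (granularity - 1);
}

int main(void)
{
	/* Sector 0 (byte 0) sits 512 bytes before the first boundary. */
	assert(sector_discard_alignment(0ULL << 9, 4096, 512) == 512);
	/* Sector 1 (byte 512) lands exactly on a discard boundary. */
	assert(sector_discard_alignment(1ULL << 9, 4096, 512) == 0);
	/* Sector 2 (byte 1024) is 3584 bytes short of the next boundary. */
	assert(sector_discard_alignment(2ULL << 9, 4096, 512) == 3584);
	printf("discard alignment formula behaves as expected\n");
	return 0;
}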
+1 -1
include/linux/drbd.h
··· 53 54 55 extern const char *drbd_buildtag(void); 56 - #define REL_VERSION "8.3.6" 57 #define API_VERSION 88 58 #define PRO_VERSION_MIN 86 59 #define PRO_VERSION_MAX 91
··· 53 54 55 extern const char *drbd_buildtag(void); 56 + #define REL_VERSION "8.3.7" 57 #define API_VERSION 88 58 #define PRO_VERSION_MIN 86 59 #define PRO_VERSION_MAX 91
+1
include/linux/drbd_nl.h
··· 69 70 NL_PACKET(resize, 7, 71 NL_INT64( 29, T_MAY_IGNORE, resize_size) 72 ) 73 74 NL_PACKET(syncer_conf, 8,
··· 69 70 NL_PACKET(resize, 7, 71 NL_INT64( 29, T_MAY_IGNORE, resize_size) 72 + NL_BIT( 68, T_MAY_IGNORE, resize_force) 73 ) 74 75 NL_PACKET(syncer_conf, 8,
+3 -3
include/linux/genhd.h
··· 256 #define part_stat_read(part, field) \ 257 ({ \ 258 typeof((part)->dkstats->field) res = 0; \ 259 - int i; \ 260 - for_each_possible_cpu(i) \ 261 - res += per_cpu_ptr((part)->dkstats, i)->field; \ 262 res; \ 263 }) 264
··· 256 #define part_stat_read(part, field) \ 257 ({ \ 258 typeof((part)->dkstats->field) res = 0; \ 259 + unsigned int _cpu; \ 260 + for_each_possible_cpu(_cpu) \ 261 + res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ 262 res; \ 263 }) 264
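The rename from i to _cpu above matters because the field argument is pasted verbatim into the statement expression, so a caller whose argument itself refers to a variable named i would silently pick up the macro's own counter instead. The contrived user-space fragment below (GNU C statement expressions, with an invented struct and values) shows how that kind of overlap can misbehave; it only illustrates the hazard and is not the code this commit fixed.

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 2

struct fake_stats { unsigned long ios[4]; };

static struct fake_stats percpu[NR_CPUS] = {
	{ { 1, 10, 100, 1000 } },
	{ { 2, 20, 200, 2000 } },
};

/* Old style: the local counter reuses the common name "i". */
#define stat_read_old(field)				\
({							\
	unsigned long res = 0;				\
	int i;						\
	for (i = 0; i < NR_CPUS; i++)			\
		res += percpu[i].field;			\
	res;						\
})

/* New style: a distinctive name cannot collide with the field expression. */
#define stat_read_new(field)				\
({							\
	unsigned long res = 0;				\
	unsigned int _cpu;				\
	for (_cpu = 0; _cpu < NR_CPUS; _cpu++)		\
		res += percpu[_cpu].field;		\
	res;						\
})

int main(void)
{
	int i = 3;	/* caller wants ios[3] summed over all CPUs */

	/* The macro-local "i" shadows the caller's, so the old form sums
	 * ios[0] on cpu 0 plus ios[1] on cpu 1 instead of ios[3] twice. */
	assert(stat_read_old(ios[i]) == 1 + 20);
	assert(stat_read_new(ios[i]) == 1000 + 2000);
	printf("macro-argument capture demonstrated\n");
	return 0;
}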
-27
include/linux/iocontext.h
··· 4 #include <linux/radix-tree.h> 5 #include <linux/rcupdate.h> 6 7 - /* 8 - * This is the per-process anticipatory I/O scheduler state. 9 - */ 10 - struct as_io_context { 11 - spinlock_t lock; 12 - 13 - void (*dtor)(struct as_io_context *aic); /* destructor */ 14 - void (*exit)(struct as_io_context *aic); /* called on task exit */ 15 - 16 - unsigned long state; 17 - atomic_t nr_queued; /* queued reads & sync writes */ 18 - atomic_t nr_dispatched; /* number of requests gone to the drivers */ 19 - 20 - /* IO History tracking */ 21 - /* Thinktime */ 22 - unsigned long last_end_request; 23 - unsigned long ttime_total; 24 - unsigned long ttime_samples; 25 - unsigned long ttime_mean; 26 - /* Layout pattern */ 27 - unsigned int seek_samples; 28 - sector_t last_request_pos; 29 - u64 seek_total; 30 - sector_t seek_mean; 31 - }; 32 - 33 struct cfq_queue; 34 struct cfq_io_context { 35 void *key; ··· 52 unsigned long last_waited; /* Time last woken after wait for request */ 53 int nr_batch_requests; /* Number of requests left in the batch */ 54 55 - struct as_io_context *aic; 56 struct radix_tree_root radix_root; 57 struct hlist_head cic_list; 58 void *ioc_data;
··· 4 #include <linux/radix-tree.h> 5 #include <linux/rcupdate.h> 6 7 struct cfq_queue; 8 struct cfq_io_context { 9 void *key; ··· 78 unsigned long last_waited; /* Time last woken after wait for request */ 79 int nr_batch_requests; /* Number of requests left in the batch */ 80 81 struct radix_tree_root radix_root; 82 struct hlist_head cic_list; 83 void *ioc_data;