Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
fs/bio.c: fix shadows sparse warning
drbd: The kernel code is now equivalent to out of tree release 8.3.7
drbd: Allow online resizing of DRBD devices while peer not reachable (needs to be explicitly forced)
drbd: Don't go into StandAlone mode when authentication fails because of a network error
drivers/block/drbd/drbd_receiver.c: correct NULL test
cfq-iosched: Respect ioprio_class when preempting
genhd: overlapping variable definition
block: removed unused as_io_context
DM: Fix device mapper topology stacking
block: bdev_stack_limits wrapper
block: Fix discard alignment calculation and printing
block: Correct handling of bottom device misalignment
drbd: check on CONFIG_LBDAF, not LBD
drivers/block/drbd: Correct NULL test
drbd: Silenced an assert that could be triggered after changing the write ordering method
drbd: Kconfig fix
drbd: Fix for a race between IO and a detach operation [Bugz 262]
drbd: Use drbd_crypto_is_hash() instead of an open coded check

+107 -87
-5
block/blk-ioc.c
···

 	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
-		if (ioc->aic && ioc->aic->dtor)
-			ioc->aic->dtor(ioc->aic);
 		cfq_dtor(ioc);
 		rcu_read_unlock();
···
 	task_unlock(task);

 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-		if (ioc->aic && ioc->aic->exit)
-			ioc->aic->exit(ioc->aic);
 		cfq_exit(ioc);

 	}
···
 	ret->ioprio = 0;
 	ret->last_waited = jiffies; /* doesn't matter... */
 	ret->nr_batch_requests = 0; /* because this is 0 */
-	ret->aic = NULL;
 	INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ret->cic_list);
 	ret->ioc_data = NULL;
+35 -4
block/blk-settings.c
···
 		     sector_t offset)
 {
 	sector_t alignment;
-	unsigned int top, bottom;
+	unsigned int top, bottom, ret = 0;

 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
···
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);

+	t->misaligned |= b->misaligned;
+
 	alignment = queue_limit_alignment_offset(b, offset);

 	/* Bottom device has different alignment.  Check that it is
···
 		bottom = max(b->physical_block_size, b->io_min) + alignment;

 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if (max(top, bottom) & (min(top, bottom) - 1)) {
 			t->misaligned = 1;
+			ret = -1;
+		}
 	}

 	t->logical_block_size = max(t->logical_block_size,
···
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}

 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}

 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
 		t->misaligned = 1;
+		ret = -1;
 	}

 	/* Find lowest common alignment_offset */
···
 		& (max(t->physical_block_size, t->io_min) - 1);

 	/* Verify that new alignment_offset is on a logical block boundary */
-	if (t->alignment_offset & (t->logical_block_size - 1))
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
+		ret = -1;
+	}

 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
···
 			(t->discard_granularity - 1);
 	}

-	return t->misaligned ? -1 : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
+
+/**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:	the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+		      sector_t start)
+{
+	struct request_queue *bq = bdev_get_queue(bdev);
+
+	start += get_start_sect(bdev);
+
+	return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);

 /**
  * disk_stack_limits - adjust queue limits for stacked drivers
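Note: the "max(top, bottom) & (min(top, bottom) - 1)" test above treats the two I/O intervals as compatible only when the larger one is a whole multiple of the smaller one (the expression assumes the smaller value is a power of two, which holds for typical block-size/io_min values). A minimal stand-alone sketch of that arithmetic with made-up example values (illustration only, not part of the patch):

/* Illustration only -- not part of the patch. */
#include <stdio.h>

static int intervals_misaligned(unsigned int top, unsigned int bottom)
{
	unsigned int lo = top < bottom ? top : bottom;
	unsigned int hi = top < bottom ? bottom : top;

	/* non-zero when hi is not a multiple of lo (lo assumed power of two) */
	return (hi & (lo - 1)) != 0;
}

int main(void)
{
	printf("%d\n", intervals_misaligned(4096, 512));  /* 0: 4096 is a multiple of 512   */
	printf("%d\n", intervals_misaligned(7168, 4096)); /* 1: 7168 is not a multiple of 4096 */
	return 0;
}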
+6
block/cfq-iosched.c
···
 		return true;

 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
+1 -1
block/genhd.c
···
 {
 	struct gendisk *disk = dev_to_disk(dev);

-	return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }

 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
+1 -1
drivers/block/drbd/Kconfig
···
 #

 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
-	depends on !PROC_FS || !INET || !CONNECTOR
+	depends on PROC_FS=n || INET=n || CONNECTOR=n

 config BLK_DEV_DRBD
 	tristate "DRBD Distributed Replicated Block Device support"
+3 -4
drivers/block/drbd/drbd_int.h
···
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
···
 extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
-		struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
+1
drivers/block/drbd/drbd_main.c
···
 			dev_err(DEV, "Sending state in drbd_io_error() failed\n");
 	}

+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 	lc_destroy(mdev->resync);
 	mdev->resync = NULL;
 	lc_destroy(mdev->act_log);
+12 -7
drivers/block/drbd/drbd_nl.c
···
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
···
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);

-	size = drbd_new_dev_size(mdev, mdev->ldev);
+	size = drbd_new_dev_size(mdev, mdev->ldev, force);

 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
···
 }

 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;   /* partner's disk size. */
 	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
···
 	sector_t size = 0;

 	m_size = drbd_get_max_capacity(bdev);
+
+	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+		p_size = m_size;
+	}

 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
···

 	/* Prevent shrinking of consistent devices ! */
 	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-	    drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
 		dev_warn(DEV, "refusing to truncate a consistent device\n");
 		retcode = ERR_DISK_TO_SMALL;
 		goto force_diskless_dec;
···
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);

-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
···
 		goto fail;
 	}

-	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
 		retcode = ERR_AUTH_ALG_ND;
 		goto fail;
 	}
···
 	}

 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, rs.resize_force);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
+30 -16
drivers/block/drbd/drbd_receiver.c
···

 	if (mdev->cram_hmac_tfm) {
 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
-		if (!drbd_do_auth(mdev)) {
+		switch (drbd_do_auth(mdev)) {
+		case -1:
 			dev_err(DEV, "Authentication of peer failed\n");
 			return -1;
+		case 0:
+			dev_err(DEV, "Authentication of peer failed, trying again.\n");
+			return 0;
 		}
 	}

···

 	case WO_bdev_flush:
 	case WO_drain_io:
-		D_ASSERT(rv == FE_STILL_LIVE);
-		set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		if (rv == FE_STILL_LIVE) {
+			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		}
 		if (rv == FE_RECYCLED)
 			return TRUE;
···

 	/* Never shrink a device with usable data during connect.
 	   But allow online shrinking if we are connected. */
-	if (drbd_new_dev_size(mdev, mdev->ldev) <
+	if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
 	   drbd_get_capacity(mdev->this_bdev) &&
 	   mdev->state.disk >= D_OUTDATED &&
 	   mdev->state.conn < C_CONNECTED) {
···
 #undef min_not_zero

 	if (get_ldev(mdev)) {
-		dd = drbd_determin_dev_size(mdev);
+		dd = drbd_determin_dev_size(mdev, 0);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return FALSE;
···
 {
 	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
-	return 0;
+	return -1;
 }
 #else
 #define CHALLENGE_LEN 64
+
+/* Return value:
+	1 - auth succeeded,
+	0 - failed, try again (network error),
+	-1 - auth failed, don't try again.
+*/
+
 static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
···
 		     (u8 *)mdev->net_conf->shared_secret, key_len);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···

 	if (p.length > CHALLENGE_LEN*2) {
 		dev_err(DEV, "expected AuthChallenge payload too big.\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

 	peers_ch = kmalloc(p.length, GFP_NOIO);
 	if (peers_ch == NULL) {
 		dev_err(DEV, "kmalloc of peers_ch failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···
 	response = kmalloc(resp_size, GFP_NOIO);
 	if (response == NULL) {
 		dev_err(DEV, "kmalloc of response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···
 	}

 	right_response = kmalloc(resp_size, GFP_NOIO);
-	if (response == NULL) {
+	if (right_response == NULL) {
 		dev_err(DEV, "kmalloc of right_response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}

···
 	if (rv)
 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
 		     resp_size, mdev->net_conf->cram_hmac_alg);
+	else
+		rv = -1;

 fail:
 	kfree(peers_ch);
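Note: with this change drbd_do_auth() distinguishes a transient network failure (return 0, the connect attempt is retried) from a genuine authentication failure (return -1, give up), as documented in the added comment; the drbd_connect() hunk above switches on that value. A minimal stand-alone sketch of the same tri-state convention (hypothetical stub, not drbd code):

/* Illustration only -- hypothetical caller, not drbd code. */
#include <stdio.h>

enum auth_result { AUTH_FAILED = -1, AUTH_RETRY = 0, AUTH_OK = 1 };

static enum auth_result do_auth_stub(int attempt)
{
	/* pretend the first attempt hits a transient network error */
	return attempt == 0 ? AUTH_RETRY : AUTH_OK;
}

int main(void)
{
	for (int attempt = 0; attempt < 3; attempt++) {
		switch (do_auth_stub(attempt)) {
		case AUTH_OK:
			printf("authenticated on attempt %d\n", attempt);
			return 0;
		case AUTH_RETRY:
			printf("transient failure, retrying\n");
			continue;
		case AUTH_FAILED:
			printf("authentication rejected, giving up\n");
			return 1;
		}
	}
	return 1;
}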
+5 -15
drivers/md/dm-table.c
··· 503 503 return 0; 504 504 } 505 505 506 - if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 507 - DMWARN("%s: target device %s is misaligned: " 506 + if (bdev_stack_limits(limits, bdev, start) < 0) 507 + DMWARN("%s: adding target device %s caused an alignment inconsistency: " 508 508 "physical_block_size=%u, logical_block_size=%u, " 509 509 "alignment_offset=%u, start=%llu", 510 510 dm_device_name(ti->table->md), bdevname(bdev, b), 511 511 q->limits.physical_block_size, 512 512 q->limits.logical_block_size, 513 513 q->limits.alignment_offset, 514 - (unsigned long long) start << 9); 515 - 514 + (unsigned long long) start << SECTOR_SHIFT); 516 515 517 516 /* 518 517 * Check if merge fn is supported. ··· 1025 1026 * for the table. 1026 1027 */ 1027 1028 if (blk_stack_limits(limits, &ti_limits, 0) < 0) 1028 - DMWARN("%s: target device " 1029 + DMWARN("%s: adding target device " 1029 1030 "(start sect %llu len %llu) " 1030 - "is misaligned", 1031 + "caused an alignment inconsistency", 1031 1032 dm_device_name(table->md), 1032 1033 (unsigned long long) ti->begin, 1033 1034 (unsigned long long) ti->len); ··· 1078 1079 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1079 1080 struct queue_limits *limits) 1080 1081 { 1081 - /* 1082 - * Each target device in the table has a data area that should normally 1083 - * be aligned such that the DM device's alignment_offset is 0. 1084 - * FIXME: Propagate alignment_offsets up the stack and warn of 1085 - * sub-optimal or inconsistent settings. 1086 - */ 1087 - limits->alignment_offset = 0; 1088 - limits->misaligned = 0; 1089 - 1090 1082 /* 1091 1083 * Copy table's limits to the DM device's request_queue 1092 1084 */
+1 -1
fs/bio.c
···

 	i = 0;
 	while (i < bio_slab_nr) {
-		struct bio_slab *bslab = &bio_slabs[i];
+		bslab = &bio_slabs[i];

 		if (!bslab->slab && entry == -1)
 			entry = i;
+7 -2
include/linux/blkdev.h
···
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			     sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
···
 static inline int queue_sector_discard_alignment(struct request_queue *q,
 						 sector_t sector)
 {
-	return ((sector << 9) - q->limits.discard_alignment)
-		& (q->limits.discard_granularity - 1);
+	struct queue_limits *lim = &q->limits;
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
 }

 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
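Note: with the reworked queue_sector_discard_alignment() above, the returned value is the number of bytes from the given sector up to the next discard-aligned boundary (0 when the sector already sits on one), assuming discard_granularity is a power of two. A small stand-alone sketch of that arithmetic with example values (illustration only, not kernel code):

/* Illustration only -- not kernel code. */
#include <stdio.h>

static unsigned int sector_discard_alignment(unsigned int granularity,
					     unsigned int discard_alignment,
					     unsigned long long sector)
{
	unsigned int alignment = (sector << 9) & (granularity - 1);

	/* bytes from this sector up to the next discard-aligned boundary */
	return (granularity + discard_alignment - alignment)
		& (granularity - 1);
}

int main(void)
{
	/* 64 KiB granularity, first aligned boundary 4 KiB into the device */
	printf("%u\n", sector_discard_alignment(65536, 4096, 8));  /* 0     */
	printf("%u\n", sector_discard_alignment(65536, 4096, 40)); /* 49152 */
	return 0;
}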
+1 -1
include/linux/drbd.h
···


 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
+1
include/linux/drbd_nl.h
···

 NL_PACKET(resize, 7,
 	NL_INT64(	29,	T_MAY_IGNORE,	resize_size)
+	NL_BIT(		68,	T_MAY_IGNORE,	resize_force)
 )

 NL_PACKET(syncer_conf, 8,
+3 -3
include/linux/genhd.h
···
 #define part_stat_read(part, field)				\
 ({								\
 	typeof((part)->dkstats->field) res = 0;			\
-	int i;							\
-	for_each_possible_cpu(i)				\
-		res += per_cpu_ptr((part)->dkstats, i)->field;	\
+	unsigned int _cpu;					\
+	for_each_possible_cpu(_cpu)				\
+		res += per_cpu_ptr((part)->dkstats, _cpu)->field;\
 	res;							\
 })

-27
include/linux/iocontext.h
···
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>

-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
 struct cfq_queue;
 struct cfq_io_context {
 	void *key;
···
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */

-	struct as_io_context *aic;
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
 	void *ioc_data;