Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: switch polling to be bio based

Replace the blk_poll interface that requires the caller to keep a queue
and cookie from the submissions with polling based on the bio.

Polling for the bio itself leads to a few advantages:

- the cookie construction can be made entirely private in blk-mq.c
- the caller does not need to remember the request_queue and cookie
separately and thus sidesteps their lifetime issues
- keeping the device and the cookie inside the bio allows to trivially
support polling BIOs remapping by stacking drivers
- a lot of code to propagate the cookie back up the submission path can
be removed entirely.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Christoph Hellwig and committed by
Jens Axboe
3e08773c 19416123

+232 -264
+1 -2
arch/m68k/emu/nfblock.c
··· 58 58 struct gendisk *disk; 59 59 }; 60 60 61 - static blk_qc_t nfhd_submit_bio(struct bio *bio) 61 + static void nfhd_submit_bio(struct bio *bio) 62 62 { 63 63 struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data; 64 64 struct bio_vec bvec; ··· 76 76 sec += len; 77 77 } 78 78 bio_endio(bio); 79 - return BLK_QC_T_NONE; 80 79 } 81 80 82 81 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+1 -2
arch/xtensa/platforms/iss/simdisk.c
··· 100 100 spin_unlock(&dev->lock); 101 101 } 102 102 103 - static blk_qc_t simdisk_submit_bio(struct bio *bio) 103 + static void simdisk_submit_bio(struct bio *bio) 104 104 { 105 105 struct simdisk *dev = bio->bi_bdev->bd_disk->private_data; 106 106 struct bio_vec bvec; ··· 118 118 } 119 119 120 120 bio_endio(bio); 121 - return BLK_QC_T_NONE; 122 121 } 123 122 124 123 static int simdisk_open(struct block_device *bdev, fmode_t mode)
+1
block/bio.c
··· 282 282 283 283 atomic_set(&bio->__bi_remaining, 1); 284 284 atomic_set(&bio->__bi_cnt, 1); 285 + bio->bi_cookie = BLK_QC_T_NONE; 285 286 286 287 bio->bi_max_vecs = max_vecs; 287 288 bio->bi_io_vec = table;
+95 -32
block/blk-core.c
··· 915 915 return false; 916 916 } 917 917 918 - static blk_qc_t __submit_bio(struct bio *bio) 918 + static void __submit_bio(struct bio *bio) 919 919 { 920 920 struct gendisk *disk = bio->bi_bdev->bd_disk; 921 - blk_qc_t ret = BLK_QC_T_NONE; 922 921 923 922 if (unlikely(bio_queue_enter(bio) != 0)) 924 - return BLK_QC_T_NONE; 923 + return; 925 924 926 925 if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio)) 927 926 goto queue_exit; 928 - if (disk->fops->submit_bio) { 929 - ret = disk->fops->submit_bio(bio); 930 - goto queue_exit; 927 + if (!disk->fops->submit_bio) { 928 + blk_mq_submit_bio(bio); 929 + return; 931 930 } 932 - return blk_mq_submit_bio(bio); 933 - 931 + disk->fops->submit_bio(bio); 934 932 queue_exit: 935 933 blk_queue_exit(disk->queue); 936 - return ret; 937 934 } 938 935 939 936 /* ··· 952 955 * bio_list_on_stack[1] contains bios that were submitted before the current 953 956 * ->submit_bio_bio, but that haven't been processed yet. 954 957 */ 955 - static blk_qc_t __submit_bio_noacct(struct bio *bio) 958 + static void __submit_bio_noacct(struct bio *bio) 956 959 { 957 960 struct bio_list bio_list_on_stack[2]; 958 - blk_qc_t ret = BLK_QC_T_NONE; 959 961 960 962 BUG_ON(bio->bi_next); 961 963 ··· 971 975 bio_list_on_stack[1] = bio_list_on_stack[0]; 972 976 bio_list_init(&bio_list_on_stack[0]); 973 977 974 - ret = __submit_bio(bio); 978 + __submit_bio(bio); 975 979 976 980 /* 977 981 * Sort new bios into those for a lower level and those for the ··· 994 998 } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); 995 999 996 1000 current->bio_list = NULL; 997 - return ret; 998 1001 } 999 1002 1000 - static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) 1003 + static void __submit_bio_noacct_mq(struct bio *bio) 1001 1004 { 1002 1005 struct bio_list bio_list[2] = { }; 1003 - blk_qc_t ret; 1004 1006 1005 1007 current->bio_list = bio_list; 1006 1008 1007 1009 do { 1008 - ret = __submit_bio(bio); 1010 + __submit_bio(bio); 1009 1011 } while ((bio = 
bio_list_pop(&bio_list[0]))); 1010 1012 1011 1013 current->bio_list = NULL; 1012 - return ret; 1013 1014 } 1014 1015 1015 1016 /** ··· 1018 1025 * systems and other upper level users of the block layer should use 1019 1026 * submit_bio() instead. 1020 1027 */ 1021 - blk_qc_t submit_bio_noacct(struct bio *bio) 1028 + void submit_bio_noacct(struct bio *bio) 1022 1029 { 1023 1030 /* 1024 1031 * We only want one ->submit_bio to be active at a time, else stack ··· 1026 1033 * to collect a list of requests submited by a ->submit_bio method while 1027 1034 * it is active, and then process them after it returned. 1028 1035 */ 1029 - if (current->bio_list) { 1036 + if (current->bio_list) 1030 1037 bio_list_add(&current->bio_list[0], bio); 1031 - return BLK_QC_T_NONE; 1032 - } 1033 - 1034 - if (!bio->bi_bdev->bd_disk->fops->submit_bio) 1035 - return __submit_bio_noacct_mq(bio); 1036 - return __submit_bio_noacct(bio); 1038 + else if (!bio->bi_bdev->bd_disk->fops->submit_bio) 1039 + __submit_bio_noacct_mq(bio); 1040 + else 1041 + __submit_bio_noacct(bio); 1037 1042 } 1038 1043 EXPORT_SYMBOL(submit_bio_noacct); 1039 1044 ··· 1048 1057 * in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has 1049 1058 * been called. 
1050 1059 */ 1051 - blk_qc_t submit_bio(struct bio *bio) 1060 + void submit_bio(struct bio *bio) 1052 1061 { 1053 1062 if (blkcg_punt_bio_submit(bio)) 1054 - return BLK_QC_T_NONE; 1063 + return; 1055 1064 1056 1065 /* 1057 1066 * If it's a regular read/write or a barrier with data attached, ··· 1083 1092 if (unlikely(bio_op(bio) == REQ_OP_READ && 1084 1093 bio_flagged(bio, BIO_WORKINGSET))) { 1085 1094 unsigned long pflags; 1086 - blk_qc_t ret; 1087 1095 1088 1096 psi_memstall_enter(&pflags); 1089 - ret = submit_bio_noacct(bio); 1097 + submit_bio_noacct(bio); 1090 1098 psi_memstall_leave(&pflags); 1091 - 1092 - return ret; 1099 + return; 1093 1100 } 1094 1101 1095 - return submit_bio_noacct(bio); 1102 + submit_bio_noacct(bio); 1096 1103 } 1097 1104 EXPORT_SYMBOL(submit_bio); 1105 + 1106 + /** 1107 + * bio_poll - poll for BIO completions 1108 + * @bio: bio to poll for 1109 + * @flags: BLK_POLL_* flags that control the behavior 1110 + * 1111 + * Poll for completions on queue associated with the bio. Returns number of 1112 + * completed entries found. 1113 + * 1114 + * Note: the caller must either be the context that submitted @bio, or 1115 + * be in a RCU critical section to prevent freeing of @bio. 
1116 + */ 1117 + int bio_poll(struct bio *bio, unsigned int flags) 1118 + { 1119 + struct request_queue *q = bio->bi_bdev->bd_disk->queue; 1120 + blk_qc_t cookie = READ_ONCE(bio->bi_cookie); 1121 + int ret; 1122 + 1123 + if (cookie == BLK_QC_T_NONE || 1124 + !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 1125 + return 0; 1126 + 1127 + if (current->plug) 1128 + blk_flush_plug_list(current->plug, false); 1129 + 1130 + if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) 1131 + return 0; 1132 + if (WARN_ON_ONCE(!queue_is_mq(q))) 1133 + ret = 0; /* not yet implemented, should not happen */ 1134 + else 1135 + ret = blk_mq_poll(q, cookie, flags); 1136 + blk_queue_exit(q); 1137 + return ret; 1138 + } 1139 + EXPORT_SYMBOL_GPL(bio_poll); 1140 + 1141 + /* 1142 + * Helper to implement file_operations.iopoll. Requires the bio to be stored 1143 + * in iocb->private, and cleared before freeing the bio. 1144 + */ 1145 + int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) 1146 + { 1147 + struct bio *bio; 1148 + int ret = 0; 1149 + 1150 + /* 1151 + * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can 1152 + * point to a freshly allocated bio at this point. If that happens 1153 + * we have a few cases to consider: 1154 + * 1155 + * 1) the bio is beeing initialized and bi_bdev is NULL. We can just 1156 + * simply nothing in this case 1157 + * 2) the bio points to a not poll enabled device. bio_poll will catch 1158 + * this and return 0 1159 + * 3) the bio points to a poll capable device, including but not 1160 + * limited to the one that the original bio pointed to. In this 1161 + * case we will call into the actual poll method and poll for I/O, 1162 + * even if we don't need to, but it won't cause harm either. 1163 + * 1164 + * For cases 2) and 3) above the RCU grace period ensures that bi_bdev 1165 + * is still allocated. Because partitions hold a reference to the whole 1166 + * device bdev and thus disk, the disk is also still valid. 
Grabbing 1167 + * a reference to the queue in bio_poll() ensures the hctxs and requests 1168 + * are still valid as well. 1169 + */ 1170 + rcu_read_lock(); 1171 + bio = READ_ONCE(kiocb->private); 1172 + if (bio && bio->bi_bdev) 1173 + ret = bio_poll(bio, flags); 1174 + rcu_read_unlock(); 1175 + 1176 + return ret; 1177 + } 1178 + EXPORT_SYMBOL_GPL(iocb_bio_iopoll); 1098 1179 1099 1180 /** 1100 1181 * blk_cloned_rq_check_limits - Helper function to check a cloned request
+8 -2
block/blk-exec.c
··· 65 65 66 66 static bool blk_rq_is_poll(struct request *rq) 67 67 { 68 - return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL; 68 + if (!rq->mq_hctx) 69 + return false; 70 + if (rq->mq_hctx->type != HCTX_TYPE_POLL) 71 + return false; 72 + if (WARN_ON_ONCE(!rq->bio)) 73 + return false; 74 + return true; 69 75 } 70 76 71 77 static void blk_rq_poll_completion(struct request *rq, struct completion *wait) 72 78 { 73 79 do { 74 - blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0); 80 + bio_poll(rq->bio, 0); 75 81 cond_resched(); 76 82 } while (!completion_done(wait)); 77 83 }
+23 -49
block/blk-mq.c
··· 65 65 return bucket; 66 66 } 67 67 68 + #define BLK_QC_T_SHIFT 16 69 + #define BLK_QC_T_INTERNAL (1U << 31) 70 + 68 71 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, 69 72 blk_qc_t qc) 70 73 { ··· 82 79 if (qc & BLK_QC_T_INTERNAL) 83 80 return blk_mq_tag_to_rq(hctx->sched_tags, tag); 84 81 return blk_mq_tag_to_rq(hctx->tags, tag); 82 + } 83 + 84 + static inline blk_qc_t blk_rq_to_qc(struct request *rq) 85 + { 86 + return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) | 87 + (rq->tag != -1 ? 88 + rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL)); 85 89 } 86 90 87 91 /* ··· 829 819 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) 830 820 q->integrity.profile->prepare_fn(rq); 831 821 #endif 822 + if (rq->bio && rq->bio->bi_opf & REQ_POLLED) 823 + WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq)); 832 824 } 833 825 EXPORT_SYMBOL(blk_mq_start_request); 834 826 ··· 2057 2045 } 2058 2046 2059 2047 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2060 - struct request *rq, 2061 - blk_qc_t *cookie, bool last) 2048 + struct request *rq, bool last) 2062 2049 { 2063 2050 struct request_queue *q = rq->q; 2064 2051 struct blk_mq_queue_data bd = { 2065 2052 .rq = rq, 2066 2053 .last = last, 2067 2054 }; 2068 - blk_qc_t new_cookie; 2069 2055 blk_status_t ret; 2070 - 2071 - new_cookie = request_to_qc_t(hctx, rq); 2072 2056 2073 2057 /* 2074 2058 * For OK queue, we are done. For error, caller may kill it. 
··· 2075 2067 switch (ret) { 2076 2068 case BLK_STS_OK: 2077 2069 blk_mq_update_dispatch_busy(hctx, false); 2078 - *cookie = new_cookie; 2079 2070 break; 2080 2071 case BLK_STS_RESOURCE: 2081 2072 case BLK_STS_DEV_RESOURCE: ··· 2083 2076 break; 2084 2077 default: 2085 2078 blk_mq_update_dispatch_busy(hctx, false); 2086 - *cookie = BLK_QC_T_NONE; 2087 2079 break; 2088 2080 } 2089 2081 ··· 2091 2085 2092 2086 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2093 2087 struct request *rq, 2094 - blk_qc_t *cookie, 2095 2088 bool bypass_insert, bool last) 2096 2089 { 2097 2090 struct request_queue *q = rq->q; ··· 2124 2119 goto insert; 2125 2120 } 2126 2121 2127 - return __blk_mq_issue_directly(hctx, rq, cookie, last); 2122 + return __blk_mq_issue_directly(hctx, rq, last); 2128 2123 insert: 2129 2124 if (bypass_insert) 2130 2125 return BLK_STS_RESOURCE; ··· 2138 2133 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2139 2134 * @hctx: Pointer of the associated hardware queue. 2140 2135 * @rq: Pointer to request to be sent. 2141 - * @cookie: Request queue cookie. 2142 2136 * 2143 2137 * If the device has enough resources to accept a new request now, send the 2144 2138 * request directly to device driver. Else, insert at hctx->dispatch queue, so ··· 2145 2141 * queue have higher priority. 
2146 2142 */ 2147 2143 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2148 - struct request *rq, blk_qc_t *cookie) 2144 + struct request *rq) 2149 2145 { 2150 2146 blk_status_t ret; 2151 2147 int srcu_idx; ··· 2154 2150 2155 2151 hctx_lock(hctx, &srcu_idx); 2156 2152 2157 - ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); 2153 + ret = __blk_mq_try_issue_directly(hctx, rq, false, true); 2158 2154 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2159 2155 blk_mq_request_bypass_insert(rq, false, true); 2160 2156 else if (ret != BLK_STS_OK) ··· 2167 2163 { 2168 2164 blk_status_t ret; 2169 2165 int srcu_idx; 2170 - blk_qc_t unused_cookie; 2171 2166 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2172 2167 2173 2168 hctx_lock(hctx, &srcu_idx); 2174 - ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); 2169 + ret = __blk_mq_try_issue_directly(hctx, rq, true, last); 2175 2170 hctx_unlock(hctx, srcu_idx); 2176 2171 2177 2172 return ret; ··· 2250 2247 * 2251 2248 * It will not queue the request if there is an error with the bio, or at the 2252 2249 * request creation. 2253 - * 2254 - * Returns: Request queue cookie. 
2255 2250 */ 2256 - blk_qc_t blk_mq_submit_bio(struct bio *bio) 2251 + void blk_mq_submit_bio(struct bio *bio) 2257 2252 { 2258 2253 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 2259 2254 const int is_sync = op_is_sync(bio->bi_opf); ··· 2260 2259 struct blk_plug *plug; 2261 2260 struct request *same_queue_rq = NULL; 2262 2261 unsigned int nr_segs; 2263 - blk_qc_t cookie; 2264 2262 blk_status_t ret; 2265 - bool hipri; 2266 2263 2267 2264 blk_queue_bounce(q, &bio); 2268 2265 __blk_queue_split(&bio, &nr_segs); ··· 2276 2277 goto queue_exit; 2277 2278 2278 2279 rq_qos_throttle(q, bio); 2279 - 2280 - hipri = bio->bi_opf & REQ_POLLED; 2281 2280 2282 2281 plug = blk_mq_plug(q, bio); 2283 2282 if (plug && plug->cached_rq) { ··· 2307 2310 2308 2311 rq_qos_track(q, rq, bio); 2309 2312 2310 - cookie = request_to_qc_t(rq->mq_hctx, rq); 2311 - 2312 2313 blk_mq_bio_to_request(rq, bio, nr_segs); 2313 2314 2314 2315 ret = blk_crypto_init_request(rq); ··· 2314 2319 bio->bi_status = ret; 2315 2320 bio_endio(bio); 2316 2321 blk_mq_free_request(rq); 2317 - return BLK_QC_T_NONE; 2322 + return; 2318 2323 } 2319 2324 2320 2325 if (unlikely(is_flush_fua)) { ··· 2370 2375 if (same_queue_rq) { 2371 2376 trace_block_unplug(q, 1, true); 2372 2377 blk_mq_try_issue_directly(same_queue_rq->mq_hctx, 2373 - same_queue_rq, &cookie); 2378 + same_queue_rq); 2374 2379 } 2375 2380 } else if ((q->nr_hw_queues > 1 && is_sync) || 2376 2381 !rq->mq_hctx->dispatch_busy) { ··· 2378 2383 * There is no scheduler and we can try to send directly 2379 2384 * to the hardware. 2380 2385 */ 2381 - blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie); 2386 + blk_mq_try_issue_directly(rq->mq_hctx, rq); 2382 2387 } else { 2383 2388 /* Default case. 
*/ 2384 2389 blk_mq_sched_insert_request(rq, false, true, true); 2385 2390 } 2386 2391 2387 - if (!hipri) 2388 - return BLK_QC_T_NONE; 2389 - return cookie; 2392 + return; 2390 2393 queue_exit: 2391 2394 blk_queue_exit(q); 2392 - return BLK_QC_T_NONE; 2393 2395 } 2394 2396 2395 2397 static size_t order_to_size(unsigned int order) ··· 4076 4084 return 0; 4077 4085 } 4078 4086 4079 - /** 4080 - * blk_poll - poll for IO completions 4081 - * @q: the queue 4082 - * @cookie: cookie passed back at IO submission time 4083 - * @flags: BLK_POLL_* flags that control the behavior 4084 - * 4085 - * Description: 4086 - * Poll for completions on the passed in queue. Returns number of 4087 - * completed entries found. 4088 - */ 4089 - int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) 4087 + int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) 4090 4088 { 4091 - if (cookie == BLK_QC_T_NONE || 4092 - !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 4093 - return 0; 4094 - 4095 - if (current->plug) 4096 - blk_flush_plug_list(current->plug, false); 4097 - 4098 4089 if (!(flags & BLK_POLL_NOSLEEP) && 4099 4090 q->poll_nsec != BLK_MQ_POLL_CLASSIC) { 4100 4091 if (blk_mq_poll_hybrid(q, cookie)) ··· 4085 4110 } 4086 4111 return blk_mq_poll_classic(q, cookie, flags); 4087 4112 } 4088 - EXPORT_SYMBOL_GPL(blk_poll); 4089 4113 4090 4114 unsigned int blk_mq_rq_cpu(struct request *rq) 4091 4115 {
+2
block/blk-mq.h
··· 37 37 struct kobject kobj; 38 38 } ____cacheline_aligned_in_smp; 39 39 40 + void blk_mq_submit_bio(struct bio *bio); 41 + int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); 40 42 void blk_mq_exit_queue(struct request_queue *q); 41 43 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 42 44 void blk_mq_wake_waiters(struct request_queue *q);
+8 -17
block/fops.c
··· 61 61 bool should_dirty = false; 62 62 struct bio bio; 63 63 ssize_t ret; 64 - blk_qc_t qc; 65 64 66 65 if ((pos | iov_iter_alignment(iter)) & 67 66 (bdev_logical_block_size(bdev) - 1)) ··· 101 102 if (iocb->ki_flags & IOCB_HIPRI) 102 103 bio_set_polled(&bio, iocb); 103 104 104 - qc = submit_bio(&bio); 105 + submit_bio(&bio); 105 106 for (;;) { 106 107 set_current_state(TASK_UNINTERRUPTIBLE); 107 108 if (!READ_ONCE(bio.bi_private)) 108 109 break; 109 - if (!(iocb->ki_flags & IOCB_HIPRI) || 110 - !blk_poll(bdev_get_queue(bdev), qc, 0)) 110 + if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0)) 111 111 blk_io_schedule(); 112 112 } 113 113 __set_current_state(TASK_RUNNING); ··· 139 141 140 142 static struct bio_set blkdev_dio_pool; 141 143 142 - static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags) 143 - { 144 - struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); 145 - struct request_queue *q = bdev_get_queue(bdev); 146 - 147 - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); 148 - } 149 - 150 144 static void blkdev_bio_end_io(struct bio *bio) 151 145 { 152 146 struct blkdev_dio *dio = bio->bi_private; ··· 151 161 if (!dio->is_sync) { 152 162 struct kiocb *iocb = dio->iocb; 153 163 ssize_t ret; 164 + 165 + WRITE_ONCE(iocb->private, NULL); 154 166 155 167 if (likely(!dio->bio.bi_status)) { 156 168 ret = dio->size; ··· 192 200 bool do_poll = (iocb->ki_flags & IOCB_HIPRI); 193 201 bool is_read = (iov_iter_rw(iter) == READ), is_sync; 194 202 loff_t pos = iocb->ki_pos; 195 - blk_qc_t qc = BLK_QC_T_NONE; 196 203 int ret = 0; 197 204 198 205 if ((pos | iov_iter_alignment(iter)) & ··· 253 262 if (!nr_pages) { 254 263 if (do_poll) 255 264 bio_set_polled(bio, iocb); 256 - qc = submit_bio(bio); 265 + submit_bio(bio); 257 266 if (do_poll) 258 - WRITE_ONCE(iocb->ki_cookie, qc); 267 + WRITE_ONCE(iocb->private, bio); 259 268 break; 260 269 } 261 270 if (!dio->multi_bio) { ··· 288 297 if (!READ_ONCE(dio->waiter)) 289 298 break; 290 299 291 
- if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0)) 300 + if (!do_poll || !bio_poll(bio, 0)) 292 301 blk_io_schedule(); 293 302 } 294 303 __set_current_state(TASK_RUNNING); ··· 585 594 .llseek = blkdev_llseek, 586 595 .read_iter = blkdev_read_iter, 587 596 .write_iter = blkdev_write_iter, 588 - .iopoll = blkdev_iopoll, 597 + .iopoll = iocb_bio_iopoll, 589 598 .mmap = generic_file_mmap, 590 599 .fsync = blkdev_fsync, 591 600 .unlocked_ioctl = blkdev_ioctl,
+5 -7
drivers/block/brd.c
··· 282 282 return err; 283 283 } 284 284 285 - static blk_qc_t brd_submit_bio(struct bio *bio) 285 + static void brd_submit_bio(struct bio *bio) 286 286 { 287 287 struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; 288 288 sector_t sector = bio->bi_iter.bi_sector; ··· 299 299 300 300 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, 301 301 bio_op(bio), sector); 302 - if (err) 303 - goto io_error; 302 + if (err) { 303 + bio_io_error(bio); 304 + return; 305 + } 304 306 sector += len >> SECTOR_SHIFT; 305 307 } 306 308 307 309 bio_endio(bio); 308 - return BLK_QC_T_NONE; 309 - io_error: 310 - bio_io_error(bio); 311 - return BLK_QC_T_NONE; 312 310 } 313 311 314 312 static int brd_rw_page(struct block_device *bdev, sector_t sector,
+1 -1
drivers/block/drbd/drbd_int.h
··· 1448 1448 /* drbd_req */ 1449 1449 extern void do_submit(struct work_struct *ws); 1450 1450 extern void __drbd_make_request(struct drbd_device *, struct bio *); 1451 - extern blk_qc_t drbd_submit_bio(struct bio *bio); 1451 + void drbd_submit_bio(struct bio *bio); 1452 1452 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); 1453 1453 extern int is_valid_ar_handle(struct drbd_request *, sector_t); 1454 1454
+1 -2
drivers/block/drbd/drbd_req.c
··· 1596 1596 } 1597 1597 } 1598 1598 1599 - blk_qc_t drbd_submit_bio(struct bio *bio) 1599 + void drbd_submit_bio(struct bio *bio) 1600 1600 { 1601 1601 struct drbd_device *device = bio->bi_bdev->bd_disk->private_data; 1602 1602 ··· 1609 1609 1610 1610 inc_ap_bio(device); 1611 1611 __drbd_make_request(device, bio); 1612 - return BLK_QC_T_NONE; 1613 1612 } 1614 1613 1615 1614 static bool net_timeout_reached(struct drbd_request *net_req,
+5 -7
drivers/block/n64cart.c
··· 84 84 return true; 85 85 } 86 86 87 - static blk_qc_t n64cart_submit_bio(struct bio *bio) 87 + static void n64cart_submit_bio(struct bio *bio) 88 88 { 89 89 struct bio_vec bvec; 90 90 struct bvec_iter iter; ··· 92 92 u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; 93 93 94 94 bio_for_each_segment(bvec, bio, iter) { 95 - if (!n64cart_do_bvec(dev, &bvec, pos)) 96 - goto io_error; 95 + if (!n64cart_do_bvec(dev, &bvec, pos)) { 96 + bio_io_error(bio); 97 + return; 98 + } 97 99 pos += bvec.bv_len; 98 100 } 99 101 100 102 bio_endio(bio); 101 - return BLK_QC_T_NONE; 102 - io_error: 103 - bio_io_error(bio); 104 - return BLK_QC_T_NONE; 105 103 } 106 104 107 105 static const struct block_device_operations n64cart_fops = {
+1 -2
drivers/block/null_blk/main.c
··· 1422 1422 return &nullb->queues[index]; 1423 1423 } 1424 1424 1425 - static blk_qc_t null_submit_bio(struct bio *bio) 1425 + static void null_submit_bio(struct bio *bio) 1426 1426 { 1427 1427 sector_t sector = bio->bi_iter.bi_sector; 1428 1428 sector_t nr_sectors = bio_sectors(bio); ··· 1434 1434 cmd->bio = bio; 1435 1435 1436 1436 null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio)); 1437 - return BLK_QC_T_NONE; 1438 1437 } 1439 1438 1440 1439 static bool should_timeout_request(struct request *rq)
+3 -4
drivers/block/pktcdvd.c
··· 2400 2400 } 2401 2401 } 2402 2402 2403 - static blk_qc_t pkt_submit_bio(struct bio *bio) 2403 + static void pkt_submit_bio(struct bio *bio) 2404 2404 { 2405 2405 struct pktcdvd_device *pd; 2406 2406 char b[BDEVNAME_SIZE]; ··· 2423 2423 */ 2424 2424 if (bio_data_dir(bio) == READ) { 2425 2425 pkt_make_request_read(pd, bio); 2426 - return BLK_QC_T_NONE; 2426 + return; 2427 2427 } 2428 2428 2429 2429 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { ··· 2455 2455 pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); 2456 2456 } while (split != bio); 2457 2457 2458 - return BLK_QC_T_NONE; 2458 + return; 2459 2459 end_io: 2460 2460 bio_io_error(bio); 2461 - return BLK_QC_T_NONE; 2462 2461 } 2463 2462 2464 2463 static void pkt_init_queue(struct pktcdvd_device *pd)
+2 -4
drivers/block/ps3vram.c
··· 578 578 return next; 579 579 } 580 580 581 - static blk_qc_t ps3vram_submit_bio(struct bio *bio) 581 + static void ps3vram_submit_bio(struct bio *bio) 582 582 { 583 583 struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data; 584 584 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ··· 594 594 spin_unlock_irq(&priv->lock); 595 595 596 596 if (busy) 597 - return BLK_QC_T_NONE; 597 + return; 598 598 599 599 do { 600 600 bio = ps3vram_do_bio(dev, bio); 601 601 } while (bio); 602 - 603 - return BLK_QC_T_NONE; 604 602 } 605 603 606 604 static const struct block_device_operations ps3vram_fops = {
+3 -4
drivers/block/rsxx/dev.c
··· 50 50 51 51 static struct kmem_cache *bio_meta_pool; 52 52 53 - static blk_qc_t rsxx_submit_bio(struct bio *bio); 53 + static void rsxx_submit_bio(struct bio *bio); 54 54 55 55 /*----------------- Block Device Operations -----------------*/ 56 56 static int rsxx_blkdev_ioctl(struct block_device *bdev, ··· 120 120 } 121 121 } 122 122 123 - static blk_qc_t rsxx_submit_bio(struct bio *bio) 123 + static void rsxx_submit_bio(struct bio *bio) 124 124 { 125 125 struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data; 126 126 struct rsxx_bio_meta *bio_meta; ··· 169 169 if (st) 170 170 goto queue_err; 171 171 172 - return BLK_QC_T_NONE; 172 + return; 173 173 174 174 queue_err: 175 175 kmem_cache_free(bio_meta_pool, bio_meta); ··· 177 177 if (st) 178 178 bio->bi_status = st; 179 179 bio_endio(bio); 180 - return BLK_QC_T_NONE; 181 180 } 182 181 183 182 /*----------------- Device Setup -------------------*/
+3 -7
drivers/block/zram/zram_drv.c
··· 1598 1598 /* 1599 1599 * Handler function for all zram I/O requests. 1600 1600 */ 1601 - static blk_qc_t zram_submit_bio(struct bio *bio) 1601 + static void zram_submit_bio(struct bio *bio) 1602 1602 { 1603 1603 struct zram *zram = bio->bi_bdev->bd_disk->private_data; 1604 1604 1605 1605 if (!valid_io_request(zram, bio->bi_iter.bi_sector, 1606 1606 bio->bi_iter.bi_size)) { 1607 1607 atomic64_inc(&zram->stats.invalid_io); 1608 - goto error; 1608 + bio_io_error(bio); 1609 + return; 1609 1610 } 1610 1611 1611 1612 __zram_make_request(zram, bio); 1612 - return BLK_QC_T_NONE; 1613 - 1614 - error: 1615 - bio_io_error(bio); 1616 - return BLK_QC_T_NONE; 1617 1613 } 1618 1614 1619 1615 static void zram_slot_free_notify(struct block_device *bdev,
+5 -8
drivers/md/bcache/request.c
··· 1163 1163 1164 1164 /* Cached devices - read & write stuff */ 1165 1165 1166 - blk_qc_t cached_dev_submit_bio(struct bio *bio) 1166 + void cached_dev_submit_bio(struct bio *bio) 1167 1167 { 1168 1168 struct search *s; 1169 1169 struct block_device *orig_bdev = bio->bi_bdev; ··· 1176 1176 dc->io_disable)) { 1177 1177 bio->bi_status = BLK_STS_IOERR; 1178 1178 bio_endio(bio); 1179 - return BLK_QC_T_NONE; 1179 + return; 1180 1180 } 1181 1181 1182 1182 if (likely(d->c)) { ··· 1222 1222 } else 1223 1223 /* I/O request sent to backing device */ 1224 1224 detached_dev_do_request(d, bio, orig_bdev, start_time); 1225 - 1226 - return BLK_QC_T_NONE; 1227 1225 } 1228 1226 1229 1227 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, ··· 1271 1273 continue_at(cl, search_free, NULL); 1272 1274 } 1273 1275 1274 - blk_qc_t flash_dev_submit_bio(struct bio *bio) 1276 + void flash_dev_submit_bio(struct bio *bio) 1275 1277 { 1276 1278 struct search *s; 1277 1279 struct closure *cl; ··· 1280 1282 if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { 1281 1283 bio->bi_status = BLK_STS_IOERR; 1282 1284 bio_endio(bio); 1283 - return BLK_QC_T_NONE; 1285 + return; 1284 1286 } 1285 1287 1286 1288 s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); ··· 1296 1298 continue_at_nobarrier(&s->cl, 1297 1299 flash_dev_nodata, 1298 1300 bcache_wq); 1299 - return BLK_QC_T_NONE; 1301 + return; 1300 1302 } else if (bio_data_dir(bio)) { 1301 1303 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1302 1304 &KEY(d->id, bio->bi_iter.bi_sector, 0), ··· 1312 1314 } 1313 1315 1314 1316 continue_at(cl, search_free, NULL); 1315 - return BLK_QC_T_NONE; 1316 1317 } 1317 1318 1318 1319 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
+2 -2
drivers/md/bcache/request.h
··· 37 37 void bch_data_insert(struct closure *cl); 38 38 39 39 void bch_cached_dev_request_init(struct cached_dev *dc); 40 - blk_qc_t cached_dev_submit_bio(struct bio *bio); 40 + void cached_dev_submit_bio(struct bio *bio); 41 41 42 42 void bch_flash_dev_request_init(struct bcache_device *d); 43 - blk_qc_t flash_dev_submit_bio(struct bio *bio); 43 + void flash_dev_submit_bio(struct bio *bio); 44 44 45 45 extern struct kmem_cache *bch_search_cache; 46 46
+10 -18
drivers/md/dm.c
··· 1183 1183 mutex_unlock(&md->swap_bios_lock); 1184 1184 } 1185 1185 1186 - static blk_qc_t __map_bio(struct dm_target_io *tio) 1186 + static void __map_bio(struct dm_target_io *tio) 1187 1187 { 1188 1188 int r; 1189 1189 sector_t sector; 1190 1190 struct bio *clone = &tio->clone; 1191 1191 struct dm_io *io = tio->io; 1192 1192 struct dm_target *ti = tio->ti; 1193 - blk_qc_t ret = BLK_QC_T_NONE; 1194 1193 1195 1194 clone->bi_end_io = clone_endio; 1196 1195 ··· 1225 1226 case DM_MAPIO_REMAPPED: 1226 1227 /* the bio has been remapped so dispatch it */ 1227 1228 trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector); 1228 - ret = submit_bio_noacct(clone); 1229 + submit_bio_noacct(clone); 1229 1230 break; 1230 1231 case DM_MAPIO_KILL: 1231 1232 if (unlikely(swap_bios_limit(ti, clone))) { ··· 1247 1248 DMWARN("unimplemented target map return value: %d", r); 1248 1249 BUG(); 1249 1250 } 1250 - 1251 - return ret; 1252 1251 } 1253 1252 1254 1253 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) ··· 1333 1336 } 1334 1337 } 1335 1338 1336 - static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, 1339 + static void __clone_and_map_simple_bio(struct clone_info *ci, 1337 1340 struct dm_target_io *tio, unsigned *len) 1338 1341 { 1339 1342 struct bio *clone = &tio->clone; ··· 1343 1346 __bio_clone_fast(clone, ci->bio); 1344 1347 if (len) 1345 1348 bio_setup_sector(clone, ci->sector, *len); 1346 - 1347 - return __map_bio(tio); 1349 + __map_bio(tio); 1348 1350 } 1349 1351 1350 1352 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, ··· 1357 1361 1358 1362 while ((bio = bio_list_pop(&blist))) { 1359 1363 tio = container_of(bio, struct dm_target_io, clone); 1360 - (void) __clone_and_map_simple_bio(ci, tio, len); 1364 + __clone_and_map_simple_bio(ci, tio, len); 1361 1365 } 1362 1366 } 1363 1367 ··· 1401 1405 free_tio(tio); 1402 1406 return r; 1403 1407 } 1404 - (void) __map_bio(tio); 1408 + __map_bio(tio); 1405 
1409 1406 1410 return 0; 1407 1411 } ··· 1516 1520 /* 1517 1521 * Entry point to split a bio into clones and submit them to the targets. 1518 1522 */ 1519 - static blk_qc_t __split_and_process_bio(struct mapped_device *md, 1523 + static void __split_and_process_bio(struct mapped_device *md, 1520 1524 struct dm_table *map, struct bio *bio) 1521 1525 { 1522 1526 struct clone_info ci; 1523 - blk_qc_t ret = BLK_QC_T_NONE; 1524 1527 int error = 0; 1525 1528 1526 1529 init_clone_info(&ci, md, map, bio); ··· 1562 1567 1563 1568 bio_chain(b, bio); 1564 1569 trace_block_split(b, bio->bi_iter.bi_sector); 1565 - ret = submit_bio_noacct(bio); 1570 + submit_bio_noacct(bio); 1566 1571 } 1567 1572 } 1568 1573 1569 1574 /* drop the extra reference count */ 1570 1575 dm_io_dec_pending(ci.io, errno_to_blk_status(error)); 1571 - return ret; 1572 1576 } 1573 1577 1574 - static blk_qc_t dm_submit_bio(struct bio *bio) 1578 + static void dm_submit_bio(struct bio *bio) 1575 1579 { 1576 1580 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; 1577 - blk_qc_t ret = BLK_QC_T_NONE; 1578 1581 int srcu_idx; 1579 1582 struct dm_table *map; 1580 1583 ··· 1602 1609 if (is_abnormal_io(bio)) 1603 1610 blk_queue_split(&bio); 1604 1611 1605 - ret = __split_and_process_bio(md, map, bio); 1612 + __split_and_process_bio(md, map, bio); 1606 1613 out: 1607 1614 dm_put_live_table(md, srcu_idx); 1608 - return ret; 1609 1615 } 1610 1616 1611 1617 /*-----------------------------------------------------------------
+4 -6
drivers/md/md.c
··· 443 443 } 444 444 EXPORT_SYMBOL(md_handle_request); 445 445 446 - static blk_qc_t md_submit_bio(struct bio *bio) 446 + static void md_submit_bio(struct bio *bio) 447 447 { 448 448 const int rw = bio_data_dir(bio); 449 449 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; 450 450 451 451 if (mddev == NULL || mddev->pers == NULL) { 452 452 bio_io_error(bio); 453 - return BLK_QC_T_NONE; 453 + return; 454 454 } 455 455 456 456 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { 457 457 bio_io_error(bio); 458 - return BLK_QC_T_NONE; 458 + return; 459 459 } 460 460 461 461 blk_queue_split(&bio); ··· 464 464 if (bio_sectors(bio) != 0) 465 465 bio->bi_status = BLK_STS_IOERR; 466 466 bio_endio(bio); 467 - return BLK_QC_T_NONE; 467 + return; 468 468 } 469 469 470 470 /* bio could be mergeable after passing to underlayer */ 471 471 bio->bi_opf &= ~REQ_NOMERGE; 472 472 473 473 md_handle_request(mddev, bio); 474 - 475 - return BLK_QC_T_NONE; 476 474 } 477 475 478 476 /* mddev_suspend makes sure no new requests are submitted
+2 -3
drivers/nvdimm/blk.c
··· 162 162 return err; 163 163 } 164 164 165 - static blk_qc_t nd_blk_submit_bio(struct bio *bio) 165 + static void nd_blk_submit_bio(struct bio *bio) 166 166 { 167 167 struct bio_integrity_payload *bip; 168 168 struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data; ··· 173 173 bool do_acct; 174 174 175 175 if (!bio_integrity_prep(bio)) 176 - return BLK_QC_T_NONE; 176 + return; 177 177 178 178 bip = bio_integrity(bio); 179 179 rw = bio_data_dir(bio); ··· 199 199 bio_end_io_acct(bio, start); 200 200 201 201 bio_endio(bio); 202 - return BLK_QC_T_NONE; 203 202 } 204 203 205 204 static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
+2 -3
drivers/nvdimm/btt.c
··· 1440 1440 return ret; 1441 1441 } 1442 1442 1443 - static blk_qc_t btt_submit_bio(struct bio *bio) 1443 + static void btt_submit_bio(struct bio *bio) 1444 1444 { 1445 1445 struct bio_integrity_payload *bip = bio_integrity(bio); 1446 1446 struct btt *btt = bio->bi_bdev->bd_disk->private_data; ··· 1451 1451 bool do_acct; 1452 1452 1453 1453 if (!bio_integrity_prep(bio)) 1454 - return BLK_QC_T_NONE; 1454 + return; 1455 1455 1456 1456 do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue); 1457 1457 if (do_acct) ··· 1483 1483 bio_end_io_acct(bio, start); 1484 1484 1485 1485 bio_endio(bio); 1486 - return BLK_QC_T_NONE; 1487 1486 } 1488 1487 1489 1488 static int btt_rw_page(struct block_device *bdev, sector_t sector,
+1 -2
drivers/nvdimm/pmem.c
··· 190 190 return rc; 191 191 } 192 192 193 - static blk_qc_t pmem_submit_bio(struct bio *bio) 193 + static void pmem_submit_bio(struct bio *bio) 194 194 { 195 195 int ret = 0; 196 196 blk_status_t rc = 0; ··· 229 229 bio->bi_status = errno_to_blk_status(ret); 230 230 231 231 bio_endio(bio); 232 - return BLK_QC_T_NONE; 233 232 } 234 233 235 234 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
+2 -4
drivers/nvme/host/multipath.c
··· 312 312 return false; 313 313 } 314 314 315 - static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) 315 + static void nvme_ns_head_submit_bio(struct bio *bio) 316 316 { 317 317 struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; 318 318 struct device *dev = disk_to_dev(head->disk); 319 319 struct nvme_ns *ns; 320 - blk_qc_t ret = BLK_QC_T_NONE; 321 320 int srcu_idx; 322 321 323 322 /* ··· 333 334 bio->bi_opf |= REQ_NVME_MPATH; 334 335 trace_block_bio_remap(bio, disk_devt(ns->head->disk), 335 336 bio->bi_iter.bi_sector); 336 - ret = submit_bio_noacct(bio); 337 + submit_bio_noacct(bio); 337 338 } else if (nvme_available_path(head)) { 338 339 dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); 339 340 ··· 348 349 } 349 350 350 351 srcu_read_unlock(&head->srcu, srcu_idx); 351 - return ret; 352 352 } 353 353 354 354 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+3 -4
drivers/s390/block/dcssblk.c
··· 30 30 31 31 static int dcssblk_open(struct block_device *bdev, fmode_t mode); 32 32 static void dcssblk_release(struct gendisk *disk, fmode_t mode); 33 - static blk_qc_t dcssblk_submit_bio(struct bio *bio); 33 + static void dcssblk_submit_bio(struct bio *bio); 34 34 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 35 35 long nr_pages, void **kaddr, pfn_t *pfn); 36 36 ··· 854 854 up_write(&dcssblk_devices_sem); 855 855 } 856 856 857 - static blk_qc_t 857 + static void 858 858 dcssblk_submit_bio(struct bio *bio) 859 859 { 860 860 struct dcssblk_dev_info *dev_info; ··· 907 907 bytes_done += bvec.bv_len; 908 908 } 909 909 bio_endio(bio); 910 - return BLK_QC_T_NONE; 910 + return; 911 911 fail: 912 912 bio_io_error(bio); 913 - return BLK_QC_T_NONE; 914 913 } 915 914 916 915 static long
+3 -5
fs/btrfs/inode.c
··· 8248 8248 return dip; 8249 8249 } 8250 8250 8251 - static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, 8251 + static void btrfs_submit_direct(const struct iomap_iter *iter, 8252 8252 struct bio *dio_bio, loff_t file_offset) 8253 8253 { 8254 8254 struct inode *inode = iter->inode; ··· 8278 8278 } 8279 8279 dio_bio->bi_status = BLK_STS_RESOURCE; 8280 8280 bio_endio(dio_bio); 8281 - return BLK_QC_T_NONE; 8281 + return; 8282 8282 } 8283 8283 8284 8284 if (!write) { ··· 8372 8372 8373 8373 free_extent_map(em); 8374 8374 } while (submit_len > 0); 8375 - return BLK_QC_T_NONE; 8375 + return; 8376 8376 8377 8377 out_err_em: 8378 8378 free_extent_map(em); 8379 8379 out_err: 8380 8380 dip->dio_bio->bi_status = status; 8381 8381 btrfs_dio_private_put(dip); 8382 - 8383 - return BLK_QC_T_NONE; 8384 8382 } 8385 8383 8386 8384 const struct iomap_ops btrfs_dio_iomap_ops = {
+1 -1
fs/ext4/file.c
··· 915 915 .llseek = ext4_llseek, 916 916 .read_iter = ext4_file_read_iter, 917 917 .write_iter = ext4_file_write_iter, 918 - .iopoll = iomap_dio_iopoll, 918 + .iopoll = iocb_bio_iopoll, 919 919 .unlocked_ioctl = ext4_ioctl, 920 920 #ifdef CONFIG_COMPAT 921 921 .compat_ioctl = ext4_compat_ioctl,
+2 -2
fs/gfs2/file.c
··· 1353 1353 .llseek = gfs2_llseek, 1354 1354 .read_iter = gfs2_file_read_iter, 1355 1355 .write_iter = gfs2_file_write_iter, 1356 - .iopoll = iomap_dio_iopoll, 1356 + .iopoll = iocb_bio_iopoll, 1357 1357 .unlocked_ioctl = gfs2_ioctl, 1358 1358 .compat_ioctl = gfs2_compat_ioctl, 1359 1359 .mmap = gfs2_mmap, ··· 1386 1386 .llseek = gfs2_llseek, 1387 1387 .read_iter = gfs2_file_read_iter, 1388 1388 .write_iter = gfs2_file_write_iter, 1389 - .iopoll = iomap_dio_iopoll, 1389 + .iopoll = iocb_bio_iopoll, 1390 1390 .unlocked_ioctl = gfs2_ioctl, 1391 1391 .compat_ioctl = gfs2_compat_ioctl, 1392 1392 .mmap = gfs2_mmap,
+12 -24
fs/iomap/direct-io.c
··· 38 38 struct { 39 39 struct iov_iter *iter; 40 40 struct task_struct *waiter; 41 - struct request_queue *last_queue; 42 - blk_qc_t cookie; 41 + struct bio *poll_bio; 43 42 } submit; 44 43 45 44 /* used for aio completion: */ ··· 48 49 }; 49 50 }; 50 51 51 - int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags) 52 - { 53 - struct request_queue *q = READ_ONCE(kiocb->private); 54 - 55 - if (!q) 56 - return 0; 57 - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); 58 - } 59 - EXPORT_SYMBOL_GPL(iomap_dio_iopoll); 60 - 61 52 static void iomap_dio_submit_bio(const struct iomap_iter *iter, 62 53 struct iomap_dio *dio, struct bio *bio, loff_t pos) 63 54 { 64 55 atomic_inc(&dio->ref); 65 56 66 - if (dio->iocb->ki_flags & IOCB_HIPRI) 57 + if (dio->iocb->ki_flags & IOCB_HIPRI) { 67 58 bio_set_polled(bio, dio->iocb); 59 + dio->submit.poll_bio = bio; 60 + } 68 61 69 - dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev); 70 62 if (dio->dops && dio->dops->submit_io) 71 - dio->submit.cookie = dio->dops->submit_io(iter, bio, pos); 63 + dio->dops->submit_io(iter, bio, pos); 72 64 else 73 - dio->submit.cookie = submit_bio(bio); 65 + submit_bio(bio); 74 66 } 75 67 76 68 ssize_t iomap_dio_complete(struct iomap_dio *dio) ··· 154 164 } else if (dio->flags & IOMAP_DIO_WRITE) { 155 165 struct inode *inode = file_inode(dio->iocb->ki_filp); 156 166 167 + WRITE_ONCE(dio->iocb->private, NULL); 157 168 INIT_WORK(&dio->aio.work, iomap_dio_complete_work); 158 169 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); 159 170 } else { 171 + WRITE_ONCE(dio->iocb->private, NULL); 160 172 iomap_dio_complete_work(&dio->aio.work); 161 173 } 162 174 } ··· 489 497 490 498 dio->submit.iter = iter; 491 499 dio->submit.waiter = current; 492 - dio->submit.cookie = BLK_QC_T_NONE; 493 - dio->submit.last_queue = NULL; 500 + dio->submit.poll_bio = NULL; 494 501 495 502 if (iov_iter_rw(iter) == READ) { 496 503 if (iomi.pos >= dio->i_size) ··· 602 611 if (dio->flags & IOMAP_DIO_WRITE_FUA) 
603 612 dio->flags &= ~IOMAP_DIO_NEED_SYNC; 604 613 605 - WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie); 606 - WRITE_ONCE(iocb->private, dio->submit.last_queue); 614 + WRITE_ONCE(iocb->private, dio->submit.poll_bio); 607 615 608 616 /* 609 617 * We are about to drop our additional submission reference, which ··· 629 639 if (!READ_ONCE(dio->submit.waiter)) 630 640 break; 631 641 632 - if (!(iocb->ki_flags & IOCB_HIPRI) || 633 - !dio->submit.last_queue || 634 - !blk_poll(dio->submit.last_queue, 635 - dio->submit.cookie, 0)) 642 + if (!dio->submit.poll_bio || 643 + !bio_poll(dio->submit.poll_bio, 0)) 636 644 blk_io_schedule(); 637 645 } 638 646 __set_current_state(TASK_RUNNING);
+1 -1
fs/xfs/xfs_file.c
··· 1452 1452 .write_iter = xfs_file_write_iter, 1453 1453 .splice_read = generic_file_splice_read, 1454 1454 .splice_write = iter_file_splice_write, 1455 - .iopoll = iomap_dio_iopoll, 1455 + .iopoll = iocb_bio_iopoll, 1456 1456 .unlocked_ioctl = xfs_file_ioctl, 1457 1457 #ifdef CONFIG_COMPAT 1458 1458 .compat_ioctl = xfs_file_compat_ioctl,
+1 -1
fs/zonefs/super.c
··· 1128 1128 .write_iter = zonefs_file_write_iter, 1129 1129 .splice_read = generic_file_splice_read, 1130 1130 .splice_write = iter_file_splice_write, 1131 - .iopoll = iomap_dio_iopoll, 1131 + .iopoll = iocb_bio_iopoll, 1132 1132 }; 1133 1133 1134 1134 static struct kmem_cache *zonefs_inode_cachep;
+1 -1
include/linux/bio.h
··· 349 349 return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); 350 350 } 351 351 352 - extern blk_qc_t submit_bio(struct bio *); 352 + void submit_bio(struct bio *bio); 353 353 354 354 extern void bio_endio(struct bio *); 355 355
+2 -13
include/linux/blk-mq.h
··· 359 359 /** @kobj: Kernel object for sysfs. */ 360 360 struct kobject kobj; 361 361 362 - /** @poll_considered: Count times blk_poll() was called. */ 362 + /** @poll_considered: Count times blk_mq_poll() was called. */ 363 363 unsigned long poll_considered; 364 - /** @poll_invoked: Count how many requests blk_poll() polled. */ 364 + /** @poll_invoked: Count how many requests blk_mq_poll() polled. */ 365 365 unsigned long poll_invoked; 366 366 /** @poll_success: Count how many polled requests were completed. */ 367 367 unsigned long poll_success; ··· 815 815 for ((i) = 0; (i) < (hctx)->nr_ctx && \ 816 816 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) 817 817 818 - static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, 819 - struct request *rq) 820 - { 821 - if (rq->tag != -1) 822 - return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); 823 - 824 - return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | 825 - BLK_QC_T_INTERNAL; 826 - } 827 - 828 818 static inline void blk_mq_cleanup_rq(struct request *rq) 829 819 { 830 820 if (rq->q->mq_ops->cleanup_rq) ··· 833 843 rq->rq_disk = bio->bi_bdev->bd_disk; 834 844 } 835 845 836 - blk_qc_t blk_mq_submit_bio(struct bio *bio); 837 846 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, 838 847 struct lock_class_key *key); 839 848
+5 -7
include/linux/blk_types.h
··· 208 208 ((u64)size << BIO_ISSUE_SIZE_SHIFT)); 209 209 } 210 210 211 + typedef unsigned int blk_qc_t; 212 + #define BLK_QC_T_NONE -1U 213 + 211 214 /* 212 215 * main unit of I/O for the block layer and lower layers (ie drivers and 213 216 * stacking drivers) ··· 230 227 231 228 struct bvec_iter bi_iter; 232 229 230 + blk_qc_t bi_cookie; 233 231 bio_end_io_t *bi_end_io; 234 - 235 232 void *bi_private; 236 233 #ifdef CONFIG_BLK_CGROUP 237 234 /* ··· 387 384 /* command specific flags for REQ_OP_WRITE_ZEROES: */ 388 385 __REQ_NOUNMAP, /* do not free blocks when zeroing */ 389 386 390 - __REQ_POLLED, /* caller polls for completion using blk_poll */ 387 + __REQ_POLLED, /* caller polls for completion using bio_poll */ 391 388 392 389 /* for driver use */ 393 390 __REQ_DRV, ··· 497 494 return STAT_DISCARD; 498 495 return op_is_write(op); 499 496 } 500 - 501 - typedef unsigned int blk_qc_t; 502 - #define BLK_QC_T_NONE -1U 503 - #define BLK_QC_T_SHIFT 16 504 - #define BLK_QC_T_INTERNAL (1U << 31) 505 497 506 498 struct blk_rq_stat { 507 499 u64 mean;
+5 -3
include/linux/blkdev.h
··· 25 25 struct sg_io_hdr; 26 26 struct blkcg_gq; 27 27 struct blk_flush_queue; 28 + struct kiocb; 28 29 struct pr_ops; 29 30 struct rq_qos; 30 31 struct blk_queue_stats; ··· 551 550 552 551 extern int blk_register_queue(struct gendisk *disk); 553 552 extern void blk_unregister_queue(struct gendisk *disk); 554 - blk_qc_t submit_bio_noacct(struct bio *bio); 553 + void submit_bio_noacct(struct bio *bio); 555 554 556 555 extern int blk_lld_busy(struct request_queue *q); 557 556 extern void blk_queue_split(struct bio **); ··· 569 568 #define BLK_POLL_ONESHOT (1 << 0) 570 569 /* do not sleep to wait for the expected completion time */ 571 570 #define BLK_POLL_NOSLEEP (1 << 1) 572 - int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); 571 + int bio_poll(struct bio *bio, unsigned int flags); 572 + int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); 573 573 574 574 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 575 575 { ··· 1178 1176 1179 1177 1180 1178 struct block_device_operations { 1181 - blk_qc_t (*submit_bio) (struct bio *bio); 1179 + void (*submit_bio)(struct bio *bio); 1182 1180 int (*open) (struct block_device *, fmode_t); 1183 1181 void (*release) (struct gendisk *, fmode_t); 1184 1182 int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
+1 -5
include/linux/fs.h
··· 334 334 int ki_flags; 335 335 u16 ki_hint; 336 336 u16 ki_ioprio; /* See linux/ioprio.h */ 337 - union { 338 - unsigned int ki_cookie; /* for ->iopoll */ 339 - struct wait_page_queue *ki_waitq; /* for async buffered IO */ 340 - }; 341 - 337 + struct wait_page_queue *ki_waitq; /* for async buffered IO */ 342 338 randomized_struct_fields_end 343 339 }; 344 340
+2 -3
include/linux/iomap.h
··· 313 313 struct iomap_dio_ops { 314 314 int (*end_io)(struct kiocb *iocb, ssize_t size, int error, 315 315 unsigned flags); 316 - blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, 317 - loff_t file_offset); 316 + void (*submit_io)(const struct iomap_iter *iter, struct bio *bio, 317 + loff_t file_offset); 318 318 }; 319 319 320 320 /* ··· 337 337 const struct iomap_ops *ops, const struct iomap_dio_ops *dops, 338 338 unsigned int dio_flags); 339 339 ssize_t iomap_dio_complete(struct iomap_dio *dio); 340 - int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags); 341 340 342 341 #ifdef CONFIG_SWAP 343 342 struct file;
+2 -6
mm/page_io.c
··· 358 358 struct bio *bio; 359 359 int ret = 0; 360 360 struct swap_info_struct *sis = page_swap_info(page); 361 - blk_qc_t qc; 362 - struct gendisk *disk; 363 361 unsigned long pflags; 364 362 365 363 VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page); ··· 407 409 bio->bi_iter.bi_sector = swap_page_sector(page); 408 410 bio->bi_end_io = end_swap_bio_read; 409 411 bio_add_page(bio, page, thp_size(page), 0); 410 - 411 - disk = bio->bi_bdev->bd_disk; 412 412 /* 413 413 * Keep this task valid during swap readpage because the oom killer may 414 414 * attempt to access it in the page fault retry time check. ··· 418 422 } 419 423 count_vm_event(PSWPIN); 420 424 bio_get(bio); 421 - qc = submit_bio(bio); 425 + submit_bio(bio); 422 426 while (synchronous) { 423 427 set_current_state(TASK_UNINTERRUPTIBLE); 424 428 if (!READ_ONCE(bio->bi_private)) 425 429 break; 426 430 427 - if (!blk_poll(disk->queue, qc, 0)) 431 + if (!bio_poll(bio, 0)) 428 432 blk_io_schedule(); 429 433 } 430 434 __set_current_state(TASK_RUNNING);