Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: rename generic_make_request to submit_bio_noacct

generic_make_request has always been very confusingly misnamed, so rename
it to submit_bio_noacct to make it clear that it is submit_bio minus
accounting and a few checks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
Commit hashes: ed00aabd c62b37d9

+115 -118
+1 -1
Documentation/block/biodoc.rst
··· 1036 1036 provides drivers with a sector number relative to whole device, rather than 1037 1037 having to take partition number into account in order to arrive at the true 1038 1038 sector number. The routine blk_partition_remap() is invoked by 1039 - generic_make_request even before invoking the queue specific ->submit_bio, 1039 + submit_bio_noacct even before invoking the queue specific ->submit_bio, 1040 1040 so the i/o scheduler also gets to operate on whole disk sector numbers. This 1041 1041 should typically not require changes to block drivers, it just never gets 1042 1042 to invoke its own partition sector offset calculations since all bios
+1 -1
Documentation/fault-injection/fault-injection.rst
··· 24 24 25 25 injects disk IO errors on devices permitted by setting 26 26 /sys/block/<device>/make-it-fail or 27 - /sys/block/<device>/<partition>/make-it-fail. (generic_make_request()) 27 + /sys/block/<device>/<partition>/make-it-fail. (submit_bio_noacct()) 28 28 29 29 - fail_mmc_request 30 30
+2 -2
Documentation/trace/ftrace.rst
··· 1453 1453 => __blk_run_queue_uncond 1454 1454 => __blk_run_queue 1455 1455 => blk_queue_bio 1456 - => generic_make_request 1456 + => submit_bio_noacct 1457 1457 => submit_bio 1458 1458 => submit_bh 1459 1459 => __ext3_get_inode_loc ··· 1738 1738 => __blk_run_queue_uncond 1739 1739 => __blk_run_queue 1740 1740 => blk_queue_bio 1741 - => generic_make_request 1741 + => submit_bio_noacct 1742 1742 => submit_bio 1743 1743 => submit_bh 1744 1744 => ext3_bread
+7 -7
block/bio.c
··· 358 358 if (!bio) 359 359 break; 360 360 361 - generic_make_request(bio); 361 + submit_bio_noacct(bio); 362 362 } 363 363 } ··· 416 416 * submit the previously allocated bio for IO before attempting to allocate 417 417 * a new one. Failure to do so can cause deadlocks under memory pressure. 418 418 * 419 - * Note that when running under generic_make_request() (i.e. any block 419 + * Note that when running under submit_bio_noacct() (i.e. any block 420 420 * driver), bios are not submitted until after you return - see the code in 421 - * generic_make_request() that converts recursion into iteration, to prevent 421 + * submit_bio_noacct() that converts recursion into iteration, to prevent 422 422 * stack overflows. 423 423 * 424 424 * This would normally mean allocating multiple bios under 425 - * generic_make_request() would be susceptible to deadlocks, but we have 425 + * submit_bio_noacct() would be susceptible to deadlocks, but we have 426 426 * deadlock avoidance code that resubmits any blocked bios from a rescuer 427 427 * thread. 428 428 * 429 429 * However, we do not guarantee forward progress for allocations from other 430 430 * mempools. Doing multiple allocations from the same mempool under 431 - * generic_make_request() should be avoided - instead, use bio_set's front_pad 431 + * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad 432 432 * for per bio allocations. 433 433 * 434 434 * RETURNS: ··· 457 457 nr_iovecs > 0)) 458 458 return NULL; 459 459 /* 460 - * generic_make_request() converts recursion to iteration; this 460 + * submit_bio_noacct() converts recursion to iteration; this 461 461 * means if we're running beneath it, any bios we allocate and 462 462 * submit will not be submitted (and thus freed) until after we 463 463 * return. 464 464 * 465 465 * This exposes us to a potential deadlock if we allocate 466 466 * multiple bios from the same bio_set() while running 467 - * underneath generic_make_request(). If we were to allocate 467 + * underneath submit_bio_noacct(). If we were to allocate 468 468 * multiple bios (say a stacking block driver that was splitting 469 469 * bios), we would deadlock if we exhausted the mempool's 470 470 * reserve.
+15 -17
block/blk-core.c
··· 956 956 return BLK_STS_OK; 957 957 } 958 958 959 - static noinline_for_stack bool 960 - generic_make_request_checks(struct bio *bio) 959 + static noinline_for_stack bool submit_bio_checks(struct bio *bio) 961 960 { 962 961 struct request_queue *q = bio->bi_disk->queue; 963 962 blk_status_t status = BLK_STS_IOERR; ··· 984 985 } 985 986 986 987 /* 987 - * Filter flush bio's early so that make_request based 988 - * drivers without flush support don't have to worry 989 - * about them. 988 + * Filter flush bio's early so that bio based drivers without flush 989 + * support don't have to worry about them. 990 990 */ 991 991 if (op_is_flush(bio->bi_opf) && 992 992 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { ··· 1070 1072 return false; 1071 1073 } 1072 1074 1073 - static blk_qc_t do_make_request(struct bio *bio) 1075 + static blk_qc_t __submit_bio(struct bio *bio) 1074 1076 { 1075 1077 struct gendisk *disk = bio->bi_disk; 1076 1078 blk_qc_t ret = BLK_QC_T_NONE; ··· 1085 1087 } 1086 1088 1087 1089 /** 1088 - * generic_make_request - re-submit a bio to the block device layer for I/O 1090 + * submit_bio_noacct - re-submit a bio to the block device layer for I/O 1089 1091 * @bio: The bio describing the location in memory and on the device. 1090 1092 * 1091 1093 * This is a version of submit_bio() that shall only be used for I/O that is ··· 1093 1095 * systems and other upper level users of the block layer should use 1094 1096 * submit_bio() instead. 1095 1097 */ 1096 - blk_qc_t generic_make_request(struct bio *bio) 1098 + blk_qc_t submit_bio_noacct(struct bio *bio) 1097 1099 { 1098 1100 /* 1099 1101 * bio_list_on_stack[0] contains bios submitted by the current ··· 1104 1106 struct bio_list bio_list_on_stack[2]; 1105 1107 blk_qc_t ret = BLK_QC_T_NONE; 1106 1108 1107 - if (!generic_make_request_checks(bio)) 1109 + if (!submit_bio_checks(bio)) 1108 1110 goto out; 1109 1111 1110 1112 /* ··· 1112 1114 * stack usage with stacked devices could be a problem. So use 1113 1115 * current->bio_list to keep a list of requests submited by a 1114 1116 * ->submit_bio method. current->bio_list is also used as a 1115 - * flag to say if generic_make_request is currently active in this 1117 + * flag to say if submit_bio_noacct is currently active in this 1116 1118 * task or not. If it is NULL, then no make_request is active. If 1117 1119 * it is non-NULL, then a make_request is active, and new requests 1118 1120 * should be added at the tail ··· 1130 1132 * we assign bio_list to a pointer to the bio_list_on_stack, 1131 1133 * thus initialising the bio_list of new bios to be 1132 1134 * added. ->submit_bio() may indeed add some more bios 1133 - * through a recursive call to generic_make_request. If it 1135 + * through a recursive call to submit_bio_noacct. If it 1134 1136 * did, we find a non-NULL value in bio_list and re-enter the loop 1135 1137 * from the top. In this case we really did just take the bio 1136 1138 * of the top of the list (no pretending) and so remove it from ··· 1148 1150 /* Create a fresh bio_list for all subordinate requests */ 1149 1151 bio_list_on_stack[1] = bio_list_on_stack[0]; 1150 1152 bio_list_init(&bio_list_on_stack[0]); 1151 - ret = do_make_request(bio); 1153 + ret = __submit_bio(bio); 1152 1154 1153 1155 /* sort new bios into those for a lower level 1154 1156 * and those for the same level ··· 1172 1174 out: 1173 1175 return ret; 1174 1176 } 1175 - EXPORT_SYMBOL(generic_make_request); 1177 + EXPORT_SYMBOL(submit_bio_noacct); 1176 1178 1177 1179 /** 1178 1180 * direct_make_request - hand a buffer directly to its device driver for I/O 1179 1181 * @bio: The bio describing the location in memory and on the device. 1180 1182 * 1181 - * This function behaves like generic_make_request(), but does not protect 1183 + * This function behaves like submit_bio_noacct(), but does not protect 1182 1184 * against recursion. Must only be used if the called driver is known 1183 1185 * to be blk-mq based. 1184 1186 */ ··· 1190 1192 bio_io_error(bio); 1191 1193 return BLK_QC_T_NONE; 1192 1194 } 1193 - if (!generic_make_request_checks(bio)) 1195 + if (!submit_bio_checks(bio)) 1194 1196 return BLK_QC_T_NONE; 1195 1197 if (unlikely(bio_queue_enter(bio))) 1196 1198 return BLK_QC_T_NONE; ··· 1261 1263 blk_qc_t ret; 1262 1264 1263 1265 psi_memstall_enter(&pflags); 1264 - ret = generic_make_request(bio); 1266 + ret = submit_bio_noacct(bio); 1265 1267 psi_memstall_leave(&pflags); 1266 1268 1267 1269 return ret; 1268 1270 } 1269 1271 1270 - return generic_make_request(bio); 1272 + return submit_bio_noacct(bio); 1271 1273 } 1272 1274 EXPORT_SYMBOL(submit_bio); 1273 1275
+1 -1
block/blk-crypto-fallback.c
··· 228 228 return false; 229 229 } 230 230 bio_chain(split_bio, bio); 231 - generic_make_request(bio); 231 + submit_bio_noacct(bio); 232 232 *bio_ptr = split_bio; 233 233 } 234 234
+1 -1
block/blk-crypto.c
··· 239 239 * kernel crypto API. When the crypto API fallback is used for encryption, 240 240 * blk-crypto may choose to split the bio into 2 - the first one that will 241 241 * continue to be processed and the second one that will be resubmitted via 242 - * generic_make_request. A bounce bio will be allocated to encrypt the contents 242 + * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents 243 243 * of the aforementioned "first one", and *bio_ptr will be updated to this 244 244 * bounce bio. 245 245 *
+1 -1
block/blk-merge.c
··· 338 338 339 339 bio_chain(split, *bio); 340 340 trace_block_split(q, split, (*bio)->bi_iter.bi_sector); 341 - generic_make_request(*bio); 341 + submit_bio_noacct(*bio); 342 342 *bio = split; 343 343 } 344 344 }
+2 -2
block/blk-throttle.c
··· 1339 1339 1340 1340 if (!bio_list_empty(&bio_list_on_stack)) { 1341 1341 blk_start_plug(&plug); 1342 - while((bio = bio_list_pop(&bio_list_on_stack))) 1343 - generic_make_request(bio); 1342 + while ((bio = bio_list_pop(&bio_list_on_stack))) 1343 + submit_bio_noacct(bio); 1344 1344 blk_finish_plug(&plug); 1345 1345 } 1346 1346 }
+1 -1
block/bounce.c
··· 309 309 if (!passthrough && sectors < bio_sectors(*bio_orig)) { 310 310 bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split); 311 311 bio_chain(bio, *bio_orig); 312 - generic_make_request(*bio_orig); 312 + submit_bio_noacct(*bio_orig); 313 313 *bio_orig = bio; 314 314 } 315 315 bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
+3 -3
drivers/block/drbd/drbd_int.h
··· 1576 1576 /* 1577 1577 * used to submit our private bio 1578 1578 */ 1579 - static inline void drbd_generic_make_request(struct drbd_device *device, 1579 + static inline void drbd_submit_bio_noacct(struct drbd_device *device, 1580 1580 int fault_type, struct bio *bio) 1581 1581 { 1582 1582 __release(local); 1583 1583 if (!bio->bi_disk) { 1584 - drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n"); 1584 + drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n"); 1585 1585 bio->bi_status = BLK_STS_IOERR; 1586 1586 bio_endio(bio); 1587 1587 return; ··· 1590 1590 if (drbd_insert_fault(device, fault_type)) 1591 1591 bio_io_error(bio); 1592 1592 else 1593 - generic_make_request(bio); 1593 + submit_bio_noacct(bio); 1594 1594 } 1595 1595 1596 1596 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
+1 -1
drivers/block/drbd/drbd_main.c
··· 2325 2325 * workqueues instead. 2326 2326 */ 2327 2327 2328 - /* We are not just doing generic_make_request(), 2328 + /* We are not just doing submit_bio_noacct(), 2329 2329 * as we want to keep the start_time information. */ 2330 2330 inc_ap_bio(device); 2331 2331 __drbd_make_request(device, bio, start_jif);
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 1723 1723 bios = bios->bi_next; 1724 1724 bio->bi_next = NULL; 1725 1725 1726 - drbd_generic_make_request(device, fault_type, bio); 1726 + drbd_submit_bio_noacct(device, fault_type, bio); 1727 1727 } while (bios); 1728 1728 return 0; 1729 1729
+1 -1
drivers/block/drbd/drbd_req.c
··· 1164 1164 else if (bio_op(bio) == REQ_OP_DISCARD) 1165 1165 drbd_process_discard_or_zeroes_req(req, EE_TRIM); 1166 1166 else 1167 - generic_make_request(bio); 1167 + submit_bio_noacct(bio); 1168 1168 put_ldev(device); 1169 1169 } else 1170 1170 bio_io_error(bio);
+1 -1
drivers/block/drbd/drbd_worker.c
··· 1525 1525 1526 1526 drbd_req_make_private_bio(req, req->master_bio); 1527 1527 bio_set_dev(req->private_bio, device->ldev->backing_bdev); 1528 - generic_make_request(req->private_bio); 1528 + submit_bio_noacct(req->private_bio); 1529 1529 1530 1530 return 0; 1531 1531 }
+1 -1
drivers/block/pktcdvd.c
··· 913 913 } 914 914 915 915 atomic_inc(&pd->cdrw.pending_bios); 916 - generic_make_request(bio); 916 + submit_bio_noacct(bio); 917 917 } 918 918 } 919 919
+1 -1
drivers/lightnvm/pblk-read.c
··· 320 320 split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL, 321 321 &pblk_bio_set); 322 322 bio_chain(split_bio, bio); 323 - generic_make_request(bio); 323 + submit_bio_noacct(bio); 324 324 325 325 /* New bio contains first N sectors of the previous one, so 326 326 * we can continue to use existing rqd, but we need to shrink
+1 -1
drivers/md/bcache/bcache.h
··· 929 929 bio_endio(bio); 930 930 return; 931 931 } 932 - generic_make_request(bio); 932 + submit_bio_noacct(bio); 933 933 } 934 934 935 935 /*
+1 -1
drivers/md/bcache/btree.c
··· 959 959 * bch_btree_node_get - find a btree node in the cache and lock it, reading it 960 960 * in from disk if necessary. 961 961 * 962 - * If IO is necessary and running under generic_make_request, returns -EAGAIN. 962 + * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN. 963 963 * 964 964 * The btree node will have either a read or a write lock held, depending on 965 965 * level and op->lock.
+3 -4
drivers/md/bcache/request.c
··· 1115 1115 !blk_queue_discard(bdev_get_queue(dc->bdev))) 1116 1116 bio->bi_end_io(bio); 1117 1117 else 1118 - generic_make_request(bio); 1118 + submit_bio_noacct(bio); 1119 1119 } 1120 1120 1121 1121 static void quit_max_writeback_rate(struct cache_set *c, ··· 1197 1197 if (!bio->bi_iter.bi_size) { 1198 1198 /* 1199 1199 * can't call bch_journal_meta from under 1200 - * generic_make_request 1200 + * submit_bio_noacct 1201 1201 */ 1202 1202 continue_at_nobarrier(&s->cl, 1203 1203 cached_dev_nodata, ··· 1311 1311 1312 1312 if (!bio->bi_iter.bi_size) { 1313 1313 /* 1314 - * can't call bch_journal_meta from under 1315 - * generic_make_request 1314 + * can't call bch_journal_meta from under submit_bio_noacct 1316 1315 */ 1317 1316 continue_at_nobarrier(&s->cl, 1318 1317 flash_dev_nodata,
+3 -3
drivers/md/dm-cache-target.c
··· 886 886 static void accounted_request(struct cache *cache, struct bio *bio) 887 887 { 888 888 accounted_begin(cache, bio); 889 - generic_make_request(bio); 889 + submit_bio_noacct(bio); 890 890 } 891 891 892 892 static void issue_op(struct bio *bio, void *context) ··· 1792 1792 bool commit_needed; 1793 1793 1794 1794 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) 1795 - generic_make_request(bio); 1795 + submit_bio_noacct(bio); 1796 1796 1797 1797 return commit_needed; 1798 1798 } ··· 1858 1858 1859 1859 if (cache->features.discard_passdown) { 1860 1860 remap_to_origin(cache, bio); 1861 - generic_make_request(bio); 1861 + submit_bio_noacct(bio); 1862 1862 } else 1863 1863 bio_endio(bio); 1864 1864
+5 -5
drivers/md/dm-clone-target.c
··· 330 330 blk_start_plug(&plug); 331 331 332 332 while ((bio = bio_list_pop(bios))) 333 - generic_make_request(bio); 333 + submit_bio_noacct(bio); 334 334 335 335 blk_finish_plug(&plug); 336 336 } ··· 346 346 static void issue_bio(struct clone *clone, struct bio *bio) 347 347 { 348 348 if (!bio_triggers_commit(clone, bio)) { 349 - generic_make_request(bio); 349 + submit_bio_noacct(bio); 350 350 return; 351 351 } 352 352 ··· 473 473 bio_region_range(clone, bio, &rs, &nr_regions); 474 474 trim_bio(bio, region_to_sector(clone, rs), 475 475 nr_regions << clone->region_shift); 476 - generic_make_request(bio); 476 + submit_bio_noacct(bio); 477 477 } else 478 478 bio_endio(bio); 479 479 } ··· 865 865 bio->bi_private = hd; 866 866 867 867 atomic_inc(&hd->clone->hydrations_in_flight); 868 - generic_make_request(bio); 868 + submit_bio_noacct(bio); 869 869 } 870 870 871 871 /* ··· 1281 1281 */ 1282 1282 bio_endio(bio); 1283 1283 } else { 1284 - generic_make_request(bio); 1284 + submit_bio_noacct(bio); 1285 1285 } 1286 1286 } 1287 1287 }
+3 -3
drivers/md/dm-crypt.c
··· 1789 1789 return 1; 1790 1790 } 1791 1791 1792 - generic_make_request(clone); 1792 + submit_bio_noacct(clone); 1793 1793 return 0; 1794 1794 } 1795 1795 ··· 1815 1815 { 1816 1816 struct bio *clone = io->ctx.bio_out; 1817 1817 1818 - generic_make_request(clone); 1818 + submit_bio_noacct(clone); 1819 1819 } 1820 1820 1821 1821 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) ··· 1893 1893 clone->bi_iter.bi_sector = cc->start + io->sector; 1894 1894 1895 1895 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { 1896 - generic_make_request(clone); 1896 + submit_bio_noacct(clone); 1897 1897 return; 1898 1898 } 1899 1899
+1 -1
drivers/md/dm-delay.c
··· 72 72 while (bio) { 73 73 n = bio->bi_next; 74 74 bio->bi_next = NULL; 75 - generic_make_request(bio); 75 + submit_bio_noacct(bio); 76 76 bio = n; 77 77 } 78 78 }
+1 -1
drivers/md/dm-era-target.c
··· 1265 1265 bio_io_error(bio); 1266 1266 else 1267 1267 while ((bio = bio_list_pop(&marked_bios))) 1268 - generic_make_request(bio); 1268 + submit_bio_noacct(bio); 1269 1269 } 1270 1270 1271 1271 static void process_rpc_calls(struct era *era)
+2 -2
drivers/md/dm-integrity.c
··· 2115 2115 dio->in_flight = (atomic_t)ATOMIC_INIT(1); 2116 2116 dio->completion = NULL; 2117 2117 2118 - generic_make_request(bio); 2118 + submit_bio_noacct(bio); 2119 2119 2120 2120 return; 2121 2121 } 2122 2122 2123 - generic_make_request(bio); 2123 + submit_bio_noacct(bio); 2124 2124 2125 2125 if (need_sync_io) { 2126 2126 wait_for_completion_io(&read_comp);
+1 -1
drivers/md/dm-mpath.c
··· 677 677 bio_endio(bio); 678 678 break; 679 679 case DM_MAPIO_REMAPPED: 680 - generic_make_request(bio); 680 + submit_bio_noacct(bio); 681 681 break; 682 682 case DM_MAPIO_SUBMITTED: 683 683 break;
+1 -1
drivers/md/dm-raid1.c
··· 779 779 wakeup_mirrord(ms); 780 780 } else { 781 781 map_bio(get_default_mirror(ms), bio); 782 - generic_make_request(bio); 782 + submit_bio_noacct(bio); 783 783 } 784 784 } 785 785 }
+1 -1
drivers/md/dm-snap-persistent.c
··· 252 252 253 253 /* 254 254 * Issue the synchronous I/O from a different thread 255 - * to avoid generic_make_request recursion. 255 + * to avoid submit_bio_noacct recursion. 256 256 */ 257 257 INIT_WORK_ONSTACK(&req.work, do_metadata); 258 258 queue_work(ps->metadata_wq, &req.work);
+3 -3
drivers/md/dm-snap.c
··· 1568 1568 while (bio) { 1569 1569 n = bio->bi_next; 1570 1570 bio->bi_next = NULL; 1571 - generic_make_request(bio); 1571 + submit_bio_noacct(bio); 1572 1572 bio = n; 1573 1573 } 1574 1574 } ··· 1588 1588 bio->bi_next = NULL; 1589 1589 r = do_origin(s->origin, bio, false); 1590 1590 if (r == DM_MAPIO_REMAPPED) 1591 - generic_make_request(bio); 1591 + submit_bio_noacct(bio); 1592 1592 bio = n; 1593 1593 } 1594 1594 } ··· 1829 1829 bio->bi_end_io = full_bio_end_io; 1830 1830 bio->bi_private = callback_data; 1831 1831 1832 - generic_make_request(bio); 1832 + submit_bio_noacct(bio); 1833 1833 } 1834 1834 1835 1835 static struct dm_snap_pending_exception *
+2 -2
drivers/md/dm-thin.c
··· 758 758 struct pool *pool = tc->pool; 759 759 760 760 if (!bio_triggers_commit(tc, bio)) { 761 - generic_make_request(bio); 761 + submit_bio_noacct(bio); 762 762 return; 763 763 } 764 764 ··· 2394 2394 if (bio->bi_opf & REQ_PREFLUSH) 2395 2395 bio_endio(bio); 2396 2396 else 2397 - generic_make_request(bio); 2397 + submit_bio_noacct(bio); 2398 2398 } 2399 2399 } 2400 2400
+1 -1
drivers/md/dm-verity-target.c
··· 681 681 682 682 verity_submit_prefetch(v, io); 683 683 684 - generic_make_request(bio); 684 + submit_bio_noacct(bio); 685 685 686 686 return DM_MAPIO_SUBMITTED; 687 687 }
+1 -1
drivers/md/dm-writecache.c
··· 1238 1238 bio_end_sector(bio)); 1239 1239 wc_unlock(wc); 1240 1240 bio_set_dev(bio, wc->dev->bdev); 1241 - generic_make_request(bio); 1241 + submit_bio_noacct(bio); 1242 1242 } else { 1243 1243 writecache_flush(wc); 1244 1244 wc_unlock(wc);
+1 -1
drivers/md/dm-zoned-target.c
··· 140 140 bio_advance(bio, clone->bi_iter.bi_size); 141 141 142 142 refcount_inc(&bioctx->ref); 143 - generic_make_request(clone); 143 + submit_bio_noacct(clone); 144 144 145 145 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 146 146 zone->wp_block += nr_blocks;
+5 -5
drivers/md/dm.c
··· 1305 1305 if (md->type == DM_TYPE_NVME_BIO_BASED) 1306 1306 ret = direct_make_request(clone); 1307 1307 else 1308 - ret = generic_make_request(clone); 1308 + ret = submit_bio_noacct(clone); 1309 1309 break; 1310 1310 case DM_MAPIO_KILL: 1311 1311 free_tio(tio); ··· 1652 1652 error = __split_and_process_non_flush(&ci); 1653 1653 if (current->bio_list && ci.sector_count && !error) { 1654 1654 /* 1655 - * Remainder must be passed to generic_make_request() 1655 + * Remainder must be passed to submit_bio_noacct() 1656 1656 * so that it gets handled *after* bios already submitted 1657 1657 * have been completely processed. 1658 1658 * We take a clone of the original to store in ··· 1677 1677 1678 1678 bio_chain(b, bio); 1679 1679 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); 1680 - ret = generic_make_request(bio); 1680 + ret = submit_bio_noacct(bio); 1681 1681 break; 1682 1682 } 1683 1683 } ··· 1745 1745 1746 1746 bio_chain(split, *bio); 1747 1747 trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); 1748 - generic_make_request(*bio); 1748 + submit_bio_noacct(*bio); 1749 1749 *bio = split; 1750 1750 } 1751 1751 } ··· 2500 2500 break; 2501 2501 2502 2502 if (dm_request_based(md)) 2503 - (void) generic_make_request(c); 2503 + (void) submit_bio_noacct(c); 2504 2504 else 2505 2505 (void) dm_process_bio(md, map, c); 2506 2506 }
+2 -2
drivers/md/md-faulty.c
··· 169 169 if (bio_data_dir(bio) == WRITE) { 170 170 /* write request */ 171 171 if (atomic_read(&conf->counters[WriteAll])) { 172 - /* special case - don't decrement, don't generic_make_request, 172 + /* special case - don't decrement, don't submit_bio_noacct, 173 173 * just fail immediately 174 174 */ 175 175 bio_io_error(bio); ··· 214 214 } else 215 215 bio_set_dev(bio, conf->rdev->bdev); 216 216 217 - generic_make_request(bio); 217 + submit_bio_noacct(bio); 218 218 return true; 219 219 } 220 220
+2 -2
drivers/md/md-linear.c
··· 267 267 struct bio *split = bio_split(bio, end_sector - bio_sector, 268 268 GFP_NOIO, &mddev->bio_set); 269 269 bio_chain(split, bio); 270 - generic_make_request(bio); 270 + submit_bio_noacct(bio); 271 271 bio = split; 272 272 } 273 273 ··· 286 286 bio_sector); 287 287 mddev_check_writesame(mddev, bio); 288 288 mddev_check_write_zeroes(mddev, bio); 289 - generic_make_request(bio); 289 + submit_bio_noacct(bio); 290 290 } 291 291 return true; 292 292
+2 -2
drivers/md/md-multipath.c
··· 131 131 mp_bh->bio.bi_private = mp_bh; 132 132 mddev_check_writesame(mddev, &mp_bh->bio); 133 133 mddev_check_write_zeroes(mddev, &mp_bh->bio); 134 - generic_make_request(&mp_bh->bio); 134 + submit_bio_noacct(&mp_bh->bio); 135 135 return true; 136 136 } 137 137 ··· 348 348 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; 349 349 bio->bi_end_io = multipath_end_request; 350 350 bio->bi_private = mp_bh; 351 - generic_make_request(bio); 351 + submit_bio_noacct(bio); 352 352 } 353 353 } 354 354 spin_unlock_irqrestore(&conf->device_lock, flags);
+4 -4
drivers/md/raid0.c
··· 495 495 zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, 496 496 &mddev->bio_set); 497 497 bio_chain(split, bio); 498 - generic_make_request(bio); 498 + submit_bio_noacct(bio); 499 499 bio = split; 500 500 end = zone->zone_end; 501 501 } else ··· 559 559 trace_block_bio_remap(bdev_get_queue(rdev->bdev), 560 560 discard_bio, disk_devt(mddev->gendisk), 561 561 bio->bi_iter.bi_sector); 562 - generic_make_request(discard_bio); 562 + submit_bio_noacct(discard_bio); 563 563 } 564 564 bio_endio(bio); 565 565 } ··· 600 600 struct bio *split = bio_split(bio, sectors, GFP_NOIO, 601 601 &mddev->bio_set); 602 602 bio_chain(split, bio); 603 - generic_make_request(bio); 603 + submit_bio_noacct(bio); 604 604 bio = split; 605 605 } 606 606 ··· 633 633 disk_devt(mddev->gendisk), bio_sector); 634 634 mddev_check_writesame(mddev, bio); 635 635 mddev_check_write_zeroes(mddev, bio); 636 - generic_make_request(bio); 636 + submit_bio_noacct(bio); 637 637 return true; 638 638 } 639 639
+7 -7
drivers/md/raid1.c
··· 834 834 /* Just ignore it */ 835 835 bio_endio(bio); 836 836 else 837 - generic_make_request(bio); 837 + submit_bio_noacct(bio); 838 838 bio = next; 839 839 cond_resched(); 840 840 } ··· 1312 1312 struct bio *split = bio_split(bio, max_sectors, 1313 1313 gfp, &conf->bio_split); 1314 1314 bio_chain(split, bio); 1315 - generic_make_request(bio); 1315 + submit_bio_noacct(bio); 1316 1316 bio = split; 1317 1317 r1_bio->master_bio = bio; 1318 1318 r1_bio->sectors = max_sectors; ··· 1338 1338 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, 1339 1339 disk_devt(mddev->gendisk), r1_bio->sector); 1340 1340 1341 - generic_make_request(read_bio); 1341 + submit_bio_noacct(read_bio); 1342 1342 } 1343 1343 1344 1344 static void raid1_write_request(struct mddev *mddev, struct bio *bio, ··· 1483 1483 struct bio *split = bio_split(bio, max_sectors, 1484 1484 GFP_NOIO, &conf->bio_split); 1485 1485 bio_chain(split, bio); 1486 - generic_make_request(bio); 1486 + submit_bio_noacct(bio); 1487 1487 bio = split; 1488 1488 r1_bio->master_bio = bio; 1489 1489 r1_bio->sectors = max_sectors; ··· 2240 2240 atomic_inc(&r1_bio->remaining); 2241 2241 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); 2242 2242 2243 - generic_make_request(wbio); 2243 + submit_bio_noacct(wbio); 2244 2244 } 2245 2245 2246 2246 put_sync_write_buf(r1_bio, 1); ··· 2926 2926 md_sync_acct_bio(bio, nr_sectors); 2927 2927 if (read_targets == 1) 2928 2928 bio->bi_opf &= ~MD_FAILFAST; 2929 - generic_make_request(bio); 2929 + submit_bio_noacct(bio); 2930 2930 } 2931 2931 } 2932 2932 } else { ··· 2935 2935 md_sync_acct_bio(bio, nr_sectors); 2936 2936 if (read_targets == 1) 2937 2937 bio->bi_opf &= ~MD_FAILFAST; 2938 - generic_make_request(bio); 2938 + submit_bio_noacct(bio); 2939 2939 } 2940 2940 return nr_sectors; 2941 2941 }
+14 -14
drivers/md/raid10.c
··· 917 917 /* Just ignore it */ 918 918 bio_endio(bio); 919 919 else 920 - generic_make_request(bio); 920 + submit_bio_noacct(bio); 921 921 bio = next; 922 922 } 923 923 blk_finish_plug(&plug); ··· 1102 1102 /* Just ignore it */ 1103 1103 bio_endio(bio); 1104 1104 else 1105 - generic_make_request(bio); 1105 + submit_bio_noacct(bio); 1106 1106 bio = next; 1107 1107 } 1108 1108 kfree(plug); ··· 1194 1194 gfp, &conf->bio_split); 1195 1195 bio_chain(split, bio); 1196 1196 allow_barrier(conf); 1197 - generic_make_request(bio); 1197 + submit_bio_noacct(bio); 1198 1198 wait_barrier(conf); 1199 1199 bio = split; 1200 1200 r10_bio->master_bio = bio; ··· 1221 1221 trace_block_bio_remap(read_bio->bi_disk->queue, 1222 1222 read_bio, disk_devt(mddev->gendisk), 1223 1223 r10_bio->sector); 1224 - generic_make_request(read_bio); 1224 + submit_bio_noacct(read_bio); 1225 1225 return; 1226 1226 } 1227 1227 ··· 1479 1479 GFP_NOIO, &conf->bio_split); 1480 1480 bio_chain(split, bio); 1481 1481 allow_barrier(conf); 1482 - generic_make_request(bio); 1482 + submit_bio_noacct(bio); 1483 1483 wait_barrier(conf); 1484 1484 bio = split; 1485 1485 r10_bio->master_bio = bio; ··· 2099 2099 tbio->bi_opf |= MD_FAILFAST; 2100 2100 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2101 2101 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); 2102 - generic_make_request(tbio); 2102 + submit_bio_noacct(tbio); 2103 2103 } 2104 2104 2105 2105 /* Now write out to any replacement devices ··· 2118 2118 atomic_inc(&r10_bio->remaining); 2119 2119 md_sync_acct(conf->mirrors[d].replacement->bdev, 2120 2120 bio_sectors(tbio)); 2121 - generic_make_request(tbio); 2121 + submit_bio_noacct(tbio); 2122 2122 } 2123 2123 2124 2124 done: ··· 2241 2241 wbio = r10_bio->devs[1].bio; 2242 2242 wbio2 = r10_bio->devs[1].repl_bio; 2243 2243 /* Need to test wbio2->bi_end_io before we call 2244 - * generic_make_request as if the former is NULL, 2244 + * submit_bio_noacct as if the former is NULL, 2245 2245 * the latter is free to free wbio2. 2246 2246 */ 2247 2247 if (wbio2 && !wbio2->bi_end_io) ··· 2249 2249 if (wbio->bi_end_io) { 2250 2250 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2251 2251 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2252 - generic_make_request(wbio); 2252 + submit_bio_noacct(wbio); 2253 2253 } 2254 2254 if (wbio2) { 2255 2255 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2256 2256 md_sync_acct(conf->mirrors[d].replacement->bdev, 2257 2257 bio_sectors(wbio2)); 2258 - generic_make_request(wbio2); 2258 + submit_bio_noacct(wbio2); 2259 2259 } 2260 2260 } ··· 2889 2889 * a number of r10_bio structures, one for each out-of-sync device. 2890 2890 * As we setup these structures, we collect all bio's together into a list 2891 2891 * which we then process collectively to add pages, and then process again 2892 - * to pass to generic_make_request. 2892 + * to pass to submit_bio_noacct. 2893 2893 * 2894 2894 * The r10_bio structures are linked using a borrowed master_bio pointer. 2895 2895 * This link is counted in ->remaining. When the r10_bio that points to NULL ··· 3496 3496 if (bio->bi_end_io == end_sync_read) { 3497 3497 md_sync_acct_bio(bio, nr_sectors); 3498 3498 bio->bi_status = 0; 3499 - generic_make_request(bio); 3499 + submit_bio_noacct(bio); 3500 3500 } 3501 3501 } ··· 4654 4654 md_sync_acct_bio(read_bio, r10_bio->sectors); 4655 4655 atomic_inc(&r10_bio->remaining); 4656 4656 read_bio->bi_next = NULL; 4657 - generic_make_request(read_bio); 4657 + submit_bio_noacct(read_bio); 4658 4658 sectors_done += nr_sectors; 4659 4659 if (sector_nr <= last) 4660 4660 goto read_more; ··· 4717 4717 md_sync_acct_bio(b, r10_bio->sectors); 4718 4718 atomic_inc(&r10_bio->remaining); 4719 4719 b->bi_next = NULL; 4720 - generic_make_request(b); 4720 + submit_bio_noacct(b); 4721 4721 } 4722 4722 end_reshape_request(r10_bio); 4723 4723 }
+5 -5
drivers/md/raid5.c
··· 873 873 struct bio *bio; 874 874 875 875 while ((bio = bio_list_pop(tmp))) 876 - generic_make_request(bio); 876 + submit_bio_noacct(bio); 877 877 } 878 878 879 879 static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) ··· 1151 1151 if (should_defer && op_is_write(op)) 1152 1152 bio_list_add(&pending_bios, bi); 1153 1153 else 1154 - generic_make_request(bi); 1154 + submit_bio_noacct(bi); 1155 1155 } 1156 1156 if (rrdev) { 1157 1157 if (s->syncing || s->expanding || s->expanded ··· 1201 1201 if (should_defer && op_is_write(op)) 1202 1202 bio_list_add(&pending_bios, rbi); 1203 1203 else 1204 - generic_make_request(rbi); 1204 + submit_bio_noacct(rbi); 1205 1205 } 1206 1206 if (!rdev && !rrdev) { 1207 1207 if (op_is_write(op)) ··· 5289 5289 trace_block_bio_remap(align_bi->bi_disk->queue, 5290 5290 align_bi, disk_devt(mddev->gendisk), 5291 5291 raid_bio->bi_iter.bi_sector); 5292 - generic_make_request(align_bi); 5292 + submit_bio_noacct(align_bi); 5293 5293 return 1; 5294 5294 } else { 5295 5295 rcu_read_unlock(); ··· 5309 5309 struct r5conf *conf = mddev->private; 5310 5310 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); 5311 5311 bio_chain(split, raid_bio); 5312 - generic_make_request(raid_bio); 5312 + submit_bio_noacct(raid_bio); 5313 5313 raid_bio = split; 5314 5314 } 5315 5315
+1 -1
drivers/nvme/host/multipath.c
··· 351 351 * path. 352 352 */ 353 353 bio->bi_disk = head->disk; 354 - generic_make_request(bio); 354 + submit_bio_noacct(bio); 355 355 } 356 356 } 357 357
+1 -1
include/linux/blkdev.h
··· 852 852 853 853 extern int blk_register_queue(struct gendisk *disk); 854 854 extern void blk_unregister_queue(struct gendisk *disk); 855 - extern blk_qc_t generic_make_request(struct bio *bio); 855 + blk_qc_t submit_bio_noacct(struct bio *bio); 856 856 extern blk_qc_t direct_make_request(struct bio *bio); 857 857 extern void blk_rq_init(struct request_queue *q, struct request *rq); 858 858 extern void blk_put_request(struct request *);