Repository: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (Linux kernel mirror, for testing)

Merge remote-tracking branch 'jens/for-4.7/core' into dm-4.7

Needed in order to update the DM thinp code to use the new async
__blkdev_issue_discard() interface.

15 files changed, 208 insertions(+), 163 deletions(-)
+9
Documentation/block/queue-sysfs.txt
···
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
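The attribute surfaces as a plain text file, so it is easy to poke at from userspace. A minimal sketch of a reader, assuming an example device named "sda":

/* Sketch: print a block device's cache mode via the new write_cache
 * sysfs attribute. "sda" is only an example device name. */
#include <stdio.h>

int main(void)
{
	char mode[32];
	FILE *f = fopen("/sys/block/sda/queue/write_cache", "r");

	if (!f) {
		perror("write_cache");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		fputs(mode, stdout);	/* "write back" or "write through" */
	fclose(f);
	return 0;
}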
-11
block/bio.c
···
 		bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
+3 -2
block/blk-core.c
···
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
···
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;
 
 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;
 
 	bio->bi_iter.bi_size = len;
+92 -142
block/blk-lib.c
···
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t done;
-	int error;
-	struct completion *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
+
+	return new;
 }
+
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+	struct bio *bio = *biop;
+	unsigned int granularity;
+	int alignment;
+
+	if (!q)
+		return -ENXIO;
+	if (!blk_queue_discard(q))
+		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+	while (nr_sects) {
+		unsigned int req_sects;
+		sector_t end_sect, tmp;
+
+		/* Make sure bi_size doesn't overflow */
+		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+		/*
+		 * If splitting a request, and the next starting sector would be
+		 * misaligned, stop the discard at the previous aligned sector.
+		 */
+		end_sect = sector + req_sects;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
+			req_sects = end_sect - sector;
+		}
+
+		bio = next_bio(bio, type, 1, gfp_mask);
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_bdev = bdev;
+
+		bio->bi_iter.bi_size = req_sects << 9;
+		nr_sects -= req_sects;
+		sector = end_sect;
+
+		/*
+		 * We can loop for a long time in here, if someone does
+		 * full device discards (like mkfs). Be nice and allow
+		 * us to schedule out to avoid softlocking if preempt
+		 * is disabled.
+		 */
+		cond_resched();
+	}
+
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
 
 /**
  * blkdev_issue_discard - queue a discard
···
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
-	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int granularity;
-	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
-	int ret = 0;
+	struct bio *bio = NULL;
 	struct blk_plug plug;
+	int ret;
 
-	if (!q)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
+	if (flags & BLKDEV_DISCARD_SECURE)
 		type |= REQ_SECURE;
-	}
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
 
 	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
-
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		/* Make sure bi_size doesn't overflow */
-		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
-
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
-
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio) {
+		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
 	}
 	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
···
 		sector_t nr_sects, gfp_t gfp_mask,
 		struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
···
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
···
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
···
 		sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
···
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
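This refactor is what the merge description refers to: __blkdev_issue_discard() builds and submits the discard chain without blocking, handing the caller the tail bio, which completes only after every chained bio before it. A minimal sketch of an asynchronous caller in the style DM thinp needs — my_async_discard() and my_discard_end_io() are hypothetical names, not part of this patch:

/*
 * Sketch (kernel context assumed): issue a discard without blocking.
 * __blkdev_issue_discard() submits every bio in the chain except the
 * last; the tail bio completes only after all chained predecessors,
 * so the callback fires once the whole range is discarded.
 */
static void my_discard_end_io(struct bio *bio)
{
	/* bio->bi_error carries any error propagated up the chain */
	complete((struct completion *)bio->bi_private);
	bio_put(bio);
}

static int my_async_discard(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, struct completion *done)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
				     type, &bio);
	if (ret || !bio)
		return ret;	/* early failure, or nothing to discard */

	bio->bi_private = done;
	bio->bi_end_io = my_discard_end_io;
	submit_bio(type, bio);
	return 0;
}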
+12
block/blk-mq-tag.c
···
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
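The new helper just fans the existing per-tags iterator out over every hardware queue, which suits drivers that own a whole tag set rather than a single request queue (e.g. during controller teardown). A minimal sketch of a caller, with hypothetical my_* names:

/* Sketch (kernel context assumed): count in-flight requests across
 * all hardware queues of a tag set. my_count_busy() is hypothetical. */
static void my_count_busy(struct request *rq, void *data, bool reserved)
{
	atomic_inc((atomic_t *)data);
}

static int my_tagset_busy_count(struct blk_mq_tag_set *set)
{
	atomic_t count = ATOMIC_INIT(0);

	blk_mq_tagset_busy_iter(set, my_count_busy, &count);
	return atomic_read(&count);
}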
+1 -2
block/blk-mq.c
···
 {
 	init_request_from_bio(rq, bio);
 
-	if (blk_do_io_stat(rq))
-		blk_account_io_start(rq, 1);
+	blk_account_io_start(rq, 1);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+26
block/blk-settings.c
···
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:		the request queue for the device
+ * @wc:		write back cache on or off
+ * @fua:	device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc) {
+		queue_flag_set(QUEUE_FLAG_WC, q);
+		q->flush_flags = REQ_FLUSH;
+	} else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua) {
+		if (wc)
+			q->flush_flags |= REQ_FUA;
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	} else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
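For a driver this replaces hand-rolled flush_flags setup with one call at probe time. A minimal sketch, assuming a hypothetical mydrv structure carrying the device's feature bits:

/* Sketch (kernel context assumed): advertise a device's cache
 * behaviour when setting up its queue. The mydrv fields are invented
 * for illustration. */
struct mydrv {
	bool volatile_write_cache;	/* device has a write back cache */
	bool supports_fua;		/* device honours FUA writes */
};

static void mydrv_setup_queue(struct mydrv *dev, struct request_queue *q)
{
	/* one call now covers what blk_queue_flush(q, REQ_FLUSH | REQ_FUA)
	 * plus manual flag juggling used to express */
	blk_queue_write_cache(q, dev->volatile_write_cache,
			      dev->supports_fua);
}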
+39
block/blk-sysfs.c
···
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
···
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
···
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
 
+1 -1
drivers/block/skd_main.c
···
 	put_unaligned_be32(count, &buf[16]);
 
 	req = skreq->req;
-	blk_add_request_payload(req, page, len);
+	blk_add_request_payload(req, page, 0, len);
 }
 
 static void skd_request_fn_not_online(struct request_queue *q);
+1 -1
drivers/scsi/sd.c
···
 	 * discarded on disk. This allows us to report completion on the full
 	 * amount of blocks described by the request.
 	 */
-	blk_add_request_payload(rq, page, len);
+	blk_add_request_payload(rq, page, 0, len);
 	ret = scsi_init_io(cmd);
 	rq->__data_len = nr_bytes;
 
+11
include/linux/bio.h
···
 }
 
 /*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio_set_flag(bio, BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
+/*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
  * These memory pools in turn all allocate from the bio_slab
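With the helper in bio.h, code outside block/bio.c — the DM thinp rework named in the merge description — can take an extra completion reference on a bio. A minimal sketch of the pattern; queue_my_background_work() is hypothetical and must eventually call bio_endio() once more:

/* Sketch (kernel context assumed): hold a bio open across an extra
 * asynchronous step. Each bio_inc_remaining() must be balanced by one
 * additional bio_endio() before the bio truly completes. */
static void my_defer_completion(struct bio *bio)
{
	bio_inc_remaining(bio);		/* bio now needs two bio_endio() calls */
	queue_my_background_work(bio);	/* hypothetical; ends with bio_endio(bio) */
}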
+2
include/linux/blk-mq.h
···
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 		void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
+1 -1
include/linux/blk_types.h
···
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
 	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-	 REQ_SECURE | REQ_INTEGRITY)
+	 REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
+6 -1
include/linux/blkdev.h
···
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
+#define QUEUE_FLAG_WC	       23	/* Write back caching */
+#define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
···
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len);
+		int offset, unsigned int len);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 				struct bio_set *bs, gfp_t gfp_mask,
···
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
···
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+4 -2
mm/page-writeback.c
···
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
 
-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+	if (wb_stat(wb, WB_RECLAIMABLE) >
+	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
 		return true;
 
 	if (mdtc) {
···
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;
 
-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+		if (wb_stat(wb, WB_RECLAIMABLE) >
+		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
 			return true;
 	}
 