Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'block-5.11-2020-12-23' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"A few stragglers in here, but mostly just straight fixes. In
particular:

- Set of rnbd fixes for issues around changes for the merge window
(Gioh, Jack, Md Haris Iqbal)

- iocost tracepoint addition (Baolin)

- Copyright/maintainers update (Christoph)

- Remove old blk-mq fast path CPU warning (Daniel)

- loop max_part fix (Josh)

- Remote IPI threaded IRQ fix (Sebastian)

- dasd stable fixes (Stefan)

- bcache merge window fixup and style fixup (Yi, Zheng)"

* tag 'block-5.11-2020-12-23' of git://git.kernel.dk/linux-block:
md/bcache: convert comma to semicolon
bcache:remove a superfluous check in register_bcache
block: update some copyrights
block: remove a pointless self-reference in block_dev.c
MAINTAINERS: add fs/block_dev.c to the block section
blk-mq: Don't complete on a remote CPU in force threaded mode
s390/dasd: fix list corruption of lcu list
s390/dasd: fix list corruption of pavgroup group list
s390/dasd: prevent inconsistent LCU device data
s390/dasd: fix hanging device offline processing
blk-iocost: Add iocg idle state tracepoint
nbd: Respect max_part for all partition scans
block/rnbd-clt: Does not request pdu to rtrs-clt
block/rnbd-clt: Dynamically allocate sglist for rnbd_iu
block/rnbd: Set write-back cache and fua same to the target device
block/rnbd: Fix typos
block/rnbd-srv: Protect dev session sysfs removal
block/rnbd-clt: Fix possible memleak
block/rnbd-clt: Get rid of warning regarding size argument in strlcpy
blk-mq: Remove 'running from the wrong CPU' warning

+145 -94
+1
MAINTAINERS
··· 3199 3199 T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git 3200 3200 F: block/ 3201 3201 F: drivers/block/ 3202 + F: fs/block_dev.c 3202 3203 F: include/linux/blk* 3203 3204 F: kernel/trace/blktrace.c 3204 3205 F: lib/sbitmap.c
+3
block/blk-iocost.c
··· 2185 2185 WEIGHT_ONE); 2186 2186 } 2187 2187 2188 + TRACE_IOCG_PATH(iocg_idle, iocg, now, 2189 + atomic64_read(&iocg->active_period), 2190 + atomic64_read(&ioc->cur_period), vtime); 2188 2191 __propagate_weights(iocg, 0, 0, false, now); 2189 2192 list_del_init(&iocg->active_list); 2190 2193 }
+8 -25
block/blk-mq.c
··· 650 650 if (!IS_ENABLED(CONFIG_SMP) || 651 651 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) 652 652 return false; 653 + /* 654 + * With force threaded interrupts enabled, raising softirq from an SMP 655 + * function call will always result in waking the ksoftirqd thread. 656 + * This is probably worse than completing the request on a different 657 + * cache domain. 658 + */ 659 + if (force_irqthreads) 660 + return false; 653 661 654 662 /* same CPU or cache domain? Complete locally */ 655 663 if (cpu == rq->mq_ctx->cpu || ··· 1502 1494 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1503 1495 { 1504 1496 int srcu_idx; 1505 - 1506 - /* 1507 - * We should be running this queue from one of the CPUs that 1508 - * are mapped to it. 1509 - * 1510 - * There are at least two related races now between setting 1511 - * hctx->next_cpu from blk_mq_hctx_next_cpu() and running 1512 - * __blk_mq_run_hw_queue(): 1513 - * 1514 - * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(), 1515 - * but later it becomes online, then this warning is harmless 1516 - * at all 1517 - * 1518 - * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(), 1519 - * but later it becomes offline, then the warning can't be 1520 - * triggered, and we depend on blk-mq timeout handler to 1521 - * handle dispatched requests to this hctx 1522 - */ 1523 - if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && 1524 - cpu_online(hctx->next_cpu)) { 1525 - printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n", 1526 - raw_smp_processor_id(), 1527 - cpumask_empty(hctx->cpumask) ? "inactive": "active"); 1528 - dump_stack(); 1529 - } 1530 1497 1531 1498 /* 1532 1499 * We can't run the queue inline with ints disabled. Ensure that
+2
block/genhd.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * gendisk handling 4 + * 5 + * Portions Copyright (C) 2020 Christoph Hellwig 4 6 */ 5 7 6 8 #include <linux/module.h>
+1
block/partitions/core.c
··· 2 2 /* 3 3 * Copyright (C) 1991-1998 Linus Torvalds 4 4 * Re-organised Feb 1998 Russell King 5 + * Copyright (C) 2020 Christoph Hellwig 5 6 */ 6 7 #include <linux/fs.h> 7 8 #include <linux/slab.h>
+6 -3
drivers/block/nbd.c
··· 318 318 blk_queue_logical_block_size(nbd->disk->queue, blksize); 319 319 blk_queue_physical_block_size(nbd->disk->queue, blksize); 320 320 321 - set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); 321 + if (max_part) 322 + set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); 322 323 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9)) 323 324 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 324 325 return 0; ··· 1477 1476 refcount_set(&nbd->config_refs, 1); 1478 1477 refcount_inc(&nbd->refs); 1479 1478 mutex_unlock(&nbd->config_lock); 1480 - set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); 1479 + if (max_part) 1480 + set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); 1481 1481 } else if (nbd_disconnected(nbd->config)) { 1482 - set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); 1482 + if (max_part) 1483 + set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); 1483 1484 } 1484 1485 out: 1485 1486 mutex_unlock(&nbd_index_mutex);
+3 -2
drivers/block/rnbd/rnbd-clt-sysfs.c
··· 432 432 * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because 433 433 * of sysfs link already was removed already. 434 434 */ 435 - if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) { 435 + if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) { 436 436 sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name); 437 437 kfree(dev->blk_symlink_name); 438 438 module_put(THIS_MODULE); ··· 521 521 return 0; 522 522 523 523 out_err: 524 - dev->blk_symlink_name[0] = '\0'; 524 + kfree(dev->blk_symlink_name); 525 + dev->blk_symlink_name = NULL; 525 526 return ret; 526 527 }
+56 -38
drivers/block/rnbd/rnbd-clt.c
··· 88 88 dev->discard_alignment = le32_to_cpu(rsp->discard_alignment); 89 89 dev->secure_discard = le16_to_cpu(rsp->secure_discard); 90 90 dev->rotational = rsp->rotational; 91 + dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK); 92 + dev->fua = !!(rsp->cache_policy & RNBD_FUA); 91 93 92 94 dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE; 93 95 dev->max_segments = BMAX_SEGMENTS; ··· 349 347 struct rnbd_iu *iu; 350 348 struct rtrs_permit *permit; 351 349 350 + iu = kzalloc(sizeof(*iu), GFP_KERNEL); 351 + if (!iu) { 352 + return NULL; 353 + } 354 + 352 355 permit = rnbd_get_permit(sess, con_type, 353 356 wait ? RTRS_PERMIT_WAIT : 354 357 RTRS_PERMIT_NOWAIT); 355 - if (unlikely(!permit)) 358 + if (unlikely(!permit)) { 359 + kfree(iu); 356 360 return NULL; 357 - iu = rtrs_permit_to_pdu(permit); 361 + } 362 + 358 363 iu->permit = permit; 359 364 /* 360 365 * 1st reference is dropped after finishing sending a "user" message, 361 366 * 2nd reference is dropped after confirmation with the response is 362 367 * returned. 363 368 * 1st and 2nd can happen in any order, so the rnbd_iu should be 364 - * released (rtrs_permit returned to ibbtrs) only leased after both 369 + * released (rtrs_permit returned to rtrs) only after both 365 370 * are finished. 
366 371 */ 367 372 atomic_set(&iu->refcount, 2); ··· 380 371 381 372 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu) 382 373 { 383 - if (atomic_dec_and_test(&iu->refcount)) 374 + if (atomic_dec_and_test(&iu->refcount)) { 384 375 rnbd_put_permit(sess, iu->permit); 376 + kfree(iu); 377 + } 385 378 } 386 379 387 380 static void rnbd_softirq_done_fn(struct request *rq) ··· 393 382 struct rnbd_iu *iu; 394 383 395 384 iu = blk_mq_rq_to_pdu(rq); 385 + sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); 396 386 rnbd_put_permit(sess, iu->permit); 397 387 blk_mq_end_request(rq, errno_to_blk_status(iu->errno)); 398 388 } ··· 487 475 iu->buf = NULL; 488 476 iu->dev = dev; 489 477 490 - sg_mark_end(&iu->sglist[0]); 478 + sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 491 479 492 480 msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE); 493 481 msg.device_id = cpu_to_le32(device_id); ··· 502 490 err = errno; 503 491 } 504 492 493 + sg_free_table(&iu->sgt); 505 494 rnbd_put_iu(sess, iu); 506 495 return err; 507 496 } ··· 575 562 iu->buf = rsp; 576 563 iu->dev = dev; 577 564 578 - sg_init_one(iu->sglist, rsp, sizeof(*rsp)); 565 + sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 566 + sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); 579 567 580 568 msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN); 581 569 msg.access_mode = dev->access_mode; ··· 584 570 585 571 WARN_ON(!rnbd_clt_get_dev(dev)); 586 572 err = send_usr_msg(sess->rtrs, READ, iu, 587 - &vec, sizeof(*rsp), iu->sglist, 1, 573 + &vec, sizeof(*rsp), iu->sgt.sgl, 1, 588 574 msg_open_conf, &errno, wait); 589 575 if (err) { 590 576 rnbd_clt_put_dev(dev); ··· 594 580 err = errno; 595 581 } 596 582 583 + sg_free_table(&iu->sgt); 597 584 rnbd_put_iu(sess, iu); 598 585 return err; 599 586 } ··· 623 608 iu->buf = rsp; 624 609 iu->sess = sess; 625 610 626 - sg_init_one(iu->sglist, rsp, sizeof(*rsp)); 611 + sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 612 + sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); 627 613 628 614 msg.hdr.type = 
cpu_to_le16(RNBD_MSG_SESS_INFO); 629 615 msg.ver = RNBD_PROTO_VER_MAJOR; ··· 640 624 goto put_iu; 641 625 } 642 626 err = send_usr_msg(sess->rtrs, READ, iu, 643 - &vec, sizeof(*rsp), iu->sglist, 1, 627 + &vec, sizeof(*rsp), iu->sgt.sgl, 1, 644 628 msg_sess_info_conf, &errno, wait); 645 629 if (err) { 646 630 rnbd_clt_put_sess(sess); ··· 650 634 } else { 651 635 err = errno; 652 636 } 653 - 637 + sg_free_table(&iu->sgt); 654 638 rnbd_put_iu(sess, iu); 655 639 return err; 656 640 } ··· 819 803 rnbd_init_cpu_qlists(sess->cpu_queues); 820 804 821 805 /* 822 - * That is simple percpu variable which stores cpu indeces, which are 806 + * That is simple percpu variable which stores cpu indices, which are 823 807 * incremented on each access. We need that for the sake of fairness 824 808 * to wake up queues in a round-robin manner. 825 809 */ ··· 1030 1014 * See queue limits. 1031 1015 */ 1032 1016 if (req_op(rq) != REQ_OP_DISCARD) 1033 - sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist); 1017 + sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl); 1034 1018 1035 1019 if (sg_cnt == 0) 1036 - /* Do not forget to mark the end */ 1037 - sg_mark_end(&iu->sglist[0]); 1020 + sg_mark_end(&iu->sgt.sgl[0]); 1038 1021 1039 1022 msg.hdr.type = cpu_to_le16(RNBD_MSG_IO); 1040 1023 msg.device_id = cpu_to_le32(dev->device_id); ··· 1042 1027 .iov_base = &msg, 1043 1028 .iov_len = sizeof(msg) 1044 1029 }; 1045 - size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt); 1030 + size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt); 1046 1031 req_ops = (struct rtrs_clt_req_ops) { 1047 1032 .priv = iu, 1048 1033 .conf_fn = msg_io_conf, 1049 1034 }; 1050 1035 err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit, 1051 - &vec, 1, size, iu->sglist, sg_cnt); 1036 + &vec, 1, size, iu->sgt.sgl, sg_cnt); 1052 1037 if (unlikely(err)) { 1053 1038 rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n", 1054 1039 err); ··· 1135 1120 struct rnbd_clt_dev *dev = rq->rq_disk->private_data; 1136 1121 struct 
rnbd_iu *iu = blk_mq_rq_to_pdu(rq); 1137 1122 int err; 1123 + blk_status_t ret = BLK_STS_IOERR; 1138 1124 1139 1125 if (unlikely(dev->dev_state != DEV_STATE_MAPPED)) 1140 1126 return BLK_STS_IOERR; ··· 1147 1131 return BLK_STS_RESOURCE; 1148 1132 } 1149 1133 1134 + iu->sgt.sgl = iu->first_sgl; 1135 + err = sg_alloc_table_chained(&iu->sgt, 1136 + /* Even-if the request has no segment, 1137 + * sglist must have one entry at least */ 1138 + blk_rq_nr_phys_segments(rq) ? : 1, 1139 + iu->sgt.sgl, 1140 + RNBD_INLINE_SG_CNT); 1141 + if (err) { 1142 + rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err); 1143 + rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); 1144 + rnbd_put_permit(dev->sess, iu->permit); 1145 + return BLK_STS_RESOURCE; 1146 + } 1147 + 1150 1148 blk_mq_start_request(rq); 1151 1149 err = rnbd_client_xfer_request(dev, rq, iu); 1152 1150 if (likely(err == 0)) 1153 1151 return BLK_STS_OK; 1154 1152 if (unlikely(err == -EAGAIN || err == -ENOMEM)) { 1155 1153 rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); 1156 - rnbd_put_permit(dev->sess, iu->permit); 1157 - return BLK_STS_RESOURCE; 1154 + ret = BLK_STS_RESOURCE; 1158 1155 } 1159 - 1156 + sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); 1160 1157 rnbd_put_permit(dev->sess, iu->permit); 1161 - return BLK_STS_IOERR; 1162 - } 1163 - 1164 - static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 1165 - unsigned int hctx_idx, unsigned int numa_node) 1166 - { 1167 - struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq); 1168 - 1169 - sg_init_table(iu->sglist, BMAX_SEGMENTS); 1170 - return 0; 1158 + return ret; 1171 1159 } 1172 1160 1173 1161 static struct blk_mq_ops rnbd_mq_ops = { 1174 1162 .queue_rq = rnbd_queue_rq, 1175 - .init_request = rnbd_init_request, 1176 1163 .complete = rnbd_softirq_done_fn, 1177 1164 }; 1178 1165 ··· 1189 1170 tag_set->numa_node = NUMA_NO_NODE; 1190 1171 tag_set->flags = BLK_MQ_F_SHOULD_MERGE | 1191 1172 BLK_MQ_F_TAG_QUEUE_SHARED; 1192 - tag_set->cmd_size = 
sizeof(struct rnbd_iu); 1173 + tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE; 1193 1174 tag_set->nr_hw_queues = num_online_cpus(); 1194 1175 1195 1176 return blk_mq_alloc_tag_set(tag_set); ··· 1227 1208 */ 1228 1209 sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname, 1229 1210 paths, path_cnt, port_nr, 1230 - sizeof(struct rnbd_iu), 1211 + 0, /* Do not use pdu of rtrs */ 1231 1212 RECONNECT_DELAY, BMAX_SEGMENTS, 1232 1213 BLK_MAX_SEGMENT_SIZE, 1233 1214 MAX_RECONNECTS); ··· 1324 1305 blk_queue_max_segments(dev->queue, dev->max_segments); 1325 1306 blk_queue_io_opt(dev->queue, dev->sess->max_io_size); 1326 1307 blk_queue_virt_boundary(dev->queue, SZ_4K - 1); 1327 - blk_queue_write_cache(dev->queue, true, true); 1308 + blk_queue_write_cache(dev->queue, dev->wc, dev->fua); 1328 1309 dev->queue->queuedata = dev; 1329 1310 } 1330 1311 ··· 1407 1388 goto out_queues; 1408 1389 } 1409 1390 1410 - dev->pathname = kzalloc(strlen(pathname) + 1, GFP_KERNEL); 1391 + dev->pathname = kstrdup(pathname, GFP_KERNEL); 1411 1392 if (!dev->pathname) { 1412 1393 ret = -ENOMEM; 1413 1394 goto out_queues; 1414 1395 } 1415 - strlcpy(dev->pathname, pathname, strlen(pathname) + 1); 1416 1396 1417 1397 dev->clt_device_id = ret; 1418 1398 dev->sess = sess; ··· 1547 1529 } 1548 1530 1549 1531 rnbd_clt_info(dev, 1550 - "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n", 1532 + "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n", 1551 1533 dev->gd->disk_name, dev->nsectors, 1552 1534 dev->logical_block_size, 
dev->physical_block_size, 1553 1535 dev->max_write_same_sectors, dev->max_discard_sectors, 1554 1536 dev->discard_granularity, dev->discard_alignment, 1555 1537 dev->secure_discard, dev->max_segments, 1556 - dev->max_hw_sectors, dev->rotational); 1538 + dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua); 1557 1539 1558 1540 mutex_unlock(&dev->lock); 1559 1541 ··· 1685 1667 /* 1686 1668 * Here at this point there is no any concurrent access to sessions 1687 1669 * list and devices list: 1688 - * 1. New session or device can'be be created - session sysfs files 1670 + * 1. New session or device can't be created - session sysfs files 1689 1671 * are removed. 1690 1672 * 2. Device or session can't be removed - module reference is taken 1691 1673 * into account in unmap device sysfs callback.
+11 -1
drivers/block/rnbd/rnbd-clt.h
··· 44 44 int errno; 45 45 }; 46 46 47 + #ifdef CONFIG_ARCH_NO_SG_CHAIN 48 + #define RNBD_INLINE_SG_CNT 0 49 + #else 50 + #define RNBD_INLINE_SG_CNT 2 51 + #endif 52 + #define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT) 53 + 47 54 struct rnbd_iu { 48 55 union { 49 56 struct request *rq; /* for block io */ ··· 63 56 /* use to send msg associated with a sess */ 64 57 struct rnbd_clt_session *sess; 65 58 }; 66 - struct scatterlist sglist[BMAX_SEGMENTS]; 59 + struct sg_table sgt; 67 60 struct work_struct work; 68 61 int errno; 69 62 struct rnbd_iu_comp comp; 70 63 atomic_t refcount; 64 + struct scatterlist first_sgl[]; /* must be the last one */ 71 65 }; 72 66 73 67 struct rnbd_cpu_qlist { ··· 120 112 enum rnbd_access_mode access_mode; 121 113 bool read_only; 122 114 bool rotational; 115 + bool wc; 116 + bool fua; 123 117 u32 max_hw_sectors; 124 118 u32 max_write_same_sectors; 125 119 u32 max_discard_sectors;
+8 -1
drivers/block/rnbd/rnbd-proto.h
··· 108 108 __le32 device_id; 109 109 }; 110 110 111 + enum rnbd_cache_policy { 112 + RNBD_FUA = 1 << 0, 113 + RNBD_WRITEBACK = 1 << 1, 114 + }; 115 + 111 116 /** 112 117 * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN 113 118 * @hdr: message header ··· 129 124 * @max_segments: max segments hardware support in one transfer 130 125 * @secure_discard: supports secure discard 131 126 * @rotation: is a rotational disc? 127 + * @cache_policy: support write-back caching or FUA? 132 128 */ 133 129 struct rnbd_msg_open_rsp { 134 130 struct rnbd_msg_hdr hdr; ··· 145 139 __le16 max_segments; 146 140 __le16 secure_discard; 147 141 u8 rotational; 148 - u8 reserved[11]; 142 + u8 cache_policy; 143 + u8 reserved[10]; 149 144 }; 150 145 151 146 /**
+9 -3
drivers/block/rnbd/rnbd-srv.c
··· 338 338 339 339 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev) 340 340 { 341 + mutex_lock(&sess_dev->sess->lock); 341 342 rnbd_srv_destroy_dev_session_sysfs(sess_dev); 343 + mutex_unlock(&sess_dev->sess->lock); 342 344 sess_dev->keep_id = true; 343 - 344 345 } 345 346 346 347 static int process_msg_close(struct rtrs_srv *rtrs, ··· 550 549 struct rnbd_srv_sess_dev *sess_dev) 551 550 { 552 551 struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev; 552 + struct request_queue *q = bdev_get_queue(rnbd_dev->bdev); 553 553 554 554 rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP); 555 555 rsp->device_id = ··· 575 573 cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev)); 576 574 rsp->secure_discard = 577 575 cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev)); 578 - rsp->rotational = 579 - !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev)); 576 + rsp->rotational = !blk_queue_nonrot(q); 577 + rsp->cache_policy = 0; 578 + if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 579 + rsp->cache_policy |= RNBD_WRITEBACK; 580 + if (blk_queue_fua(q)) 581 + rsp->cache_policy |= RNBD_FUA; 580 582 } 581 583 582 584 static struct rnbd_srv_sess_dev *
-6
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 157 157 } 158 158 EXPORT_SYMBOL(rtrs_clt_put_permit); 159 159 160 - void *rtrs_permit_to_pdu(struct rtrs_permit *permit) 161 - { 162 - return permit + 1; 163 - } 164 - EXPORT_SYMBOL(rtrs_permit_to_pdu); 165 - 166 160 /** 167 161 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit 168 162 * @sess: client session pointer
-7
drivers/infiniband/ulp/rtrs/rtrs.h
··· 63 63 64 64 void rtrs_clt_close(struct rtrs_clt *sess); 65 65 66 - /** 67 - * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer 68 - * @permit: RTRS permit pointer, it associates the memory allocation for future 69 - * RDMA operation. 70 - */ 71 - void *rtrs_permit_to_pdu(struct rtrs_permit *permit); 72 - 73 66 enum { 74 67 RTRS_PERMIT_NOWAIT = 0, 75 68 RTRS_PERMIT_WAIT = 1,
-2
drivers/md/bcache/super.c
··· 2535 2535 else 2536 2536 err = "device busy"; 2537 2537 mutex_unlock(&bch_register_lock); 2538 - if (!IS_ERR(bdev)) 2539 - bdput(bdev); 2540 2538 if (attr == &ksysfs_register_quiet) 2541 2539 goto done; 2542 2540 }
+1 -1
drivers/md/bcache/sysfs.c
··· 404 404 if (!env) 405 405 return -ENOMEM; 406 406 add_uevent_var(env, "DRIVER=bcache"); 407 - add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), 407 + add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); 408 408 add_uevent_var(env, "CACHED_LABEL=%s", buf); 409 409 kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, 410 410 KOBJ_CHANGE,
+20 -2
drivers/s390/block/dasd_alias.c
··· 256 256 return; 257 257 device->discipline->get_uid(device, &uid); 258 258 spin_lock_irqsave(&lcu->lock, flags); 259 - list_del_init(&device->alias_list); 260 259 /* make sure that the workers don't use this device */ 261 260 if (device == lcu->suc_data.device) { 262 261 spin_unlock_irqrestore(&lcu->lock, flags); ··· 282 283 283 284 spin_lock_irqsave(&aliastree.lock, flags); 284 285 spin_lock(&lcu->lock); 286 + list_del_init(&device->alias_list); 285 287 if (list_empty(&lcu->grouplist) && 286 288 list_empty(&lcu->active_devices) && 287 289 list_empty(&lcu->inactive_devices)) { ··· 462 462 spin_unlock_irqrestore(&lcu->lock, flags); 463 463 464 464 rc = dasd_sleep_on(cqr); 465 - if (rc && !suborder_not_supported(cqr)) { 465 + if (!rc) 466 + goto out; 467 + 468 + if (suborder_not_supported(cqr)) { 469 + /* suborder not supported or device unusable for IO */ 470 + rc = -EOPNOTSUPP; 471 + } else { 472 + /* IO failed but should be retried */ 466 473 spin_lock_irqsave(&lcu->lock, flags); 467 474 lcu->flags |= NEED_UAC_UPDATE; 468 475 spin_unlock_irqrestore(&lcu->lock, flags); 469 476 } 477 + out: 470 478 dasd_sfree_request(cqr, cqr->memdev); 471 479 return rc; 472 480 } ··· 511 503 return rc; 512 504 513 505 spin_lock_irqsave(&lcu->lock, flags); 506 + /* 507 + * there is another update needed skip the remaining handling 508 + * the data might already be outdated 509 + * but especially do not add the device to an LCU with pending 510 + * update 511 + */ 512 + if (lcu->flags & NEED_UAC_UPDATE) 513 + goto out; 514 514 lcu->pav = NO_PAV; 515 515 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { 516 516 switch (lcu->uac->unit[i].ua_type) { ··· 537 521 alias_list) { 538 522 _add_device_to_lcu(lcu, device, refdev); 539 523 } 524 + out: 540 525 spin_unlock_irqrestore(&lcu->lock, flags); 541 526 return 0; 542 527 } ··· 642 625 } 643 626 if (lcu->flags & UPDATE_PENDING) { 644 627 list_move(&device->alias_list, &lcu->active_devices); 628 + private->pavgroup = NULL; 645 629 
_schedule_lcu_update(lcu, device); 646 630 } 647 631 spin_unlock_irqrestore(&lcu->lock, flags);
+1 -2
fs/block_dev.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * linux/fs/block_dev.c 4 - * 5 3 * Copyright (C) 1991, 1992 Linus Torvalds 6 4 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE 5 + * Copyright (C) 2016 - 2020 Christoph Hellwig 7 6 */ 8 7 9 8 #include <linux/init.h>
+15 -1
include/trace/events/iocost.h
··· 11 11 12 12 #include <linux/tracepoint.h> 13 13 14 - TRACE_EVENT(iocost_iocg_activate, 14 + DECLARE_EVENT_CLASS(iocost_iocg_state, 15 15 16 16 TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, 17 17 u64 last_period, u64 cur_period, u64 vtime), ··· 57 57 __entry->vtime, __entry->inuse, __entry->weight, 58 58 __entry->hweight_inuse, __entry->hweight_active 59 59 ) 60 + ); 61 + 62 + DEFINE_EVENT(iocost_iocg_state, iocost_iocg_activate, 63 + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, 64 + u64 last_period, u64 cur_period, u64 vtime), 65 + 66 + TP_ARGS(iocg, path, now, last_period, cur_period, vtime) 67 + ); 68 + 69 + DEFINE_EVENT(iocost_iocg_state, iocost_iocg_idle, 70 + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, 71 + u64 last_period, u64 cur_period, u64 vtime), 72 + 73 + TP_ARGS(iocg, path, now, last_period, cur_period, vtime) 60 74 ); 61 75 62 76 DECLARE_EVENT_CLASS(iocg_inuse_update,