
Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
"A selection of fixes/changes that should make it into this series.
This contains:

- NVMe, two merges, containing:
    - pci-e, rdma, and fc fixes
    - Device quirks

- Fix for a badblocks leak in null_blk

- bcache fix from Rui Hua for a race condition regression where
-EINTR was returned to upper layers that didn't expect it.

- Regression fix for blktrace for a bug introduced in this series.

- blktrace cleanup for cgroup id.

- bdi registration error handling.

- Small series with cleanups for blk-wbt.

- Various little fixes for typos and the like.

Nothing earth shattering, most important are the NVMe and bcache fixes"

* 'for-linus' of git://git.kernel.dk/linux-block: (34 commits)
nvme-pci: fix NULL pointer dereference in nvme_free_host_mem()
nvme-rdma: fix memory leak during queue allocation
blktrace: fix trace mutex deadlock
nvme-rdma: Use mr pool
nvme-rdma: Check remotely invalidated rkey matches our expected rkey
nvme-rdma: wait for local invalidation before completing a request
nvme-rdma: don't complete requests before a send work request has completed
nvme-rdma: don't suppress send completions
bcache: check return value of register_shrinker
bcache: recover data from backing when data is clean
bcache: Fix building error on MIPS
bcache: add a comment in journal bucket reading
nvme-fc: don't use bit masks for set/test_bit() numbers
blk-wbt: fix comments typo
blk-wbt: move wbt_clear_stat to common place in wbt_done
blk-sysfs: remove NULL pointer checking in queue_wb_lat_store
blk-wbt: remove duplicated setting in wbt_init
nvme-pci: add quirk for delay before CHK RDY for WDC SN200
block: remove useless assignment in bio_split
null_blk: fix dev->badblocks leak
...

+290 -210
+1 -1
block/bio.c
···
 struct bio *bio_split(struct bio *bio, int sectors,
                       gfp_t gfp, struct bio_set *bs)
 {
-        struct bio *split = NULL;
+        struct bio *split;
 
         BUG_ON(sectors <= 0);
         BUG_ON(sectors >= bio_sectors(bio));
+1 -4
block/blk-sysfs.c
···
                 ret = wbt_init(q);
                 if (ret)
                         return ret;
-
-                rwb = q->rq_wb;
-                if (!rwb)
-                        return -EINVAL;
         }
 
+        rwb = q->rq_wb;
         if (val == -1)
                 rwb->min_lat_nsec = wbt_default_latency_nsec(q);
         else if (val >= 0)
+2 -5
block/blk-wbt.c
···
 
                 if (wbt_is_read(stat))
                         wb_timestamp(rwb, &rwb->last_comp);
-                wbt_clear_state(stat);
         } else {
                 WARN_ON_ONCE(stat == rwb->sync_cookie);
                 __wbt_done(rwb, wbt_stat_to_mask(stat));
-                wbt_clear_state(stat);
         }
+        wbt_clear_state(stat);
 }
 
 /*
···
 
         /*
          * At this point we know it's a buffered write. If this is
-         * kswapd trying to free memory, or REQ_SYNC is set, set, then
+         * kswapd trying to free memory, or REQ_SYNC is set, then
          * it's WB_SYNC_ALL writeback, and we'll use the max limit for
          * that. If the write is marked as a background write, then use
          * the idle limit, or go to normal if we haven't had competing
···
                 init_waitqueue_head(&rwb->rq_wait[i].wait);
         }
 
-        rwb->wc = 1;
-        rwb->queue_depth = RWB_DEF_DEPTH;
         rwb->last_comp = rwb->last_issue = jiffies;
         rwb->queue = q;
         rwb->win_nsec = RWB_WINDOW_NSEC;
+6 -3
block/genhd.c
···
                 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                 disk->flags |= GENHD_FL_NO_PART_SCAN;
         } else {
+                int ret;
+
                 /* Register BDI before referencing it from bdev */
                 disk_to_dev(disk)->devt = devt;
-                bdi_register_owner(disk->queue->backing_dev_info,
-                                   disk_to_dev(disk));
+                ret = bdi_register_owner(disk->queue->backing_dev_info,
+                                         disk_to_dev(disk));
+                WARN_ON(ret);
                 blk_register_region(disk_devt(disk), disk->minors, NULL,
                                     exact_match, exact_lock, disk);
         }
···
 
         if (minors > DISK_MAX_PARTS) {
                 printk(KERN_ERR
-                        "block: can't allocated more than %d partitions\n",
+                        "block: can't allocate more than %d partitions\n",
                         DISK_MAX_PARTS);
                 minors = DISK_MAX_PARTS;
         }
+4 -1
drivers/block/null_blk.c
···
 {
         struct nullb_device *dev = to_nullb_device(item);
 
-        badblocks_exit(&dev->badblocks);
         null_free_device_storage(dev, false);
         null_free_dev(dev);
 }
···
 
 static void null_free_dev(struct nullb_device *dev)
 {
+        if (!dev)
+                return;
+
+        badblocks_exit(&dev->badblocks);
         kfree(dev);
 }
 
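The shape of this fix is a common kernel idiom: make the free routine the single owner of sub-object teardown and let it tolerate NULL (as kfree() does), so every caller can free unconditionally and the badblocks state is exited exactly once. A minimal userspace sketch of that idiom, with hypothetical stand-in types rather than the kernel API:

#include <stdlib.h>

/* Hypothetical stand-ins for nullb_device and its badblocks state. */
struct badblocks { int *pages; };
struct nullb_device { struct badblocks badblocks; };

static void badblocks_exit(struct badblocks *bb)
{
        free(bb->pages);
        bb->pages = NULL;
}

/* Single owner of teardown; tolerates NULL so callers need no checks. */
static void null_free_dev(struct nullb_device *dev)
{
        if (!dev)
                return;

        badblocks_exit(&dev->badblocks);   /* exactly one exit per device */
        free(dev);
}

int main(void)
{
        null_free_dev(NULL);               /* safe no-op */
        null_free_dev(calloc(1, sizeof(struct nullb_device)));
        return 0;
}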
+1 -1
drivers/md/bcache/alloc.c
···
                 if (b == -1)
                         goto err;
 
-                k->ptr[i] = PTR(ca->buckets[b].gen,
+                k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                 bucket_to_sector(c, b),
                                 ca->sb.nr_this_dev);
 
+4 -1
drivers/md/bcache/btree.c
···
         c->shrink.scan_objects = bch_mca_scan;
         c->shrink.seeks = 4;
         c->shrink.batch = c->btree_pages * 2;
-        register_shrinker(&c->shrink);
+
+        if (register_shrinker(&c->shrink))
+                pr_warn("bcache: %s: could not register shrinker",
+                        __func__);
 
         return 0;
 }
+1 -1
drivers/md/bcache/extents.c
···
                 return false;
 
         for (i = 0; i < KEY_PTRS(l); i++)
-                if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+                if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                     PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                         return false;
 
+6 -1
drivers/md/bcache/journal.c
···
          * find a sequence of buckets with valid journal entries
          */
         for (i = 0; i < ca->sb.njournal_buckets; i++) {
+                /*
+                 * We must try the index l with ZERO first for
+                 * correctness due to the scenario that the journal
+                 * bucket is circular buffer which might have wrapped
+                 */
                 l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
                 if (test_bit(l, bitmap))
···
                         continue;
 
                 ja->cur_idx = next;
-                k->ptr[n++] = PTR(0,
+                k->ptr[n++] = MAKE_PTR(0,
                                 bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                 ca->sb.nr_this_dev);
         }
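2654435769 is the 32-bit golden-ratio constant (roughly 2^32 divided by the golden ratio), so (i * 2654435769U) % njournal_buckets walks the buckets with a large fixed stride rather than strictly in order; and since i == 0 always maps to bucket 0, the new comment pins down why that probe must come first when the circular journal may have wrapped. A small sketch that just prints the probe order for a hypothetical bucket count:

#include <stdio.h>
#include <stdint.h>

/* 2654435769 ~= 2^32 / golden ratio, the classic Fibonacci-hash constant. */
#define GOLDEN_RATIO_32 2654435769U

int main(void)
{
        uint32_t nbuckets = 10;   /* hypothetical ca->sb.njournal_buckets */
        uint32_t i;

        /* i == 0 always probes bucket 0 first, as the comment requires. */
        for (i = 0; i < nbuckets; i++)
                printf("probe %u -> bucket %u\n", (unsigned)i,
                       (unsigned)((i * GOLDEN_RATIO_32) % nbuckets));
        return 0;
}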
+6 -7
drivers/md/bcache/request.c
···
 {
         struct search *s = container_of(cl, struct search, cl);
         struct bio *bio = &s->bio.bio;
-        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
         /*
-         * If cache device is dirty (dc->has_dirty is non-zero), then
-         * recovery a failed read request from cached device may get a
-         * stale data back. So read failure recovery is only permitted
-         * when cache device is clean.
+         * If read request hit dirty data (s->read_dirty_data is true),
+         * then recovery a failed read request from cached device may
+         * get a stale data back. So read failure recovery is only
+         * permitted when read request hit clean data in cache device,
+         * or when cache read race happened.
          */
-        if (s->recoverable &&
-            (dc && !atomic_read(&dc->has_dirty))) {
+        if (s->recoverable && !s->read_dirty_data) {
                 /* Retry from the backing device: */
                 trace_bcache_read_retry(s->orig_bio);
 
+8 -11
drivers/nvme/host/core.c
···
         int srcu_idx, ret;
         u8 data[16] = { 0, };
 
+        ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+        if (unlikely(!ns))
+                return -EWOULDBLOCK;
+
         put_unaligned_le64(key, &data[0]);
         put_unaligned_le64(sa_key, &data[8]);
 
         memset(&c, 0, sizeof(c));
         c.common.opcode = op;
-        c.common.nsid = cpu_to_le32(head->ns_id);
+        c.common.nsid = cpu_to_le32(ns->head->ns_id);
         c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-        ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-        if (unlikely(!ns))
-                ret = -EWOULDBLOCK;
-        else
-                ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+        ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
         nvme_put_ns_from_disk(head, srcu_idx);
         return ret;
 }
···
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-        struct nvme_ns_head *head = ns->head;
-
         if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                 return;
 
···
 
         mutex_lock(&ns->ctrl->subsys->lock);
         nvme_mpath_clear_current_path(ns);
-        if (head)
-                list_del_rcu(&ns->siblings);
+        list_del_rcu(&ns->siblings);
         mutex_unlock(&ns->ctrl->subsys->lock);
 
         mutex_lock(&ns->ctrl->namespaces_mutex);
         list_del_init(&ns->list);
         mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-        synchronize_srcu(&head->srcu);
+        synchronize_srcu(&ns->head->srcu);
         nvme_put_ns(ns);
 }
 
+30
drivers/nvme/host/fabrics.h
···
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+                struct request *rq)
+{
+        struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+        /*
+         * We cannot accept any other command until the connect command has
+         * completed, so only allow connect to pass.
+         */
+        if (!blk_rq_is_passthrough(rq) ||
+            cmd->common.opcode != nvme_fabrics_command ||
+            cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+                /*
+                 * Reconnecting state means transport disruption, which can take
+                 * a long time and even might fail permanently, fail fast to
+                 * give upper layers a chance to failover.
+                 * Deleting state means that the ctrl will never accept commands
+                 * again, fail it permanently.
+                 */
+                if (ctrl->state == NVME_CTRL_RECONNECTING ||
+                    ctrl->state == NVME_CTRL_DELETING) {
+                        nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+                        return BLK_STS_IOERR;
+                }
+                return BLK_STS_RESOURCE; /* try again later */
+        }
+
+        return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
+19 -2
drivers/nvme/host/fc.c
···
 
 
 enum nvme_fc_queue_flags {
-        NVME_FC_Q_CONNECTED = (1 << 0),
+        NVME_FC_Q_CONNECTED = 0,
+        NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY      3       /* ms units */
···
         if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
                 return;
 
+        clear_bit(NVME_FC_Q_LIVE, &queue->flags);
         /*
          * Current implementation never disconnects a single queue.
          * It always terminates a whole association. So there is never
···
          */
 
         queue->connection_id = 0;
-        clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
···
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
                         break;
+
+                set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
         }
 
         return ret;
···
         return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+                struct request *rq)
+{
+        if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+                return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+        return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
···
         enum nvmefc_fcp_datadir io_dir;
         u32 data_len;
         blk_status_t ret;
+
+        ret = nvme_fc_is_ready(queue, rq);
+        if (unlikely(ret))
+                return ret;
 
         ret = nvme_setup_cmd(ns, rq, sqe);
         if (ret)
···
         ret = nvmf_connect_admin_queue(&ctrl->ctrl);
         if (ret)
                 goto out_disconnect_admin_queue;
+
+        set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
 
         /*
          * Check controller capabilities
+1 -1
drivers/nvme/host/multipath.c
···
                 bio->bi_opf |= REQ_NVME_MPATH;
                 ret = direct_make_request(bio);
         } else if (!list_empty_careful(&head->list)) {
-                dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+                dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
                 spin_lock_irq(&head->requeue_lock);
                 bio_list_add(&head->requeue_list, bio);
+1 -1
drivers/nvme/host/nvme.h
···
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT         2000
+#define NVME_QUIRK_DELAY_AMOUNT         2300
 
 enum nvme_ctrl_state {
         NVME_CTRL_NEW,
+14 -3
drivers/nvme/host/pci.c
···
                 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
                 dev->host_mem_descs, dev->host_mem_descs_dma);
         dev->host_mem_descs = NULL;
+        dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
···
         if (!bufs)
                 goto out_free_descs;
 
-        for (size = 0; size < preferred; size += len) {
+        for (size = 0; size < preferred && i < max_entries; size += len) {
                 dma_addr_t dma_addr;
 
                 len = min_t(u64, chunk_size, preferred - size);
···
         return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
         if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
                 /*
···
                     (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
                      dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
                         return NVME_QUIRK_NO_DEEPEST_PS;
+        } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+                /*
+                 * Samsung SSD 960 EVO drops off the PCIe bus after system
+                 * suspend on a Ryzen board, ASUS PRIME B350M-A.
+                 */
+                if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+                    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+                        return NVME_QUIRK_NO_APST;
         }
 
         return 0;
···
         if (result)
                 goto unmap;
 
-        quirks |= check_dell_samsung_bug(pdev);
+        quirks |= check_vendor_combination_bug(pdev);
 
         result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                         quirks);
···
         { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
         { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
+                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+        { PCI_DEVICE(0x1c58, 0x0023),   /* WDC SN200 adapter */
                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
         { PCI_DEVICE(0x1c5f, 0x0540),   /* Memblaze Pblaze4 adapter */
                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
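The host-memory-buffer fix above is a plain loop-bound hardening: the descriptor-filling loop must stop on whichever budget runs out first, the preferred byte count or the number of descriptor slots, otherwise a retry with a smaller max_entries could index past the descriptor array. A minimal sketch of the double-bounded loop, with hypothetical sizes:

#include <stdio.h>
#include <stdint.h>

#define MAX_ENTRIES 4   /* hypothetical descriptor slot budget */

int main(void)
{
        uint64_t preferred = 10 * 4096, chunk_size = 4096, len = 0;
        uint64_t size;
        int i = 0;

        /* Stop on either budget; without i < MAX_ENTRIES this would
         * write descriptors 4..9 past the end of the array. */
        for (size = 0; size < preferred && i < MAX_ENTRIES; size += len) {
                len = chunk_size < preferred - size ?
                                chunk_size : preferred - size;
                printf("desc[%d] = %llu bytes\n", i++,
                       (unsigned long long)len);
        }
        return 0;
}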
+123 -141
drivers/nvme/host/rdma.c
···
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <rdma/mr_pool.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
···
         struct nvme_request     req;
         struct ib_mr            *mr;
         struct nvme_rdma_qe     sqe;
+        union nvme_result       result;
+        __le16                  status;
+        refcount_t              ref;
         struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
         u32                     num_sge;
         int                     nents;
···
 enum nvme_rdma_queue_flags {
         NVME_RDMA_Q_ALLOCATED           = 0,
         NVME_RDMA_Q_LIVE                = 1,
+        NVME_RDMA_Q_TR_READY            = 2,
 };
 
 struct nvme_rdma_queue {
         struct nvme_rdma_qe     *rsp_ring;
-        atomic_t                sig_count;
         int                     queue_size;
         size_t                  cmnd_capsule_len;
         struct nvme_rdma_ctrl   *ctrl;
···
         return ret;
 }
 
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
-        struct nvme_rdma_ctrl *ctrl = data;
-        struct nvme_rdma_device *dev = ctrl->device;
-        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-        int ret = 0;
-
-        if (WARN_ON_ONCE(!req->mr))
-                return 0;
-
-        ib_dereg_mr(req->mr);
-
-        req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                        ctrl->max_fr_pages);
-        if (IS_ERR(req->mr)) {
-                ret = PTR_ERR(req->mr);
-                req->mr = NULL;
-                goto out;
-        }
-
-        req->mr->need_inval = false;
-
-out:
-        return ret;
-}
-
 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
                 struct request *rq, unsigned int hctx_idx)
 {
···
         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
         struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
         struct nvme_rdma_device *dev = queue->device;
-
-        if (req->mr)
-                ib_dereg_mr(req->mr);
 
         nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
                         DMA_TO_DEVICE);
···
         if (ret)
                 return ret;
 
-        req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
-                        ctrl->max_fr_pages);
-        if (IS_ERR(req->mr)) {
-                ret = PTR_ERR(req->mr);
-                goto out_free_qe;
-        }
-
         req->queue = queue;
 
         return 0;
-
-out_free_qe:
-        nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
-                        DMA_TO_DEVICE);
-        return -ENOMEM;
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
···
 
 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 {
-        struct nvme_rdma_device *dev = queue->device;
-        struct ib_device *ibdev = dev->dev;
+        struct nvme_rdma_device *dev;
+        struct ib_device *ibdev;
 
-        rdma_destroy_qp(queue->cm_id);
+        if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+                return;
+
+        dev = queue->device;
+        ibdev = dev->dev;
+
+        ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+        /*
+         * The cm_id object might have been destroyed during RDMA connection
+         * establishment error flow to avoid getting other cma events, thus
+         * the destruction of the QP shouldn't use rdma_cm API.
+         */
+        ib_destroy_qp(queue->qp);
         ib_free_cq(queue->ib_cq);
 
         nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
                         sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 
         nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+        return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                     ibdev->attrs.max_fast_reg_page_list_len);
 }
···
                 goto out_destroy_qp;
         }
 
+        ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+                              queue->queue_size,
+                              IB_MR_TYPE_MEM_REG,
+                              nvme_rdma_get_max_fr_pages(ibdev));
+        if (ret) {
+                dev_err(queue->ctrl->ctrl.device,
+                        "failed to initialize MR pool sized %d for QID %d\n",
+                        queue->queue_size, idx);
+                goto out_destroy_ring;
+        }
+
+        set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
         return 0;
 
+out_destroy_ring:
+        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+                            sizeof(struct nvme_completion), DMA_FROM_DEVICE);
 out_destroy_qp:
         rdma_destroy_qp(queue->cm_id);
 out_destroy_ib_cq:
···
         queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
         queue->queue_size = queue_size;
-        atomic_set(&queue->sig_count, 0);
 
         queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
                         RDMA_PS_TCP, IB_QPT_RC);
···
 
 out_destroy_cm_id:
         rdma_destroy_id(queue->cm_id);
+        nvme_rdma_destroy_queue_ib(queue);
         return ret;
 }
···
 
         ctrl->device = ctrl->queues[0].device;
 
-        ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-                ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
         if (new) {
                 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
···
                         error = PTR_ERR(ctrl->ctrl.admin_q);
                         goto out_free_tagset;
                 }
-        } else {
-                error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
-                if (error)
-                        goto out_free_queue;
         }
 
         error = nvme_rdma_start_queue(ctrl, 0);
···
                         goto out_free_tag_set;
                 }
         } else {
-                ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
-                if (ret)
-                        goto out_free_io_queues;
-
                 blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                         ctrl->ctrl.queue_count - 1);
         }
···
 
 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-        if (unlikely(wc->status != IB_WC_SUCCESS))
+        struct nvme_rdma_request *req =
+                container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+        struct request *rq = blk_mq_rq_from_pdu(req);
+
+        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+                return;
+        }
+
+        if (refcount_dec_and_test(&req->ref))
+                nvme_end_request(rq, req->status, req->result);
+
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
···
                 .opcode             = IB_WR_LOCAL_INV,
                 .next               = NULL,
                 .num_sge            = 0,
-                .send_flags         = 0,
+                .send_flags         = IB_SEND_SIGNALED,
                 .ex.invalidate_rkey = req->mr->rkey,
         };
 
···
                 struct request *rq)
 {
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
-        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
         struct nvme_rdma_device *dev = queue->device;
         struct ib_device *ibdev = dev->dev;
-        int res;
 
         if (!blk_rq_bytes(rq))
                 return;
 
-        if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
-                res = nvme_rdma_inv_rkey(queue, req);
-                if (unlikely(res < 0)) {
-                        dev_err(ctrl->ctrl.device,
-                                "Queueing INV WR for rkey %#x failed (%d)\n",
-                                req->mr->rkey, res);
-                        nvme_rdma_error_recovery(queue->ctrl);
-                }
+        if (req->mr) {
+                ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+                req->mr = NULL;
         }
 
         ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
···
         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
         int nr;
 
+        req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+        if (WARN_ON_ONCE(!req->mr))
+                return -EAGAIN;
+
         /*
          * Align the MR to a 4K page size to match the ctrl page size and
          * the block virtual boundary.
          */
         nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
         if (unlikely(nr < count)) {
+                ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+                req->mr = NULL;
                 if (nr < 0)
                         return nr;
                 return -EINVAL;
···
         req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
                              IB_ACCESS_REMOTE_READ |
                              IB_ACCESS_REMOTE_WRITE;
-
-        req->mr->need_inval = true;
 
         sg->addr = cpu_to_le64(req->mr->iova);
         put_unaligned_le24(req->mr->length, sg->length);
···
 
         req->num_sge = 1;
         req->inline_data = false;
-        req->mr->need_inval = false;
+        refcount_set(&req->ref, 2); /* send and recv completions */
 
         c->common.flags |= NVME_CMD_SGL_METABUF;
 
···
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-        if (unlikely(wc->status != IB_WC_SUCCESS))
+        struct nvme_rdma_qe *qe =
+                container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+        struct nvme_rdma_request *req =
+                container_of(qe, struct nvme_rdma_request, sqe);
+        struct request *rq = blk_mq_rq_from_pdu(req);
+
+        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                 nvme_rdma_wr_error(cq, wc, "SEND");
-}
+                return;
+        }
 
-/*
- * We want to signal completion at least every queue depth/2. This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
-        int limit = 1 << ilog2((queue->queue_size + 1) / 2);
-
-        return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+        if (refcount_dec_and_test(&req->ref))
+                nvme_end_request(rq, req->status, req->result);
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
-                struct ib_send_wr *first, bool flush)
+                struct ib_send_wr *first)
 {
         struct ib_send_wr wr, *bad_wr;
         int ret;
···
         sge->length = sizeof(struct nvme_command),
         sge->lkey   = queue->device->pd->local_dma_lkey;
 
-        qe->cqe.done = nvme_rdma_send_done;
-
         wr.next       = NULL;
         wr.wr_cqe     = &qe->cqe;
         wr.sg_list    = sge;
         wr.num_sge    = num_sge;
         wr.opcode     = IB_WR_SEND;
-        wr.send_flags = 0;
-
-        /*
-         * Unsignalled send completions are another giant desaster in the
-         * IB Verbs spec: If we don't regularly post signalled sends
-         * the send queue will fill up and only a QP reset will rescue us.
-         * Would have been way to obvious to handle this in hardware or
-         * at least the RDMA stack..
-         *
-         * Always signal the flushes. The magic request used for the flush
-         * sequencer is not allocated in our driver's tagset and it's
-         * triggered to be freed by blk_cleanup_queue(). So we need to
-         * always mark it as signaled to ensure that the "wr_cqe", which is
-         * embedded in request's payload, is not freed when __ib_process_cq()
-         * calls wr_cqe->done().
-         */
-        if (nvme_rdma_queue_sig_limit(queue) || flush)
-                wr.send_flags |= IB_SEND_SIGNALED;
+        wr.send_flags = IB_SEND_SIGNALED;
 
         if (first)
                 first->next = &wr;
···
         return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+        if (unlikely(wc->status != IB_WC_SUCCESS))
+                nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
···
         cmd->common.flags |= NVME_CMD_SGL_METABUF;
         nvme_rdma_set_sg_null(cmd);
 
+        sqe->cqe.done = nvme_rdma_async_done;
+
         ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
                         DMA_TO_DEVICE);
 
-        ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+        ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
         WARN_ON_ONCE(ret);
 }
···
         }
         req = blk_mq_rq_to_pdu(rq);
 
-        if (rq->tag == tag)
-                ret = 1;
+        req->status = cqe->status;
+        req->result = cqe->result;
 
-        if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-            wc->ex.invalidate_rkey == req->mr->rkey)
-                req->mr->need_inval = false;
+        if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+                if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+                        dev_err(queue->ctrl->ctrl.device,
+                                "Bogus remote invalidation for rkey %#x\n",
+                                req->mr->rkey);
+                        nvme_rdma_error_recovery(queue->ctrl);
+                }
+        } else if (req->mr) {
+                ret = nvme_rdma_inv_rkey(queue, req);
+                if (unlikely(ret < 0)) {
+                        dev_err(queue->ctrl->ctrl.device,
+                                "Queueing INV WR for rkey %#x failed (%d)\n",
+                                req->mr->rkey, ret);
+                        nvme_rdma_error_recovery(queue->ctrl);
+                }
+                /* the local invalidation completion will end the request */
+                return 0;
+        }
 
-        nvme_end_request(rq, cqe->status, cqe->result);
+        if (refcount_dec_and_test(&req->ref)) {
+                if (rq->tag == tag)
+                        ret = 1;
+                nvme_end_request(rq, req->status, req->result);
+        }
+
         return ret;
 }
···
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
-        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-                struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-                if (!blk_rq_is_passthrough(rq) ||
-                    cmd->common.opcode != nvme_fabrics_command ||
-                    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-                        /*
-                         * reconnecting state means transport disruption, which
-                         * can take a long time and even might fail permanently,
-                         * fail fast to give upper layers a chance to failover.
-                         * deleting state means that the ctrl will never accept
-                         * commands again, fail it permanently.
-                         */
-                        if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-                            queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-                                nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-                                return BLK_STS_IOERR;
-                        }
-                        return BLK_STS_RESOURCE; /* try again later */
-                }
-        }
-
-        return 0;
+        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+                return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+        return BLK_STS_OK;
 }
···
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_rdma_qe *sqe = &req->sqe;
         struct nvme_command *c = sqe->data;
-        bool flush = false;
         struct ib_device *dev;
         blk_status_t ret;
         int err;
 
         WARN_ON_ONCE(rq->tag < 0);
 
-        ret = nvme_rdma_queue_is_ready(queue, rq);
+        ret = nvme_rdma_is_ready(queue, rq);
         if (unlikely(ret))
                 return ret;
···
                 goto err;
         }
 
+        sqe->cqe.done = nvme_rdma_send_done;
+
         ib_dma_sync_single_for_device(dev, sqe->dma,
                         sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-        if (req_op(rq) == REQ_OP_FLUSH)
-                flush = true;
         err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                        req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+                        req->mr ? &req->reg_wr.wr : NULL);
         if (unlikely(err)) {
                 nvme_rdma_unmap_data(queue, rq);
                 goto err;
···
         .submit_async_event     = nvme_rdma_submit_async_event,
         .delete_ctrl            = nvme_rdma_delete_ctrl,
         .get_address            = nvmf_get_address,
-        .reinit_request         = nvme_rdma_reinit_request,
 };
 
 static inline bool
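The core of the rdma rework is the completion refcount: each request starts with one reference for the send completion and one for the receive (or local-invalidation) completion, and whichever handler drops the last reference ends the request, so neither ordering can complete it early. A minimal userspace sketch of that pattern using C11 atomics (hypothetical names, not the kernel refcount_t API):

#include <stdatomic.h>
#include <stdio.h>

struct request { atomic_int ref; };

/* Whichever completion drops the last reference ends the request. */
static void complete_one(struct request *rq, const char *who)
{
        if (atomic_fetch_sub(&rq->ref, 1) == 1)
                printf("%s completion ended the request\n", who);
}

int main(void)
{
        struct request rq;

        atomic_init(&rq.ref, 2);        /* send and recv completions */
        complete_one(&rq, "send");      /* either order works */
        complete_one(&rq, "recv");
        return 0;
}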
+6 -3
drivers/nvme/target/fc.c
···
 
         tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+        /* release the queue lookup reference on the completed IO */
+        nvmet_fc_tgt_q_put(queue);
+
         spin_lock_irqsave(&queue->qlock, flags);
         deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
                                 struct nvmet_fc_defer_fcp_req, req_list);
         if (!deferfcp) {
                 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
                 spin_unlock_irqrestore(&queue->qlock, flags);
-
-                /* Release reference taken at queue lookup and fod allocation */
-                nvmet_fc_tgt_q_put(queue);
                 return;
         }
 
···
 
         tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
                         deferfcp->fcp_req);
+
+        /* release the queue lookup reference */
+        nvmet_fc_tgt_q_put(queue);
 
         kfree(deferfcp);
 
+24 -1
drivers/nvme/target/loop.c
···
         return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+        NVME_LOOP_Q_LIVE = 0,
+};
+
 struct nvme_loop_queue {
         struct nvmet_cq         nvme_cq;
         struct nvmet_sq         nvme_sq;
         struct nvme_loop_ctrl   *ctrl;
+        unsigned long           flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
···
         return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+                struct request *rq)
+{
+        if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+                return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+        return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
···
         struct request *req = bd->rq;
         struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
         blk_status_t ret;
+
+        ret = nvme_loop_is_ready(queue, req);
+        if (unlikely(ret))
+                return ret;
 
         ret = nvme_setup_cmd(ns, req, &iod->cmd);
         if (ret)
···
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+        clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
         nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
         blk_cleanup_queue(ctrl->ctrl.admin_q);
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
···
 {
         int i;
 
-        for (i = 1; i < ctrl->ctrl.queue_count; i++)
+        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+        }
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
···
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
                         return ret;
+                set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
         }
 
         return 0;
···
         error = nvmf_connect_admin_queue(&ctrl->ctrl);
         if (error)
                 goto out_cleanup_queue;
+
+        set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 
         error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
         if (error) {
+1 -1
include/uapi/linux/bcache.h
···
 
 #define PTR_CHECK_DEV                   ((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)                                           \
+#define MAKE_PTR(gen, offset, dev)                                      \
         ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
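Per the commit list above ("bcache: Fix building error on MIPS"), the rename exists because arch headers on MIPS also define a PTR macro that collided with bcache's. The macro itself only packs the three key-pointer fields into one __u64: dev above bit 51, the offset starting at bit 8, and gen in the low byte. A quick userspace check of that packing:

#include <stdio.h>
#include <stdint.h>

#define MAKE_PTR(gen, offset, dev) \
        ((((uint64_t)(dev)) << 51) | ((uint64_t)(offset)) << 8 | (gen))

int main(void)
{
        uint64_t p = MAKE_PTR(0x5, 0x1234, 3);

        /* Unpack: gen = bits 0-7, offset = bits 8-50, dev = bits 51+. */
        printf("gen=%llu offset=%llu dev=%llu\n",
               (unsigned long long)(p & 0xff),
               (unsigned long long)((p >> 8) & ((1ULL << 43) - 1)),
               (unsigned long long)(p >> 51));
        return 0;
}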
+12 -18
kernel/trace/blktrace.c
···
                 return ret;
 
         if (copy_to_user(arg, &buts, sizeof(buts))) {
-                blk_trace_remove(q);
+                __blk_trace_remove(q);
                 return -EFAULT;
         }
         return 0;
···
                 return ret;
 
         if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-                blk_trace_remove(q);
+                __blk_trace_remove(q);
                 return -EFAULT;
         }
 
···
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                              u32 what, int error, union kernfs_node_id *cgid)
+                              u32 what, int error)
 {
         struct blk_trace *bt = q->blk_trace;
 
···
                 return;
 
         __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                        bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+                        bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+                        blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
                                      struct request_queue *q, struct bio *bio)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-                          blk_trace_bio_get_cgid(q, bio));
+        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
                                        struct request_queue *q, struct bio *bio,
                                        int error)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-                          blk_trace_bio_get_cgid(q, bio));
+        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
···
                                         struct request *rq,
                                         struct bio *bio)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-                          blk_trace_bio_get_cgid(q, bio));
+        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
···
                                          struct request *rq,
                                          struct bio *bio)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-                          blk_trace_bio_get_cgid(q, bio));
+        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
                                     struct request_queue *q, struct bio *bio)
 {
-        blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-                          blk_trace_bio_get_cgid(q, bio));
+        blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
···
                                 struct bio *bio, int rw)
 {
         if (bio)
-                blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-                                  blk_trace_bio_get_cgid(q, bio));
+                blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
         else {
                 struct blk_trace *bt = q->blk_trace;
 
···
                                   struct bio *bio, int rw)
 {
         if (bio)
-                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-                                  blk_trace_bio_get_cgid(q, bio));
+                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
         else {
                 struct blk_trace *bt = q->blk_trace;
 
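The __blk_trace_remove() calls are the locked-wrapper/unlocked-helper split behind the "trace mutex deadlock" fix above: the setup paths already hold blk_trace_mutex when copy_to_user() fails, so calling the locking blk_trace_remove() there would self-deadlock; the __-prefixed variant assumes the mutex is held. A minimal pthread sketch of the idiom (hypothetical names):

#include <pthread.h>

static pthread_mutex_t trace_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold trace_mutex. */
static void __trace_remove(void)
{
        /* ... tear down state protected by trace_mutex ... */
}

/* Locking wrapper for callers that do not hold the mutex. */
static void trace_remove(void)
{
        pthread_mutex_lock(&trace_mutex);
        __trace_remove();
        pthread_mutex_unlock(&trace_mutex);
}

static int trace_setup(int fail)
{
        pthread_mutex_lock(&trace_mutex);
        if (fail) {
                /* Calling trace_remove() here would self-deadlock on the
                 * non-recursive mutex; the unlocked helper is required. */
                __trace_remove();
                pthread_mutex_unlock(&trace_mutex);
                return -1;
        }
        pthread_mutex_unlock(&trace_mutex);
        return 0;
}

int main(void)
{
        trace_setup(1);
        trace_remove();
        return 0;
}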
+19 -3
mm/backing-dev.c
···
         .release        = single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+        if (!bdi_debug_root)
+                return -ENOMEM;
+
         bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+        if (!bdi->debug_dir)
+                return -ENOMEM;
+
         bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                                bdi, &bdi_debug_stats_fops);
+        if (!bdi->debug_stats) {
+                debugfs_remove(bdi->debug_dir);
+                return -ENOMEM;
+        }
+
+        return 0;
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
···
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
                                       const char *name)
 {
+        return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
···
         if (IS_ERR(dev))
                 return PTR_ERR(dev);
 
+        if (bdi_debug_register(bdi, dev_name(dev))) {
+                device_destroy(bdi_class, dev->devt);
+                return -ENOMEM;
+        }
         cgwb_bdi_register(bdi);
         bdi->dev = dev;
 
-        bdi_debug_register(bdi, dev_name(dev));
         set_bit(WB_registered, &bdi->wb.state);
 
         spin_lock_bh(&bdi_lock);
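The bdi change treats debugfs failure as fatal and unwinds in reverse order: the debugfs files are created right after the device, and if they cannot be, the freshly created device is destroyed before returning -ENOMEM (the stub for builds without debugfs returns 0 so callers stay unconditional). A minimal sketch of that staged-init-with-unwind shape, with hypothetical stages standing in for device and debugfs creation:

#include <stdlib.h>

/* Hypothetical two-stage registration: undo stage 1 if stage 2 fails. */
static int register_bdi_like(void)
{
        void *dev = malloc(64);            /* stage 1: create the device */
        if (!dev)
                return -1;

        void *debug = malloc(64);          /* stage 2: debugfs files */
        if (!debug) {
                free(dev);                 /* unwind stage 1 */
                return -1;
        }

        /* ... both stages live; later teardown frees in reverse ... */
        free(debug);
        free(dev);
        return 0;
}

int main(void)
{
        return register_bdi_like() ? 1 : 0;
}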