Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

null_blk: remove lightnvm support

Now that rrpc is being removed, the null_blk lightnvm support is no longer
functional. Remove the lightnvm implementation; it may be reintroduced as a
separate module in the future if someone takes on the challenge.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Matias Bjørling; committed by Jens Axboe.
74ede5af 913a9500

+3 -217
drivers/block/null_blk.c
··· 12 12 #include <linux/slab.h> 13 13 #include <linux/blk-mq.h> 14 14 #include <linux/hrtimer.h> 15 - #include <linux/lightnvm.h> 16 15 #include <linux/configfs.h> 17 16 #include <linux/badblocks.h> 18 17 ··· 106 107 unsigned int hw_queue_depth; /* queue depth */ 107 108 unsigned int index; /* index of the disk, only valid with a disk */ 108 109 unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */ 109 - bool use_lightnvm; /* register as a LightNVM device */ 110 110 bool blocking; /* blocking blk-mq device */ 111 111 bool use_per_node_hctx; /* use per-node allocation for hardware context */ 112 112 bool power; /* power on/off the device */ ··· 119 121 unsigned int index; 120 122 struct request_queue *q; 121 123 struct gendisk *disk; 122 - struct nvm_dev *ndev; 123 124 struct blk_mq_tag_set *tag_set; 124 125 struct blk_mq_tag_set __tag_set; 125 126 unsigned int queue_depth; ··· 136 139 static struct mutex lock; 137 140 static int null_major; 138 141 static DEFINE_IDA(nullb_indexes); 139 - static struct kmem_cache *ppa_cache; 140 142 static struct blk_mq_tag_set tag_set; 141 143 142 144 enum { ··· 203 207 static int nr_devices = 1; 204 208 module_param(nr_devices, int, S_IRUGO); 205 209 MODULE_PARM_DESC(nr_devices, "Number of devices to register"); 206 - 207 - static bool g_use_lightnvm; 208 - module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO); 209 - MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); 210 210 211 211 static bool g_blocking; 212 212 module_param_named(blocking, g_blocking, bool, S_IRUGO); ··· 337 345 NULLB_DEVICE_ATTR(irqmode, uint); 338 346 NULLB_DEVICE_ATTR(hw_queue_depth, uint); 339 347 NULLB_DEVICE_ATTR(index, uint); 340 - NULLB_DEVICE_ATTR(use_lightnvm, bool); 341 348 NULLB_DEVICE_ATTR(blocking, bool); 342 349 NULLB_DEVICE_ATTR(use_per_node_hctx, bool); 343 350 NULLB_DEVICE_ATTR(memory_backed, bool); ··· 446 455 &nullb_device_attr_irqmode, 447 456 &nullb_device_attr_hw_queue_depth, 448 457 &nullb_device_attr_index, 
449 - &nullb_device_attr_use_lightnvm, 450 458 &nullb_device_attr_blocking, 451 459 &nullb_device_attr_use_per_node_hctx, 452 460 &nullb_device_attr_power, ··· 563 573 dev->blocksize = g_bs; 564 574 dev->irqmode = g_irqmode; 565 575 dev->hw_queue_depth = g_hw_queue_depth; 566 - dev->use_lightnvm = g_use_lightnvm; 567 576 dev->blocking = g_blocking; 568 577 dev->use_per_node_hctx = g_use_per_node_hctx; 569 578 return dev; ··· 1412 1423 kfree(nullb->queues); 1413 1424 } 1414 1425 1415 - #ifdef CONFIG_NVM 1416 - 1417 - static void null_lnvm_end_io(struct request *rq, blk_status_t status) 1418 - { 1419 - struct nvm_rq *rqd = rq->end_io_data; 1420 - 1421 - /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */ 1422 - rqd->error = status ? -EIO : 0; 1423 - nvm_end_io(rqd); 1424 - 1425 - blk_put_request(rq); 1426 - } 1427 - 1428 - static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) 1429 - { 1430 - struct request_queue *q = dev->q; 1431 - struct request *rq; 1432 - struct bio *bio = rqd->bio; 1433 - 1434 - rq = blk_mq_alloc_request(q, 1435 - op_is_write(bio_op(bio)) ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 1436 - if (IS_ERR(rq)) 1437 - return -ENOMEM; 1438 - 1439 - blk_init_request_from_bio(rq, bio); 1440 - 1441 - rq->end_io_data = rqd; 1442 - 1443 - blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); 1444 - 1445 - return 0; 1446 - } 1447 - 1448 - static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) 1449 - { 1450 - struct nullb *nullb = dev->q->queuedata; 1451 - sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL; 1452 - sector_t blksize; 1453 - struct nvm_id_group *grp; 1454 - 1455 - id->ver_id = 0x1; 1456 - id->vmnt = 0; 1457 - id->cap = 0x2; 1458 - id->dom = 0x1; 1459 - 1460 - id->ppaf.blk_offset = 0; 1461 - id->ppaf.blk_len = 16; 1462 - id->ppaf.pg_offset = 16; 1463 - id->ppaf.pg_len = 16; 1464 - id->ppaf.sect_offset = 32; 1465 - id->ppaf.sect_len = 8; 1466 - id->ppaf.pln_offset = 40; 1467 - id->ppaf.pln_len = 8; 1468 - id->ppaf.lun_offset = 48; 1469 - id->ppaf.lun_len = 8; 1470 - id->ppaf.ch_offset = 56; 1471 - id->ppaf.ch_len = 8; 1472 - 1473 - sector_div(size, nullb->dev->blocksize); /* convert size to pages */ 1474 - size >>= 8; /* concert size to pgs pr blk */ 1475 - grp = &id->grp; 1476 - grp->mtype = 0; 1477 - grp->fmtype = 0; 1478 - grp->num_ch = 1; 1479 - grp->num_pg = 256; 1480 - blksize = size; 1481 - size >>= 16; 1482 - grp->num_lun = size + 1; 1483 - sector_div(blksize, grp->num_lun); 1484 - grp->num_blk = blksize; 1485 - grp->num_pln = 1; 1486 - 1487 - grp->fpg_sz = nullb->dev->blocksize; 1488 - grp->csecs = nullb->dev->blocksize; 1489 - grp->trdt = 25000; 1490 - grp->trdm = 25000; 1491 - grp->tprt = 500000; 1492 - grp->tprm = 500000; 1493 - grp->tbet = 1500000; 1494 - grp->tbem = 1500000; 1495 - grp->mpos = 0x010101; /* single plane rwe */ 1496 - grp->cpar = nullb->dev->hw_queue_depth; 1497 - 1498 - return 0; 1499 - } 1500 - 1501 - static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name) 1502 - { 1503 - mempool_t *virtmem_pool; 1504 - 1505 - virtmem_pool = 
mempool_create_slab_pool(64, ppa_cache); 1506 - if (!virtmem_pool) { 1507 - pr_err("null_blk: Unable to create virtual memory pool\n"); 1508 - return NULL; 1509 - } 1510 - 1511 - return virtmem_pool; 1512 - } 1513 - 1514 - static void null_lnvm_destroy_dma_pool(void *pool) 1515 - { 1516 - mempool_destroy(pool); 1517 - } 1518 - 1519 - static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool, 1520 - gfp_t mem_flags, dma_addr_t *dma_handler) 1521 - { 1522 - return mempool_alloc(pool, mem_flags); 1523 - } 1524 - 1525 - static void null_lnvm_dev_dma_free(void *pool, void *entry, 1526 - dma_addr_t dma_handler) 1527 - { 1528 - mempool_free(entry, pool); 1529 - } 1530 - 1531 - static struct nvm_dev_ops null_lnvm_dev_ops = { 1532 - .identity = null_lnvm_id, 1533 - .submit_io = null_lnvm_submit_io, 1534 - 1535 - .create_dma_pool = null_lnvm_create_dma_pool, 1536 - .destroy_dma_pool = null_lnvm_destroy_dma_pool, 1537 - .dev_dma_alloc = null_lnvm_dev_dma_alloc, 1538 - .dev_dma_free = null_lnvm_dev_dma_free, 1539 - 1540 - /* Simulate nvme protocol restriction */ 1541 - .max_phys_sect = 64, 1542 - }; 1543 - 1544 - static int null_nvm_register(struct nullb *nullb) 1545 - { 1546 - struct nvm_dev *dev; 1547 - int rv; 1548 - 1549 - dev = nvm_alloc_dev(0); 1550 - if (!dev) 1551 - return -ENOMEM; 1552 - 1553 - dev->q = nullb->q; 1554 - memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN); 1555 - dev->ops = &null_lnvm_dev_ops; 1556 - 1557 - rv = nvm_register(dev); 1558 - if (rv) { 1559 - kfree(dev); 1560 - return rv; 1561 - } 1562 - nullb->ndev = dev; 1563 - return 0; 1564 - } 1565 - 1566 - static void null_nvm_unregister(struct nullb *nullb) 1567 - { 1568 - nvm_unregister(nullb->ndev); 1569 - } 1570 - #else 1571 - static int null_nvm_register(struct nullb *nullb) 1572 - { 1573 - pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n"); 1574 - return -EINVAL; 1575 - } 1576 - static void null_nvm_unregister(struct nullb *nullb) {} 1577 - #endif /* CONFIG_NVM */ 1578 - 
1579 1426 static void null_del_dev(struct nullb *nullb) 1580 1427 { 1581 1428 struct nullb_device *dev = nullb->dev; ··· 1420 1595 1421 1596 list_del_init(&nullb->list); 1422 1597 1423 - if (dev->use_lightnvm) 1424 - null_nvm_unregister(nullb); 1425 - else 1426 - del_gendisk(nullb->disk); 1598 + del_gendisk(nullb->disk); 1427 1599 1428 1600 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { 1429 1601 hrtimer_cancel(&nullb->bw_timer); ··· 1432 1610 if (dev->queue_mode == NULL_Q_MQ && 1433 1611 nullb->tag_set == &nullb->__tag_set) 1434 1612 blk_mq_free_tag_set(nullb->tag_set); 1435 - if (!dev->use_lightnvm) 1436 - put_disk(nullb->disk); 1613 + put_disk(nullb->disk); 1437 1614 cleanup_queues(nullb); 1438 1615 if (null_cache_active(nullb)) 1439 1616 null_free_device_storage(nullb->dev, true); ··· 1596 1775 { 1597 1776 dev->blocksize = round_down(dev->blocksize, 512); 1598 1777 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); 1599 - if (dev->use_lightnvm && dev->blocksize != 4096) 1600 - dev->blocksize = 4096; 1601 - 1602 - if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ) 1603 - dev->queue_mode = NULL_Q_MQ; 1604 1778 1605 1779 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { 1606 1780 if (dev->submit_queues != nr_online_nodes) ··· 1711 1895 1712 1896 sprintf(nullb->disk_name, "nullb%d", nullb->index); 1713 1897 1714 - if (dev->use_lightnvm) 1715 - rv = null_nvm_register(nullb); 1716 - else 1717 - rv = null_gendisk_register(nullb); 1718 - 1898 + rv = null_gendisk_register(nullb); 1719 1899 if (rv) 1720 1900 goto out_cleanup_blk_queue; 1721 1901 ··· 1750 1938 g_bs = PAGE_SIZE; 1751 1939 } 1752 1940 1753 - if (g_use_lightnvm && g_bs != 4096) { 1754 - pr_warn("null_blk: LightNVM only supports 4k block size\n"); 1755 - pr_warn("null_blk: defaults block size to 4k\n"); 1756 - g_bs = 4096; 1757 - } 1758 - 1759 - if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) { 1760 - pr_warn("null_blk: LightNVM only supported for blk-mq\n"); 1761 
- pr_warn("null_blk: defaults queue mode to blk-mq\n"); 1762 - g_queue_mode = NULL_Q_MQ; 1763 - } 1764 - 1765 1941 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { 1766 1942 if (g_submit_queues != nr_online_nodes) { 1767 1943 pr_warn("null_blk: submit_queues param is set to %u.\n", ··· 1782 1982 goto err_conf; 1783 1983 } 1784 1984 1785 - if (g_use_lightnvm) { 1786 - ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), 1787 - 0, 0, NULL); 1788 - if (!ppa_cache) { 1789 - pr_err("null_blk: unable to create ppa cache\n"); 1790 - ret = -ENOMEM; 1791 - goto err_ppa; 1792 - } 1793 - } 1794 - 1795 1985 for (i = 0; i < nr_devices; i++) { 1796 1986 dev = null_alloc_dev(); 1797 1987 if (!dev) { ··· 1805 2015 null_del_dev(nullb); 1806 2016 null_free_dev(dev); 1807 2017 } 1808 - kmem_cache_destroy(ppa_cache); 1809 - err_ppa: 1810 2018 unregister_blkdev(null_major, "nullb"); 1811 2019 err_conf: 1812 2020 configfs_unregister_subsystem(&nullb_subsys); ··· 1835 2047 1836 2048 if (g_queue_mode == NULL_Q_MQ && shared_tags) 1837 2049 blk_mq_free_tag_set(&tag_set); 1838 - 1839 - kmem_cache_destroy(ppa_cache); 1840 2050 } 1841 2051 1842 2052 module_init(null_init);