Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.12 891 lines 19 kB view raw
/*
 * null_blk: a memory-less "null" block device driver used for block-layer
 * performance testing.  Every I/O is completed immediately without touching
 * any data, either inline, via softirq, or via an hrtimer, depending on the
 * "irqmode" module parameter.  Three queueing modes are supported (bio-based,
 * legacy request queue, blk-mq), and the device can optionally register as a
 * LightNVM target (CONFIG_NVM).
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

/*
 * Per-command state.  For blk-mq this lives in the request PDU
 * (tag_set.cmd_size); for bio/rq modes it is taken from nq->cmds via the
 * tag_map bitmap.
 */
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;		/* set in rq/mq modes */
	struct bio *bio;		/* set in bio mode */
	unsigned int tag;		/* index into nq->cmds, or -1U */
	struct nullb_queue *nq;		/* owning submission queue */
	struct hrtimer timer;		/* used when irqmode == NULL_IRQ_TIMER */
};

/* One software submission queue; cmds/tag_map are used by bio/rq modes. */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use entries in cmds[] */
	wait_queue_head_t wait;		/* waiters for a free tag */
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

/* One null block device instance. */
struct nullb {
	struct list_head list;		/* linkage on nullb_list */
	unsigned int index;		/* minor / name suffix */
	struct request_queue *q;
	struct gendisk *disk;
	struct nvm_dev *ndev;		/* LightNVM device, if registered */
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;		/* queue_lock for the legacy rq mode */

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);		/* all registered devices */
static struct mutex lock;		/* protects nullb_list and nullb_indexes */
static int null_major;			/* dynamically allocated block major */
static int nullb_indexes;		/* next device index to hand out */
static struct kmem_cache *ppa_cache;	/* slab backing the LightNVM DMA pool */

/* Completion styles selected by the "irqmode" parameter. */
enum {
	NULL_IRQ_NONE		= 0,	/* complete inline in submit context */
	NULL_IRQ_SOFTIRQ	= 1,	/* complete via block softirq */
	NULL_IRQ_TIMER		= 2,	/* complete from an hrtimer */
};

/* Queueing models selected by the "queue_mode" parameter. */
enum {
	NULL_Q_BIO		= 0,	/* make_request (bio based) */
	NULL_Q_RQ		= 1,	/* legacy request queue */
	NULL_Q_MQ		= 2,	/* blk-mq */
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

/*
 * Parse @str as a decimal int and store it in *@val iff it falls inside
 * [@min, @max].  Returns 0 on success, -EINVAL on parse or range failure.
 */
static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

/* module_param set handler: validate queue_mode against the NULL_Q_* range. */
static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static bool blocking;
module_param(blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static int irqmode = NULL_IRQ_SOFTIRQ;

/* module_param set handler: validate irqmode against the NULL_IRQ_* range. */
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

/*
 * Release @tag back to @nq's bitmap and wake anyone sleeping in alloc_cmd()
 * for a free tag.  clear_bit_unlock() provides the release ordering that
 * pairs with test_and_set_bit_lock() in get_tag().
 */
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

/*
 * Claim a free tag from @nq, or -1U if the queue is full.  The find/set
 * loop retries when another CPU races us to the same bit.
 */
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

/* Return a command (and its tag) to the owning queue. */
static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

/*
 * Non-blocking command allocation for bio/rq modes: grab a tag, initialise
 * the command slot (including its hrtimer when in timer irqmode), or return
 * NULL when no tag is available.
 */
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

/*
 * Allocate a command; when @can_wait is set, sleep uninterruptibly until a
 * tag is released (woken by put_tag()).  Returns NULL only when !can_wait
 * and the queue is full.
 */
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

/*
 * Complete @cmd successfully for whichever queue_mode is active.  For
 * blk-mq the PDU is freed by the core, so we return early; for bio/rq
 * modes the tag is released and, in rq mode, a queue stopped by
 * null_rq_prep_fn() is restarted now that a tag is free.
 */
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/* hrtimer callback for NULL_IRQ_TIMER: complete the command, one-shot. */
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

/* Arm the per-command hrtimer to fire completion_nsec from now. */
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

/* Softirq completion handler shared by the rq and blk-mq paths. */
static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

/* Dispatch a command to the configured completion style. */
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode)  {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

/*
 * Map the submitting CPU to one of the device's queues by dividing the CPU
 * id space evenly across nr_queues (used by the bio and rq paths, which
 * have no hctx to hand us a queue).
 */
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

/*
 * make_request entry point for NULL_Q_BIO mode.  alloc_cmd() with
 * can_wait=1 never returns NULL, so no error check is needed.
 */
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

/*
 * Legacy prep_rq_fn: attach a command to @req, or stop the queue and defer
 * when no tag is free (end_cmd() restarts it when a tag is released).
 */
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

/*
 * Legacy request_fn: drain the queue, dropping queue_lock around each
 * command since null_handle_cmd() may sleep (timer arm) or complete inline.
 */
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

/* blk-mq queue_rq: the command state lives in the request's PDU. */
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

/* Initialise the waitqueue and depth of one software queue. */
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

/* blk-mq init_hctx: bind hardware context @index to its nullb_queue. */
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq       = null_queue_rq,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

/* Free the per-queue tag bitmap and command array. */
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

/* Tear down every initialised queue, then the queue array itself. */
static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

/* Request end_io callback: propagate status to LightNVM and drop the rq. */
static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->error = error;
	nvm_end_io(rqd);

	blk_put_request(rq);
}

/*
 * LightNVM submit_io: wrap the nvm_rq's bio in a passthrough request and
 * fire it asynchronously; completion is reported via null_lnvm_end_io().
 */
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q,
		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	blk_init_request_from_bio(rq, bio);

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

/*
 * LightNVM identity: report a synthetic geometry derived from the "gb" and
 * "bs" parameters — 1 channel, 256 pages per block, single plane, with the
 * LUN/block split computed so the total capacity matches the device size.
 */
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert size to pgs per blk */
	grp = &id->grp;
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

/* Create the per-device PPA list pool backed by the global ppa_cache slab. */
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

/*
 * "DMA" alloc/free are plain mempool operations — there is no hardware, so
 * the dma_addr_t is never filled in or used.
 */
static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

/*
 * Register @nullb as a LightNVM device instead of a gendisk.  On success
 * ownership of the nvm_dev passes to nullb->ndev.
 */
static int null_nvm_register(struct nullb *nullb)
{
	struct nvm_dev *dev;
	int rv;

	dev = nvm_alloc_dev(0);
	if (!dev)
		return -ENOMEM;

	dev->q = nullb->q;
	memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
	dev->ops = &null_lnvm_dev_ops;

	rv = nvm_register(dev);
	if (rv) {
		kfree(dev);
		return rv;
	}
	nullb->ndev = dev;
	return 0;
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->ndev);
}
#else
/* CONFIG_NVM disabled: refuse LightNVM registration at runtime. */
static int null_nvm_register(struct nullb *nullb)
{
	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

/*
 * Tear down one device: unlink it, unregister the LightNVM dev or gendisk,
 * release the queue, tag set (mq only) and per-queue resources, then free
 * the nullb itself.  Caller holds the global mutex (list manipulation).
 */
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

/* open/release are no-ops; the device has no state to manage. */
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

/*
 * Allocate the command array and tag bitmap for one queue and mark every
 * command free (tag == -1U).  Returns 0 or -ENOMEM.
 */
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

/* Allocate the (zeroed) queue array; queues are initialised lazily. */
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

/*
 * Initialise all queues for the bio/rq modes (blk-mq does this per-hctx in
 * null_init_hctx()).  On failure, queues initialised so far are cleaned up
 * by the caller via cleanup_queues().
 */
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

/*
 * Allocate the gendisk, size it from the "gb" parameter and register it.
 * Partition scanning is suppressed; the name was set by null_add_dev().
 */
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

/*
 * Create and register one device: allocate the nullb, set up the software
 * queues, build the request queue for the selected queue_mode, configure
 * block sizes/flags, then register either a LightNVM device or a gendisk.
 * Errors unwind through the goto chain in reverse order of acquisition.
 */
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		if (blocking)
			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

/*
 * Module init: sanitise the module parameters (block size, LightNVM
 * constraints, submit_queues bounds), register the block major, create the
 * LightNVM PPA slab if needed, then instantiate nr_devices devices.
 */
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

/* Module exit: unregister the major, destroy all devices and the slab. */
static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");