Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.3-rc5 (1852 lines, 44 kB)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit
 * The highest 2 bits of bitmap are for special purpose. LOCK means the cache
 * page is being flushed to storage. FREE means the cache page is freed and
 * should be skipped from flushing to storage. Please see
 * null_make_cache_space
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
Default: 10,000ns"); 175 176static int g_hw_queue_depth = 64; 177module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444); 178MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); 179 180static bool g_use_per_node_hctx; 181module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); 182MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); 183 184static bool g_zoned; 185module_param_named(zoned, g_zoned, bool, S_IRUGO); 186MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false"); 187 188static unsigned long g_zone_size = 256; 189module_param_named(zone_size, g_zone_size, ulong, S_IRUGO); 190MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256"); 191 192static unsigned int g_zone_nr_conv; 193module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444); 194MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0"); 195 196static struct nullb_device *null_alloc_dev(void); 197static void null_free_dev(struct nullb_device *dev); 198static void null_del_dev(struct nullb *nullb); 199static int null_add_dev(struct nullb_device *dev); 200static void null_free_device_storage(struct nullb_device *dev, bool is_cache); 201 202static inline struct nullb_device *to_nullb_device(struct config_item *item) 203{ 204 return item ? container_of(item, struct nullb_device, item) : NULL; 205} 206 207static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) 208{ 209 return snprintf(page, PAGE_SIZE, "%u\n", val); 210} 211 212static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, 213 char *page) 214{ 215 return snprintf(page, PAGE_SIZE, "%lu\n", val); 216} 217 218static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) 219{ 220 return snprintf(page, PAGE_SIZE, "%u\n", val); 221} 222 223static ssize_t nullb_device_uint_attr_store(unsigned int *val, 224 const char *page, size_t count) 225{ 226 unsigned int tmp; 227 int result; 228 229 result = kstrtouint(page, 0, &tmp); 230 if (result) 231 return result; 232 233 *val = tmp; 234 return count; 235} 236 237static ssize_t nullb_device_ulong_attr_store(unsigned long *val, 238 const char *page, size_t count) 239{ 240 int result; 241 unsigned long tmp; 242 243 result = kstrtoul(page, 0, &tmp); 244 if (result) 245 return result; 246 247 *val = tmp; 248 return count; 249} 250 251static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, 252 size_t count) 253{ 254 bool tmp; 255 int result; 256 257 result = kstrtobool(page, &tmp); 258 if (result) 259 return result; 260 261 *val = tmp; 262 return count; 263} 264 265/* The following macro should only be used with TYPE = {uint, ulong, bool}. 
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

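/*
 * The attribute is written as "+<start>-<end>" to mark a sector range bad,
 * or "-<start>-<end>" to clear it again, e.g. "echo +0-1023 > badblocks".
 */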
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

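/*
 * Lock-free tag allocation for the bio path: scan for a clear bit and
 * claim it with test_and_set_bit_lock(), retrying if another caller got
 * there first.
 */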
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

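/*
 * The top two bitmap bits (LOCK/FREE) are flags rather than sector state,
 * so a page counts as empty when no bit below MAP_SZ - 2 is set.
 */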
static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

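/*
 * Look a sector up in a single radix tree. Reads require the sector's
 * bitmap bit to be set (data was actually written); writes accept any
 * existing page.
 */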
static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

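/*
 * Flush cache pages to the data tree until at least @n bytes of cache
 * space are available, scanning the cache radix tree in FREE_BATCH-sized
 * batches.
 */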
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page could unlock before using the c_pages. To
	 * avoid race, we don't allow page free
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found the page which is being flushed to disk by other
		 * threads
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

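/*
 * Copy one segment between a bio/request page and the backing store. FUA
 * writes go straight to the data tree and invalidate any cached copy (see
 * copy_to_nullb).
 */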
static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

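/*
 * Main per-command path: apply bandwidth throttling, check badblocks, do
 * the memory-backed transfer if configured, update zone state, and then
 * complete the command according to the configured irqmode.
 */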
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		enum req_opf op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

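/*
 * No special handling is needed on open/release; report_zones backs the
 * zoned-mode support.
 */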
static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
	.report_zones	= null_zone_report,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0)
			return ret;
	}

	add_disk(disk);
	return 0;
}

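/*
 * Initialize a tag set either for one device or, when called with a NULL
 * nullb, for the global set shared between devices (shared_tags).
 */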
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

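	/*
	 * Advertise a volatile write cache so the block layer issues
	 * FLUSH/FUA, which the cache code above implements.
	 */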
	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("null_blk: invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");