// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							    dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
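/*
 * Carve the LUN range [lun_begin, lun_end] out of the parent device and
 * build the target device together with the map used to translate target
 * addresses to device addresses. Returns NULL on allocation failure.
 */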
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
			dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
				dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}
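/*
 * In the extended ioctl, 0xFFFF for both lun_begin and lun_end selects the
 * whole device, and 0xFFFF for op selects the target's default
 * over-provisioning ratio.
 */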
static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue(tt->make_rq, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
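	/* Error path: unwind in the reverse order of the setup above. */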
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}
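/*
 * Translate a ppa list between the target's virtual address space and the
 * parent device's address space, in both the generic and the
 * device-specific formats.
 */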
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
				       dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
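/*
 * Compute the 1.2-specific command flags (plane access mode, scrambling,
 * suspend) for a request; 2.0 devices take no flags, so 0 is returned.
 */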
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}
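/*
 * Read the first sector of the page at @ppa to probe its state. Returns 0
 * if valid data was read, a positive device status (e.g. empty page)
 * otherwise, or a negative errno on submission failure.
 */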
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	/* free the page on the error path as well */
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * An empty page means the chunk is free; recoverable
		 * CRC/ECC statuses mean the chunk is open and must be
		 * scanned. Anything else is an actual io error.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
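/*
 * Emulate a 2.0-style chunk report on a 1.2 device: walk the bad block
 * table of every LUN from @slba onwards and convert each entry to chunk
 * metadata.
 */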
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
					      meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
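/*
 * Allocate the device-wide LUN bitmap and register the reverse map used
 * to translate device addresses back into target addresses.
 */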
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}
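/*
 * Userspace control interface, exposed through the lightnvm/control misc
 * device registered at the bottom of this file.
 */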
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);