Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.16-rc1, 1259 lines, 28 kB
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.nr_luns) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.nr_luns;
	int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
	int bch = lun_begin / dev->geo.nr_luns;
	int blun = lun_begin % dev->geo.nr_luns;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
							GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.all_luns = nr_luns;
	tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->geo.op = op;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	struct nvm_geo *geo = &dev->geo;

	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF)
		e->op = NVM_TARGET_DEFAULT_OP;

	if (e->op < NVM_TARGET_MIN_OP ||
	    e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
							GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.nr_luns;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
						dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
							dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->plane_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}


int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->nr_chks * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->nr_chks; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->nr_luns = grp->num_lun;

	/* Generic device geometry values */
	geo->ws_min = grp->ws_min;
	geo->ws_opt = grp->ws_opt;
	geo->ws_seq = grp->ws_seq;
	geo->ws_per_chk = grp->ws_per_chk;
	geo->nr_chks = grp->num_chk;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->mccap = grp->mccap;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	geo->sec_per_chk = grp->clba;
	geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
	geo->all_luns = geo->nr_luns * geo->nr_chnls;

	/* 1.2 spec device geometry values */
	geo->plane_mode = 1 << geo->ws_seq;
	geo->nr_planes = geo->ws_opt / geo->ws_min;
	geo->sec_per_pg = geo->ws_min;
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;

	dev->total_secs = geo->all_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->ws_per_chk, geo->nr_chks,
			geo->all_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);