/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.luns_per_chnl) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

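/*
 * Example of the lun-range math in nvm_create_tgt_dev() below (the numbers
 * are hypothetical, not taken from any device): with luns_per_chnl = 4 and
 * a target spanning lun_begin = 4 .. lun_end = 11, nr_luns = 8, bch = 1 and
 * blun = 0, so the range maps onto nr_chnls = 2 virtual channels.
 */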
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
		return -ENOMEM;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

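/*
 * Illustration of the address translation below (the offsets are
 * hypothetical): if a target was created on device channels 2..3 starting
 * at lun 1, ch_map->ch_off is 2 and lun_offs[0] is 1, so a target address
 * (ch 0, lun 0) becomes device address (ch 2, lun 1) in nvm_map_to_dev()
 * and is mapped back again in nvm_map_to_tgt().
 */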
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		   int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.end_io = nvm_end_io_sync;
	rqd.private = &wait;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	ret = nvm_submit_io(tgt_dev, &rqd);
	if (ret) {
		pr_err("rrpr: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}
	wait_for_completion_io(&wait);

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

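/*
 * Sketch of the first-fit search in nvm_get_area() below (the sector numbers
 * are illustrative only): with areas [0, 100) and [100, 300) already on the
 * list and len = 50, the loop walks past both reservations, sets begin = 300
 * and hands [300, 350) back to the caller, provided it fits in max_sectors.
 */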
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

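/*
 * Layout note for nvm_set_rqd_ppalist() below (example values are made up):
 * with plane_mode = NVM_PLANE_DOUBLE and nr_ppas = 2, the ppa list is
 * expanded to four entries in plane-major order, i.e.
 * { p0@pl0, p1@pl0, p0@pl1, p1@pl1 }.
 */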
int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

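/*
 * Worked example for the decode in nvm_init_mlc_tbl() below (the pair bytes
 * are made up): with num_pairs = 4 and pairs[] = { 0x21, 0x13 }, lptbl[]
 * decodes to { 1, 3, 6, 7 } - lptbl[0] = 0x21 & 0xF, then every upper/lower
 * half byte is added as an increment to the previous entry.
 */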
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

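/*
 * Example of the derived geometry computed in nvm_core_init() below (the
 * figures are hypothetical): with sec_per_pg = 4, nr_planes = 4,
 * pgs_per_blk = 512 and blks_per_lun = 1024, the code yields
 * sec_per_pl = 16, sec_per_blk = 8192 and sec_per_lun = 8388608.
 */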
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

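/*
 * Note on defaults in __nvm_configure_create() below: passing
 * lun_begin = lun_end = -1 in the simple ioctl config selects the whole
 * device, i.e. luns 0 .. nr_luns - 1.
 */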
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);