/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
						dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
						dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
						unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved on release to bm */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);

int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
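
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how nvm_set_rqd_ppalist() expands one virtual-block PPA into per-plane
 * PPAs. With vblk set and a dual-plane device (NVM_PLANE_DOUBLE), a single
 * generic address is unfolded into one entry per plane, laid out
 * plane-major in rqd->ppa_list. The block number below is hypothetical.
 *
 *	struct ppa_addr ppa = { .ppa = 0 };
 *	struct nvm_rq rqd;
 *
 *	ppa.g.ch = 0;
 *	ppa.g.lun = 0;
 *	ppa.g.blk = 42;		// hypothetical block within the lun
 *
 *	memset(&rqd, 0, sizeof(rqd));
 *	if (!nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1, 1)) {
 *		// For NVM_PLANE_DOUBLE: rqd.nr_ppas == 2, with
 *		// ppa_list[0].g.pl == 0 and ppa_list[1].g.pl == 1.
 *		nvm_free_rqd_ppalist(dev, &rqd);
 *	}
 */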

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}

/**
 * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
 * take care to free the ppa list if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);

/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 * as single, dual, quad plane PPAs depending on device type.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size is
 * returned.
 *
 * If any of the plane states is bad or grown bad, the virtual block is
 * marked bad. Otherwise, the first plane's state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous increment value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;

	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);
	nvm_free(dev);

	pr_info("nvm: successfully unloaded\n");
}

int nvm_register(struct request_queue *q, char *disk_name,
						struct nvm_dev_ops *ops)
{
	struct nvm_dev *dev;
	int ret;

	if (!ops->identity)
		return -EINVAL;

	dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	dev->ops = ops;
	strncpy(dev->name, disk_name, DISK_NAME_LEN);

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	kfree(dev);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(char *disk_name)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(disk_name);
	if (!dev) {
		pr_err("nvm: could not find device %s to unregister\n",
								disk_name);
		up_write(&nvm_lock);
		return;
	}

	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
	kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	ret = sscanf(val, "%c %32s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}


/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);

#endif /* CONFIG_NVM_DEBUG */

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);

static int __init nvm_mod_init(void)
{
	int ret;

	ret = misc_register(&_nvm_misc);
	if (ret)
		pr_err("nvm: misc_register failed for control device");

	return ret;
}

static void __exit nvm_mod_exit(void)
{
	misc_deregister(&_nvm_misc);
}

MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
module_init(nvm_mod_init);
module_exit(nvm_mod_exit);