Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.7-rc4 (1480 lines, 34 kB)
/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
								unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	do {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		schedule();
	} while (!rqd);

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}

static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}

static void rrpc_gc_kick(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	unsigned int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		queue_work(rrpc->krqd_wq, &rlun->ws_gc);
	}
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with highest number of invalid pages, will be in the beginning
 * of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return to
 * rrpc->addr_cache when request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio->bi_rw & REQ_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;
		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. As it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address from reverse trans map and check if its valid by
 * comparing the logical to physical address with the physical address.
 * Returns 0 on free, otherwise 1 if in use
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;
	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");