Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lightnvm: pblk: control I/O flow also on tear down

When removing a pblk instance, control the write I/O flow to the
controller as we do in the fast path.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Javier González and committed by Jens Axboe.
3eaa11e2 0e2ff113

+78 -35
+48 -13
drivers/lightnvm/pblk-core.c
··· 1670 1670 queue_work(wq, &line_ws->ws); 1671 1671 } 1672 1672 1673 - void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, 1674 - unsigned long *lun_bitmap) 1673 + static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, 1674 + int nr_ppas, int pos) 1675 1675 { 1676 - struct nvm_tgt_dev *dev = pblk->dev; 1677 - struct nvm_geo *geo = &dev->geo; 1678 - struct pblk_lun *rlun; 1679 - int pos = pblk_ppa_to_pos(geo, ppa_list[0]); 1676 + struct pblk_lun *rlun = &pblk->luns[pos]; 1680 1677 int ret; 1681 1678 1682 1679 /* ··· 1687 1690 WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || 1688 1691 ppa_list[0].g.ch != ppa_list[i].g.ch); 1689 1692 #endif 1690 - /* If the LUN has been locked for this same request, do no attempt to 1691 - * lock it again 1692 - */ 1693 - if (test_and_set_bit(pos, lun_bitmap)) 1694 - return; 1695 1693 1696 - rlun = &pblk->luns[pos]; 1697 - ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000)); 1694 + ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)); 1698 1695 if (ret) { 1699 1696 switch (ret) { 1700 1697 case -ETIME: ··· 1699 1708 break; 1700 1709 } 1701 1710 } 1711 + } 1712 + 1713 + void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) 1714 + { 1715 + struct nvm_tgt_dev *dev = pblk->dev; 1716 + struct nvm_geo *geo = &dev->geo; 1717 + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); 1718 + 1719 + __pblk_down_page(pblk, ppa_list, nr_ppas, pos); 1720 + } 1721 + 1722 + void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, 1723 + unsigned long *lun_bitmap) 1724 + { 1725 + struct nvm_tgt_dev *dev = pblk->dev; 1726 + struct nvm_geo *geo = &dev->geo; 1727 + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); 1728 + 1729 + /* If the LUN has been locked for this same request, do no attempt to 1730 + * lock it again 1731 + */ 1732 + if (test_and_set_bit(pos, lun_bitmap)) 1733 + return; 1734 + 1735 + __pblk_down_page(pblk, ppa_list, nr_ppas, pos); 1736 + } 1737 + 
1738 + void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) 1739 + { 1740 + struct nvm_tgt_dev *dev = pblk->dev; 1741 + struct nvm_geo *geo = &dev->geo; 1742 + struct pblk_lun *rlun; 1743 + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); 1744 + 1745 + #ifdef CONFIG_NVM_DEBUG 1746 + int i; 1747 + 1748 + for (i = 1; i < nr_ppas; i++) 1749 + WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || 1750 + ppa_list[0].g.ch != ppa_list[i].g.ch); 1751 + #endif 1752 + 1753 + rlun = &pblk->luns[pos]; 1754 + up(&rlun->wr_sem); 1702 1755 } 1703 1756 1704 1757 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+21 -10
drivers/lightnvm/pblk-recovery.c
··· 340 340 struct pblk *pblk = pad_rq->pblk; 341 341 struct nvm_tgt_dev *dev = pblk->dev; 342 342 343 - kref_put(&pad_rq->ref, pblk_recov_complete); 343 + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); 344 + 345 + bio_put(rqd->bio); 344 346 nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); 345 347 pblk_free_rqd(pblk, rqd, WRITE); 348 + 349 + atomic_dec(&pblk->inflight_io); 350 + kref_put(&pad_rq->ref, pblk_recov_complete); 346 351 } 347 352 348 353 static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line, ··· 390 385 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); 391 386 if (rq_ppas < pblk->min_write_pgs) { 392 387 pr_err("pblk: corrupted pad line %d\n", line->id); 393 - goto free_rq; 388 + goto fail_free_pad; 394 389 } 395 390 396 391 rq_len = rq_ppas * geo->sec_size; ··· 398 393 meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list); 399 394 if (!meta_list) { 400 395 ret = -ENOMEM; 401 - goto free_data; 396 + goto fail_free_pad; 402 397 } 403 398 404 399 ppa_list = (void *)(meta_list) + pblk_dma_meta_size; ··· 409 404 ret = PTR_ERR(rqd); 410 405 goto fail_free_meta; 411 406 } 412 - memset(rqd, 0, pblk_w_rq_size); 413 407 414 - bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL); 408 + bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, 409 + PBLK_VMALLOC_META, GFP_KERNEL); 415 410 if (IS_ERR(bio)) { 416 411 ret = PTR_ERR(bio); 417 412 goto fail_free_rqd; ··· 458 453 } 459 454 460 455 kref_get(&pad_rq->ref); 456 + pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); 461 457 462 458 ret = pblk_submit_io(pblk, rqd); 463 459 if (ret) { 464 460 pr_err("pblk: I/O submission failed: %d\n", ret); 465 - goto free_data; 461 + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); 462 + goto fail_free_bio; 466 463 } 467 - 468 - atomic_dec(&pblk->inflight_io); 469 464 470 465 left_line_ppas -= rq_ppas; 471 466 left_ppas -= rq_ppas; ··· 480 475 ret = -ETIME; 481 476 } 482 477 478 + if (!pblk_line_is_full(line)) 479 + pr_err("pblk: 
corrupted padded line: %d\n", line->id); 480 + 481 + vfree(data); 483 482 free_rq: 484 483 kfree(pad_rq); 485 - free_data: 486 - vfree(data); 487 484 return ret; 488 485 486 + fail_free_bio: 487 + bio_put(bio); 489 488 fail_free_rqd: 490 489 pblk_free_rqd(pblk, rqd, WRITE); 491 490 fail_free_meta: 492 491 nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list); 492 + fail_free_pad: 493 493 kfree(pad_rq); 494 + vfree(data); 494 495 return ret; 495 496 } 496 497
+7 -12
drivers/lightnvm/pblk-write.c
··· 178 178 { 179 179 struct pblk *pblk = rqd->private; 180 180 struct nvm_tgt_dev *dev = pblk->dev; 181 - struct nvm_geo *geo = &dev->geo; 182 181 struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd); 183 182 struct pblk_line *line = m_ctx->private; 184 183 struct pblk_emeta *emeta = line->emeta; 185 - int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]); 186 - struct pblk_lun *rlun = &pblk->luns[pos]; 187 184 int sync; 188 185 189 - up(&rlun->wr_sem); 186 + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); 190 187 191 188 if (rqd->error) { 192 189 pblk_log_write_err(pblk, rqd); ··· 200 203 pblk->close_wq); 201 204 202 205 bio_put(rqd->bio); 206 + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); 203 207 pblk_free_rqd(pblk, rqd, READ); 204 208 205 209 atomic_dec(&pblk->inflight_io); ··· 365 367 struct pblk_line_meta *lm = &pblk->lm; 366 368 struct pblk_emeta *emeta = meta_line->emeta; 367 369 struct pblk_g_ctx *m_ctx; 368 - struct pblk_lun *rlun; 369 370 struct bio *bio; 370 371 struct nvm_rq *rqd; 371 372 void *data; ··· 408 411 rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); 409 412 } 410 413 411 - rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])]; 412 - ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000)); 413 - if (ret) { 414 - pr_err("pblk: lun semaphore timed out (%d)\n", ret); 415 - goto fail_free_bio; 416 - } 417 - 418 414 emeta->mem += rq_len; 419 415 if (emeta->mem >= lm->emeta_len[0]) { 420 416 spin_lock(&l_mg->close_lock); ··· 416 426 "pblk: corrupt meta line %d\n", meta_line->id); 417 427 spin_unlock(&l_mg->close_lock); 418 428 } 429 + 430 + pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); 419 431 420 432 ret = pblk_submit_io(pblk, rqd); 421 433 if (ret) { ··· 428 436 return NVM_IO_OK; 429 437 430 438 fail_rollback: 439 + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); 431 440 spin_lock(&l_mg->close_lock); 432 441 pblk_dealloc_page(pblk, meta_line, rq_ppas); 433 442 list_add(&meta_line->list, &meta_line->list); 434 443 
spin_unlock(&l_mg->close_lock); 444 + 445 + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); 435 446 fail_free_bio: 436 447 if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META)) 437 448 bio_put(bio);
+2
drivers/lightnvm/pblk.h
··· 739 739 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs); 740 740 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail, 741 741 unsigned long secs_to_flush); 742 + void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas); 742 743 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, 743 744 unsigned long *lun_bitmap); 745 + void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas); 744 746 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, 745 747 unsigned long *lun_bitmap); 746 748 void pblk_end_bio_sync(struct bio *bio);