Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lightnvm: pblk: use nvm_rq_to_ppa_list()

This patch replaces a few remaining usages of rqd->ppa_list[] with the
existing nvm_rq_to_ppa_list() helper. This is needed for theoretical
devices with ws_min/ws_opt equal to 1.

Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Igor Konopko and committed by
Jens Axboe
45c5fcbb a96de64a

+22 -17
+14 -12
drivers/lightnvm/pblk-core.c
··· 562 562 563 563 int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd) 564 564 { 565 - struct ppa_addr *ppa_list; 565 + struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); 566 566 int ret; 567 - 568 - ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; 569 567 570 568 pblk_down_chunk(pblk, ppa_list[0]); 571 569 ret = pblk_submit_io_sync(pblk, rqd); ··· 723 725 struct nvm_tgt_dev *dev = pblk->dev; 724 726 struct pblk_line_meta *lm = &pblk->lm; 725 727 struct bio *bio; 728 + struct ppa_addr *ppa_list; 726 729 struct nvm_rq rqd; 727 730 u64 paddr = pblk_line_smeta_start(pblk, line); 728 731 int i, ret; ··· 747 748 rqd.opcode = NVM_OP_PREAD; 748 749 rqd.nr_ppas = lm->smeta_sec; 749 750 rqd.is_seq = 1; 751 + ppa_list = nvm_rq_to_ppa_list(&rqd); 750 752 751 753 for (i = 0; i < lm->smeta_sec; i++, paddr++) 752 - rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); 754 + ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); 753 755 754 756 ret = pblk_submit_io_sync(pblk, &rqd); 755 757 if (ret) { ··· 777 777 struct nvm_tgt_dev *dev = pblk->dev; 778 778 struct pblk_line_meta *lm = &pblk->lm; 779 779 struct bio *bio; 780 + struct ppa_addr *ppa_list; 780 781 struct nvm_rq rqd; 781 782 __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf); 782 783 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY); ··· 802 801 rqd.opcode = NVM_OP_PWRITE; 803 802 rqd.nr_ppas = lm->smeta_sec; 804 803 rqd.is_seq = 1; 804 + ppa_list = nvm_rq_to_ppa_list(&rqd); 805 805 806 806 for (i = 0; i < lm->smeta_sec; i++, paddr++) { 807 807 struct pblk_sec_meta *meta = pblk_get_meta(pblk, 808 808 rqd.meta_list, i); 809 809 810 - rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); 810 + ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); 811 811 meta->lba = lba_list[paddr] = addr_empty; 812 812 } 813 813 ··· 838 836 struct nvm_geo *geo = &dev->geo; 839 837 struct pblk_line_mgmt *l_mg = &pblk->l_mg; 840 838 struct pblk_line_meta *lm = &pblk->lm; 841 - void *ppa_list, 
*meta_list; 839 + void *ppa_list_buf, *meta_list; 842 840 struct bio *bio; 841 + struct ppa_addr *ppa_list; 843 842 struct nvm_rq rqd; 844 843 u64 paddr = line->emeta_ssec; 845 844 dma_addr_t dma_ppa_list, dma_meta_list; ··· 856 853 if (!meta_list) 857 854 return -ENOMEM; 858 855 859 - ppa_list = meta_list + pblk_dma_meta_size(pblk); 856 + ppa_list_buf = meta_list + pblk_dma_meta_size(pblk); 860 857 dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk); 861 858 862 859 next_rq: ··· 877 874 878 875 rqd.bio = bio; 879 876 rqd.meta_list = meta_list; 880 - rqd.ppa_list = ppa_list; 877 + rqd.ppa_list = ppa_list_buf; 881 878 rqd.dma_meta_list = dma_meta_list; 882 879 rqd.dma_ppa_list = dma_ppa_list; 883 880 rqd.opcode = NVM_OP_PREAD; 884 881 rqd.nr_ppas = rq_ppas; 882 + ppa_list = nvm_rq_to_ppa_list(&rqd); 885 883 886 884 for (i = 0; i < rqd.nr_ppas; ) { 887 885 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id); ··· 910 906 } 911 907 912 908 for (j = 0; j < min; j++, i++, paddr++) 913 - rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id); 909 + ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id); 914 910 } 915 911 916 912 ret = pblk_submit_io_sync(pblk, &rqd); ··· 1529 1525 1530 1526 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd) 1531 1527 { 1532 - struct ppa_addr *ppa_list; 1528 + struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); 1533 1529 int i; 1534 - 1535 - ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; 1536 1530 1537 1531 for (i = 0; i < rqd->nr_ppas; i++) 1538 1532 pblk_ppa_to_line_put(pblk, ppa_list[i]);
+8 -5
drivers/lightnvm/pblk-recovery.c
··· 179 179 struct pblk_pad_rq *pad_rq; 180 180 struct nvm_rq *rqd; 181 181 struct bio *bio; 182 + struct ppa_addr *ppa_list; 182 183 void *data; 183 184 __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf); 184 185 u64 w_ptr = line->cur_sec; ··· 240 239 rqd->end_io = pblk_end_io_recov; 241 240 rqd->private = pad_rq; 242 241 242 + ppa_list = nvm_rq_to_ppa_list(rqd); 243 243 meta_list = rqd->meta_list; 244 244 245 245 for (i = 0; i < rqd->nr_ppas; ) { ··· 268 266 lba_list[w_ptr] = addr_empty; 269 267 meta = pblk_get_meta(pblk, meta_list, i); 270 268 meta->lba = addr_empty; 271 - rqd->ppa_list[i] = dev_ppa; 269 + ppa_list[i] = dev_ppa; 272 270 } 273 271 } 274 272 275 273 kref_get(&pad_rq->ref); 276 - pblk_down_chunk(pblk, rqd->ppa_list[0]); 274 + pblk_down_chunk(pblk, ppa_list[0]); 277 275 278 276 ret = pblk_submit_io(pblk, rqd); 279 277 if (ret) { 280 278 pblk_err(pblk, "I/O submission failed: %d\n", ret); 281 - pblk_up_chunk(pblk, rqd->ppa_list[0]); 279 + pblk_up_chunk(pblk, ppa_list[0]); 282 280 kref_put(&pad_rq->ref, pblk_recov_complete); 283 281 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT); 284 282 bio_put(bio); ··· 422 420 rqd->ppa_list = ppa_list; 423 421 rqd->dma_ppa_list = dma_ppa_list; 424 422 rqd->dma_meta_list = dma_meta_list; 423 + ppa_list = nvm_rq_to_ppa_list(rqd); 425 424 426 425 if (pblk_io_aligned(pblk, rq_ppas)) 427 426 rqd->is_seq = 1; ··· 441 438 } 442 439 443 440 for (j = 0; j < pblk->min_write_pgs; j++, i++) 444 - rqd->ppa_list[i] = 441 + ppa_list[i] = 445 442 addr_to_gen_ppa(pblk, paddr + j, line->id); 446 443 } 447 444 ··· 489 486 continue; 490 487 491 488 line->nr_valid_lbas++; 492 - pblk_update_map(pblk, lba, rqd->ppa_list[i]); 489 + pblk_update_map(pblk, lba, ppa_list[i]); 493 490 } 494 491 495 492 left_ppas -= rq_ppas;