Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lightnvm: pblk: simplify partial read path

This patch changes the approach to handling partial read path.

In the old approach, merging of data from the round buffer and the drive was
fully made by the drive. This had some disadvantages - the code was complex
and relied on bio internals, so it was hard to maintain and was strongly
dependent on bio changes.

In the new approach most of the handling is done by block layer
functions such as bio_split(), bio_chain() and generic_make_request(),
and is generally less complex and easier to maintain. Below are some
more details of the new approach.

When a read bio arrives, it is cloned for pblk internal purposes. All
the L2P mapping, which includes copying data from the round buffer to the
bio and thus the bio_advance() calls, is done on the cloned bio, so the
original bio is untouched. If we find that we have a partial read case, we
still have the original bio untouched, so we can split it and continue to
process only the first part of it in the current context, while the rest is
submitted as a separate bio request to generic_make_request()
for further processing.

Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Heiner Litz <hlitz@ucsc.edu>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Igor Konopko and committed by
Jens Axboe
a96de64a 843f2edb

+100 -281
+11 -2
drivers/lightnvm/pblk-core.c
··· 2147 2147 spin_unlock(&pblk->trans_lock); 2148 2148 } 2149 2149 2150 - void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas, 2151 - sector_t blba, int nr_secs) 2150 + int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas, 2151 + sector_t blba, int nr_secs, bool *from_cache) 2152 2152 { 2153 2153 int i; 2154 2154 ··· 2162 2162 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) { 2163 2163 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa); 2164 2164 2165 + if (i > 0 && *from_cache) 2166 + break; 2167 + *from_cache = false; 2168 + 2165 2169 kref_get(&line->ref); 2170 + } else { 2171 + if (i > 0 && !*from_cache) 2172 + break; 2173 + *from_cache = true; 2166 2174 } 2167 2175 } 2168 2176 spin_unlock(&pblk->trans_lock); 2177 + return i; 2169 2178 } 2170 2179 2171 2180 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
+1 -10
drivers/lightnvm/pblk-rb.c
··· 642 642 * be directed to disk. 643 643 */ 644 644 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 645 - struct ppa_addr ppa, int bio_iter, bool advanced_bio) 645 + struct ppa_addr ppa) 646 646 { 647 647 struct pblk *pblk = container_of(rb, struct pblk, rwb); 648 648 struct pblk_rb_entry *entry; ··· 673 673 ret = 0; 674 674 goto out; 675 675 } 676 - 677 - /* Only advance the bio if it hasn't been advanced already. If advanced, 678 - * this bio is at least a partial bio (i.e., it has partially been 679 - * filled with data from the cache). If part of the data resides on the 680 - * media, we will read later on 681 - */ 682 - if (unlikely(!advanced_bio)) 683 - bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); 684 - 685 676 data = bio_data(bio); 686 677 memcpy(data, entry->data, rb->seg_size); 687 678
+85 -254
drivers/lightnvm/pblk-read.c
··· 26 26 * issued. 27 27 */ 28 28 static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, 29 - sector_t lba, struct ppa_addr ppa, 30 - int bio_iter, bool advanced_bio) 29 + sector_t lba, struct ppa_addr ppa) 31 30 { 32 31 #ifdef CONFIG_NVM_PBLK_DEBUG 33 32 /* Callers must ensure that the ppa points to a cache address */ ··· 34 35 BUG_ON(!pblk_addr_in_cache(ppa)); 35 36 #endif 36 37 37 - return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, 38 - bio_iter, advanced_bio); 38 + return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa); 39 39 } 40 40 41 - static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, 41 + static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, 42 42 struct bio *bio, sector_t blba, 43 - unsigned long *read_bitmap) 43 + bool *from_cache) 44 44 { 45 45 void *meta_list = rqd->meta_list; 46 - struct ppa_addr ppas[NVM_MAX_VLBA]; 47 - int nr_secs = rqd->nr_ppas; 48 - bool advanced_bio = false; 49 - int i, j = 0; 46 + int nr_secs, i; 50 47 51 - pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs); 48 + retry: 49 + nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas, 50 + from_cache); 51 + 52 + if (!*from_cache) 53 + goto end; 52 54 53 55 for (i = 0; i < nr_secs; i++) { 54 - struct ppa_addr p = ppas[i]; 55 56 struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i); 56 57 sector_t lba = blba + i; 57 58 58 - retry: 59 - if (pblk_ppa_empty(p)) { 59 + if (pblk_ppa_empty(rqd->ppa_list[i])) { 60 60 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY); 61 61 62 - WARN_ON(test_and_set_bit(i, read_bitmap)); 63 62 meta->lba = addr_empty; 64 - 65 - if (unlikely(!advanced_bio)) { 66 - bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE); 67 - advanced_bio = true; 63 + } else if (pblk_addr_in_cache(rqd->ppa_list[i])) { 64 + /* 65 + * Try to read from write buffer. The address is later 66 + * checked on the write buffer to prevent retrieving 67 + * overwritten data. 
68 + */ 69 + if (!pblk_read_from_cache(pblk, bio, lba, 70 + rqd->ppa_list[i])) { 71 + if (i == 0) { 72 + /* 73 + * We didn't call with bio_advance() 74 + * yet, so we can just retry. 75 + */ 76 + goto retry; 77 + } else { 78 + /* 79 + * We already call bio_advance() 80 + * so we cannot retry and we need 81 + * to quit that function in order 82 + * to allow caller to handle the bio 83 + * splitting in the current sector 84 + * position. 85 + */ 86 + nr_secs = i; 87 + goto end; 88 + } 68 89 } 69 - 70 - goto next; 71 - } 72 - 73 - /* Try to read from write buffer. The address is later checked 74 - * on the write buffer to prevent retrieving overwritten data. 75 - */ 76 - if (pblk_addr_in_cache(p)) { 77 - if (!pblk_read_from_cache(pblk, bio, lba, p, i, 78 - advanced_bio)) { 79 - pblk_lookup_l2p_seq(pblk, &p, lba, 1); 80 - goto retry; 81 - } 82 - WARN_ON(test_and_set_bit(i, read_bitmap)); 83 90 meta->lba = cpu_to_le64(lba); 84 - advanced_bio = true; 85 91 #ifdef CONFIG_NVM_PBLK_DEBUG 86 92 atomic_long_inc(&pblk->cache_reads); 87 93 #endif 88 - } else { 89 - /* Read from media non-cached sectors */ 90 - rqd->ppa_list[j++] = p; 91 94 } 92 - 93 - next: 94 - if (advanced_bio) 95 - bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); 95 + bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); 96 96 } 97 97 98 + end: 98 99 if (pblk_io_aligned(pblk, nr_secs)) 99 100 rqd->is_seq = 1; 100 101 101 102 #ifdef CONFIG_NVM_PBLK_DEBUG 102 103 atomic_long_add(nr_secs, &pblk->inflight_reads); 103 104 #endif 105 + 106 + return nr_secs; 104 107 } 105 108 106 109 ··· 198 197 pblk_log_read_err(pblk, rqd); 199 198 200 199 pblk_read_check_seq(pblk, rqd, r_ctx->lba); 201 - 202 - if (int_bio) 203 - bio_put(int_bio); 200 + bio_put(int_bio); 204 201 205 202 if (put_line) 206 203 pblk_rq_to_line_put(pblk, rqd); ··· 222 223 __pblk_end_io_read(pblk, rqd, true); 223 224 } 224 225 225 - static void pblk_end_partial_read(struct nvm_rq *rqd) 226 - { 227 - struct pblk *pblk = rqd->private; 228 - struct pblk_g_ctx *r_ctx = 
nvm_rq_to_pdu(rqd); 229 - struct pblk_pr_ctx *pr_ctx = r_ctx->private; 230 - struct pblk_sec_meta *meta; 231 - struct bio *new_bio = rqd->bio; 232 - struct bio *bio = pr_ctx->orig_bio; 233 - void *meta_list = rqd->meta_list; 234 - unsigned long *read_bitmap = pr_ctx->bitmap; 235 - struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT; 236 - struct bvec_iter new_iter = BVEC_ITER_ALL_INIT; 237 - int nr_secs = pr_ctx->orig_nr_secs; 238 - int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); 239 - void *src_p, *dst_p; 240 - int bit, i; 241 - 242 - if (unlikely(nr_holes == 1)) { 243 - struct ppa_addr ppa; 244 - 245 - ppa = rqd->ppa_addr; 246 - rqd->ppa_list = pr_ctx->ppa_ptr; 247 - rqd->dma_ppa_list = pr_ctx->dma_ppa_list; 248 - rqd->ppa_list[0] = ppa; 249 - } 250 - 251 - for (i = 0; i < nr_secs; i++) { 252 - meta = pblk_get_meta(pblk, meta_list, i); 253 - pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba); 254 - meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]); 255 - } 256 - 257 - /* Fill the holes in the original bio */ 258 - i = 0; 259 - for (bit = 0; bit < nr_secs; bit++) { 260 - if (!test_bit(bit, read_bitmap)) { 261 - struct bio_vec dst_bv, src_bv; 262 - struct pblk_line *line; 263 - 264 - line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); 265 - kref_put(&line->ref, pblk_line_put); 266 - 267 - meta = pblk_get_meta(pblk, meta_list, bit); 268 - meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); 269 - 270 - dst_bv = bio_iter_iovec(bio, orig_iter); 271 - src_bv = bio_iter_iovec(new_bio, new_iter); 272 - 273 - src_p = kmap_atomic(src_bv.bv_page); 274 - dst_p = kmap_atomic(dst_bv.bv_page); 275 - 276 - memcpy(dst_p + dst_bv.bv_offset, 277 - src_p + src_bv.bv_offset, 278 - PBLK_EXPOSED_PAGE_SIZE); 279 - 280 - kunmap_atomic(src_p); 281 - kunmap_atomic(dst_p); 282 - 283 - flush_dcache_page(dst_bv.bv_page); 284 - mempool_free(src_bv.bv_page, &pblk->page_bio_pool); 285 - 286 - bio_advance_iter(new_bio, &new_iter, 287 - PBLK_EXPOSED_PAGE_SIZE); 288 - i++; 289 - } 290 - 
bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE); 291 - } 292 - 293 - bio_put(new_bio); 294 - kfree(pr_ctx); 295 - 296 - /* restore original request */ 297 - rqd->bio = NULL; 298 - rqd->nr_ppas = nr_secs; 299 - 300 - pblk_end_user_read(bio, rqd->error); 301 - __pblk_end_io_read(pblk, rqd, false); 302 - } 303 - 304 - static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd, 305 - unsigned int bio_init_idx, 306 - unsigned long *read_bitmap, 307 - int nr_holes) 308 - { 309 - void *meta_list = rqd->meta_list; 310 - struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd); 311 - struct pblk_pr_ctx *pr_ctx; 312 - struct bio *new_bio, *bio = r_ctx->private; 313 - int nr_secs = rqd->nr_ppas; 314 - int i; 315 - 316 - new_bio = bio_alloc(GFP_KERNEL, nr_holes); 317 - 318 - if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes)) 319 - goto fail_bio_put; 320 - 321 - if (nr_holes != new_bio->bi_vcnt) { 322 - WARN_ONCE(1, "pblk: malformed bio\n"); 323 - goto fail_free_pages; 324 - } 325 - 326 - pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL); 327 - if (!pr_ctx) 328 - goto fail_free_pages; 329 - 330 - for (i = 0; i < nr_secs; i++) { 331 - struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i); 332 - 333 - pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba); 334 - } 335 - 336 - new_bio->bi_iter.bi_sector = 0; /* internal bio */ 337 - bio_set_op_attrs(new_bio, REQ_OP_READ, 0); 338 - 339 - rqd->bio = new_bio; 340 - rqd->nr_ppas = nr_holes; 341 - 342 - pr_ctx->orig_bio = bio; 343 - bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA); 344 - pr_ctx->bio_init_idx = bio_init_idx; 345 - pr_ctx->orig_nr_secs = nr_secs; 346 - r_ctx->private = pr_ctx; 347 - 348 - if (unlikely(nr_holes == 1)) { 349 - pr_ctx->ppa_ptr = rqd->ppa_list; 350 - pr_ctx->dma_ppa_list = rqd->dma_ppa_list; 351 - rqd->ppa_addr = rqd->ppa_list[0]; 352 - } 353 - return 0; 354 - 355 - fail_free_pages: 356 - pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt); 357 - fail_bio_put: 358 - 
bio_put(new_bio); 359 - 360 - return -ENOMEM; 361 - } 362 - 363 - static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, 364 - unsigned int bio_init_idx, 365 - unsigned long *read_bitmap, int nr_secs) 366 - { 367 - int nr_holes; 368 - int ret; 369 - 370 - nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); 371 - 372 - if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap, 373 - nr_holes)) 374 - return NVM_IO_ERR; 375 - 376 - rqd->end_io = pblk_end_partial_read; 377 - 378 - ret = pblk_submit_io(pblk, rqd); 379 - if (ret) { 380 - bio_put(rqd->bio); 381 - pblk_err(pblk, "partial read IO submission failed\n"); 382 - goto err; 383 - } 384 - 385 - return NVM_IO_OK; 386 - 387 - err: 388 - pblk_err(pblk, "failed to perform partial read\n"); 389 - 390 - /* Free allocated pages in new bio */ 391 - pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt); 392 - return NVM_IO_ERR; 393 - } 394 - 395 226 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio, 396 - sector_t lba, unsigned long *read_bitmap) 227 + sector_t lba, bool *from_cache) 397 228 { 398 229 struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0); 399 230 struct ppa_addr ppa; 400 231 401 - pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); 232 + pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache); 402 233 403 234 #ifdef CONFIG_NVM_PBLK_DEBUG 404 235 atomic_long_inc(&pblk->inflight_reads); ··· 238 409 if (pblk_ppa_empty(ppa)) { 239 410 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY); 240 411 241 - WARN_ON(test_and_set_bit(0, read_bitmap)); 242 412 meta->lba = addr_empty; 243 413 return; 244 414 } ··· 246 418 * write buffer to prevent retrieving overwritten data. 
247 419 */ 248 420 if (pblk_addr_in_cache(ppa)) { 249 - if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) { 250 - pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); 421 + if (!pblk_read_from_cache(pblk, bio, lba, ppa)) { 422 + pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache); 251 423 goto retry; 252 424 } 253 425 254 - WARN_ON(test_and_set_bit(0, read_bitmap)); 255 426 meta->lba = cpu_to_le64(lba); 256 427 257 428 #ifdef CONFIG_NVM_PBLK_DEBUG ··· 267 440 struct request_queue *q = dev->q; 268 441 sector_t blba = pblk_get_lba(bio); 269 442 unsigned int nr_secs = pblk_get_secs(bio); 443 + bool from_cache; 270 444 struct pblk_g_ctx *r_ctx; 271 445 struct nvm_rq *rqd; 272 - struct bio *int_bio; 273 - unsigned int bio_init_idx; 274 - DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA); 446 + struct bio *int_bio, *split_bio; 275 447 276 448 generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio), 277 449 &pblk->disk->part0); 278 - 279 - bitmap_zero(read_bitmap, nr_secs); 280 450 281 451 rqd = pblk_alloc_rqd(pblk, PBLK_READ); 282 452 ··· 286 462 r_ctx->start_time = jiffies; 287 463 r_ctx->lba = blba; 288 464 289 - /* Save the index for this bio's start. This is needed in case 290 - * we need to fill a partial read. 
291 - */ 292 - bio_init_idx = pblk_get_bi_idx(bio); 293 - 294 465 if (pblk_alloc_rqd_meta(pblk, rqd)) { 295 466 bio_io_error(bio); 296 467 pblk_free_rqd(pblk, rqd, PBLK_READ); ··· 294 475 295 476 /* Clone read bio to deal internally with: 296 477 * -read errors when reading from drive 297 - * -bio_advance() calls during l2p lookup and cache reads 478 + * -bio_advance() calls during cache reads 298 479 */ 299 480 int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set); 300 481 301 482 if (nr_secs > 1) 302 - pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap); 483 + nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba, 484 + &from_cache); 303 485 else 304 - pblk_read_rq(pblk, rqd, bio, blba, read_bitmap); 486 + pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache); 305 487 488 + split_retry: 306 489 r_ctx->private = bio; /* original bio */ 307 490 rqd->bio = int_bio; /* internal bio */ 308 491 309 - if (bitmap_full(read_bitmap, nr_secs)) { 492 + if (from_cache && nr_secs == rqd->nr_ppas) { 493 + /* All data was read from cache, we can complete the IO. */ 310 494 pblk_end_user_read(bio, 0); 311 495 atomic_inc(&pblk->inflight_io); 312 496 __pblk_end_io_read(pblk, rqd, false); 313 - return; 314 - } 315 - 316 - if (!bitmap_empty(read_bitmap, rqd->nr_ppas)) { 497 + } else if (nr_secs != rqd->nr_ppas) { 317 498 /* The read bio request could be partially filled by the write 318 499 * buffer, but there are some holes that need to be read from 319 - * the drive. 500 + * the drive. In order to handle this, we will use block layer 501 + * mechanism to split this request in to smaller ones and make 502 + * a chain of it. 503 + */ 504 + split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL, 505 + &pblk_bio_set); 506 + bio_chain(split_bio, bio); 507 + generic_make_request(bio); 508 + 509 + /* New bio contains first N sectors of the previous one, so 510 + * we can continue to use existing rqd, but we need to shrink 511 + * the number of PPAs in it. 
New bio is also guaranteed that 512 + * it contains only either data from cache or from drive, newer 513 + * mix of them. 514 + */ 515 + bio = split_bio; 516 + rqd->nr_ppas = nr_secs; 517 + if (rqd->nr_ppas == 1) 518 + rqd->ppa_addr = rqd->ppa_list[0]; 519 + 520 + /* Recreate int_bio - existing might have some needed internal 521 + * fields modified already. 320 522 */ 321 523 bio_put(int_bio); 322 - rqd->bio = NULL; 323 - if (pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap, 324 - nr_secs)) { 325 - pblk_err(pblk, "read IO submission failed\n"); 326 - bio_io_error(bio); 327 - __pblk_end_io_read(pblk, rqd, false); 328 - } 329 - return; 330 - } 331 - 332 - /* All sectors are to be read from the device */ 333 - if (pblk_submit_io(pblk, rqd)) { 334 - pblk_err(pblk, "read IO submission failed\n"); 335 - bio_io_error(bio); 336 - __pblk_end_io_read(pblk, rqd, false); 524 + int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set); 525 + goto split_retry; 526 + } else if (pblk_submit_io(pblk, rqd)) { 527 + /* Submitting IO to drive failed, let's report an error */ 528 + rqd->error = -ENODEV; 529 + pblk_end_io_read(rqd); 337 530 } 338 531 } 339 532
+3 -15
drivers/lightnvm/pblk.h
··· 121 121 u64 lba; 122 122 }; 123 123 124 - /* partial read context */ 125 - struct pblk_pr_ctx { 126 - struct bio *orig_bio; 127 - DECLARE_BITMAP(bitmap, NVM_MAX_VLBA); 128 - unsigned int orig_nr_secs; 129 - unsigned int bio_init_idx; 130 - void *ppa_ptr; 131 - dma_addr_t dma_ppa_list; 132 - u64 lba_list_mem[NVM_MAX_VLBA]; 133 - u64 lba_list_media[NVM_MAX_VLBA]; 134 - }; 135 - 136 124 /* Pad context */ 137 125 struct pblk_pad_rq { 138 126 struct pblk *pblk; ··· 747 759 unsigned int pos, unsigned int nr_entries, 748 760 unsigned int count); 749 761 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 750 - struct ppa_addr ppa, int bio_iter, bool advanced_bio); 762 + struct ppa_addr ppa); 751 763 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); 752 764 753 765 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); ··· 847 859 struct pblk_line *gc_line, u64 paddr); 848 860 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas, 849 861 u64 *lba_list, int nr_secs); 850 - void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas, 851 - sector_t blba, int nr_secs); 862 + int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas, 863 + sector_t blba, int nr_secs, bool *from_cache); 852 864 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd); 853 865 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd); 854 866