1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2016 CNEX Labs
4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
5 * Matias Bjorling <matias@cnexlabs.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * pblk-core.c - pblk's core functionality
17 *
18 */
19
20#define CREATE_TRACE_POINTS
21
22#include "pblk.h"
23#include "pblk-trace.h"
24
25static void pblk_line_mark_bb(struct work_struct *work)
26{
27 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
28 ws);
29 struct pblk *pblk = line_ws->pblk;
30 struct nvm_tgt_dev *dev = pblk->dev;
31 struct ppa_addr *ppa = line_ws->priv;
32 int ret;
33
34 ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
35 if (ret) {
36 struct pblk_line *line;
37 int pos;
38
39 line = pblk_ppa_to_line(pblk, *ppa);
40 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
41
42 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
43 line->id, pos);
44 }
45
46 kfree(ppa);
47 mempool_free(line_ws, &pblk->gen_ws_pool);
48}
49
50static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
51 struct ppa_addr ppa_addr)
52{
53 struct nvm_tgt_dev *dev = pblk->dev;
54 struct nvm_geo *geo = &dev->geo;
55 struct ppa_addr *ppa;
56 int pos = pblk_ppa_to_pos(geo, ppa_addr);
57
58 pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
59 atomic_long_inc(&pblk->erase_failed);
60
61 atomic_dec(&line->blk_in_line);
62 if (test_and_set_bit(pos, line->blk_bitmap))
63 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
64 line->id, pos);
65
66	/* Not necessary to mark bad blocks on the 2.0 spec. */
67 if (geo->version == NVM_OCSSD_SPEC_20)
68 return;
69
70 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
71 if (!ppa)
72 return;
73
74 *ppa = ppa_addr;
75 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
76 GFP_ATOMIC, pblk->bb_wq);
77}
78
79static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
80{
81 struct nvm_tgt_dev *dev = pblk->dev;
82 struct nvm_geo *geo = &dev->geo;
83 struct nvm_chk_meta *chunk;
84 struct pblk_line *line;
85 int pos;
86
87 line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
88 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
89 chunk = &line->chks[pos];
90
91 atomic_dec(&line->left_seblks);
92
93 if (rqd->error) {
94 trace_pblk_chunk_reset(pblk_disk_name(pblk),
95 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
96
97 chunk->state = NVM_CHK_ST_OFFLINE;
98 pblk_mark_bb(pblk, line, rqd->ppa_addr);
99 } else {
100 trace_pblk_chunk_reset(pblk_disk_name(pblk),
101 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
102
103 chunk->state = NVM_CHK_ST_FREE;
104 }
105
106 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
107 chunk->state);
108
109 atomic_dec(&pblk->inflight_io);
110}
111
112/* Erase completion assumes that only one block is erased at a time */
113static void pblk_end_io_erase(struct nvm_rq *rqd)
114{
115 struct pblk *pblk = rqd->private;
116
117 __pblk_end_io_erase(pblk, rqd);
118 mempool_free(rqd, &pblk->e_rq_pool);
119}
120
121/*
122 * Get information for all chunks from the device.
123 *
124 * The caller is responsible for freeing (with vfree) the returned structure
125 */
126struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
127{
128 struct nvm_tgt_dev *dev = pblk->dev;
129 struct nvm_geo *geo = &dev->geo;
130 struct nvm_chk_meta *meta;
131 struct ppa_addr ppa;
132 unsigned long len;
133 int ret;
134
135 ppa.ppa = 0;
136
137 len = geo->all_chunks * sizeof(*meta);
138 meta = vzalloc(len);
139 if (!meta)
140 return ERR_PTR(-ENOMEM);
141
142 ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
143 if (ret) {
144 vfree(meta);
145 return ERR_PTR(-EIO);
146 }
147
148 return meta;
149}
150
151struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
152 struct nvm_chk_meta *meta,
153 struct ppa_addr ppa)
154{
155 struct nvm_tgt_dev *dev = pblk->dev;
156 struct nvm_geo *geo = &dev->geo;
157 int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
158 int lun_off = ppa.m.pu * geo->num_chk;
159 int chk_off = ppa.m.chk;
160
161 return meta + ch_off + lun_off + chk_off;
162}
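
/*
 * Illustrative sketch (not part of the driver): a caller retrieves the chunk
 * metadata table once and then indexes into it per ppa; the vzalloc'ed table
 * must be released with vfree() when no longer needed. "ppa" below is a
 * placeholder for a device address.
 *
 *	struct nvm_chk_meta *meta = pblk_get_chunk_meta(pblk);
 *	struct nvm_chk_meta *chunk;
 *
 *	if (IS_ERR(meta))
 *		return PTR_ERR(meta);
 *
 *	chunk = pblk_chunk_get_off(pblk, meta, ppa);
 *	... inspect chunk->state, chunk->cnlb, ...
 *
 *	vfree(meta);
 */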
163
164void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
165 u64 paddr)
166{
167 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
168 struct list_head *move_list = NULL;
169
170 /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
171 * table is modified with reclaimed sectors, a check is done to ensure
172 * that newer updates are not overwritten.
173 */
174 spin_lock(&line->lock);
175 WARN_ON(line->state == PBLK_LINESTATE_FREE);
176
177 if (test_and_set_bit(paddr, line->invalid_bitmap)) {
178 WARN_ONCE(1, "pblk: double invalidate\n");
179 spin_unlock(&line->lock);
180 return;
181 }
182 le32_add_cpu(line->vsc, -1);
183
184 if (line->state == PBLK_LINESTATE_CLOSED)
185 move_list = pblk_line_gc_list(pblk, line);
186 spin_unlock(&line->lock);
187
188 if (move_list) {
189 spin_lock(&l_mg->gc_lock);
190 spin_lock(&line->lock);
191 /* Prevent moving a line that has just been chosen for GC */
192 if (line->state == PBLK_LINESTATE_GC) {
193 spin_unlock(&line->lock);
194 spin_unlock(&l_mg->gc_lock);
195 return;
196 }
197 spin_unlock(&line->lock);
198
199 list_move_tail(&line->list, move_list);
200 spin_unlock(&l_mg->gc_lock);
201 }
202}
203
204void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
205{
206 struct pblk_line *line;
207 u64 paddr;
208
209#ifdef CONFIG_NVM_PBLK_DEBUG
210 /* Callers must ensure that the ppa points to a device address */
211 BUG_ON(pblk_addr_in_cache(ppa));
212 BUG_ON(pblk_ppa_empty(ppa));
213#endif
214
215 line = pblk_ppa_to_line(pblk, ppa);
216 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
217
218 __pblk_map_invalidate(pblk, line, paddr);
219}
220
221static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
222 unsigned int nr_secs)
223{
224 sector_t lba;
225
226 spin_lock(&pblk->trans_lock);
227 for (lba = slba; lba < slba + nr_secs; lba++) {
228 struct ppa_addr ppa;
229
230 ppa = pblk_trans_map_get(pblk, lba);
231
232 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
233 pblk_map_invalidate(pblk, ppa);
234
235 pblk_ppa_set_empty(&ppa);
236 pblk_trans_map_set(pblk, lba, ppa);
237 }
238 spin_unlock(&pblk->trans_lock);
239}
240
241int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
242{
243 struct nvm_tgt_dev *dev = pblk->dev;
244
245 rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
246 &rqd->dma_meta_list);
247 if (!rqd->meta_list)
248 return -ENOMEM;
249
250 if (rqd->nr_ppas == 1)
251 return 0;
252
253 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
254 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
255
256 return 0;
257}
258
259void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
260{
261 struct nvm_tgt_dev *dev = pblk->dev;
262
263 if (rqd->meta_list)
264 nvm_dev_dma_free(dev->parent, rqd->meta_list,
265 rqd->dma_meta_list);
266}
267
268/* Caller must guarantee that the request is a valid type */
269struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
270{
271 mempool_t *pool;
272 struct nvm_rq *rqd;
273 int rq_size;
274
275 switch (type) {
276 case PBLK_WRITE:
277 case PBLK_WRITE_INT:
278 pool = &pblk->w_rq_pool;
279 rq_size = pblk_w_rq_size;
280 break;
281 case PBLK_READ:
282 pool = &pblk->r_rq_pool;
283 rq_size = pblk_g_rq_size;
284 break;
285 default:
286 pool = &pblk->e_rq_pool;
287 rq_size = pblk_g_rq_size;
288 }
289
290 rqd = mempool_alloc(pool, GFP_KERNEL);
291 memset(rqd, 0, rq_size);
292
293 return rqd;
294}
295
296/* Typically used on completion path. Cannot guarantee request consistency */
297void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
298{
299 mempool_t *pool;
300
301 switch (type) {
302 case PBLK_WRITE:
303 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
304 /* fall through */
305 case PBLK_WRITE_INT:
306 pool = &pblk->w_rq_pool;
307 break;
308 case PBLK_READ:
309 pool = &pblk->r_rq_pool;
310 break;
311 case PBLK_ERASE:
312 pool = &pblk->e_rq_pool;
313 break;
314 default:
315 pblk_err(pblk, "trying to free unknown rqd type\n");
316 return;
317 }
318
319 pblk_free_rqd_meta(pblk, rqd);
320 mempool_free(rqd, pool);
321}
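
/*
 * Illustrative sketch (not part of the driver): the helpers above are used in
 * pairs with a matching type, e.g. for an internal erase request (see
 * pblk_blk_erase_async() further down for a real user). The fields set here
 * are only an example.
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
 *
 *	rqd->opcode = NVM_OP_ERASE;
 *	rqd->ppa_addr = ppa;
 *	rqd->nr_ppas = 1;
 *	...
 *	pblk_free_rqd(pblk, rqd, PBLK_ERASE);
 */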
322
323void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
324 int nr_pages)
325{
326 struct bio_vec bv;
327 int i;
328
329 WARN_ON(off + nr_pages != bio->bi_vcnt);
330
331 for (i = off; i < nr_pages + off; i++) {
332 bv = bio->bi_io_vec[i];
333 mempool_free(bv.bv_page, &pblk->page_bio_pool);
334 }
335}
336
337int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
338 int nr_pages)
339{
340 struct request_queue *q = pblk->dev->q;
341 struct page *page;
342 int i, ret;
343
344 for (i = 0; i < nr_pages; i++) {
345 page = mempool_alloc(&pblk->page_bio_pool, flags);
346
347 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
348 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
349 pblk_err(pblk, "could not add page to bio\n");
350 mempool_free(page, &pblk->page_bio_pool);
351 goto err;
352 }
353 }
354
355 return 0;
356err:
357 pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
358 return -1;
359}
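
/*
 * Illustrative sketch (not part of the driver): a caller typically allocates
 * a bio sized for nr_secs vectors, fills it from the page pool and returns
 * the pages once the I/O has completed.
 *
 *	bio = bio_kmalloc(GFP_KERNEL, nr_secs);
 *	if (!bio)
 *		return -ENOMEM;
 *
 *	if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, nr_secs)) {
 *		bio_put(bio);
 *		return -ENOMEM;
 *	}
 *	...
 *	pblk_bio_free_pages(pblk, bio, 0, bio->bi_vcnt);
 *	bio_put(bio);
 */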
360
361void pblk_write_kick(struct pblk *pblk)
362{
363 wake_up_process(pblk->writer_ts);
364 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
365}
366
367void pblk_write_timer_fn(struct timer_list *t)
368{
369 struct pblk *pblk = from_timer(pblk, t, wtimer);
370
371 /* kick the write thread every tick to flush outstanding data */
372 pblk_write_kick(pblk);
373}
374
375void pblk_write_should_kick(struct pblk *pblk)
376{
377 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
378
379 if (secs_avail >= pblk->min_write_pgs_data)
380 pblk_write_kick(pblk);
381}
382
383static void pblk_wait_for_meta(struct pblk *pblk)
384{
385 do {
386 if (!atomic_read(&pblk->inflight_io))
387 break;
388
389 schedule();
390 } while (1);
391}
392
393static void pblk_flush_writer(struct pblk *pblk)
394{
395 pblk_rb_flush(&pblk->rwb);
396 do {
397 if (!pblk_rb_sync_count(&pblk->rwb))
398 break;
399
400 pblk_write_kick(pblk);
401 schedule();
402 } while (1);
403}
404
405struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
406{
407 struct pblk_line_meta *lm = &pblk->lm;
408 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
409 struct list_head *move_list = NULL;
410 int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
411 * (pblk->min_write_pgs - pblk->min_write_pgs_data);
412 int vsc = le32_to_cpu(*line->vsc) + packed_meta;
413
414 lockdep_assert_held(&line->lock);
415
416 if (line->w_err_gc->has_write_err) {
417 if (line->gc_group != PBLK_LINEGC_WERR) {
418 line->gc_group = PBLK_LINEGC_WERR;
419 move_list = &l_mg->gc_werr_list;
420 pblk_rl_werr_line_in(&pblk->rl);
421 }
422 } else if (!vsc) {
423 if (line->gc_group != PBLK_LINEGC_FULL) {
424 line->gc_group = PBLK_LINEGC_FULL;
425 move_list = &l_mg->gc_full_list;
426 }
427 } else if (vsc < lm->high_thrs) {
428 if (line->gc_group != PBLK_LINEGC_HIGH) {
429 line->gc_group = PBLK_LINEGC_HIGH;
430 move_list = &l_mg->gc_high_list;
431 }
432 } else if (vsc < lm->mid_thrs) {
433 if (line->gc_group != PBLK_LINEGC_MID) {
434 line->gc_group = PBLK_LINEGC_MID;
435 move_list = &l_mg->gc_mid_list;
436 }
437 } else if (vsc < line->sec_in_line) {
438 if (line->gc_group != PBLK_LINEGC_LOW) {
439 line->gc_group = PBLK_LINEGC_LOW;
440 move_list = &l_mg->gc_low_list;
441 }
442 } else if (vsc == line->sec_in_line) {
443 if (line->gc_group != PBLK_LINEGC_EMPTY) {
444 line->gc_group = PBLK_LINEGC_EMPTY;
445 move_list = &l_mg->gc_empty_list;
446 }
447 } else {
448 line->state = PBLK_LINESTATE_CORRUPT;
449 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
450 line->state);
451
452 line->gc_group = PBLK_LINEGC_NONE;
453 move_list = &l_mg->corrupt_list;
454 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
455 line->id, vsc,
456 line->sec_in_line,
457 lm->high_thrs, lm->mid_thrs);
458 }
459
460 return move_list;
461}
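
/*
 * Worked example (illustrative numbers only): assume sec_in_line = 1024,
 * lm->high_thrs = 256 and lm->mid_thrs = 512. A closed line with a write
 * error always goes to gc_werr_list; otherwise vsc = 0 selects gc_full_list,
 * vsc = 100 selects gc_high_list (cheapest to GC), vsc = 400 gc_mid_list,
 * vsc = 800 gc_low_list and vsc = 1024 gc_empty_list. A vsc larger than
 * sec_in_line is impossible and flags the line as corrupt.
 */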
462
463void pblk_discard(struct pblk *pblk, struct bio *bio)
464{
465 sector_t slba = pblk_get_lba(bio);
466 sector_t nr_secs = pblk_get_secs(bio);
467
468 pblk_invalidate_range(pblk, slba, nr_secs);
469}
470
471void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
472{
473 atomic_long_inc(&pblk->write_failed);
474#ifdef CONFIG_NVM_PBLK_DEBUG
475 pblk_print_failed_rqd(pblk, rqd, rqd->error);
476#endif
477}
478
479void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
480{
481 /* Empty page read is not necessarily an error (e.g., L2P recovery) */
482 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
483 atomic_long_inc(&pblk->read_empty);
484 return;
485 }
486
487 switch (rqd->error) {
488 case NVM_RSP_WARN_HIGHECC:
489 atomic_long_inc(&pblk->read_high_ecc);
490 break;
491 case NVM_RSP_ERR_FAILECC:
492 case NVM_RSP_ERR_FAILCRC:
493 atomic_long_inc(&pblk->read_failed);
494 break;
495 default:
496 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
497 }
498#ifdef CONFIG_NVM_PBLK_DEBUG
499 pblk_print_failed_rqd(pblk, rqd, rqd->error);
500#endif
501}
502
503void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
504{
505 pblk->sec_per_write = sec_per_write;
506}
507
508int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
509{
510 struct nvm_tgt_dev *dev = pblk->dev;
511
512 atomic_inc(&pblk->inflight_io);
513
514#ifdef CONFIG_NVM_PBLK_DEBUG
515 if (pblk_check_io(pblk, rqd))
516 return NVM_IO_ERR;
517#endif
518
519 return nvm_submit_io(dev, rqd);
520}
521
522void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
523{
524 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
525
526 int i;
527
528 for (i = 0; i < rqd->nr_ppas; i++) {
529 struct ppa_addr *ppa = &ppa_list[i];
530 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
531 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
532
533 if (caddr == 0)
534 trace_pblk_chunk_state(pblk_disk_name(pblk),
535 ppa, NVM_CHK_ST_OPEN);
536 else if (caddr == (chunk->cnlb - 1))
537 trace_pblk_chunk_state(pblk_disk_name(pblk),
538 ppa, NVM_CHK_ST_CLOSED);
539 }
540}
541
542int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
543{
544 struct nvm_tgt_dev *dev = pblk->dev;
545 int ret;
546
547 atomic_inc(&pblk->inflight_io);
548
549#ifdef CONFIG_NVM_PBLK_DEBUG
550 if (pblk_check_io(pblk, rqd))
551 return NVM_IO_ERR;
552#endif
553
554 ret = nvm_submit_io_sync(dev, rqd);
555
556 if (trace_pblk_chunk_state_enabled() && !ret &&
557 rqd->opcode == NVM_OP_PWRITE)
558 pblk_check_chunk_state_update(pblk, rqd);
559
560 return ret;
561}
562
563int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
564{
565 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
566 int ret;
567
568 pblk_down_chunk(pblk, ppa_list[0]);
569 ret = pblk_submit_io_sync(pblk, rqd);
570 pblk_up_chunk(pblk, ppa_list[0]);
571
572 return ret;
573}
574
575static void pblk_bio_map_addr_endio(struct bio *bio)
576{
577 bio_put(bio);
578}
579
580struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
581 unsigned int nr_secs, unsigned int len,
582 int alloc_type, gfp_t gfp_mask)
583{
584 struct nvm_tgt_dev *dev = pblk->dev;
585 void *kaddr = data;
586 struct page *page;
587 struct bio *bio;
588 int i, ret;
589
590 if (alloc_type == PBLK_KMALLOC_META)
591 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
592
593 bio = bio_kmalloc(gfp_mask, nr_secs);
594 if (!bio)
595 return ERR_PTR(-ENOMEM);
596
597 for (i = 0; i < nr_secs; i++) {
598 page = vmalloc_to_page(kaddr);
599 if (!page) {
600 pblk_err(pblk, "could not map vmalloc bio\n");
601 bio_put(bio);
602 bio = ERR_PTR(-ENOMEM);
603 goto out;
604 }
605
606 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
607 if (ret != PAGE_SIZE) {
608 pblk_err(pblk, "could not add page to bio\n");
609 bio_put(bio);
610 bio = ERR_PTR(-ENOMEM);
611 goto out;
612 }
613
614 kaddr += PAGE_SIZE;
615 }
616
617 bio->bi_end_io = pblk_bio_map_addr_endio;
618out:
619 return bio;
620}
621
622int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
623 unsigned long secs_to_flush, bool skip_meta)
624{
625 int max = pblk->sec_per_write;
626 int min = pblk->min_write_pgs;
627 int secs_to_sync = 0;
628
629 if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
630 min = max = pblk->min_write_pgs_data;
631
632 if (secs_avail >= max)
633 secs_to_sync = max;
634 else if (secs_avail >= min)
635 secs_to_sync = min * (secs_avail / min);
636 else if (secs_to_flush)
637 secs_to_sync = min;
638
639 return secs_to_sync;
640}
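
/*
 * Worked example (illustrative numbers only): assume sec_per_write = 16 and
 * min_write_pgs = 8, with skip_meta not in effect. Then secs_avail = 20
 * yields 16 (one full write), secs_avail = 13 yields 8 (rounded down to a
 * multiple of the minimum), and secs_avail = 5 yields 0 unless a flush is
 * pending, in which case the minimum of 8 is returned and the writer pads
 * the request.
 */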
641
642void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
643{
644 u64 addr;
645 int i;
646
647 spin_lock(&line->lock);
648 addr = find_next_zero_bit(line->map_bitmap,
649 pblk->lm.sec_per_line, line->cur_sec);
650 line->cur_sec = addr - nr_secs;
651
652 for (i = 0; i < nr_secs; i++, line->cur_sec--)
653 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
654 spin_unlock(&line->lock);
655}
656
657u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
658{
659 u64 addr;
660 int i;
661
662 lockdep_assert_held(&line->lock);
663
664 /* logic error: ppa out-of-bounds. Prevent generating bad address */
665 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
666 WARN(1, "pblk: page allocation out of bounds\n");
667 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
668 }
669
670 line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
671 pblk->lm.sec_per_line, line->cur_sec);
672 for (i = 0; i < nr_secs; i++, line->cur_sec++)
673 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
674
675 return addr;
676}
677
678u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
679{
680 u64 addr;
681
682 /* Lock needed in case a write fails and a recovery needs to remap
683 * failed write buffer entries
684 */
685 spin_lock(&line->lock);
686 addr = __pblk_alloc_page(pblk, line, nr_secs);
687 line->left_msecs -= nr_secs;
688 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
689 spin_unlock(&line->lock);
690
691 return addr;
692}
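
/*
 * Worked example (illustrative numbers only): with cur_sec = 64 and bit 64
 * the first clear bit in map_bitmap, __pblk_alloc_page(pblk, line, 4) returns
 * addr = 64, sets bits 64..67 and leaves cur_sec = 68, so the next allocation
 * continues from there. pblk_alloc_page() additionally charges the four
 * sectors against line->left_msecs under the line lock.
 */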
693
694u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
695{
696 u64 paddr;
697
698 spin_lock(&line->lock);
699 paddr = find_next_zero_bit(line->map_bitmap,
700 pblk->lm.sec_per_line, line->cur_sec);
701 spin_unlock(&line->lock);
702
703 return paddr;
704}
705
706u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
707{
708 struct nvm_tgt_dev *dev = pblk->dev;
709 struct nvm_geo *geo = &dev->geo;
710 struct pblk_line_meta *lm = &pblk->lm;
711 int bit;
712
713 /* This usually only happens on bad lines */
714 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
715 if (bit >= lm->blk_per_line)
716 return -1;
717
718 return bit * geo->ws_opt;
719}
720
721int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
722{
723 struct nvm_tgt_dev *dev = pblk->dev;
724 struct pblk_line_meta *lm = &pblk->lm;
725 struct bio *bio;
726 struct ppa_addr *ppa_list;
727 struct nvm_rq rqd;
728 u64 paddr = pblk_line_smeta_start(pblk, line);
729 int i, ret;
730
731 memset(&rqd, 0, sizeof(struct nvm_rq));
732
733 ret = pblk_alloc_rqd_meta(pblk, &rqd);
734 if (ret)
735 return ret;
736
737 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
738 if (IS_ERR(bio)) {
739 ret = PTR_ERR(bio);
740 goto clear_rqd;
741 }
742
743 bio->bi_iter.bi_sector = 0; /* internal bio */
744 bio_set_op_attrs(bio, REQ_OP_READ, 0);
745
746 rqd.bio = bio;
747 rqd.opcode = NVM_OP_PREAD;
748 rqd.nr_ppas = lm->smeta_sec;
749 rqd.is_seq = 1;
750 ppa_list = nvm_rq_to_ppa_list(&rqd);
751
752 for (i = 0; i < lm->smeta_sec; i++, paddr++)
753 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
754
755 ret = pblk_submit_io_sync(pblk, &rqd);
756 if (ret) {
757 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
758 bio_put(bio);
759 goto clear_rqd;
760 }
761
762 atomic_dec(&pblk->inflight_io);
763
764 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
765 pblk_log_read_err(pblk, &rqd);
766 ret = -EIO;
767 }
768
769clear_rqd:
770 pblk_free_rqd_meta(pblk, &rqd);
771 return ret;
772}
773
774static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
775 u64 paddr)
776{
777 struct nvm_tgt_dev *dev = pblk->dev;
778 struct pblk_line_meta *lm = &pblk->lm;
779 struct bio *bio;
780 struct ppa_addr *ppa_list;
781 struct nvm_rq rqd;
782 __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
783 __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
784 int i, ret;
785
786 memset(&rqd, 0, sizeof(struct nvm_rq));
787
788 ret = pblk_alloc_rqd_meta(pblk, &rqd);
789 if (ret)
790 return ret;
791
792 bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
793 if (IS_ERR(bio)) {
794 ret = PTR_ERR(bio);
795 goto clear_rqd;
796 }
797
798 bio->bi_iter.bi_sector = 0; /* internal bio */
799 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
800
801 rqd.bio = bio;
802 rqd.opcode = NVM_OP_PWRITE;
803 rqd.nr_ppas = lm->smeta_sec;
804 rqd.is_seq = 1;
805 ppa_list = nvm_rq_to_ppa_list(&rqd);
806
807 for (i = 0; i < lm->smeta_sec; i++, paddr++) {
808 struct pblk_sec_meta *meta = pblk_get_meta(pblk,
809 rqd.meta_list, i);
810
811 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
812 meta->lba = lba_list[paddr] = addr_empty;
813 }
814
815 ret = pblk_submit_io_sync_sem(pblk, &rqd);
816 if (ret) {
817 pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
818 bio_put(bio);
819 goto clear_rqd;
820 }
821
822 atomic_dec(&pblk->inflight_io);
823
824 if (rqd.error) {
825 pblk_log_write_err(pblk, &rqd);
826 ret = -EIO;
827 }
828
829clear_rqd:
830 pblk_free_rqd_meta(pblk, &rqd);
831 return ret;
832}
833
834int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
835 void *emeta_buf)
836{
837 struct nvm_tgt_dev *dev = pblk->dev;
838 struct nvm_geo *geo = &dev->geo;
839 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
840 struct pblk_line_meta *lm = &pblk->lm;
841 void *ppa_list_buf, *meta_list;
842 struct bio *bio;
843 struct ppa_addr *ppa_list;
844 struct nvm_rq rqd;
845 u64 paddr = line->emeta_ssec;
846 dma_addr_t dma_ppa_list, dma_meta_list;
847 int min = pblk->min_write_pgs;
848 int left_ppas = lm->emeta_sec[0];
849 int line_id = line->id;
850 int rq_ppas, rq_len;
851 int i, j;
852 int ret;
853
854 meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
855 &dma_meta_list);
856 if (!meta_list)
857 return -ENOMEM;
858
859 ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
860 dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
861
862next_rq:
863 memset(&rqd, 0, sizeof(struct nvm_rq));
864
865 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
866 rq_len = rq_ppas * geo->csecs;
867
868 bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
869 l_mg->emeta_alloc_type, GFP_KERNEL);
870 if (IS_ERR(bio)) {
871 ret = PTR_ERR(bio);
872 goto free_rqd_dma;
873 }
874
875 bio->bi_iter.bi_sector = 0; /* internal bio */
876 bio_set_op_attrs(bio, REQ_OP_READ, 0);
877
878 rqd.bio = bio;
879 rqd.meta_list = meta_list;
880 rqd.ppa_list = ppa_list_buf;
881 rqd.dma_meta_list = dma_meta_list;
882 rqd.dma_ppa_list = dma_ppa_list;
883 rqd.opcode = NVM_OP_PREAD;
884 rqd.nr_ppas = rq_ppas;
885 ppa_list = nvm_rq_to_ppa_list(&rqd);
886
887 for (i = 0; i < rqd.nr_ppas; ) {
888 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
889 int pos = pblk_ppa_to_pos(geo, ppa);
890
891 if (pblk_io_aligned(pblk, rq_ppas))
892 rqd.is_seq = 1;
893
894 while (test_bit(pos, line->blk_bitmap)) {
895 paddr += min;
896 if (pblk_boundary_paddr_checks(pblk, paddr)) {
897 bio_put(bio);
898 ret = -EINTR;
899 goto free_rqd_dma;
900 }
901
902 ppa = addr_to_gen_ppa(pblk, paddr, line_id);
903 pos = pblk_ppa_to_pos(geo, ppa);
904 }
905
906 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
907 bio_put(bio);
908 ret = -EINTR;
909 goto free_rqd_dma;
910 }
911
912 for (j = 0; j < min; j++, i++, paddr++)
913 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
914 }
915
916 ret = pblk_submit_io_sync(pblk, &rqd);
917 if (ret) {
918 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
919 bio_put(bio);
920 goto free_rqd_dma;
921 }
922
923 atomic_dec(&pblk->inflight_io);
924
925 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
926 pblk_log_read_err(pblk, &rqd);
927 ret = -EIO;
928 goto free_rqd_dma;
929 }
930
931 emeta_buf += rq_len;
932 left_ppas -= rq_ppas;
933 if (left_ppas)
934 goto next_rq;
935
936free_rqd_dma:
937 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
938 return ret;
939}
940
941static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
942 struct ppa_addr ppa)
943{
944 rqd->opcode = NVM_OP_ERASE;
945 rqd->ppa_addr = ppa;
946 rqd->nr_ppas = 1;
947 rqd->is_seq = 1;
948 rqd->bio = NULL;
949}
950
951static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
952{
953 struct nvm_rq rqd = {NULL};
954 int ret;
955
956 trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
957 PBLK_CHUNK_RESET_START);
958
959 pblk_setup_e_rq(pblk, &rqd, ppa);
960
961 /* The write thread schedules erases so that it minimizes disturbances
962 * with writes. Thus, there is no need to take the LUN semaphore.
963 */
964 ret = pblk_submit_io_sync(pblk, &rqd);
965 rqd.private = pblk;
966 __pblk_end_io_erase(pblk, &rqd);
967
968 return ret;
969}
970
971int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
972{
973 struct pblk_line_meta *lm = &pblk->lm;
974 struct ppa_addr ppa;
975 int ret, bit = -1;
976
977 /* Erase only good blocks, one at a time */
978 do {
979 spin_lock(&line->lock);
980 bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
981 bit + 1);
982 if (bit >= lm->blk_per_line) {
983 spin_unlock(&line->lock);
984 break;
985 }
986
987 ppa = pblk->luns[bit].bppa; /* set ch and lun */
988 ppa.a.blk = line->id;
989
990 atomic_dec(&line->left_eblks);
991 WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
992 spin_unlock(&line->lock);
993
994 ret = pblk_blk_erase_sync(pblk, ppa);
995 if (ret) {
996 pblk_err(pblk, "failed to erase line %d\n", line->id);
997 return ret;
998 }
999 } while (1);
1000
1001 return 0;
1002}
1003
1004static void pblk_line_setup_metadata(struct pblk_line *line,
1005 struct pblk_line_mgmt *l_mg,
1006 struct pblk_line_meta *lm)
1007{
1008 int meta_line;
1009
1010 lockdep_assert_held(&l_mg->free_lock);
1011
1012retry_meta:
1013 meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
1014 if (meta_line == PBLK_DATA_LINES) {
1015 spin_unlock(&l_mg->free_lock);
1016 io_schedule();
1017 spin_lock(&l_mg->free_lock);
1018 goto retry_meta;
1019 }
1020
1021 set_bit(meta_line, &l_mg->meta_bitmap);
1022 line->meta_line = meta_line;
1023
1024 line->smeta = l_mg->sline_meta[meta_line];
1025 line->emeta = l_mg->eline_meta[meta_line];
1026
1027 memset(line->smeta, 0, lm->smeta_len);
1028 memset(line->emeta->buf, 0, lm->emeta_len[0]);
1029
1030 line->emeta->mem = 0;
1031 atomic_set(&line->emeta->sync, 0);
1032}
1033
1034/* For now lines are always assumed full lines. Thus, smeta former and current
1035 * lun bitmaps are omitted.
1036 */
1037static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
1038 struct pblk_line *cur)
1039{
1040 struct nvm_tgt_dev *dev = pblk->dev;
1041 struct nvm_geo *geo = &dev->geo;
1042 struct pblk_line_meta *lm = &pblk->lm;
1043 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1044 struct pblk_emeta *emeta = line->emeta;
1045 struct line_emeta *emeta_buf = emeta->buf;
1046 struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
1047 int nr_blk_line;
1048
1049 /* After erasing the line, new bad blocks might appear and we risk
1050 * having an invalid line
1051 */
1052 nr_blk_line = lm->blk_per_line -
1053 bitmap_weight(line->blk_bitmap, lm->blk_per_line);
1054 if (nr_blk_line < lm->min_blk_line) {
1055 spin_lock(&l_mg->free_lock);
1056 spin_lock(&line->lock);
1057 line->state = PBLK_LINESTATE_BAD;
1058 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1059 line->state);
1060 spin_unlock(&line->lock);
1061
1062 list_add_tail(&line->list, &l_mg->bad_list);
1063 spin_unlock(&l_mg->free_lock);
1064
1065 pblk_debug(pblk, "line %d is bad\n", line->id);
1066
1067 return 0;
1068 }
1069
1070 /* Run-time metadata */
1071 line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1072
1073 /* Mark LUNs allocated in this line (all for now) */
1074 bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1075
1076 smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1077 guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
1078 smeta_buf->header.id = cpu_to_le32(line->id);
1079 smeta_buf->header.type = cpu_to_le16(line->type);
1080 smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1081 smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1082
1083 /* Start metadata */
1084 smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1085 smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1086
1087 /* Fill metadata among lines */
1088 if (cur) {
1089 memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1090 smeta_buf->prev_id = cpu_to_le32(cur->id);
1091 cur->emeta->buf->next_id = cpu_to_le32(line->id);
1092 } else {
1093 smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1094 }
1095
1096 /* All smeta must be set at this point */
1097 smeta_buf->header.crc = cpu_to_le32(
1098 pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1099 smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1100
1101 /* End metadata */
1102 memcpy(&emeta_buf->header, &smeta_buf->header,
1103 sizeof(struct line_header));
1104
1105 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1106 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1107 emeta_buf->header.crc = cpu_to_le32(
1108 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1109
1110 emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1111 emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1112 emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1113 emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1114 emeta_buf->crc = cpu_to_le32(0);
1115 emeta_buf->prev_id = smeta_buf->prev_id;
1116
1117 return 1;
1118}
1119
1120static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1121{
1122 struct pblk_line_meta *lm = &pblk->lm;
1123 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1124
1125 line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1126 if (!line->map_bitmap)
1127 return -ENOMEM;
1128
1129 memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1130
1131 /* will be initialized using bb info from map_bitmap */
1132 line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1133 if (!line->invalid_bitmap) {
1134 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1135 line->map_bitmap = NULL;
1136 return -ENOMEM;
1137 }
1138
1139 return 0;
1140}
1141
1142/* For now lines are always assumed full lines. Thus, smeta former and current
1143 * lun bitmaps are omitted.
1144 */
1145static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1146 int init)
1147{
1148 struct nvm_tgt_dev *dev = pblk->dev;
1149 struct nvm_geo *geo = &dev->geo;
1150 struct pblk_line_meta *lm = &pblk->lm;
1151 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1152 u64 off;
1153 int bit = -1;
1154 int emeta_secs;
1155
1156 line->sec_in_line = lm->sec_per_line;
1157
1158 /* Capture bad block information on line mapping bitmaps */
1159 while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1160 bit + 1)) < lm->blk_per_line) {
1161 off = bit * geo->ws_opt;
1162 bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1163 lm->sec_per_line);
1164 bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1165 lm->sec_per_line);
1166 line->sec_in_line -= geo->clba;
1167 }
1168
1169 /* Mark smeta metadata sectors as bad sectors */
1170 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1171 off = bit * geo->ws_opt;
1172 bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1173 line->sec_in_line -= lm->smeta_sec;
1174 line->cur_sec = off + lm->smeta_sec;
1175
1176 if (init && pblk_line_smeta_write(pblk, line, off)) {
1177 pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1178 return 0;
1179 }
1180
1181 bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1182
1183 /* Mark emeta metadata sectors as bad sectors. We need to consider bad
1184 * blocks to make sure that there are enough sectors to store emeta
1185 */
1186 emeta_secs = lm->emeta_sec[0];
1187 off = lm->sec_per_line;
1188 while (emeta_secs) {
1189 off -= geo->ws_opt;
1190 if (!test_bit(off, line->invalid_bitmap)) {
1191 bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1192 emeta_secs -= geo->ws_opt;
1193 }
1194 }
1195
1196 line->emeta_ssec = off;
1197 line->sec_in_line -= lm->emeta_sec[0];
1198 line->nr_valid_lbas = 0;
1199 line->left_msecs = line->sec_in_line;
1200 *line->vsc = cpu_to_le32(line->sec_in_line);
1201
1202 if (lm->sec_per_line - line->sec_in_line !=
1203 bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1204 spin_lock(&line->lock);
1205 line->state = PBLK_LINESTATE_BAD;
1206 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1207 line->state);
1208 spin_unlock(&line->lock);
1209
1210 list_add_tail(&line->list, &l_mg->bad_list);
1211 pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1212
1213 return 0;
1214 }
1215
1216 return 1;
1217}
1218
1219static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1220{
1221 struct pblk_line_meta *lm = &pblk->lm;
1222 struct nvm_tgt_dev *dev = pblk->dev;
1223 struct nvm_geo *geo = &dev->geo;
1224 int blk_to_erase = atomic_read(&line->blk_in_line);
1225 int i;
1226
1227 for (i = 0; i < lm->blk_per_line; i++) {
1228 struct pblk_lun *rlun = &pblk->luns[i];
1229 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1230 int state = line->chks[pos].state;
1231
1232 /* Free chunks should not be erased */
1233 if (state & NVM_CHK_ST_FREE) {
1234 set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1235 line->erase_bitmap);
1236 blk_to_erase--;
1237 }
1238 }
1239
1240 return blk_to_erase;
1241}
1242
1243static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1244{
1245 struct pblk_line_meta *lm = &pblk->lm;
1246 int blk_in_line = atomic_read(&line->blk_in_line);
1247 int blk_to_erase;
1248
1249 /* Bad blocks do not need to be erased */
1250 bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1251
1252 spin_lock(&line->lock);
1253
1254 /* If we have not written to this line, we need to mark free chunks
1255 * as already erased
1256 */
1257 if (line->state == PBLK_LINESTATE_NEW) {
1258 blk_to_erase = pblk_prepare_new_line(pblk, line);
1259 line->state = PBLK_LINESTATE_FREE;
1260 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1261 line->state);
1262 } else {
1263 blk_to_erase = blk_in_line;
1264 }
1265
1266 if (blk_in_line < lm->min_blk_line) {
1267 spin_unlock(&line->lock);
1268 return -EAGAIN;
1269 }
1270
1271 if (line->state != PBLK_LINESTATE_FREE) {
1272 WARN(1, "pblk: corrupted line %d, state %d\n",
1273 line->id, line->state);
1274 spin_unlock(&line->lock);
1275 return -EINTR;
1276 }
1277
1278 line->state = PBLK_LINESTATE_OPEN;
1279 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1280 line->state);
1281
1282 atomic_set(&line->left_eblks, blk_to_erase);
1283 atomic_set(&line->left_seblks, blk_to_erase);
1284
1285 line->meta_distance = lm->meta_distance;
1286 spin_unlock(&line->lock);
1287
1288 kref_init(&line->ref);
1289 atomic_set(&line->sec_to_update, 0);
1290
1291 return 0;
1292}
1293
1294/* Line allocations in the recovery path are always single threaded */
1295int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1296{
1297 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1298 int ret;
1299
1300 spin_lock(&l_mg->free_lock);
1301 l_mg->data_line = line;
1302 list_del(&line->list);
1303
1304 ret = pblk_line_prepare(pblk, line);
1305 if (ret) {
1306 list_add(&line->list, &l_mg->free_list);
1307 spin_unlock(&l_mg->free_lock);
1308 return ret;
1309 }
1310 spin_unlock(&l_mg->free_lock);
1311
1312 ret = pblk_line_alloc_bitmaps(pblk, line);
1313 if (ret)
1314 goto fail;
1315
1316 if (!pblk_line_init_bb(pblk, line, 0)) {
1317 ret = -EINTR;
1318 goto fail;
1319 }
1320
1321 pblk_rl_free_lines_dec(&pblk->rl, line, true);
1322 return 0;
1323
1324fail:
1325 spin_lock(&l_mg->free_lock);
1326 list_add(&line->list, &l_mg->free_list);
1327 spin_unlock(&l_mg->free_lock);
1328
1329 return ret;
1330}
1331
1332void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1333{
1334 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1335
1336 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1337 line->map_bitmap = NULL;
1338 line->smeta = NULL;
1339 line->emeta = NULL;
1340}
1341
1342static void pblk_line_reinit(struct pblk_line *line)
1343{
1344 *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1345
1346 line->map_bitmap = NULL;
1347 line->invalid_bitmap = NULL;
1348 line->smeta = NULL;
1349 line->emeta = NULL;
1350}
1351
1352void pblk_line_free(struct pblk_line *line)
1353{
1354 struct pblk *pblk = line->pblk;
1355 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1356
1357 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1358 mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
1359
1360 pblk_line_reinit(line);
1361}
1362
1363struct pblk_line *pblk_line_get(struct pblk *pblk)
1364{
1365 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1366 struct pblk_line_meta *lm = &pblk->lm;
1367 struct pblk_line *line;
1368 int ret, bit;
1369
1370 lockdep_assert_held(&l_mg->free_lock);
1371
1372retry:
1373 if (list_empty(&l_mg->free_list)) {
1374 pblk_err(pblk, "no free lines\n");
1375 return NULL;
1376 }
1377
1378 line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1379 list_del(&line->list);
1380 l_mg->nr_free_lines--;
1381
1382 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1383 if (unlikely(bit >= lm->blk_per_line)) {
1384 spin_lock(&line->lock);
1385 line->state = PBLK_LINESTATE_BAD;
1386 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1387 line->state);
1388 spin_unlock(&line->lock);
1389
1390 list_add_tail(&line->list, &l_mg->bad_list);
1391
1392 pblk_debug(pblk, "line %d is bad\n", line->id);
1393 goto retry;
1394 }
1395
1396 ret = pblk_line_prepare(pblk, line);
1397 if (ret) {
1398 switch (ret) {
1399 case -EAGAIN:
1400 list_add(&line->list, &l_mg->bad_list);
1401 goto retry;
1402 case -EINTR:
1403 list_add(&line->list, &l_mg->corrupt_list);
1404 goto retry;
1405 default:
1406 pblk_err(pblk, "failed to prepare line %d\n", line->id);
1407 list_add(&line->list, &l_mg->free_list);
1408 l_mg->nr_free_lines++;
1409 return NULL;
1410 }
1411 }
1412
1413 return line;
1414}
1415
1416static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1417 struct pblk_line *line)
1418{
1419 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1420 struct pblk_line *retry_line;
1421
1422retry:
1423 spin_lock(&l_mg->free_lock);
1424 retry_line = pblk_line_get(pblk);
1425 if (!retry_line) {
1426 l_mg->data_line = NULL;
1427 spin_unlock(&l_mg->free_lock);
1428 return NULL;
1429 }
1430
1431 retry_line->map_bitmap = line->map_bitmap;
1432 retry_line->invalid_bitmap = line->invalid_bitmap;
1433 retry_line->smeta = line->smeta;
1434 retry_line->emeta = line->emeta;
1435 retry_line->meta_line = line->meta_line;
1436
1437 pblk_line_reinit(line);
1438
1439 l_mg->data_line = retry_line;
1440 spin_unlock(&l_mg->free_lock);
1441
1442 pblk_rl_free_lines_dec(&pblk->rl, line, false);
1443
1444 if (pblk_line_erase(pblk, retry_line))
1445 goto retry;
1446
1447 return retry_line;
1448}
1449
1450static void pblk_set_space_limit(struct pblk *pblk)
1451{
1452 struct pblk_rl *rl = &pblk->rl;
1453
1454 atomic_set(&rl->rb_space, 0);
1455}
1456
1457struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1458{
1459 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1460 struct pblk_line *line;
1461
1462 spin_lock(&l_mg->free_lock);
1463 line = pblk_line_get(pblk);
1464 if (!line) {
1465 spin_unlock(&l_mg->free_lock);
1466 return NULL;
1467 }
1468
1469 line->seq_nr = l_mg->d_seq_nr++;
1470 line->type = PBLK_LINETYPE_DATA;
1471 l_mg->data_line = line;
1472
1473 pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1474
1475 /* Allocate next line for preparation */
1476 l_mg->data_next = pblk_line_get(pblk);
1477 if (!l_mg->data_next) {
1478 /* If we cannot get a new line, we need to stop the pipeline.
1479 * Only allow as many writes in as we can store safely and then
1480 * fail gracefully
1481 */
1482 pblk_set_space_limit(pblk);
1483
1484 l_mg->data_next = NULL;
1485 } else {
1486 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1487 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1488 }
1489 spin_unlock(&l_mg->free_lock);
1490
1491 if (pblk_line_alloc_bitmaps(pblk, line))
1492 return NULL;
1493
1494 if (pblk_line_erase(pblk, line)) {
1495 line = pblk_line_retry(pblk, line);
1496 if (!line)
1497 return NULL;
1498 }
1499
1500retry_setup:
1501 if (!pblk_line_init_metadata(pblk, line, NULL)) {
1502 line = pblk_line_retry(pblk, line);
1503 if (!line)
1504 return NULL;
1505
1506 goto retry_setup;
1507 }
1508
1509 if (!pblk_line_init_bb(pblk, line, 1)) {
1510 line = pblk_line_retry(pblk, line);
1511 if (!line)
1512 return NULL;
1513
1514 goto retry_setup;
1515 }
1516
1517 pblk_rl_free_lines_dec(&pblk->rl, line, true);
1518
1519 return line;
1520}
1521
1522void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1523{
1524 struct pblk_line *line;
1525
1526 line = pblk_ppa_to_line(pblk, ppa);
1527 kref_put(&line->ref, pblk_line_put_wq);
1528}
1529
1530void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1531{
1532 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1533 int i;
1534
1535 for (i = 0; i < rqd->nr_ppas; i++)
1536 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1537}
1538
1539static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1540{
1541 lockdep_assert_held(&pblk->l_mg.free_lock);
1542
1543 pblk_set_space_limit(pblk);
1544 pblk->state = PBLK_STATE_STOPPING;
1545 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1546}
1547
1548static void pblk_line_close_meta_sync(struct pblk *pblk)
1549{
1550 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1551 struct pblk_line_meta *lm = &pblk->lm;
1552 struct pblk_line *line, *tline;
1553 LIST_HEAD(list);
1554
1555 spin_lock(&l_mg->close_lock);
1556 if (list_empty(&l_mg->emeta_list)) {
1557 spin_unlock(&l_mg->close_lock);
1558 return;
1559 }
1560
1561 list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1562 spin_unlock(&l_mg->close_lock);
1563
1564 list_for_each_entry_safe(line, tline, &list, list) {
1565 struct pblk_emeta *emeta = line->emeta;
1566
1567 while (emeta->mem < lm->emeta_len[0]) {
1568 int ret;
1569
1570 ret = pblk_submit_meta_io(pblk, line);
1571 if (ret) {
1572 pblk_err(pblk, "sync meta line %d failed (%d)\n",
1573 line->id, ret);
1574 return;
1575 }
1576 }
1577 }
1578
1579 pblk_wait_for_meta(pblk);
1580 flush_workqueue(pblk->close_wq);
1581}
1582
1583void __pblk_pipeline_flush(struct pblk *pblk)
1584{
1585 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1586 int ret;
1587
1588 spin_lock(&l_mg->free_lock);
1589 if (pblk->state == PBLK_STATE_RECOVERING ||
1590 pblk->state == PBLK_STATE_STOPPED) {
1591 spin_unlock(&l_mg->free_lock);
1592 return;
1593 }
1594 pblk->state = PBLK_STATE_RECOVERING;
1595 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1596 spin_unlock(&l_mg->free_lock);
1597
1598 pblk_flush_writer(pblk);
1599 pblk_wait_for_meta(pblk);
1600
1601 ret = pblk_recov_pad(pblk);
1602 if (ret) {
1603 pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1604 return;
1605 }
1606
1607 flush_workqueue(pblk->bb_wq);
1608 pblk_line_close_meta_sync(pblk);
1609}
1610
1611void __pblk_pipeline_stop(struct pblk *pblk)
1612{
1613 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1614
1615 spin_lock(&l_mg->free_lock);
1616 pblk->state = PBLK_STATE_STOPPED;
1617 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1618 l_mg->data_line = NULL;
1619 l_mg->data_next = NULL;
1620 spin_unlock(&l_mg->free_lock);
1621}
1622
1623void pblk_pipeline_stop(struct pblk *pblk)
1624{
1625 __pblk_pipeline_flush(pblk);
1626 __pblk_pipeline_stop(pblk);
1627}
1628
1629struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1630{
1631 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1632 struct pblk_line *cur, *new = NULL;
1633 unsigned int left_seblks;
1634
1635 new = l_mg->data_next;
1636 if (!new)
1637 goto out;
1638
1639 spin_lock(&l_mg->free_lock);
1640 cur = l_mg->data_line;
1641 l_mg->data_line = new;
1642
1643 pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1644 spin_unlock(&l_mg->free_lock);
1645
1646retry_erase:
1647 left_seblks = atomic_read(&new->left_seblks);
1648 if (left_seblks) {
1649 /* If line is not fully erased, erase it */
1650 if (atomic_read(&new->left_eblks)) {
1651 if (pblk_line_erase(pblk, new))
1652 goto out;
1653 } else {
1654 io_schedule();
1655 }
1656 goto retry_erase;
1657 }
1658
1659 if (pblk_line_alloc_bitmaps(pblk, new))
1660 return NULL;
1661
1662retry_setup:
1663 if (!pblk_line_init_metadata(pblk, new, cur)) {
1664 new = pblk_line_retry(pblk, new);
1665 if (!new)
1666 goto out;
1667
1668 goto retry_setup;
1669 }
1670
1671 if (!pblk_line_init_bb(pblk, new, 1)) {
1672 new = pblk_line_retry(pblk, new);
1673 if (!new)
1674 goto out;
1675
1676 goto retry_setup;
1677 }
1678
1679 pblk_rl_free_lines_dec(&pblk->rl, new, true);
1680
1681 /* Allocate next line for preparation */
1682 spin_lock(&l_mg->free_lock);
1683 l_mg->data_next = pblk_line_get(pblk);
1684 if (!l_mg->data_next) {
1685 /* If we cannot get a new line, we need to stop the pipeline.
1686 * Only allow as many writes in as we can store safely and then
1687 * fail gracefully
1688 */
1689 pblk_stop_writes(pblk, new);
1690 l_mg->data_next = NULL;
1691 } else {
1692 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1693 l_mg->data_next->type = PBLK_LINETYPE_DATA;
1694 }
1695 spin_unlock(&l_mg->free_lock);
1696
1697out:
1698 return new;
1699}
1700
1701static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1702{
1703 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1704 struct pblk_gc *gc = &pblk->gc;
1705
1706 spin_lock(&line->lock);
1707 WARN_ON(line->state != PBLK_LINESTATE_GC);
1708 if (line->w_err_gc->has_gc_err) {
1709 spin_unlock(&line->lock);
1710 pblk_err(pblk, "line %d had errors during GC\n", line->id);
1711 pblk_put_line_back(pblk, line);
1712 line->w_err_gc->has_gc_err = 0;
1713 return;
1714 }
1715
1716 line->state = PBLK_LINESTATE_FREE;
1717 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1718 line->state);
1719 line->gc_group = PBLK_LINEGC_NONE;
1720 pblk_line_free(line);
1721
1722 if (line->w_err_gc->has_write_err) {
1723 pblk_rl_werr_line_out(&pblk->rl);
1724 line->w_err_gc->has_write_err = 0;
1725 }
1726
1727 spin_unlock(&line->lock);
1728 atomic_dec(&gc->pipeline_gc);
1729
1730 spin_lock(&l_mg->free_lock);
1731 list_add_tail(&line->list, &l_mg->free_list);
1732 l_mg->nr_free_lines++;
1733 spin_unlock(&l_mg->free_lock);
1734
1735 pblk_rl_free_lines_inc(&pblk->rl, line);
1736}
1737
1738static void pblk_line_put_ws(struct work_struct *work)
1739{
1740 struct pblk_line_ws *line_put_ws = container_of(work,
1741 struct pblk_line_ws, ws);
1742 struct pblk *pblk = line_put_ws->pblk;
1743 struct pblk_line *line = line_put_ws->line;
1744
1745 __pblk_line_put(pblk, line);
1746 mempool_free(line_put_ws, &pblk->gen_ws_pool);
1747}
1748
1749void pblk_line_put(struct kref *ref)
1750{
1751 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1752 struct pblk *pblk = line->pblk;
1753
1754 __pblk_line_put(pblk, line);
1755}
1756
1757void pblk_line_put_wq(struct kref *ref)
1758{
1759 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1760 struct pblk *pblk = line->pblk;
1761 struct pblk_line_ws *line_put_ws;
1762
1763 line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1764 if (!line_put_ws)
1765 return;
1766
1767 line_put_ws->pblk = pblk;
1768 line_put_ws->line = line;
1769 line_put_ws->priv = NULL;
1770
1771 INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1772 queue_work(pblk->r_end_wq, &line_put_ws->ws);
1773}
1774
1775int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1776{
1777 struct nvm_rq *rqd;
1778 int err;
1779
1780 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1781
1782 pblk_setup_e_rq(pblk, rqd, ppa);
1783
1784 rqd->end_io = pblk_end_io_erase;
1785 rqd->private = pblk;
1786
1787 trace_pblk_chunk_reset(pblk_disk_name(pblk),
1788 &ppa, PBLK_CHUNK_RESET_START);
1789
1790 /* The write thread schedules erases so that it minimizes disturbances
1791 * with writes. Thus, there is no need to take the LUN semaphore.
1792 */
1793 err = pblk_submit_io(pblk, rqd);
1794 if (err) {
1795 struct nvm_tgt_dev *dev = pblk->dev;
1796 struct nvm_geo *geo = &dev->geo;
1797
1798 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1799 pblk_ppa_to_line_id(ppa),
1800 pblk_ppa_to_pos(geo, ppa));
1801 }
1802
1803 return err;
1804}
1805
1806struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1807{
1808 return pblk->l_mg.data_line;
1809}
1810
1811/* For now, always erase next line */
1812struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1813{
1814 return pblk->l_mg.data_next;
1815}
1816
1817int pblk_line_is_full(struct pblk_line *line)
1818{
1819 return (line->left_msecs == 0);
1820}
1821
1822static void pblk_line_should_sync_meta(struct pblk *pblk)
1823{
1824 if (pblk_rl_is_limit(&pblk->rl))
1825 pblk_line_close_meta_sync(pblk);
1826}
1827
1828void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1829{
1830 struct nvm_tgt_dev *dev = pblk->dev;
1831 struct nvm_geo *geo = &dev->geo;
1832 struct pblk_line_meta *lm = &pblk->lm;
1833 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1834 struct list_head *move_list;
1835 int i;
1836
1837#ifdef CONFIG_NVM_PBLK_DEBUG
1838 WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1839 "pblk: corrupt closed line %d\n", line->id);
1840#endif
1841
1842 spin_lock(&l_mg->free_lock);
1843 WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1844 spin_unlock(&l_mg->free_lock);
1845
1846 spin_lock(&l_mg->gc_lock);
1847 spin_lock(&line->lock);
1848 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1849 line->state = PBLK_LINESTATE_CLOSED;
1850 move_list = pblk_line_gc_list(pblk, line);
1851 list_add_tail(&line->list, move_list);
1852
1853 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1854 line->map_bitmap = NULL;
1855 line->smeta = NULL;
1856 line->emeta = NULL;
1857
1858 for (i = 0; i < lm->blk_per_line; i++) {
1859 struct pblk_lun *rlun = &pblk->luns[i];
1860 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1861 int state = line->chks[pos].state;
1862
1863 if (!(state & NVM_CHK_ST_OFFLINE))
1864 state = NVM_CHK_ST_CLOSED;
1865 }
1866
1867 spin_unlock(&line->lock);
1868 spin_unlock(&l_mg->gc_lock);
1869
1870 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1871 line->state);
1872}
1873
1874void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1875{
1876 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1877 struct pblk_line_meta *lm = &pblk->lm;
1878 struct pblk_emeta *emeta = line->emeta;
1879 struct line_emeta *emeta_buf = emeta->buf;
1880 struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1881
1882 /* No need for an exact vsc value; avoid a big line lock and take an approximation. */
1883 memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1884 memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1885
1886 wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1887 wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1888 wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1889
1890 if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1891 emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1892 guid_copy((guid_t *)&emeta_buf->header.uuid,
1893 &pblk->instance_uuid);
1894 emeta_buf->header.id = cpu_to_le32(line->id);
1895 emeta_buf->header.type = cpu_to_le16(line->type);
1896 emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1897 emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1898 emeta_buf->header.crc = cpu_to_le32(
1899 pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1900 }
1901
1902 emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1903 emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1904
1905 spin_lock(&l_mg->close_lock);
1906 spin_lock(&line->lock);
1907
1908 /* Update the in-memory start address for emeta, in case it has
1909 * shifted due to write errors
1910 */
1911 if (line->emeta_ssec != line->cur_sec)
1912 line->emeta_ssec = line->cur_sec;
1913
1914 list_add_tail(&line->list, &l_mg->emeta_list);
1915 spin_unlock(&line->lock);
1916 spin_unlock(&l_mg->close_lock);
1917
1918 pblk_line_should_sync_meta(pblk);
1919}
1920
1921static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1922{
1923 struct pblk_line_meta *lm = &pblk->lm;
1924 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1925 unsigned int lba_list_size = lm->emeta_len[2];
1926 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1927 struct pblk_emeta *emeta = line->emeta;
1928
1929 w_err_gc->lba_list = pblk_malloc(lba_list_size,
1930 l_mg->emeta_alloc_type, GFP_KERNEL);
1931 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1932 lba_list_size);
1933}
1934
1935void pblk_line_close_ws(struct work_struct *work)
1936{
1937 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1938 ws);
1939 struct pblk *pblk = line_ws->pblk;
1940 struct pblk_line *line = line_ws->line;
1941 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1942
1943 /* Write errors make the emeta start address stored in smeta invalid,
1944 * so keep a copy of the lba list until we've gc'd the line
1945 */
1946 if (w_err_gc->has_write_err)
1947 pblk_save_lba_list(pblk, line);
1948
1949 pblk_line_close(pblk, line);
1950 mempool_free(line_ws, &pblk->gen_ws_pool);
1951}
1952
1953void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1954 void (*work)(struct work_struct *), gfp_t gfp_mask,
1955 struct workqueue_struct *wq)
1956{
1957 struct pblk_line_ws *line_ws;
1958
1959 line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1960
1961 line_ws->pblk = pblk;
1962 line_ws->line = line;
1963 line_ws->priv = priv;
1964
1965 INIT_WORK(&line_ws->ws, work);
1966 queue_work(wq, &line_ws->ws);
1967}
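
/*
 * Illustrative sketch (not part of the driver): completion paths use this
 * helper to defer work that may sleep, e.g. closing a fully written line:
 *
 *	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
 *			GFP_ATOMIC, pblk->close_wq);
 *
 * pblk_mark_bb() at the top of this file uses the same helper to defer
 * marking a grown bad block to pblk->bb_wq.
 */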
1968
1969static void __pblk_down_chunk(struct pblk *pblk, int pos)
1970{
1971 struct pblk_lun *rlun = &pblk->luns[pos];
1972 int ret;
1973
1974 /*
1975 * Only send one inflight I/O per LUN. Since we map at a page
1976 * granularity, all ppas in the I/O will map to the same LUN
1977 */
1978
1979 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1980 if (ret == -ETIME || ret == -EINTR)
1981 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1982 -ret);
1983}
1984
1985void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1986{
1987 struct nvm_tgt_dev *dev = pblk->dev;
1988 struct nvm_geo *geo = &dev->geo;
1989 int pos = pblk_ppa_to_pos(geo, ppa);
1990
1991 __pblk_down_chunk(pblk, pos);
1992}
1993
1994void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1995 unsigned long *lun_bitmap)
1996{
1997 struct nvm_tgt_dev *dev = pblk->dev;
1998 struct nvm_geo *geo = &dev->geo;
1999 int pos = pblk_ppa_to_pos(geo, ppa);
2000
2001 /* If the LUN has been locked for this same request, do not attempt to
2002 * lock it again
2003 */
2004 if (test_and_set_bit(pos, lun_bitmap))
2005 return;
2006
2007 __pblk_down_chunk(pblk, pos);
2008}
2009
2010void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
2011{
2012 struct nvm_tgt_dev *dev = pblk->dev;
2013 struct nvm_geo *geo = &dev->geo;
2014 struct pblk_lun *rlun;
2015 int pos = pblk_ppa_to_pos(geo, ppa);
2016
2017 rlun = &pblk->luns[pos];
2018 up(&rlun->wr_sem);
2019}
2020
2021void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
2022{
2023 struct nvm_tgt_dev *dev = pblk->dev;
2024 struct nvm_geo *geo = &dev->geo;
2025 struct pblk_lun *rlun;
2026 int num_lun = geo->all_luns;
2027 int bit = -1;
2028
2029 while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
2030 rlun = &pblk->luns[bit];
2031 up(&rlun->wr_sem);
2032 }
2033}
2034
2035void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2036{
2037 struct ppa_addr ppa_l2p;
2038
2039 /* logic error: lba out-of-bounds. Ignore update */
2040 if (!(lba < pblk->capacity)) {
2041 WARN(1, "pblk: corrupted L2P map request\n");
2042 return;
2043 }
2044
2045 spin_lock(&pblk->trans_lock);
2046 ppa_l2p = pblk_trans_map_get(pblk, lba);
2047
2048 if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2049 pblk_map_invalidate(pblk, ppa_l2p);
2050
2051 pblk_trans_map_set(pblk, lba, ppa);
2052 spin_unlock(&pblk->trans_lock);
2053}
2054
2055void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2056{
2057
2058#ifdef CONFIG_NVM_PBLK_DEBUG
2059 /* Callers must ensure that the ppa points to a cache address */
2060 BUG_ON(!pblk_addr_in_cache(ppa));
2061 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
2062#endif
2063
2064 pblk_update_map(pblk, lba, ppa);
2065}
2066
2067int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
2068 struct pblk_line *gc_line, u64 paddr_gc)
2069{
2070 struct ppa_addr ppa_l2p, ppa_gc;
2071 int ret = 1;
2072
2073#ifdef CONFIG_NVM_PBLK_DEBUG
2074 /* Callers must ensure that the ppa points to a cache address */
2075 BUG_ON(!pblk_addr_in_cache(ppa_new));
2076 BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
2077#endif
2078
2079 /* logic error: lba out-of-bounds. Ignore update */
2080 if (!(lba < pblk->capacity)) {
2081 WARN(1, "pblk: corrupted L2P map request\n");
2082 return 0;
2083 }
2084
2085 spin_lock(&pblk->trans_lock);
2086 ppa_l2p = pblk_trans_map_get(pblk, lba);
2087 ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2088
2089 if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2090 spin_lock(&gc_line->lock);
2091 WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2092 "pblk: corrupted GC update");
2093 spin_unlock(&gc_line->lock);
2094
2095 ret = 0;
2096 goto out;
2097 }
2098
2099 pblk_trans_map_set(pblk, lba, ppa_new);
2100out:
2101 spin_unlock(&pblk->trans_lock);
2102 return ret;
2103}
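
/*
 * Illustrative scenario (hypothetical values): GC copies lba 42 from paddr_gc
 * in gc_line into the write cache at ppa_new. If the host rewrote lba 42 in
 * the meantime, the L2P entry no longer equals ppa_gc, so the stale GC copy
 * is discarded (return 0); the original sector must then already be flagged
 * in gc_line->invalid_bitmap, which the WARN above asserts. Only if the
 * mapping is unchanged is the L2P entry redirected to ppa_new.
 */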
2104
2105void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2106 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2107{
2108 struct ppa_addr ppa_l2p;
2109
2110#ifdef CONFIG_NVM_PBLK_DEBUG
2111 /* Callers must ensure that the ppa points to a device address */
2112 BUG_ON(pblk_addr_in_cache(ppa_mapped));
2113#endif
2114 /* Invalidate and discard padded entries */
2115 if (lba == ADDR_EMPTY) {
2116 atomic64_inc(&pblk->pad_wa);
2117#ifdef CONFIG_NVM_PBLK_DEBUG
2118 atomic_long_inc(&pblk->padded_wb);
2119#endif
2120 if (!pblk_ppa_empty(ppa_mapped))
2121 pblk_map_invalidate(pblk, ppa_mapped);
2122 return;
2123 }
2124
2125 /* logic error: lba out-of-bounds. Ignore update */
2126 if (!(lba < pblk->capacity)) {
2127 WARN(1, "pblk: corrupted L2P map request\n");
2128 return;
2129 }
2130
2131 spin_lock(&pblk->trans_lock);
2132 ppa_l2p = pblk_trans_map_get(pblk, lba);
2133
2134 /* Do not update L2P if the cacheline has been updated. In this case,
2135 * the mapped ppa must be invalidated
2136 */
2137 if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2138 if (!pblk_ppa_empty(ppa_mapped))
2139 pblk_map_invalidate(pblk, ppa_mapped);
2140 goto out;
2141 }
2142
2143#ifdef CONFIG_NVM_PBLK_DEBUG
2144 WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2145#endif
2146
2147 pblk_trans_map_set(pblk, lba, ppa_mapped);
2148out:
2149 spin_unlock(&pblk->trans_lock);
2150}
2151
2152int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2153 sector_t blba, int nr_secs, bool *from_cache)
2154{
2155 int i;
2156
2157 spin_lock(&pblk->trans_lock);
2158 for (i = 0; i < nr_secs; i++) {
2159 struct ppa_addr ppa;
2160
2161 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2162
2163 /* If the L2P entry maps to a line, the reference is valid */
2164 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2165 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2166
2167 if (i > 0 && *from_cache)
2168 break;
2169 *from_cache = false;
2170
2171 kref_get(&line->ref);
2172 } else {
2173 if (i > 0 && !*from_cache)
2174 break;
2175 *from_cache = true;
2176 }
2177 }
2178 spin_unlock(&pblk->trans_lock);
2179 return i;
2180}
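
/*
 * Illustrative sketch (not part of the driver): the read path uses this to
 * grab a run of consecutive LBAs that all resolve to the same domain (write
 * cache or device). For device-resident sectors a line kref is taken per
 * sector, which the caller must drop once it is done with the mapping, e.g.
 * via pblk_ppa_to_line_put() on each ppa. The snippet below only shows the
 * calling convention:
 *
 *	bool from_cache;
 *	int valid = pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs,
 *					&from_cache);
 *
 *	(the first "valid" sectors come from the write buffer if from_cache
 *	 is true, otherwise from the device; any remaining sectors need a
 *	 follow-up lookup)
 */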
2181
2182void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2183 u64 *lba_list, int nr_secs)
2184{
2185 u64 lba;
2186 int i;
2187
2188 spin_lock(&pblk->trans_lock);
2189 for (i = 0; i < nr_secs; i++) {
2190 lba = lba_list[i];
2191 if (lba != ADDR_EMPTY) {
2192 /* logic error: lba out-of-bounds. Ignore update */
2193 if (!(lba < pblk->capacity)) {
2194 WARN(1, "pblk: corrupted L2P map request\n");
2195 continue;
2196 }
2197 ppas[i] = pblk_trans_map_get(pblk, lba);
2198 }
2199 }
2200 spin_unlock(&pblk->trans_lock);
2201}
2202
2203void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2204{
2205 void *buffer;
2206
2207 if (pblk_is_oob_meta_supported(pblk)) {
2208 /* Just use OOB metadata buffer as always */
2209 buffer = rqd->meta_list;
2210 } else {
2211 /* We need to reuse the last page of the request (packed metadata)
2212 * in a similar way to traditional oob metadata
2213 */
2214 buffer = page_to_virt(
2215 rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2216 }
2217
2218 return buffer;
2219}
2220
2221void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2222{
2223 void *meta_list = rqd->meta_list;
2224 void *page;
2225 int i = 0;
2226
2227 if (pblk_is_oob_meta_supported(pblk))
2228 return;
2229
2230 page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2231 /* We need to fill oob meta buffer with data from packed metadata */
2232 for (; i < rqd->nr_ppas; i++)
2233 memcpy(pblk_get_meta(pblk, meta_list, i),
2234 page + (i * sizeof(struct pblk_sec_meta)),
2235 sizeof(struct pblk_sec_meta));
2236}