Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.7-rc6 743 lines 18 kB view raw
/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */

/*
 * Scan state for locating/maintaining system blocks across the device.
 * A "row" corresponds to one system block unit, backed by up to
 * MAX_BLKS_PR_SYSBLK flash blocks on one channel.
 */
struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;		/* number of rows in use (<= MAX_SYSBLKS) */
	int row;		/* row currently being scanned */
	int act_blk[MAX_SYSBLKS];	/* active block index within each row */

	int nr_ppas;		/* number of valid entries in ppas[] */
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

/* Flatten a (row, block-within-row) pair into an index of sysblk_scan.ppas */
static inline int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}

/*
 * Convert an on-media system block (big-endian fields) into the in-core,
 * CPU-endian nvm_sb_info representation.
 */
void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
	info->seqnr = be32_to_cpu(sb->seqnr);
	info->erase_cnt = be32_to_cpu(sb->erase_cnt);
	info->version = be16_to_cpu(sb->version);
	/* mmtype is a fixed-width field; strncpy pads with NULs up to
	 * NVM_MMTYPE_LEN and may leave it unterminated by design. */
	strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
	info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

/*
 * Convert the in-core nvm_sb_info into the on-media (big-endian) system
 * block layout, stamping the magic number.
 */
void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
	sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
	sb->seqnr = cpu_to_be32(info->seqnr);
	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
	sb->version = cpu_to_be16(info->version);
	/* fixed-width copy, same convention as nvm_sysblk_to_cpu() */
	strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
	sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

/*
 * Choose the channel anchors for each sysblk row. Returns the number of
 * rows (bounded by the device's channel count and MAX_SYSBLKS).
 */
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
	int i;

	for (i = 0; i < nr_rows; i++)
		sysblk_ppas[i].ppa = 0;

	/* if possible, place sysblk at first channel, middle channel and last
	 * channel of the device.
If not, create only one or two sys blocks 71 */ 72 switch (dev->nr_chnls) { 73 case 2: 74 sysblk_ppas[1].g.ch = 1; 75 /* fall-through */ 76 case 1: 77 sysblk_ppas[0].g.ch = 0; 78 break; 79 default: 80 sysblk_ppas[0].g.ch = 0; 81 sysblk_ppas[1].g.ch = dev->nr_chnls / 2; 82 sysblk_ppas[2].g.ch = dev->nr_chnls - 1; 83 break; 84 } 85 86 return nr_rows; 87} 88 89void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, 90 struct ppa_addr *sysblk_ppas) 91{ 92 memset(s, 0, sizeof(struct sysblk_scan)); 93 s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas); 94} 95 96static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa, 97 u8 *blks, int nr_blks, 98 struct sysblk_scan *s) 99{ 100 struct ppa_addr *sppa; 101 int i, blkid = 0; 102 103 nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); 104 if (nr_blks < 0) 105 return nr_blks; 106 107 for (i = 0; i < nr_blks; i++) { 108 if (blks[i] == NVM_BLK_T_HOST) 109 return -EEXIST; 110 111 if (blks[i] != NVM_BLK_T_FREE) 112 continue; 113 114 sppa = &s->ppas[scan_ppa_idx(s->row, blkid)]; 115 sppa->g.ch = ppa.g.ch; 116 sppa->g.lun = ppa.g.lun; 117 sppa->g.blk = i; 118 s->nr_ppas++; 119 blkid++; 120 121 pr_debug("nvm: use (%u %u %u) as sysblk\n", 122 sppa->g.ch, sppa->g.lun, sppa->g.blk); 123 if (blkid > MAX_BLKS_PR_SYSBLK - 1) 124 return 0; 125 } 126 127 pr_err("nvm: sysblk failed get sysblk\n"); 128 return -EINVAL; 129} 130 131static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa, 132 u8 *blks, int nr_blks, 133 struct sysblk_scan *s) 134{ 135 int i, nr_sysblk = 0; 136 137 nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); 138 if (nr_blks < 0) 139 return nr_blks; 140 141 for (i = 0; i < nr_blks; i++) { 142 if (blks[i] != NVM_BLK_T_HOST) 143 continue; 144 145 if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) { 146 pr_err("nvm: too many host blks\n"); 147 return -EINVAL; 148 } 149 150 ppa.g.blk = i; 151 152 s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa; 153 s->nr_ppas++; 154 nr_sysblk++; 155 } 156 157 
return 0; 158} 159 160static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s, 161 struct ppa_addr *ppas, int get_free) 162{ 163 int i, nr_blks, ret = 0; 164 u8 *blks; 165 166 s->nr_ppas = 0; 167 nr_blks = dev->blks_per_lun * dev->plane_mode; 168 169 blks = kmalloc(nr_blks, GFP_KERNEL); 170 if (!blks) 171 return -ENOMEM; 172 173 for (i = 0; i < s->nr_rows; i++) { 174 s->row = i; 175 176 ret = nvm_get_bb_tbl(dev, ppas[i], blks); 177 if (ret) { 178 pr_err("nvm: failed bb tbl for ppa (%u %u)\n", 179 ppas[i].g.ch, 180 ppas[i].g.blk); 181 goto err_get; 182 } 183 184 if (get_free) 185 ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks, 186 s); 187 else 188 ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks, 189 s); 190 191 if (ret) 192 goto err_get; 193 } 194 195err_get: 196 kfree(blks); 197 return ret; 198} 199 200/* 201 * scans a block for latest sysblk. 202 * Returns: 203 * 0 - newer sysblk not found. PPA is updated to latest page. 204 * 1 - newer sysblk found and stored in *cur. PPA is updated to 205 * next valid page. 206 * <0- error. 207 */ 208static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa, 209 struct nvm_system_block *sblk) 210{ 211 struct nvm_system_block *cur; 212 int pg, ret, found = 0; 213 214 /* the full buffer for a flash page is allocated. 
Only the first of it 215 * contains the system block information 216 */ 217 cur = kmalloc(dev->pfpg_size, GFP_KERNEL); 218 if (!cur) 219 return -ENOMEM; 220 221 /* perform linear scan through the block */ 222 for (pg = 0; pg < dev->lps_per_blk; pg++) { 223 ppa->g.pg = ppa_to_slc(dev, pg); 224 225 ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE, 226 cur, dev->pfpg_size); 227 if (ret) { 228 if (ret == NVM_RSP_ERR_EMPTYPAGE) { 229 pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n", 230 ppa->g.ch, 231 ppa->g.lun, 232 ppa->g.blk, 233 ppa->g.pg); 234 break; 235 } 236 pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)", 237 ret, 238 ppa->g.ch, 239 ppa->g.lun, 240 ppa->g.blk, 241 ppa->g.pg); 242 break; /* if we can't read a page, continue to the 243 * next blk 244 */ 245 } 246 247 if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) { 248 pr_debug("nvm: scan break for ppa (%u %u %u %u)\n", 249 ppa->g.ch, 250 ppa->g.lun, 251 ppa->g.blk, 252 ppa->g.pg); 253 break; /* last valid page already found */ 254 } 255 256 if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr)) 257 continue; 258 259 memcpy(sblk, cur, sizeof(struct nvm_system_block)); 260 found = 1; 261 } 262 263 kfree(cur); 264 265 return found; 266} 267 268static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type) 269{ 270 struct nvm_rq rqd; 271 int ret; 272 273 if (s->nr_ppas > dev->ops->max_phys_sect) { 274 pr_err("nvm: unable to update all sysblocks atomically\n"); 275 return -EINVAL; 276 } 277 278 memset(&rqd, 0, sizeof(struct nvm_rq)); 279 280 nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1); 281 nvm_generic_to_addr_mode(dev, &rqd); 282 283 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); 284 nvm_free_rqd_ppalist(dev, &rqd); 285 if (ret) { 286 pr_err("nvm: sysblk failed bb mark\n"); 287 return -EINVAL; 288 } 289 290 return 0; 291} 292 293static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, 294 struct sysblk_scan *s) 295{ 296 
struct nvm_system_block nvmsb; 297 void *buf; 298 int i, sect, ret = 0; 299 struct ppa_addr *ppas; 300 301 nvm_cpu_to_sysblk(&nvmsb, info); 302 303 buf = kzalloc(dev->pfpg_size, GFP_KERNEL); 304 if (!buf) 305 return -ENOMEM; 306 memcpy(buf, &nvmsb, sizeof(struct nvm_system_block)); 307 308 ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL); 309 if (!ppas) { 310 ret = -ENOMEM; 311 goto err; 312 } 313 314 /* Write and verify */ 315 for (i = 0; i < s->nr_rows; i++) { 316 ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])]; 317 318 pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n", 319 ppas[0].g.ch, 320 ppas[0].g.lun, 321 ppas[0].g.blk, 322 ppas[0].g.pg); 323 324 /* Expand to all sectors within a flash page */ 325 if (dev->sec_per_pg > 1) { 326 for (sect = 1; sect < dev->sec_per_pg; sect++) { 327 ppas[sect].ppa = ppas[0].ppa; 328 ppas[sect].g.sec = sect; 329 } 330 } 331 332 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE, 333 NVM_IO_SLC_MODE, buf, dev->pfpg_size); 334 if (ret) { 335 pr_err("nvm: sysblk failed program (%u %u %u)\n", 336 ppas[0].g.ch, 337 ppas[0].g.lun, 338 ppas[0].g.blk); 339 break; 340 } 341 342 ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD, 343 NVM_IO_SLC_MODE, buf, dev->pfpg_size); 344 if (ret) { 345 pr_err("nvm: sysblk failed read (%u %u %u)\n", 346 ppas[0].g.ch, 347 ppas[0].g.lun, 348 ppas[0].g.blk); 349 break; 350 } 351 352 if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) { 353 pr_err("nvm: sysblk failed verify (%u %u %u)\n", 354 ppas[0].g.ch, 355 ppas[0].g.lun, 356 ppas[0].g.blk); 357 ret = -EINVAL; 358 break; 359 } 360 } 361 362 kfree(ppas); 363err: 364 kfree(buf); 365 366 return ret; 367} 368 369static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s) 370{ 371 int i, ret; 372 unsigned long nxt_blk; 373 struct ppa_addr *ppa; 374 375 for (i = 0; i < s->nr_rows; i++) { 376 nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK; 377 ppa = &s->ppas[scan_ppa_idx(i, 
nxt_blk)]; 378 ppa->g.pg = ppa_to_slc(dev, 0); 379 380 ret = nvm_erase_ppa(dev, ppa, 1); 381 if (ret) 382 return ret; 383 384 s->act_blk[i] = nxt_blk; 385 } 386 387 return 0; 388} 389 390int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) 391{ 392 struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; 393 struct sysblk_scan s; 394 struct nvm_system_block *cur; 395 int i, j, found = 0; 396 int ret = -ENOMEM; 397 398 /* 399 * 1. setup sysblk locations 400 * 2. get bad block list 401 * 3. filter on host-specific (type 3) 402 * 4. iterate through all and find the highest seq nr. 403 * 5. return superblock information 404 */ 405 406 if (!dev->ops->get_bb_tbl) 407 return -EINVAL; 408 409 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); 410 411 mutex_lock(&dev->mlock); 412 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); 413 if (ret) 414 goto err_sysblk; 415 416 /* no sysblocks initialized */ 417 if (!s.nr_ppas) 418 goto err_sysblk; 419 420 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); 421 if (!cur) 422 goto err_sysblk; 423 424 /* find the latest block across all sysblocks */ 425 for (i = 0; i < s.nr_rows; i++) { 426 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { 427 struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)]; 428 429 ret = nvm_scan_block(dev, &ppa, cur); 430 if (ret > 0) 431 found = 1; 432 else if (ret < 0) 433 break; 434 } 435 } 436 437 nvm_sysblk_to_cpu(info, cur); 438 439 kfree(cur); 440err_sysblk: 441 mutex_unlock(&dev->mlock); 442 443 if (found) 444 return 1; 445 return ret; 446} 447 448int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new) 449{ 450 /* 1. for each latest superblock 451 * 2. if room 452 * a. write new flash page entry with the updated information 453 * 3. if no room 454 * a. find next available block on lun (linear search) 455 * if none, continue to next lun 456 * if none at all, report error. also report that it wasn't 457 * possible to write to all superblocks. 458 * c. write data to block. 
459 */ 460 struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; 461 struct sysblk_scan s; 462 struct nvm_system_block *cur; 463 int i, j, ppaidx, found = 0; 464 int ret = -ENOMEM; 465 466 if (!dev->ops->get_bb_tbl) 467 return -EINVAL; 468 469 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); 470 471 mutex_lock(&dev->mlock); 472 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); 473 if (ret) 474 goto err_sysblk; 475 476 cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); 477 if (!cur) 478 goto err_sysblk; 479 480 /* Get the latest sysblk for each sysblk row */ 481 for (i = 0; i < s.nr_rows; i++) { 482 found = 0; 483 for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { 484 ppaidx = scan_ppa_idx(i, j); 485 ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur); 486 if (ret > 0) { 487 s.act_blk[i] = j; 488 found = 1; 489 } else if (ret < 0) 490 break; 491 } 492 } 493 494 if (!found) { 495 pr_err("nvm: no valid sysblks found to update\n"); 496 ret = -EINVAL; 497 goto err_cur; 498 } 499 500 /* 501 * All sysblocks found. Check that they have same page id in their flash 502 * blocks 503 */ 504 for (i = 1; i < s.nr_rows; i++) { 505 struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])]; 506 struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])]; 507 508 if (l.g.pg != r.g.pg) { 509 pr_err("nvm: sysblks not on same page. Previous update failed.\n"); 510 ret = -EINVAL; 511 goto err_cur; 512 } 513 } 514 515 /* 516 * Check that there haven't been another update to the seqnr since we 517 * began 518 */ 519 if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) { 520 pr_err("nvm: seq is not sequential\n"); 521 ret = -EINVAL; 522 goto err_cur; 523 } 524 525 /* 526 * When all pages in a block has been written, a new block is selected 527 * and writing is performed on the new block. 
528 */ 529 if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg == 530 dev->lps_per_blk - 1) { 531 ret = nvm_prepare_new_sysblks(dev, &s); 532 if (ret) 533 goto err_cur; 534 } 535 536 ret = nvm_write_and_verify(dev, new, &s); 537err_cur: 538 kfree(cur); 539err_sysblk: 540 mutex_unlock(&dev->mlock); 541 542 return ret; 543} 544 545int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) 546{ 547 struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; 548 struct sysblk_scan s; 549 int ret; 550 551 /* 552 * 1. select master blocks and select first available blks 553 * 2. get bad block list 554 * 3. mark MAX_SYSBLKS block as host-based device allocated. 555 * 4. write and verify data to block 556 */ 557 558 if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl) 559 return -EINVAL; 560 561 if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) { 562 pr_err("nvm: memory does not support SLC access\n"); 563 return -EINVAL; 564 } 565 566 /* Index all sysblocks and mark them as host-driven */ 567 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); 568 569 mutex_lock(&dev->mlock); 570 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1); 571 if (ret) 572 goto err_mark; 573 574 ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST); 575 if (ret) 576 goto err_mark; 577 578 /* Write to the first block of each row */ 579 ret = nvm_write_and_verify(dev, info, &s); 580err_mark: 581 mutex_unlock(&dev->mlock); 582 return ret; 583} 584 585static int factory_nblks(int nblks) 586{ 587 /* Round up to nearest BITS_PER_LONG */ 588 return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); 589} 590 591static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa) 592{ 593 int nblks = factory_nblks(dev->blks_per_lun); 594 595 return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) / 596 BITS_PER_LONG; 597} 598 599static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa, 600 u8 *blks, int nr_blks, 601 unsigned long *blk_bitmap, int flags) 602{ 603 int i, lunoff; 604 605 
nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); 606 if (nr_blks < 0) 607 return nr_blks; 608 609 lunoff = factory_blk_offset(dev, ppa); 610 611 /* non-set bits correspond to the block must be erased */ 612 for (i = 0; i < nr_blks; i++) { 613 switch (blks[i]) { 614 case NVM_BLK_T_FREE: 615 if (flags & NVM_FACTORY_ERASE_ONLY_USER) 616 set_bit(i, &blk_bitmap[lunoff]); 617 break; 618 case NVM_BLK_T_HOST: 619 if (!(flags & NVM_FACTORY_RESET_HOST_BLKS)) 620 set_bit(i, &blk_bitmap[lunoff]); 621 break; 622 case NVM_BLK_T_GRWN_BAD: 623 if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS)) 624 set_bit(i, &blk_bitmap[lunoff]); 625 break; 626 default: 627 set_bit(i, &blk_bitmap[lunoff]); 628 break; 629 } 630 } 631 632 return 0; 633} 634 635static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list, 636 int max_ppas, unsigned long *blk_bitmap) 637{ 638 struct ppa_addr ppa; 639 int ch, lun, blkid, idx, done = 0, ppa_cnt = 0; 640 unsigned long *offset; 641 642 while (!done) { 643 done = 1; 644 nvm_for_each_lun_ppa(dev, ppa, ch, lun) { 645 idx = factory_blk_offset(dev, ppa); 646 offset = &blk_bitmap[idx]; 647 648 blkid = find_first_zero_bit(offset, 649 dev->blks_per_lun); 650 if (blkid >= dev->blks_per_lun) 651 continue; 652 set_bit(blkid, offset); 653 654 ppa.g.blk = blkid; 655 pr_debug("nvm: erase ppa (%u %u %u)\n", 656 ppa.g.ch, 657 ppa.g.lun, 658 ppa.g.blk); 659 660 erase_list[ppa_cnt] = ppa; 661 ppa_cnt++; 662 done = 0; 663 664 if (ppa_cnt == max_ppas) 665 return ppa_cnt; 666 } 667 } 668 669 return ppa_cnt; 670} 671 672static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap, 673 int flags) 674{ 675 struct ppa_addr ppa; 676 int ch, lun, nr_blks, ret = 0; 677 u8 *blks; 678 679 nr_blks = dev->blks_per_lun * dev->plane_mode; 680 blks = kmalloc(nr_blks, GFP_KERNEL); 681 if (!blks) 682 return -ENOMEM; 683 684 nvm_for_each_lun_ppa(dev, ppa, ch, lun) { 685 ret = nvm_get_bb_tbl(dev, ppa, blks); 686 if (ret) 687 pr_err("nvm: failed bb tbl for ch%u 
lun%u\n", 688 ppa.g.ch, ppa.g.blk); 689 690 ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap, 691 flags); 692 if (ret) 693 break; 694 } 695 696 kfree(blks); 697 return ret; 698} 699 700int nvm_dev_factory(struct nvm_dev *dev, int flags) 701{ 702 struct ppa_addr *ppas; 703 int ppa_cnt, ret = -ENOMEM; 704 int max_ppas = dev->ops->max_phys_sect / dev->nr_planes; 705 struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; 706 struct sysblk_scan s; 707 unsigned long *blk_bitmap; 708 709 blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns, 710 GFP_KERNEL); 711 if (!blk_bitmap) 712 return ret; 713 714 ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL); 715 if (!ppas) 716 goto err_blks; 717 718 /* create list of blks to be erased */ 719 ret = nvm_fact_select_blks(dev, blk_bitmap, flags); 720 if (ret) 721 goto err_ppas; 722 723 /* continue to erase until list of blks until empty */ 724 while ((ppa_cnt = 725 nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0) 726 nvm_erase_ppa(dev, ppas, ppa_cnt); 727 728 /* mark host reserved blocks free */ 729 if (flags & NVM_FACTORY_RESET_HOST_BLKS) { 730 nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); 731 mutex_lock(&dev->mlock); 732 ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); 733 if (!ret) 734 ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE); 735 mutex_unlock(&dev->mlock); 736 } 737err_ppas: 738 kfree(ppas); 739err_blks: 740 kfree(blk_bitmap); 741 return ret; 742} 743EXPORT_SYMBOL(nvm_dev_factory);