/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2	/* 2 blks with 256 pages and 3000 erases
				 * enables ~1.5M updates per sysblk unit
				 */

struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;
	int row;
	int act_blk[MAX_SYSBLKS];

	int nr_ppas;
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK]; /* all sysblks */
};
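
/*
 * sysblk_scan.ppas[] is laid out row-major: with MAX_SYSBLKS = 3 and
 * MAX_BLKS_PR_SYSBLK = 2, row 0 occupies slots 0-1, row 1 slots 2-3 and
 * row 2 slots 4-5; e.g. scan_ppa_idx(2, 1) == 5.
 */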

static inline int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}

static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
			      struct nvm_system_block *sb)
{
	info->seqnr = be32_to_cpu(sb->seqnr);
	info->erase_cnt = be32_to_cpu(sb->erase_cnt);
	info->version = be16_to_cpu(sb->version);
	strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
	info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
			      struct nvm_sb_info *info)
{
	sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
	sb->seqnr = cpu_to_be32(info->seqnr);
	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
	sb->version = cpu_to_be16(info->version);
	strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
	sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
	int i;

	for (i = 0; i < nr_rows; i++)
		sysblk_ppas[i].ppa = 0;

	/* if possible, place sysblk at first channel, middle channel and
	 * last channel of the device. If not, create only one or two sys
	 * blocks.
	 */
	switch (dev->nr_chnls) {
	case 2:
		sysblk_ppas[1].g.ch = 1;
		/* fall-through */
	case 1:
		sysblk_ppas[0].g.ch = 0;
		break;
	default:
		sysblk_ppas[0].g.ch = 0;
		sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
		sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
		break;
	}

	return nr_rows;
}

static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
				  struct ppa_addr *sysblk_ppas)
{
	memset(s, 0, sizeof(struct sysblk_scan));
	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
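
/*
 * Note: the raw bad block table carries one entry per plane. Both scanners
 * below first fold it with nvm_bb_tbl_fold(), which collapses the table to
 * one entry per block before the block types are inspected.
 */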

static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
				u8 *blks, int nr_blks,
				struct sysblk_scan *s)
{
	struct ppa_addr *sppa;
	int i, blkid = 0;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_HOST)
			return -EEXIST;

		if (blks[i] != NVM_BLK_T_FREE)
			continue;

		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
		sppa->g.ch = ppa.g.ch;
		sppa->g.lun = ppa.g.lun;
		sppa->g.blk = i;
		s->nr_ppas++;
		blkid++;

		pr_debug("nvm: use (%u %u %u) as sysblk\n",
			 sppa->g.ch, sppa->g.lun, sppa->g.blk);
		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
			return 0;
	}

	pr_err("nvm: sysblk failed to get free blks\n");
	return -EINVAL;
}

static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
				u8 *blks, int nr_blks,
				struct sysblk_scan *s)
{
	int i, nr_sysblk = 0;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] != NVM_BLK_T_HOST)
			continue;

		if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
			pr_err("nvm: too many host blks\n");
			return -EINVAL;
		}

		ppa.g.blk = i;

		s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
		s->nr_ppas++;
		nr_sysblk++;
	}

	return 0;
}

static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
			       struct ppa_addr *ppas, int get_free)
{
	int i, nr_blks, ret = 0;
	u8 *blks;

	s->nr_ppas = 0;
	nr_blks = dev->blks_per_lun * dev->plane_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (i = 0; i < s->nr_rows; i++) {
		s->row = i;

		ret = nvm_get_bb_tbl(dev, ppas[i], blks);
		if (ret) {
			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
			       ppas[i].g.ch, ppas[i].g.lun);
			goto err_get;
		}

		if (get_free)
			ret = sysblk_get_free_blks(dev, ppas[i], blks,
						   nr_blks, s);
		else
			ret = sysblk_get_host_blks(dev, ppas[i], blks,
						   nr_blks, s);

		if (ret)
			goto err_get;
	}

err_get:
	kfree(blks);
	return ret;
}
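
/*
 * System block revisions are appended page by page in SLC mode, so within
 * a block the sequence number grows with the page number. A linear scan
 * that stops at the first empty page, or at a page without the magic,
 * therefore ends at the newest revision.
 */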

/*
 * Scans a block for the latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *cur. PPA is updated to
 *	    next valid page.
 *	<0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
			  struct nvm_system_block *sblk)
{
	struct nvm_system_block *cur;
	int pg, ret, found = 0;

	/* the full buffer for a flash page is allocated. Only the first
	 * part of it contains the system block information.
	 */
	cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
	if (!cur)
		return -ENOMEM;

	/* perform linear scan through the block */
	for (pg = 0; pg < dev->lps_per_blk; pg++) {
		ppa->g.pg = ppa_to_slc(dev, pg);

		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD,
				     NVM_IO_SLC_MODE, cur, dev->pfpg_size);
		if (ret) {
			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
					 ppa->g.ch, ppa->g.lun, ppa->g.blk,
					 ppa->g.pg);
				break;
			}
			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)\n",
			       ret, ppa->g.ch, ppa->g.lun, ppa->g.blk,
			       ppa->g.pg);
			break; /* if we can't read a page, continue to the
				* next blk
				*/
		}

		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
				 ppa->g.ch, ppa->g.lun, ppa->g.blk,
				 ppa->g.pg);
			break; /* last valid page already found */
		}

		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
			continue;

		memcpy(sblk, cur, sizeof(struct nvm_system_block));
		found = 1;
	}

	kfree(cur);

	return found;
}

static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
	struct nvm_rq rqd;
	int ret;

	if (s->nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
				struct sysblk_scan *s)
{
	struct nvm_system_block nvmsb;
	void *buf;
	int i, sect, ret = 0;
	struct ppa_addr *ppas;

	nvm_cpu_to_sysblk(&nvmsb, info);

	buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

	ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas) {
		ret = -ENOMEM;
		goto err;
	}

	/* Write and verify */
	for (i = 0; i < s->nr_rows; i++) {
		ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

		pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
			 ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk,
			 ppas[0].g.pg);

		/* Expand to all sectors within a flash page */
		if (dev->sec_per_pg > 1) {
			for (sect = 1; sect < dev->sec_per_pg; sect++) {
				ppas[sect].ppa = ppas[0].ppa;
				ppas[sect].g.sec = sect;
			}
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
				     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
		if (ret) {
			pr_err("nvm: sysblk failed program (%u %u %u)\n",
			       ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
			break;
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
				     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
		if (ret) {
			pr_err("nvm: sysblk failed read (%u %u %u)\n",
			       ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
			break;
		}

		if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
			pr_err("nvm: sysblk failed verify (%u %u %u)\n",
			       ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
			ret = -EINVAL;
			break;
		}
	}

	kfree(ppas);
err:
	kfree(buf);

	return ret;
}

static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
	int i, ret;
	unsigned long nxt_blk;
	struct ppa_addr *ppa;

	for (i = 0; i < s->nr_rows; i++) {
		nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
		ppa->g.pg = ppa_to_slc(dev, 0);

		ret = nvm_erase_ppa(dev, ppa, 1);
		if (ret)
			return ret;

		s->act_blk[i] = nxt_blk;
	}

	return 0;
}
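
/*
 * Entry points. nvm_init_sysblock() claims free blocks in each row and
 * writes the first revision, nvm_get_sysblock() returns the newest revision
 * found on media, and nvm_update_sysblock() appends revision seqnr + 1,
 * erasing and moving to the row's next block once the active block's pages
 * are exhausted.
 */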

int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, found = 0;
	int ret = -ENOMEM;

	/*
	 * 1. setup sysblk locations
	 * 2. get bad block list
	 * 3. filter on host-specific (type 3)
	 * 4. iterate through all and find the highest seq nr.
	 * 5. return superblock information
	 */

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
	if (ret)
		goto err_sysblk;

	/* no sysblocks initialized */
	if (!s.nr_ppas)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}

	/* find the latest block across all sysblocks */
	for (i = 0; i < s.nr_rows; i++) {
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

			ret = nvm_scan_block(dev, &ppa, cur);
			if (ret > 0)
				found = 1;
			else if (ret < 0)
				break;
		}
	}

	nvm_sysblk_to_cpu(info, cur);

	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	if (found)
		return 1;
	return ret;
}

int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
	/* 1. for each latest superblock
	 * 2. if room
	 *    a. write new flash page entry with the updated information
	 * 3. if no room
	 *    a. find next available block on lun (linear search)
	 *       if none, continue to next lun
	 *       if none at all, report error. also report that it wasn't
	 *       possible to write to all superblocks.
	 *    b. write data to block.
	 */
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, ppaidx, found = 0;
	int ret = -ENOMEM;

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
	if (ret)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}

	/* Get the latest sysblk for each sysblk row */
	for (i = 0; i < s.nr_rows; i++) {
		found = 0;
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			ppaidx = scan_ppa_idx(i, j);
			ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
			if (ret > 0) {
				s.act_blk[i] = j;
				found = 1;
			} else if (ret < 0)
				break;
		}
	}

	if (!found) {
		pr_err("nvm: no valid sysblks found to update\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * All sysblocks found. Check that they have the same page id in
	 * their flash blocks.
	 */
	for (i = 1; i < s.nr_rows; i++) {
		struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
		struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

		if (l.g.pg != r.g.pg) {
			pr_err("nvm: sysblks not on same page. Previous update failed.\n");
			ret = -EINVAL;
			goto err_cur;
		}
	}

	/*
	 * Check that there hasn't been another update to the seqnr since we
	 * began.
	 */
	if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
		pr_err("nvm: seq is not sequential\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * When all pages in a block have been written, a new block is
	 * selected and writing is performed on the new block.
	 */
	if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
	    dev->lps_per_blk - 1) {
		ret = nvm_prepare_new_sysblks(dev, &s);
		if (ret)
			goto err_cur;
	}

	ret = nvm_write_and_verify(dev, new, &s);
err_cur:
	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	return ret;
}

int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	int ret;

	/*
	 * 1. select master blocks and select first available blks
	 * 2. get bad block list
	 * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
	 * 4. write and verify data to block
	 */

	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
		return -EINVAL;

	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
		pr_err("nvm: memory does not support SLC access\n");
		return -EINVAL;
	}

	/* Index all sysblocks and mark them as host-driven */
	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
	if (ret)
		goto err_mark;

	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
	if (ret)
		goto err_mark;

	/* Write to the first block of each row */
	ret = nvm_write_and_verify(dev, info, &s);
err_mark:
	mutex_unlock(&dev->mlock);
	return ret;
}
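
/*
 * Factory reset tracks erase candidates in a per-LUN bitmap. Rounding
 * blks_per_lun up to a BITS_PER_LONG multiple keeps every LUN's slice
 * long-aligned; e.g. with blks_per_lun = 1020 and BITS_PER_LONG = 64,
 * factory_nblks() returns 1024, i.e. 16 longs per LUN.
 */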

static int factory_nblks(int nblks)
{
	/* Round up to nearest BITS_PER_LONG */
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
	int nblks = factory_nblks(dev->blks_per_lun);

	return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
		BITS_PER_LONG;
}

static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
			    u8 *blks, int nr_blks,
			    unsigned long *blk_bitmap, int flags)
{
	int i, lunoff;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lunoff = factory_blk_offset(dev, ppa);

	/* non-set bits correspond to blocks that must be erased */
	for (i = 0; i < nr_blks; i++) {
		switch (blks[i]) {
		case NVM_BLK_T_FREE:
			if (flags & NVM_FACTORY_ERASE_ONLY_USER)
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		case NVM_BLK_T_HOST:
			if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		case NVM_BLK_T_GRWN_BAD:
			if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
				set_bit(i, &blk_bitmap[lunoff]);
			break;
		default:
			set_bit(i, &blk_bitmap[lunoff]);
			break;
		}
	}

	return 0;
}

static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
			     int max_ppas, unsigned long *blk_bitmap)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
			idx = factory_blk_offset(dev, ppa);
			offset = &blk_bitmap[idx];

			blkid = find_first_zero_bit(offset,
						    dev->blks_per_lun);
			if (blkid >= dev->blks_per_lun)
				continue;
			set_bit(blkid, offset);

			ppa.g.blk = blkid;
			pr_debug("nvm: erase ppa (%u %u %u)\n",
				 ppa.g.ch, ppa.g.lun, ppa.g.blk);

			erase_list[ppa_cnt] = ppa;
			ppa_cnt++;
			done = 0;

			if (ppa_cnt == max_ppas)
				return ppa_cnt;
		}
	}

	return ppa_cnt;
}

static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
				int flags)
{
	struct ppa_addr ppa;
	int ch, lun, nr_blks, ret = 0;
	u8 *blks;

	nr_blks = dev->blks_per_lun * dev->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
		ret = nvm_get_bb_tbl(dev, ppa, blks);
		if (ret) {
			pr_err("nvm: failed bb tbl for ch%u lun%u\n",
			       ppa.g.ch, ppa.g.lun);
			break;
		}

		ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
				       flags);
		if (ret)
			break;
	}

	kfree(blks);
	return ret;
}

int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	unsigned long *blk_bitmap;

	blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
			     GFP_KERNEL);
	if (!blk_bitmap)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
	if (ret)
		goto err_ppas;

	/* continue to erase until the list of blks is empty */
	while ((ppa_cnt =
			nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(blk_bitmap);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
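
/*
 * Example (editor's illustrative sketch, not part of the driver): how a
 * media manager might bring up and then update the system block area via
 * the three entry points above. The caller, the "gen" mmtype string and
 * the __maybe_unused wrapper are assumptions for illustration only.
 */
static int __maybe_unused example_sysblk_roundtrip(struct nvm_dev *dev)
{
	struct nvm_sb_info info;
	int ret;

	/* Fetch the newest system block revision, if any exists. */
	ret = nvm_get_sysblock(dev, &info);
	if (ret < 0)
		return ret;

	if (!ret) {
		/* No sysblocks yet: claim free blocks, mark them
		 * host-owned and write revision 1.
		 */
		memset(&info, 0, sizeof(info));
		info.seqnr = 1;
		info.version = 1;
		strncpy(info.mmtype, "gen", NVM_MMTYPE_LEN); /* assumed name */
		return nvm_init_sysblock(dev, &info);
	}

	/* Updates must advance seqnr by exactly one; see the check in
	 * nvm_update_sysblock().
	 */
	info.seqnr++;
	return nvm_update_sysblock(dev, &info);
}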