Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nand/for-4.8' of github.com:linux-nand/linux into mtd

Pull NAND changes from Boris Brezillon:
"""
This pull request contains only one notable change:
* Addition of the MTK NAND controller driver

And a bunch of specific NAND driver improvements/fixes. Here are the
changes that are worth mentioning:
* A few fixes/improvements for the xway NAND controller driver
* A few fixes for the sunxi NAND controller driver
* Support for DMA in the sunxi NAND driver
* Support for the sunxi NAND controller IP embedded in A23/A33 SoCs
* Addition of bitflip detection in erased pages to the brcmnand driver
* Support for new brcmnand IPs
* Update of the OMAP-GPMC binding to support DMA channel description
"""

+2982 -125
+6 -1
Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
··· 46 46 0 maps to GPMC_WAIT0 pin. 47 47 - gpio-cells: Must be set to 2 48 48 49 + Required properties when using NAND prefetch dma: 50 + - dmas GPMC NAND prefetch dma channel 51 + - dma-names Must be set to "rxtx" 52 + 49 53 Timing properties for child nodes. All are optional and default to 0. 50 54 51 55 - gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds ··· 141 137 ti,hwmods = "gpmc"; 142 138 reg = <0x50000000 0x2000>; 143 139 interrupts = <100>; 144 - 140 + dmas = <&edma 52 0>; 141 + dma-names = "rxtx"; 145 142 gpmc,num-cs = <8>; 146 143 gpmc,num-waitpins = <2>; 147 144 #address-cells = <2>;
+1
Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
··· 27 27 brcm,brcmnand-v6.2 28 28 brcm,brcmnand-v7.0 29 29 brcm,brcmnand-v7.1 30 + brcm,brcmnand-v7.2 30 31 brcm,brcmnand 31 32 - reg : the register start and length for NAND register region. 32 33 (optional) Flash DMA register range (if present)
+1 -1
Documentation/devicetree/bindings/mtd/gpmc-nand.txt
··· 39 39 40 40 "prefetch-polled" Prefetch polled mode (default) 41 41 "polled" Polled mode, without prefetch 42 - "prefetch-dma" Prefetch enabled sDMA mode 42 + "prefetch-dma" Prefetch enabled DMA mode 43 43 "prefetch-irq" Prefetch enabled irq mode 44 44 45 45 - elm_id: <deprecated> use "ti,elm-id" instead
+160
Documentation/devicetree/bindings/mtd/mtk-nand.txt
··· 1 + MTK SoCs NAND FLASH controller (NFC) DT binding 2 + 3 + This file documents the device tree bindings for MTK SoCs NAND controllers. 4 + The functional split of the controller requires two drivers to operate: 5 + the nand controller interface driver and the ECC engine driver. 6 + 7 + The hardware description for both devices must be captured as device 8 + tree nodes. 9 + 10 + 1) NFC NAND Controller Interface (NFI): 11 + ======================================= 12 + 13 + The first part of NFC is NAND Controller Interface (NFI) HW. 14 + Required NFI properties: 15 + - compatible: Should be "mediatek,mtxxxx-nfc". 16 + - reg: Base physical address and size of NFI. 17 + - interrupts: Interrupts of NFI. 18 + - clocks: NFI required clocks. 19 + - clock-names: NFI clocks internal name. 20 + - status: Disabled default. Then set "okay" by platform. 21 + - ecc-engine: Required ECC Engine node. 22 + - #address-cells: NAND chip index, should be 1. 23 + - #size-cells: Should be 0. 24 + 25 + Example: 26 + 27 + nandc: nfi@1100d000 { 28 + compatible = "mediatek,mt2701-nfc"; 29 + reg = <0 0x1100d000 0 0x1000>; 30 + interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>; 31 + clocks = <&pericfg CLK_PERI_NFI>, 32 + <&pericfg CLK_PERI_NFI_PAD>; 33 + clock-names = "nfi_clk", "pad_clk"; 34 + status = "disabled"; 35 + ecc-engine = <&bch>; 36 + #address-cells = <1>; 37 + #size-cells = <0>; 38 + }; 39 + 40 + Platform related properties, should be set in {platform_name}.dts: 41 + - children nodes: NAND chips. 42 + 43 + Children nodes properties: 44 + - reg: Chip Select Signal, default 0. 45 + Set as reg = <0>, <1> when need 2 CS. 46 + Optional: 47 + - nand-on-flash-bbt: Store BBT on NAND Flash. 48 + - nand-ecc-mode: the NAND ecc mode (check driver for supported modes) 49 + - nand-ecc-step-size: Number of data bytes covered by a single ECC step. 50 + valid values: 512 and 1024. 51 + 1024 is recommended for large page NANDs. 52 + - nand-ecc-strength: Number of bits to correct per ECC step. 
53 + The valid values that the controller supports are: 4, 6, 54 + 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44, 55 + 48, 52, 56, 60. 56 + The strength should be calculated as follows: 57 + E = (S - F) * 8 / 14 58 + S = O / (P / Q) 59 + E : nand-ecc-strength. 60 + S : spare size per sector. 61 + F : FDM size, should be in the range [1,8]. 62 + It is used to store free oob data. 63 + O : oob size. 64 + P : page size. 65 + Q : nand-ecc-step-size. 66 + If the result does not match any one of the listed 67 + choices above, please select the smaller valid value from 68 + the list. 69 + (otherwise the driver will do the adjustment at runtime) 70 + - pinctrl-names: Default NAND pin GPIO setting name. 71 + - pinctrl-0: GPIO setting node. 72 + 73 + Example: 74 + &pio { 75 + nand_pins_default: nanddefault { 76 + pins_dat { 77 + pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>, 78 + <MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>, 79 + <MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>, 80 + <MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>, 81 + <MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>, 82 + <MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>, 83 + <MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>, 84 + <MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>, 85 + <MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>; 86 + input-enable; 87 + drive-strength = <MTK_DRIVE_8mA>; 88 + bias-pull-up; 89 + }; 90 + 91 + pins_we { 92 + pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>; 93 + drive-strength = <MTK_DRIVE_8mA>; 94 + bias-pull-up = <MTK_PUPD_SET_R1R0_10>; 95 + }; 96 + 97 + pins_ale { 98 + pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>; 99 + drive-strength = <MTK_DRIVE_8mA>; 100 + bias-pull-down = <MTK_PUPD_SET_R1R0_10>; 101 + }; 102 + }; 103 + }; 104 + 105 + &nandc { 106 + status = "okay"; 107 + pinctrl-names = "default"; 108 + pinctrl-0 = <&nand_pins_default>; 109 + nand@0 { 110 + reg = <0>; 111 + nand-on-flash-bbt; 112 + nand-ecc-mode = "hw"; 113 + nand-ecc-strength = <24>; 114 + nand-ecc-step-size = <1024>; 115 + }; 116 + }; 117 + 118 + NAND chip 
optional subnodes: 119 + - Partitions, see Documentation/devicetree/bindings/mtd/partition.txt 120 + 121 + Example: 122 + nand@0 { 123 + partitions { 124 + compatible = "fixed-partitions"; 125 + #address-cells = <1>; 126 + #size-cells = <1>; 127 + 128 + preloader@0 { 129 + label = "pl"; 130 + read-only; 131 + reg = <0x00000000 0x00400000>; 132 + }; 133 + android@0x00400000 { 134 + label = "android"; 135 + reg = <0x00400000 0x12c00000>; 136 + }; 137 + }; 138 + }; 139 + 140 + 2) ECC Engine: 141 + ============== 142 + 143 + Required BCH properties: 144 + - compatible: Should be "mediatek,mtxxxx-ecc". 145 + - reg: Base physical address and size of ECC. 146 + - interrupts: Interrupts of ECC. 147 + - clocks: ECC required clocks. 148 + - clock-names: ECC clocks internal name. 149 + - status: Disabled default. Then set "okay" by platform. 150 + 151 + Example: 152 + 153 + bch: ecc@1100e000 { 154 + compatible = "mediatek,mt2701-ecc"; 155 + reg = <0 0x1100e000 0 0x1000>; 156 + interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>; 157 + clocks = <&pericfg CLK_PERI_NFI_ECC>; 158 + clock-names = "nfiecc_clk"; 159 + status = "disabled"; 160 + };
+6
Documentation/devicetree/bindings/mtd/sunxi-nand.txt
··· 11 11 * "ahb" : AHB gating clock 12 12 * "mod" : nand controller clock 13 13 14 + Optional properties: 15 + - dmas : shall reference DMA channel associated to the NAND controller. 16 + - dma-names : shall be "rxtx". 17 + 14 18 Optional children nodes: 15 19 Children nodes represent the available nand chips. 16 20 17 21 Optional properties: 22 + - reset : phandle + reset specifier pair 23 + - reset-names : must contain "ahb" 18 24 - allwinner,rb : shall contain the native Ready/Busy ids. 19 25 or 20 26 - rb-gpios : shall contain the gpios used as R/B pins.
+7 -1
drivers/mtd/nand/Kconfig
··· 539 539 config MTD_NAND_XWAY 540 540 tristate "Support for NAND on Lantiq XWAY SoC" 541 541 depends on LANTIQ && SOC_TYPE_XWAY 542 - select MTD_NAND_PLATFORM 543 542 help 544 543 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached 545 544 to the External Bus Unit (EBU). ··· 561 562 help 562 563 Enables support for NAND flash chips on SoCs containing the EBI2 NAND 563 564 controller. This controller is found on IPQ806x SoC. 565 + 566 + config MTD_NAND_MTK 567 + tristate "Support for NAND controller on MTK SoCs" 568 + depends on HAS_DMA 569 + help 570 + Enables support for NAND controller on MTK SoCs. 571 + This controller is found on mt27xx, mt81xx, mt65xx SoCs. 564 572 565 573 endif # MTD_NAND
+1
drivers/mtd/nand/Makefile
··· 57 57 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o 58 58 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ 59 59 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o 60 + obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o 60 61 61 62 nand-objs := nand_base.o nand_bbt.o nand_timings.o
+157 -14
drivers/mtd/nand/brcmnand/brcmnand.c
··· 340 340 [BRCMNAND_FC_BASE] = 0x400, 341 341 }; 342 342 343 + /* BRCMNAND v7.2 */ 344 + static const u16 brcmnand_regs_v72[] = { 345 + [BRCMNAND_CMD_START] = 0x04, 346 + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, 347 + [BRCMNAND_CMD_ADDRESS] = 0x0c, 348 + [BRCMNAND_INTFC_STATUS] = 0x14, 349 + [BRCMNAND_CS_SELECT] = 0x18, 350 + [BRCMNAND_CS_XOR] = 0x1c, 351 + [BRCMNAND_LL_OP] = 0x20, 352 + [BRCMNAND_CS0_BASE] = 0x50, 353 + [BRCMNAND_CS1_BASE] = 0, 354 + [BRCMNAND_CORR_THRESHOLD] = 0xdc, 355 + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, 356 + [BRCMNAND_UNCORR_COUNT] = 0xfc, 357 + [BRCMNAND_CORR_COUNT] = 0x100, 358 + [BRCMNAND_CORR_EXT_ADDR] = 0x10c, 359 + [BRCMNAND_CORR_ADDR] = 0x110, 360 + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, 361 + [BRCMNAND_UNCORR_ADDR] = 0x118, 362 + [BRCMNAND_SEMAPHORE] = 0x150, 363 + [BRCMNAND_ID] = 0x194, 364 + [BRCMNAND_ID_EXT] = 0x198, 365 + [BRCMNAND_LL_RDATA] = 0x19c, 366 + [BRCMNAND_OOB_READ_BASE] = 0x200, 367 + [BRCMNAND_OOB_READ_10_BASE] = 0, 368 + [BRCMNAND_OOB_WRITE_BASE] = 0x400, 369 + [BRCMNAND_OOB_WRITE_10_BASE] = 0, 370 + [BRCMNAND_FC_BASE] = 0x600, 371 + }; 372 + 343 373 enum brcmnand_cs_reg { 344 374 BRCMNAND_CS_CFG_EXT = 0, 345 375 BRCMNAND_CS_CFG, ··· 465 435 } 466 436 467 437 /* Register offsets */ 468 - if (ctrl->nand_version >= 0x0701) 438 + if (ctrl->nand_version >= 0x0702) 439 + ctrl->reg_offsets = brcmnand_regs_v72; 440 + else if (ctrl->nand_version >= 0x0701) 469 441 ctrl->reg_offsets = brcmnand_regs_v71; 470 442 else if (ctrl->nand_version >= 0x0600) 471 443 ctrl->reg_offsets = brcmnand_regs_v60; ··· 512 480 } 513 481 514 482 /* Maximum spare area sector size (per 512B) */ 515 - if (ctrl->nand_version >= 0x0600) 483 + if (ctrl->nand_version >= 0x0702) 484 + ctrl->max_oob = 128; 485 + else if (ctrl->nand_version >= 0x0600) 516 486 ctrl->max_oob = 64; 517 487 else if (ctrl->nand_version >= 0x0500) 518 488 ctrl->max_oob = 32; ··· 617 583 enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; 618 584 int cs = host->cs; 619 585 620 - if 
(ctrl->nand_version >= 0x0600) 586 + if (ctrl->nand_version >= 0x0702) 587 + bits = 7; 588 + else if (ctrl->nand_version >= 0x0600) 621 589 bits = 6; 622 590 else if (ctrl->nand_version >= 0x0500) 623 591 bits = 5; 624 592 else 625 593 bits = 4; 626 594 627 - if (ctrl->nand_version >= 0x0600) { 595 + if (ctrl->nand_version >= 0x0702) { 596 + if (cs >= 4) 597 + reg = BRCMNAND_CORR_THRESHOLD_EXT; 598 + shift = (cs % 4) * bits; 599 + } else if (ctrl->nand_version >= 0x0600) { 628 600 if (cs >= 5) 629 601 reg = BRCMNAND_CORR_THRESHOLD_EXT; 630 602 shift = (cs % 5) * bits; ··· 671 631 672 632 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) 673 633 { 674 - if (ctrl->nand_version >= 0x0600) 634 + if (ctrl->nand_version >= 0x0702) 635 + return GENMASK(7, 0); 636 + else if (ctrl->nand_version >= 0x0600) 675 637 return GENMASK(6, 0); 676 638 else 677 639 return GENMASK(5, 0); 678 640 } 679 641 680 642 #define NAND_ACC_CONTROL_ECC_SHIFT 16 643 + #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 681 644 682 645 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) 683 646 { 684 647 u32 mask = (ctrl->nand_version >= 0x0600) ? 
0x1f : 0x0f; 685 648 686 - return mask << NAND_ACC_CONTROL_ECC_SHIFT; 649 + mask <<= NAND_ACC_CONTROL_ECC_SHIFT; 650 + 651 + /* v7.2 includes additional ECC levels */ 652 + if (ctrl->nand_version >= 0x0702) 653 + mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; 654 + 655 + return mask; 687 656 } 688 657 689 658 static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) ··· 716 667 717 668 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) 718 669 { 719 - if (ctrl->nand_version >= 0x0600) 670 + if (ctrl->nand_version >= 0x0702) 671 + return 9; 672 + else if (ctrl->nand_version >= 0x0600) 720 673 return 7; 721 674 else if (ctrl->nand_version >= 0x0500) 722 675 return 6; ··· 824 773 * Internal support functions 825 774 ***********************************************************************/ 826 775 827 - static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg) 776 + static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, 777 + struct brcmnand_cfg *cfg) 828 778 { 829 - return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && 830 - cfg->ecc_level == 15; 779 + if (ctrl->nand_version <= 0x0701) 780 + return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && 781 + cfg->ecc_level == 15; 782 + else 783 + return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && 784 + cfg->ecc_level == 15) || 785 + (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); 831 786 } 832 787 833 788 /* ··· 988 931 if (p->sector_size_1k) 989 932 ecc_level <<= 1; 990 933 991 - if (is_hamming_ecc(p)) { 934 + if (is_hamming_ecc(host->ctrl, p)) { 992 935 ecc->bytes = 3 * sectors; 993 936 mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); 994 937 return 0; ··· 1602 1545 return ret; 1603 1546 } 1604 1547 1548 + /* 1549 + * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC 1550 + * error 1551 + * 1552 + * Because the HW ECC signals an ECC error if an erase paged has even a single 1553 + * bitflip, 
we must check each ECC error to see if it is actually an erased 1554 + * page with bitflips, not a truly corrupted page. 1555 + * 1556 + * On a real error, return a negative error code (-EBADMSG for ECC error), and 1557 + * buf will contain raw data. 1558 + * Otherwise, buf gets filled with 0xffs and return the maximum number of 1559 + * bitflips-per-ECC-sector to the caller. 1560 + * 1561 + */ 1562 + static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, 1563 + struct nand_chip *chip, void *buf, u64 addr) 1564 + { 1565 + int i, sas; 1566 + void *oob = chip->oob_poi; 1567 + int bitflips = 0; 1568 + int page = addr >> chip->page_shift; 1569 + int ret; 1570 + 1571 + if (!buf) { 1572 + buf = chip->buffers->databuf; 1573 + /* Invalidate page cache */ 1574 + chip->pagebuf = -1; 1575 + } 1576 + 1577 + sas = mtd->oobsize / chip->ecc.steps; 1578 + 1579 + /* read without ecc for verification */ 1580 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); 1581 + ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); 1582 + if (ret) 1583 + return ret; 1584 + 1585 + for (i = 0; i < chip->ecc.steps; i++, oob += sas) { 1586 + ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, 1587 + oob, sas, NULL, 0, 1588 + chip->ecc.strength); 1589 + if (ret < 0) 1590 + return ret; 1591 + 1592 + bitflips = max(bitflips, ret); 1593 + } 1594 + 1595 + return bitflips; 1596 + } 1597 + 1605 1598 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, 1606 1599 u64 addr, unsigned int trans, u32 *buf, u8 *oob) 1607 1600 { ··· 1659 1552 struct brcmnand_controller *ctrl = host->ctrl; 1660 1553 u64 err_addr = 0; 1661 1554 int err; 1555 + bool retry = true; 1662 1556 1663 1557 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); 1664 1558 1559 + try_dmaread: 1665 1560 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); 1666 1561 1667 1562 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { ··· 1684 1575 } 1685 1576 1686 1577 if (mtd_is_eccerr(err)) { 
1578 + /* 1579 + * On controller version and 7.0, 7.1 , DMA read after a 1580 + * prior PIO read that reported uncorrectable error, 1581 + * the DMA engine captures this error following DMA read 1582 + * cleared only on subsequent DMA read, so just retry once 1583 + * to clear a possible false error reported for current DMA 1584 + * read 1585 + */ 1586 + if ((ctrl->nand_version == 0x0700) || 1587 + (ctrl->nand_version == 0x0701)) { 1588 + if (retry) { 1589 + retry = false; 1590 + goto try_dmaread; 1591 + } 1592 + } 1593 + 1594 + /* 1595 + * Controller version 7.2 has hw encoder to detect erased page 1596 + * bitflips, apply sw verification for older controllers only 1597 + */ 1598 + if (ctrl->nand_version < 0x0702) { 1599 + err = brcmstb_nand_verify_erased_page(mtd, chip, buf, 1600 + addr); 1601 + /* erased page bitflips corrected */ 1602 + if (err > 0) 1603 + return err; 1604 + } 1605 + 1687 1606 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", 1688 1607 (unsigned long long)err_addr); 1689 1608 mtd->ecc_stats.failed++; ··· 1994 1857 return 0; 1995 1858 } 1996 1859 1997 - static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) 1860 + static void brcmnand_print_cfg(struct brcmnand_host *host, 1861 + char *buf, struct brcmnand_cfg *cfg) 1998 1862 { 1999 1863 buf += sprintf(buf, 2000 1864 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", ··· 2006 1868 cfg->spare_area_size, cfg->device_width); 2007 1869 2008 1870 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ 2009 - if (is_hamming_ecc(cfg)) 1871 + if (is_hamming_ecc(host->ctrl, cfg)) 2010 1872 sprintf(buf, ", Hamming ECC"); 2011 1873 else if (cfg->sector_size_1k) 2012 1874 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); ··· 2125 1987 2126 1988 brcmnand_set_ecc_enabled(host, 1); 2127 1989 2128 - brcmnand_print_cfg(msg, cfg); 1990 + brcmnand_print_cfg(host, msg, cfg); 2129 1991 dev_info(ctrl->dev, "detected %s\n", msg); 2130 1992 2131 1993 /* Configure ACC_CONTROL 
*/ ··· 2133 1995 tmp = nand_readreg(ctrl, offs); 2134 1996 tmp &= ~ACC_CONTROL_PARTIAL_PAGE; 2135 1997 tmp &= ~ACC_CONTROL_RD_ERASED; 1998 + 1999 + /* We need to turn on Read from erased paged protected by ECC */ 2000 + if (ctrl->nand_version >= 0x0702) 2001 + tmp |= ACC_CONTROL_RD_ERASED; 2136 2002 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; 2137 2003 if (ctrl->features & BRCMNAND_HAS_PREFETCH) { 2138 2004 /* ··· 2337 2195 { .compatible = "brcm,brcmnand-v6.2" }, 2338 2196 { .compatible = "brcm,brcmnand-v7.0" }, 2339 2197 { .compatible = "brcm,brcmnand-v7.1" }, 2198 + { .compatible = "brcm,brcmnand-v7.2" }, 2340 2199 {}, 2341 2200 }; 2342 2201 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+1 -1
drivers/mtd/nand/jz4780_bch.c
··· 375 375 module_platform_driver(jz4780_bch_driver); 376 376 377 377 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); 378 - MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); 378 + MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); 379 379 MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); 380 380 MODULE_LICENSE("GPL v2");
+1 -1
drivers/mtd/nand/jz4780_nand.c
··· 412 412 module_platform_driver(jz4780_nand_driver); 413 413 414 414 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); 415 - MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); 415 + MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); 416 416 MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); 417 417 MODULE_LICENSE("GPL v2");
+530
drivers/mtd/nand/mtk_ecc.c
··· 1 + /* 2 + * MTK ECC controller driver. 3 + * Copyright (C) 2016 MediaTek Inc. 4 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> 5 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/platform_device.h> 18 + #include <linux/dma-mapping.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/clk.h> 21 + #include <linux/module.h> 22 + #include <linux/iopoll.h> 23 + #include <linux/of.h> 24 + #include <linux/of_platform.h> 25 + #include <linux/mutex.h> 26 + 27 + #include "mtk_ecc.h" 28 + 29 + #define ECC_IDLE_MASK BIT(0) 30 + #define ECC_IRQ_EN BIT(0) 31 + #define ECC_OP_ENABLE (1) 32 + #define ECC_OP_DISABLE (0) 33 + 34 + #define ECC_ENCCON (0x00) 35 + #define ECC_ENCCNFG (0x04) 36 + #define ECC_CNFG_4BIT (0) 37 + #define ECC_CNFG_6BIT (1) 38 + #define ECC_CNFG_8BIT (2) 39 + #define ECC_CNFG_10BIT (3) 40 + #define ECC_CNFG_12BIT (4) 41 + #define ECC_CNFG_14BIT (5) 42 + #define ECC_CNFG_16BIT (6) 43 + #define ECC_CNFG_18BIT (7) 44 + #define ECC_CNFG_20BIT (8) 45 + #define ECC_CNFG_22BIT (9) 46 + #define ECC_CNFG_24BIT (0xa) 47 + #define ECC_CNFG_28BIT (0xb) 48 + #define ECC_CNFG_32BIT (0xc) 49 + #define ECC_CNFG_36BIT (0xd) 50 + #define ECC_CNFG_40BIT (0xe) 51 + #define ECC_CNFG_44BIT (0xf) 52 + #define ECC_CNFG_48BIT (0x10) 53 + #define ECC_CNFG_52BIT (0x11) 54 + #define ECC_CNFG_56BIT (0x12) 55 + #define ECC_CNFG_60BIT (0x13) 56 + #define ECC_MODE_SHIFT (5) 57 + #define ECC_MS_SHIFT (16) 58 + #define ECC_ENCDIADDR (0x08) 59 + #define ECC_ENCIDLE (0x0C) 60 + 
#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32)) 61 + #define ECC_ENCIRQ_EN (0x80) 62 + #define ECC_ENCIRQ_STA (0x84) 63 + #define ECC_DECCON (0x100) 64 + #define ECC_DECCNFG (0x104) 65 + #define DEC_EMPTY_EN BIT(31) 66 + #define DEC_CNFG_CORRECT (0x3 << 12) 67 + #define ECC_DECIDLE (0x10C) 68 + #define ECC_DECENUM0 (0x114) 69 + #define ERR_MASK (0x3f) 70 + #define ECC_DECDONE (0x124) 71 + #define ECC_DECIRQ_EN (0x200) 72 + #define ECC_DECIRQ_STA (0x204) 73 + 74 + #define ECC_TIMEOUT (500000) 75 + 76 + #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) 77 + #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) 78 + #define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \ 79 + ECC_ENCIRQ_EN : ECC_DECIRQ_EN) 80 + 81 + struct mtk_ecc { 82 + struct device *dev; 83 + void __iomem *regs; 84 + struct clk *clk; 85 + 86 + struct completion done; 87 + struct mutex lock; 88 + u32 sectors; 89 + }; 90 + 91 + static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, 92 + enum mtk_ecc_operation op) 93 + { 94 + struct device *dev = ecc->dev; 95 + u32 val; 96 + int ret; 97 + 98 + ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, 99 + val & ECC_IDLE_MASK, 100 + 10, ECC_TIMEOUT); 101 + if (ret) 102 + dev_warn(dev, "%s NOT idle\n", 103 + op == ECC_ENCODE ? 
"encoder" : "decoder"); 104 + } 105 + 106 + static irqreturn_t mtk_ecc_irq(int irq, void *id) 107 + { 108 + struct mtk_ecc *ecc = id; 109 + enum mtk_ecc_operation op; 110 + u32 dec, enc; 111 + 112 + dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN; 113 + if (dec) { 114 + op = ECC_DECODE; 115 + dec = readw(ecc->regs + ECC_DECDONE); 116 + if (dec & ecc->sectors) { 117 + ecc->sectors = 0; 118 + complete(&ecc->done); 119 + } else { 120 + return IRQ_HANDLED; 121 + } 122 + } else { 123 + enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN; 124 + if (enc) { 125 + op = ECC_ENCODE; 126 + complete(&ecc->done); 127 + } else { 128 + return IRQ_NONE; 129 + } 130 + } 131 + 132 + writel(0, ecc->regs + ECC_IRQ_REG(op)); 133 + 134 + return IRQ_HANDLED; 135 + } 136 + 137 + static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) 138 + { 139 + u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz; 140 + u32 reg; 141 + 142 + switch (config->strength) { 143 + case 4: 144 + ecc_bit = ECC_CNFG_4BIT; 145 + break; 146 + case 6: 147 + ecc_bit = ECC_CNFG_6BIT; 148 + break; 149 + case 8: 150 + ecc_bit = ECC_CNFG_8BIT; 151 + break; 152 + case 10: 153 + ecc_bit = ECC_CNFG_10BIT; 154 + break; 155 + case 12: 156 + ecc_bit = ECC_CNFG_12BIT; 157 + break; 158 + case 14: 159 + ecc_bit = ECC_CNFG_14BIT; 160 + break; 161 + case 16: 162 + ecc_bit = ECC_CNFG_16BIT; 163 + break; 164 + case 18: 165 + ecc_bit = ECC_CNFG_18BIT; 166 + break; 167 + case 20: 168 + ecc_bit = ECC_CNFG_20BIT; 169 + break; 170 + case 22: 171 + ecc_bit = ECC_CNFG_22BIT; 172 + break; 173 + case 24: 174 + ecc_bit = ECC_CNFG_24BIT; 175 + break; 176 + case 28: 177 + ecc_bit = ECC_CNFG_28BIT; 178 + break; 179 + case 32: 180 + ecc_bit = ECC_CNFG_32BIT; 181 + break; 182 + case 36: 183 + ecc_bit = ECC_CNFG_36BIT; 184 + break; 185 + case 40: 186 + ecc_bit = ECC_CNFG_40BIT; 187 + break; 188 + case 44: 189 + ecc_bit = ECC_CNFG_44BIT; 190 + break; 191 + case 48: 192 + ecc_bit = ECC_CNFG_48BIT; 193 + break; 194 + case 52: 195 + 
ecc_bit = ECC_CNFG_52BIT; 196 + break; 197 + case 56: 198 + ecc_bit = ECC_CNFG_56BIT; 199 + break; 200 + case 60: 201 + ecc_bit = ECC_CNFG_60BIT; 202 + break; 203 + default: 204 + dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n", 205 + config->strength); 206 + } 207 + 208 + if (config->op == ECC_ENCODE) { 209 + /* configure ECC encoder (in bits) */ 210 + enc_sz = config->len << 3; 211 + 212 + reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); 213 + reg |= (enc_sz << ECC_MS_SHIFT); 214 + writel(reg, ecc->regs + ECC_ENCCNFG); 215 + 216 + if (config->mode != ECC_NFI_MODE) 217 + writel(lower_32_bits(config->addr), 218 + ecc->regs + ECC_ENCDIADDR); 219 + 220 + } else { 221 + /* configure ECC decoder (in bits) */ 222 + dec_sz = (config->len << 3) + 223 + config->strength * ECC_PARITY_BITS; 224 + 225 + reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); 226 + reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; 227 + reg |= DEC_EMPTY_EN; 228 + writel(reg, ecc->regs + ECC_DECCNFG); 229 + 230 + if (config->sectors) 231 + ecc->sectors = 1 << (config->sectors - 1); 232 + } 233 + } 234 + 235 + void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, 236 + int sectors) 237 + { 238 + u32 offset, i, err; 239 + u32 bitflips = 0; 240 + 241 + stats->corrected = 0; 242 + stats->failed = 0; 243 + 244 + for (i = 0; i < sectors; i++) { 245 + offset = (i >> 2) << 2; 246 + err = readl(ecc->regs + ECC_DECENUM0 + offset); 247 + err = err >> ((i % 4) * 8); 248 + err &= ERR_MASK; 249 + if (err == ERR_MASK) { 250 + /* uncorrectable errors */ 251 + stats->failed++; 252 + continue; 253 + } 254 + 255 + stats->corrected += err; 256 + bitflips = max_t(u32, bitflips, err); 257 + } 258 + 259 + stats->bitflips = bitflips; 260 + } 261 + EXPORT_SYMBOL(mtk_ecc_get_stats); 262 + 263 + void mtk_ecc_release(struct mtk_ecc *ecc) 264 + { 265 + clk_disable_unprepare(ecc->clk); 266 + put_device(ecc->dev); 267 + } 268 + EXPORT_SYMBOL(mtk_ecc_release); 269 + 270 + static void 
mtk_ecc_hw_init(struct mtk_ecc *ecc) 271 + { 272 + mtk_ecc_wait_idle(ecc, ECC_ENCODE); 273 + writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); 274 + 275 + mtk_ecc_wait_idle(ecc, ECC_DECODE); 276 + writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); 277 + } 278 + 279 + static struct mtk_ecc *mtk_ecc_get(struct device_node *np) 280 + { 281 + struct platform_device *pdev; 282 + struct mtk_ecc *ecc; 283 + 284 + pdev = of_find_device_by_node(np); 285 + if (!pdev || !platform_get_drvdata(pdev)) 286 + return ERR_PTR(-EPROBE_DEFER); 287 + 288 + get_device(&pdev->dev); 289 + ecc = platform_get_drvdata(pdev); 290 + clk_prepare_enable(ecc->clk); 291 + mtk_ecc_hw_init(ecc); 292 + 293 + return ecc; 294 + } 295 + 296 + struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) 297 + { 298 + struct mtk_ecc *ecc = NULL; 299 + struct device_node *np; 300 + 301 + np = of_parse_phandle(of_node, "ecc-engine", 0); 302 + if (np) { 303 + ecc = mtk_ecc_get(np); 304 + of_node_put(np); 305 + } 306 + 307 + return ecc; 308 + } 309 + EXPORT_SYMBOL(of_mtk_ecc_get); 310 + 311 + int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) 312 + { 313 + enum mtk_ecc_operation op = config->op; 314 + int ret; 315 + 316 + ret = mutex_lock_interruptible(&ecc->lock); 317 + if (ret) { 318 + dev_err(ecc->dev, "interrupted when attempting to lock\n"); 319 + return ret; 320 + } 321 + 322 + mtk_ecc_wait_idle(ecc, op); 323 + mtk_ecc_config(ecc, config); 324 + writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); 325 + 326 + init_completion(&ecc->done); 327 + writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op)); 328 + 329 + return 0; 330 + } 331 + EXPORT_SYMBOL(mtk_ecc_enable); 332 + 333 + void mtk_ecc_disable(struct mtk_ecc *ecc) 334 + { 335 + enum mtk_ecc_operation op = ECC_ENCODE; 336 + 337 + /* find out the running operation */ 338 + if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) 339 + op = ECC_DECODE; 340 + 341 + /* disable it */ 342 + mtk_ecc_wait_idle(ecc, op); 343 + writew(0, ecc->regs + 
ECC_IRQ_REG(op)); 344 + writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); 345 + 346 + mutex_unlock(&ecc->lock); 347 + } 348 + EXPORT_SYMBOL(mtk_ecc_disable); 349 + 350 + int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) 351 + { 352 + int ret; 353 + 354 + ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); 355 + if (!ret) { 356 + dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", 357 + (op == ECC_ENCODE) ? "encoder" : "decoder"); 358 + return -ETIMEDOUT; 359 + } 360 + 361 + return 0; 362 + } 363 + EXPORT_SYMBOL(mtk_ecc_wait_done); 364 + 365 + int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, 366 + u8 *data, u32 bytes) 367 + { 368 + dma_addr_t addr; 369 + u32 *p, len, i; 370 + int ret = 0; 371 + 372 + addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); 373 + ret = dma_mapping_error(ecc->dev, addr); 374 + if (ret) { 375 + dev_err(ecc->dev, "dma mapping error\n"); 376 + return -EINVAL; 377 + } 378 + 379 + config->op = ECC_ENCODE; 380 + config->addr = addr; 381 + ret = mtk_ecc_enable(ecc, config); 382 + if (ret) { 383 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 384 + return ret; 385 + } 386 + 387 + ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); 388 + if (ret) 389 + goto timeout; 390 + 391 + mtk_ecc_wait_idle(ecc, ECC_ENCODE); 392 + 393 + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 394 + len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 395 + p = (u32 *)(data + bytes); 396 + 397 + /* write the parity bytes generated by the ECC back to the OOB region */ 398 + for (i = 0; i < len; i++) 399 + p[i] = readl(ecc->regs + ECC_ENCPAR(i)); 400 + timeout: 401 + 402 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 403 + mtk_ecc_disable(ecc); 404 + 405 + return ret; 406 + } 407 + EXPORT_SYMBOL(mtk_ecc_encode); 408 + 409 + void mtk_ecc_adjust_strength(u32 *p) 410 + { 411 + u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 412 + 40, 44, 48, 52, 
56, 60}; 413 + int i; 414 + 415 + for (i = 0; i < ARRAY_SIZE(ecc); i++) { 416 + if (*p <= ecc[i]) { 417 + if (!i) 418 + *p = ecc[i]; 419 + else if (*p != ecc[i]) 420 + *p = ecc[i - 1]; 421 + return; 422 + } 423 + } 424 + 425 + *p = ecc[ARRAY_SIZE(ecc) - 1]; 426 + } 427 + EXPORT_SYMBOL(mtk_ecc_adjust_strength); 428 + 429 + static int mtk_ecc_probe(struct platform_device *pdev) 430 + { 431 + struct device *dev = &pdev->dev; 432 + struct mtk_ecc *ecc; 433 + struct resource *res; 434 + int irq, ret; 435 + 436 + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); 437 + if (!ecc) 438 + return -ENOMEM; 439 + 440 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 441 + ecc->regs = devm_ioremap_resource(dev, res); 442 + if (IS_ERR(ecc->regs)) { 443 + dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs)); 444 + return PTR_ERR(ecc->regs); 445 + } 446 + 447 + ecc->clk = devm_clk_get(dev, NULL); 448 + if (IS_ERR(ecc->clk)) { 449 + dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); 450 + return PTR_ERR(ecc->clk); 451 + } 452 + 453 + irq = platform_get_irq(pdev, 0); 454 + if (irq < 0) { 455 + dev_err(dev, "failed to get irq\n"); 456 + return -EINVAL; 457 + } 458 + 459 + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); 460 + if (ret) { 461 + dev_err(dev, "failed to set DMA mask\n"); 462 + return ret; 463 + } 464 + 465 + ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); 466 + if (ret) { 467 + dev_err(dev, "failed to request irq\n"); 468 + return -EINVAL; 469 + } 470 + 471 + ecc->dev = dev; 472 + mutex_init(&ecc->lock); 473 + platform_set_drvdata(pdev, ecc); 474 + dev_info(dev, "probed\n"); 475 + 476 + return 0; 477 + } 478 + 479 + #ifdef CONFIG_PM_SLEEP 480 + static int mtk_ecc_suspend(struct device *dev) 481 + { 482 + struct mtk_ecc *ecc = dev_get_drvdata(dev); 483 + 484 + clk_disable_unprepare(ecc->clk); 485 + 486 + return 0; 487 + } 488 + 489 + static int mtk_ecc_resume(struct device *dev) 490 + { 491 + struct mtk_ecc *ecc = 
dev_get_drvdata(dev); 492 + int ret; 493 + 494 + ret = clk_prepare_enable(ecc->clk); 495 + if (ret) { 496 + dev_err(dev, "failed to enable clk\n"); 497 + return ret; 498 + } 499 + 500 + mtk_ecc_hw_init(ecc); 501 + 502 + return 0; 503 + } 504 + 505 + static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); 506 + #endif 507 + 508 + static const struct of_device_id mtk_ecc_dt_match[] = { 509 + { .compatible = "mediatek,mt2701-ecc" }, 510 + {}, 511 + }; 512 + 513 + MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); 514 + 515 + static struct platform_driver mtk_ecc_driver = { 516 + .probe = mtk_ecc_probe, 517 + .driver = { 518 + .name = "mtk-ecc", 519 + .of_match_table = of_match_ptr(mtk_ecc_dt_match), 520 + #ifdef CONFIG_PM_SLEEP 521 + .pm = &mtk_ecc_pm_ops, 522 + #endif 523 + }, 524 + }; 525 + 526 + module_platform_driver(mtk_ecc_driver); 527 + 528 + MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); 529 + MODULE_DESCRIPTION("MTK Nand ECC Driver"); 530 + MODULE_LICENSE("GPL");
+50
drivers/mtd/nand/mtk_ecc.h
··· 1 + /* 2 + * MTK SDG1 ECC controller 3 + * 4 + * Copyright (c) 2016 Mediatek 5 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> 6 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License version 2 as published 9 + * by the Free Software Foundation. 10 + */ 11 + 12 + #ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ 13 + #define __DRIVERS_MTD_NAND_MTK_ECC_H__ 14 + 15 + #include <linux/types.h> 16 + 17 + #define ECC_PARITY_BITS (14) 18 + 19 + enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; 20 + enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; 21 + 22 + struct device_node; 23 + struct mtk_ecc; 24 + 25 + struct mtk_ecc_stats { 26 + u32 corrected; 27 + u32 bitflips; 28 + u32 failed; 29 + }; 30 + 31 + struct mtk_ecc_config { 32 + enum mtk_ecc_operation op; 33 + enum mtk_ecc_mode mode; 34 + dma_addr_t addr; 35 + u32 strength; 36 + u32 sectors; 37 + u32 len; 38 + }; 39 + 40 + int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); 41 + void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); 42 + int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); 43 + int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); 44 + void mtk_ecc_disable(struct mtk_ecc *); 45 + void mtk_ecc_adjust_strength(u32 *); 46 + 47 + struct mtk_ecc *of_mtk_ecc_get(struct device_node *); 48 + void mtk_ecc_release(struct mtk_ecc *); 49 + 50 + #endif
+1526
drivers/mtd/nand/mtk_nand.c
··· 1 + /* 2 + * MTK NAND Flash controller driver. 3 + * Copyright (C) 2016 MediaTek Inc. 4 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com> 5 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/platform_device.h> 18 + #include <linux/dma-mapping.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/delay.h> 21 + #include <linux/clk.h> 22 + #include <linux/mtd/nand.h> 23 + #include <linux/mtd/mtd.h> 24 + #include <linux/module.h> 25 + #include <linux/iopoll.h> 26 + #include <linux/of.h> 27 + #include "mtk_ecc.h" 28 + 29 + /* NAND controller register definition */ 30 + #define NFI_CNFG (0x00) 31 + #define CNFG_AHB BIT(0) 32 + #define CNFG_READ_EN BIT(1) 33 + #define CNFG_DMA_BURST_EN BIT(2) 34 + #define CNFG_BYTE_RW BIT(6) 35 + #define CNFG_HW_ECC_EN BIT(8) 36 + #define CNFG_AUTO_FMT_EN BIT(9) 37 + #define CNFG_OP_CUST (6 << 12) 38 + #define NFI_PAGEFMT (0x04) 39 + #define PAGEFMT_FDM_ECC_SHIFT (12) 40 + #define PAGEFMT_FDM_SHIFT (8) 41 + #define PAGEFMT_SPARE_16 (0) 42 + #define PAGEFMT_SPARE_26 (1) 43 + #define PAGEFMT_SPARE_27 (2) 44 + #define PAGEFMT_SPARE_28 (3) 45 + #define PAGEFMT_SPARE_32 (4) 46 + #define PAGEFMT_SPARE_36 (5) 47 + #define PAGEFMT_SPARE_40 (6) 48 + #define PAGEFMT_SPARE_44 (7) 49 + #define PAGEFMT_SPARE_48 (8) 50 + #define PAGEFMT_SPARE_49 (9) 51 + #define PAGEFMT_SPARE_50 (0xa) 52 + #define PAGEFMT_SPARE_51 (0xb) 53 + #define PAGEFMT_SPARE_52 (0xc) 54 + #define PAGEFMT_SPARE_62 (0xd) 55 + #define PAGEFMT_SPARE_63 (0xe) 56 + #define 
PAGEFMT_SPARE_64 (0xf) 57 + #define PAGEFMT_SPARE_SHIFT (4) 58 + #define PAGEFMT_SEC_SEL_512 BIT(2) 59 + #define PAGEFMT_512_2K (0) 60 + #define PAGEFMT_2K_4K (1) 61 + #define PAGEFMT_4K_8K (2) 62 + #define PAGEFMT_8K_16K (3) 63 + /* NFI control */ 64 + #define NFI_CON (0x08) 65 + #define CON_FIFO_FLUSH BIT(0) 66 + #define CON_NFI_RST BIT(1) 67 + #define CON_BRD BIT(8) /* burst read */ 68 + #define CON_BWR BIT(9) /* burst write */ 69 + #define CON_SEC_SHIFT (12) 70 + /* Timming control register */ 71 + #define NFI_ACCCON (0x0C) 72 + #define NFI_INTR_EN (0x10) 73 + #define INTR_AHB_DONE_EN BIT(6) 74 + #define NFI_INTR_STA (0x14) 75 + #define NFI_CMD (0x20) 76 + #define NFI_ADDRNOB (0x30) 77 + #define NFI_COLADDR (0x34) 78 + #define NFI_ROWADDR (0x38) 79 + #define NFI_STRDATA (0x40) 80 + #define STAR_EN (1) 81 + #define STAR_DE (0) 82 + #define NFI_CNRNB (0x44) 83 + #define NFI_DATAW (0x50) 84 + #define NFI_DATAR (0x54) 85 + #define NFI_PIO_DIRDY (0x58) 86 + #define PIO_DI_RDY (0x01) 87 + #define NFI_STA (0x60) 88 + #define STA_CMD BIT(0) 89 + #define STA_ADDR BIT(1) 90 + #define STA_BUSY BIT(8) 91 + #define STA_EMP_PAGE BIT(12) 92 + #define NFI_FSM_CUSTDATA (0xe << 16) 93 + #define NFI_FSM_MASK (0xf << 16) 94 + #define NFI_ADDRCNTR (0x70) 95 + #define CNTR_MASK GENMASK(16, 12) 96 + #define NFI_STRADDR (0x80) 97 + #define NFI_BYTELEN (0x84) 98 + #define NFI_CSEL (0x90) 99 + #define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2) 100 + #define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) 101 + #define NFI_FDM_MAX_SIZE (8) 102 + #define NFI_FDM_MIN_SIZE (1) 103 + #define NFI_MASTER_STA (0x224) 104 + #define MASTER_STA_MASK (0x0FFF) 105 + #define NFI_EMPTY_THRESH (0x23C) 106 + 107 + #define MTK_NAME "mtk-nand" 108 + #define KB(x) ((x) * 1024UL) 109 + #define MB(x) (KB(x) * 1024UL) 110 + 111 + #define MTK_TIMEOUT (500000) 112 + #define MTK_RESET_TIMEOUT (1000000) 113 + #define MTK_MAX_SECTOR (16) 114 + #define MTK_NAND_MAX_NSELS (2) 115 + 116 + struct mtk_nfc_bad_mark_ctl { 117 
+ void (*bm_swap)(struct mtd_info *, u8 *buf, int raw); 118 + u32 sec; 119 + u32 pos; 120 + }; 121 + 122 + /* 123 + * FDM: region used to store free OOB data 124 + */ 125 + struct mtk_nfc_fdm { 126 + u32 reg_size; 127 + u32 ecc_size; 128 + }; 129 + 130 + struct mtk_nfc_nand_chip { 131 + struct list_head node; 132 + struct nand_chip nand; 133 + 134 + struct mtk_nfc_bad_mark_ctl bad_mark; 135 + struct mtk_nfc_fdm fdm; 136 + u32 spare_per_sector; 137 + 138 + int nsels; 139 + u8 sels[0]; 140 + /* nothing after this field */ 141 + }; 142 + 143 + struct mtk_nfc_clk { 144 + struct clk *nfi_clk; 145 + struct clk *pad_clk; 146 + }; 147 + 148 + struct mtk_nfc { 149 + struct nand_hw_control controller; 150 + struct mtk_ecc_config ecc_cfg; 151 + struct mtk_nfc_clk clk; 152 + struct mtk_ecc *ecc; 153 + 154 + struct device *dev; 155 + void __iomem *regs; 156 + 157 + struct completion done; 158 + struct list_head chips; 159 + 160 + u8 *buffer; 161 + }; 162 + 163 + static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) 164 + { 165 + return container_of(nand, struct mtk_nfc_nand_chip, nand); 166 + } 167 + 168 + static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i) 169 + { 170 + return (u8 *)p + i * chip->ecc.size; 171 + } 172 + 173 + static inline u8 *oob_ptr(struct nand_chip *chip, int i) 174 + { 175 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 176 + u8 *poi; 177 + 178 + /* map the sector's FDM data to free oob: 179 + * the beginning of the oob area stores the FDM data of bad mark sectors 180 + */ 181 + 182 + if (i < mtk_nand->bad_mark.sec) 183 + poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size; 184 + else if (i == mtk_nand->bad_mark.sec) 185 + poi = chip->oob_poi; 186 + else 187 + poi = chip->oob_poi + i * mtk_nand->fdm.reg_size; 188 + 189 + return poi; 190 + } 191 + 192 + static inline int mtk_data_len(struct nand_chip *chip) 193 + { 194 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 195 + 196 + return 
chip->ecc.size + mtk_nand->spare_per_sector; 197 + } 198 + 199 + static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i) 200 + { 201 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 202 + 203 + return nfc->buffer + i * mtk_data_len(chip); 204 + } 205 + 206 + static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i) 207 + { 208 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 209 + 210 + return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size; 211 + } 212 + 213 + static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg) 214 + { 215 + writel(val, nfc->regs + reg); 216 + } 217 + 218 + static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg) 219 + { 220 + writew(val, nfc->regs + reg); 221 + } 222 + 223 + static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg) 224 + { 225 + writeb(val, nfc->regs + reg); 226 + } 227 + 228 + static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg) 229 + { 230 + return readl_relaxed(nfc->regs + reg); 231 + } 232 + 233 + static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg) 234 + { 235 + return readw_relaxed(nfc->regs + reg); 236 + } 237 + 238 + static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg) 239 + { 240 + return readb_relaxed(nfc->regs + reg); 241 + } 242 + 243 + static void mtk_nfc_hw_reset(struct mtk_nfc *nfc) 244 + { 245 + struct device *dev = nfc->dev; 246 + u32 val; 247 + int ret; 248 + 249 + /* reset all registers and force the NFI master to terminate */ 250 + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); 251 + 252 + /* wait for the master to finish the last transaction */ 253 + ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val, 254 + !(val & MASTER_STA_MASK), 50, 255 + MTK_RESET_TIMEOUT); 256 + if (ret) 257 + dev_warn(dev, "master active in reset [0x%x] = 0x%x\n", 258 + NFI_MASTER_STA, val); 259 + 260 + /* ensure any status register affected by the NFI master is reset */ 261 + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); 262 + 
nfi_writew(nfc, STAR_DE, NFI_STRDATA); 263 + } 264 + 265 + static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command) 266 + { 267 + struct device *dev = nfc->dev; 268 + u32 val; 269 + int ret; 270 + 271 + nfi_writel(nfc, command, NFI_CMD); 272 + 273 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, 274 + !(val & STA_CMD), 10, MTK_TIMEOUT); 275 + if (ret) { 276 + dev_warn(dev, "nfi core timed out entering command mode\n"); 277 + return -EIO; 278 + } 279 + 280 + return 0; 281 + } 282 + 283 + static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr) 284 + { 285 + struct device *dev = nfc->dev; 286 + u32 val; 287 + int ret; 288 + 289 + nfi_writel(nfc, addr, NFI_COLADDR); 290 + nfi_writel(nfc, 0, NFI_ROWADDR); 291 + nfi_writew(nfc, 1, NFI_ADDRNOB); 292 + 293 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, 294 + !(val & STA_ADDR), 10, MTK_TIMEOUT); 295 + if (ret) { 296 + dev_warn(dev, "nfi core timed out entering address mode\n"); 297 + return -EIO; 298 + } 299 + 300 + return 0; 301 + } 302 + 303 + static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) 304 + { 305 + struct nand_chip *chip = mtd_to_nand(mtd); 306 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 307 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 308 + u32 fmt, spare; 309 + 310 + if (!mtd->writesize) 311 + return 0; 312 + 313 + spare = mtk_nand->spare_per_sector; 314 + 315 + switch (mtd->writesize) { 316 + case 512: 317 + fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512; 318 + break; 319 + case KB(2): 320 + if (chip->ecc.size == 512) 321 + fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512; 322 + else 323 + fmt = PAGEFMT_512_2K; 324 + break; 325 + case KB(4): 326 + if (chip->ecc.size == 512) 327 + fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512; 328 + else 329 + fmt = PAGEFMT_2K_4K; 330 + break; 331 + case KB(8): 332 + if (chip->ecc.size == 512) 333 + fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512; 334 + else 335 + fmt = PAGEFMT_4K_8K; 336 + break; 337 + case KB(16): 338 + fmt 
= PAGEFMT_8K_16K; 339 + break; 340 + default: 341 + dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize); 342 + return -EINVAL; 343 + } 344 + 345 + /* 346 + * the hardware will double the value for this eccsize, so we need to 347 + * halve it 348 + */ 349 + if (chip->ecc.size == 1024) 350 + spare >>= 1; 351 + 352 + switch (spare) { 353 + case 16: 354 + fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT); 355 + break; 356 + case 26: 357 + fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT); 358 + break; 359 + case 27: 360 + fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT); 361 + break; 362 + case 28: 363 + fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT); 364 + break; 365 + case 32: 366 + fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT); 367 + break; 368 + case 36: 369 + fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT); 370 + break; 371 + case 40: 372 + fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT); 373 + break; 374 + case 44: 375 + fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT); 376 + break; 377 + case 48: 378 + fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT); 379 + break; 380 + case 49: 381 + fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT); 382 + break; 383 + case 50: 384 + fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT); 385 + break; 386 + case 51: 387 + fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT); 388 + break; 389 + case 52: 390 + fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT); 391 + break; 392 + case 62: 393 + fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT); 394 + break; 395 + case 63: 396 + fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT); 397 + break; 398 + case 64: 399 + fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT); 400 + break; 401 + default: 402 + dev_err(nfc->dev, "invalid spare per sector %d\n", spare); 403 + return -EINVAL; 404 + } 405 + 406 + fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT; 407 + fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT; 408 + nfi_writew(nfc, fmt, NFI_PAGEFMT); 409 + 410 + nfc->ecc_cfg.strength = 
chip->ecc.strength; 411 + nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size; 412 + 413 + return 0; 414 + } 415 + 416 + static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip) 417 + { 418 + struct nand_chip *nand = mtd_to_nand(mtd); 419 + struct mtk_nfc *nfc = nand_get_controller_data(nand); 420 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); 421 + 422 + if (chip < 0) 423 + return; 424 + 425 + mtk_nfc_hw_runtime_config(mtd); 426 + 427 + nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL); 428 + } 429 + 430 + static int mtk_nfc_dev_ready(struct mtd_info *mtd) 431 + { 432 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); 433 + 434 + if (nfi_readl(nfc, NFI_STA) & STA_BUSY) 435 + return 0; 436 + 437 + return 1; 438 + } 439 + 440 + static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) 441 + { 442 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); 443 + 444 + if (ctrl & NAND_ALE) { 445 + mtk_nfc_send_address(nfc, dat); 446 + } else if (ctrl & NAND_CLE) { 447 + mtk_nfc_hw_reset(nfc); 448 + 449 + nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); 450 + mtk_nfc_send_command(nfc, dat); 451 + } 452 + } 453 + 454 + static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc) 455 + { 456 + int rc; 457 + u8 val; 458 + 459 + rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val, 460 + val & PIO_DI_RDY, 10, MTK_TIMEOUT); 461 + if (rc < 0) 462 + dev_err(nfc->dev, "data not ready\n"); 463 + } 464 + 465 + static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd) 466 + { 467 + struct nand_chip *chip = mtd_to_nand(mtd); 468 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 469 + u32 reg; 470 + 471 + /* after each byte read, the NFI_STA reg is reset by the hardware */ 472 + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; 473 + if (reg != NFI_FSM_CUSTDATA) { 474 + reg = nfi_readw(nfc, NFI_CNFG); 475 + reg |= CNFG_BYTE_RW | CNFG_READ_EN; 476 + nfi_writew(nfc, reg, NFI_CNFG); 477 + 478 + /* 479 + * set to 
max sector to allow the HW to continue reading over 480 + * unaligned accesses 481 + */ 482 + reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD; 483 + nfi_writel(nfc, reg, NFI_CON); 484 + 485 + /* trigger to fetch data */ 486 + nfi_writew(nfc, STAR_EN, NFI_STRDATA); 487 + } 488 + 489 + mtk_nfc_wait_ioready(nfc); 490 + 491 + return nfi_readb(nfc, NFI_DATAR); 492 + } 493 + 494 + static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len) 495 + { 496 + int i; 497 + 498 + for (i = 0; i < len; i++) 499 + buf[i] = mtk_nfc_read_byte(mtd); 500 + } 501 + 502 + static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte) 503 + { 504 + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); 505 + u32 reg; 506 + 507 + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; 508 + 509 + if (reg != NFI_FSM_CUSTDATA) { 510 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW; 511 + nfi_writew(nfc, reg, NFI_CNFG); 512 + 513 + reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR; 514 + nfi_writel(nfc, reg, NFI_CON); 515 + 516 + nfi_writew(nfc, STAR_EN, NFI_STRDATA); 517 + } 518 + 519 + mtk_nfc_wait_ioready(nfc); 520 + nfi_writeb(nfc, byte, NFI_DATAW); 521 + } 522 + 523 + static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 524 + { 525 + int i; 526 + 527 + for (i = 0; i < len; i++) 528 + mtk_nfc_write_byte(mtd, buf[i]); 529 + } 530 + 531 + static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data) 532 + { 533 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 534 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 535 + int size = chip->ecc.size + mtk_nand->fdm.reg_size; 536 + 537 + nfc->ecc_cfg.mode = ECC_DMA_MODE; 538 + nfc->ecc_cfg.op = ECC_ENCODE; 539 + 540 + return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size); 541 + } 542 + 543 + static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c) 544 + { 545 + /* nop */ 546 + } 547 + 548 + static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw) 549 + { 550 + 
struct nand_chip *chip = mtd_to_nand(mtd); 551 + struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip); 552 + u32 bad_pos = nand->bad_mark.pos; 553 + 554 + if (raw) 555 + bad_pos += nand->bad_mark.sec * mtk_data_len(chip); 556 + else 557 + bad_pos += nand->bad_mark.sec * chip->ecc.size; 558 + 559 + swap(chip->oob_poi[0], buf[bad_pos]); 560 + } 561 + 562 + static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset, 563 + u32 len, const u8 *buf) 564 + { 565 + struct nand_chip *chip = mtd_to_nand(mtd); 566 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 567 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 568 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 569 + u32 start, end; 570 + int i, ret; 571 + 572 + start = offset / chip->ecc.size; 573 + end = DIV_ROUND_UP(offset + len, chip->ecc.size); 574 + 575 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); 576 + for (i = 0; i < chip->ecc.steps; i++) { 577 + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), 578 + chip->ecc.size); 579 + 580 + if (start > i || i >= end) 581 + continue; 582 + 583 + if (i == mtk_nand->bad_mark.sec) 584 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); 585 + 586 + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); 587 + 588 + /* program the CRC back to the OOB */ 589 + ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i)); 590 + if (ret < 0) 591 + return ret; 592 + } 593 + 594 + return 0; 595 + } 596 + 597 + static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf) 598 + { 599 + struct nand_chip *chip = mtd_to_nand(mtd); 600 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 601 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 602 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 603 + u32 i; 604 + 605 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); 606 + for (i = 0; i < chip->ecc.steps; i++) { 607 + if (buf) 608 + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), 609 + chip->ecc.size); 610 + 611 + if (i == 
mtk_nand->bad_mark.sec) 612 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); 613 + 614 + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); 615 + } 616 + } 617 + 618 + static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start, 619 + u32 sectors) 620 + { 621 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 622 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 623 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 624 + u32 vall, valm; 625 + u8 *oobptr; 626 + int i, j; 627 + 628 + for (i = 0; i < sectors; i++) { 629 + oobptr = oob_ptr(chip, start + i); 630 + vall = nfi_readl(nfc, NFI_FDML(i)); 631 + valm = nfi_readl(nfc, NFI_FDMM(i)); 632 + 633 + for (j = 0; j < fdm->reg_size; j++) 634 + oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); 635 + } 636 + } 637 + 638 + static inline void mtk_nfc_write_fdm(struct nand_chip *chip) 639 + { 640 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 641 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 642 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 643 + u32 vall, valm; 644 + u8 *oobptr; 645 + int i, j; 646 + 647 + for (i = 0; i < chip->ecc.steps; i++) { 648 + oobptr = oob_ptr(chip, i); 649 + vall = 0; 650 + valm = 0; 651 + for (j = 0; j < 8; j++) { 652 + if (j < 4) 653 + vall |= (j < fdm->reg_size ? oobptr[j] : 0xff) 654 + << (j * 8); 655 + else 656 + valm |= (j < fdm->reg_size ? 
oobptr[j] : 0xff) 657 + << ((j - 4) * 8); 658 + } 659 + nfi_writel(nfc, vall, NFI_FDML(i)); 660 + nfi_writel(nfc, valm, NFI_FDMM(i)); 661 + } 662 + } 663 + 664 + static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip, 665 + const u8 *buf, int page, int len) 666 + { 667 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 668 + struct device *dev = nfc->dev; 669 + dma_addr_t addr; 670 + u32 reg; 671 + int ret; 672 + 673 + addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE); 674 + ret = dma_mapping_error(nfc->dev, addr); 675 + if (ret) { 676 + dev_err(nfc->dev, "dma mapping error\n"); 677 + return -EINVAL; 678 + } 679 + 680 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN; 681 + nfi_writew(nfc, reg, NFI_CNFG); 682 + 683 + nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON); 684 + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); 685 + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); 686 + 687 + init_completion(&nfc->done); 688 + 689 + reg = nfi_readl(nfc, NFI_CON) | CON_BWR; 690 + nfi_writel(nfc, reg, NFI_CON); 691 + nfi_writew(nfc, STAR_EN, NFI_STRDATA); 692 + 693 + ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); 694 + if (!ret) { 695 + dev_err(dev, "program ahb done timeout\n"); 696 + nfi_writew(nfc, 0, NFI_INTR_EN); 697 + ret = -ETIMEDOUT; 698 + goto timeout; 699 + } 700 + 701 + ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, 702 + (reg & CNTR_MASK) >= chip->ecc.steps, 703 + 10, MTK_TIMEOUT); 704 + if (ret) 705 + dev_err(dev, "hwecc write timeout\n"); 706 + 707 + timeout: 708 + 709 + dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE); 710 + nfi_writel(nfc, 0, NFI_CON); 711 + 712 + return ret; 713 + } 714 + 715 + static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 716 + const u8 *buf, int page, int raw) 717 + { 718 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 719 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 720 + size_t len; 
721 + const u8 *bufpoi; 722 + u32 reg; 723 + int ret; 724 + 725 + if (!raw) { 726 + /* OOB => FDM: from register, ECC: from HW */ 727 + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN; 728 + nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG); 729 + 730 + nfc->ecc_cfg.op = ECC_ENCODE; 731 + nfc->ecc_cfg.mode = ECC_NFI_MODE; 732 + ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); 733 + if (ret) { 734 + /* clear NFI config */ 735 + reg = nfi_readw(nfc, NFI_CNFG); 736 + reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); 737 + nfi_writew(nfc, reg, NFI_CNFG); 738 + 739 + return ret; 740 + } 741 + 742 + memcpy(nfc->buffer, buf, mtd->writesize); 743 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw); 744 + bufpoi = nfc->buffer; 745 + 746 + /* write OOB into the FDM registers (OOB area in MTK NAND) */ 747 + mtk_nfc_write_fdm(chip); 748 + } else { 749 + bufpoi = buf; 750 + } 751 + 752 + len = mtd->writesize + (raw ? mtd->oobsize : 0); 753 + ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len); 754 + 755 + if (!raw) 756 + mtk_ecc_disable(nfc->ecc); 757 + 758 + return ret; 759 + } 760 + 761 + static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd, 762 + struct nand_chip *chip, const u8 *buf, 763 + int oob_on, int page) 764 + { 765 + return mtk_nfc_write_page(mtd, chip, buf, page, 0); 766 + } 767 + 768 + static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 769 + const u8 *buf, int oob_on, int pg) 770 + { 771 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 772 + 773 + mtk_nfc_format_page(mtd, buf); 774 + return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1); 775 + } 776 + 777 + static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd, 778 + struct nand_chip *chip, u32 offset, 779 + u32 data_len, const u8 *buf, 780 + int oob_on, int page) 781 + { 782 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 783 + int ret; 784 + 785 + ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf); 786 + if (ret < 0) 787 + return ret; 788 + 789 + /* 
use the data in the private buffer (now with FDM and CRC) */ 790 + return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1); 791 + } 792 + 793 + static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 794 + int page) 795 + { 796 + int ret; 797 + 798 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 799 + 800 + ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page); 801 + if (ret < 0) 802 + return -EIO; 803 + 804 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 805 + ret = chip->waitfunc(mtd, chip); 806 + 807 + return ret & NAND_STATUS_FAIL ? -EIO : 0; 808 + } 809 + 810 + static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) 811 + { 812 + struct nand_chip *chip = mtd_to_nand(mtd); 813 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 814 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 815 + struct mtk_ecc_stats stats; 816 + int rc, i; 817 + 818 + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; 819 + if (rc) { 820 + memset(buf, 0xff, sectors * chip->ecc.size); 821 + for (i = 0; i < sectors; i++) 822 + memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); 823 + return 0; 824 + } 825 + 826 + mtk_ecc_get_stats(nfc->ecc, &stats, sectors); 827 + mtd->ecc_stats.corrected += stats.corrected; 828 + mtd->ecc_stats.failed += stats.failed; 829 + 830 + return stats.bitflips; 831 + } 832 + 833 + static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 834 + u32 data_offs, u32 readlen, 835 + u8 *bufpoi, int page, int raw) 836 + { 837 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 838 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 839 + u32 spare = mtk_nand->spare_per_sector; 840 + u32 column, sectors, start, end, reg; 841 + dma_addr_t addr; 842 + int bitflips; 843 + size_t len; 844 + u8 *buf; 845 + int rc; 846 + 847 + start = data_offs / chip->ecc.size; 848 + end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); 849 + 850 + sectors = end - start; 851 + column = start * 
(chip->ecc.size + spare); 852 + 853 + len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); 854 + buf = bufpoi + start * chip->ecc.size; 855 + 856 + if (column != 0) 857 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1); 858 + 859 + addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE); 860 + rc = dma_mapping_error(nfc->dev, addr); 861 + if (rc) { 862 + dev_err(nfc->dev, "dma mapping error\n"); 863 + 864 + return -EINVAL; 865 + } 866 + 867 + reg = nfi_readw(nfc, NFI_CNFG); 868 + reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB; 869 + if (!raw) { 870 + reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN; 871 + nfi_writew(nfc, reg, NFI_CNFG); 872 + 873 + nfc->ecc_cfg.mode = ECC_NFI_MODE; 874 + nfc->ecc_cfg.sectors = sectors; 875 + nfc->ecc_cfg.op = ECC_DECODE; 876 + rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); 877 + if (rc) { 878 + dev_err(nfc->dev, "ecc enable\n"); 879 + /* clear NFI_CNFG */ 880 + reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN | 881 + CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); 882 + nfi_writew(nfc, reg, NFI_CNFG); 883 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); 884 + 885 + return rc; 886 + } 887 + } else { 888 + nfi_writew(nfc, reg, NFI_CNFG); 889 + } 890 + 891 + nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON); 892 + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); 893 + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); 894 + 895 + init_completion(&nfc->done); 896 + reg = nfi_readl(nfc, NFI_CON) | CON_BRD; 897 + nfi_writel(nfc, reg, NFI_CON); 898 + nfi_writew(nfc, STAR_EN, NFI_STRDATA); 899 + 900 + rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); 901 + if (!rc) 902 + dev_warn(nfc->dev, "read ahb/dma done timeout\n"); 903 + 904 + rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, 905 + (reg & CNTR_MASK) >= sectors, 10, 906 + MTK_TIMEOUT); 907 + if (rc < 0) { 908 + dev_err(nfc->dev, "subpage done timeout\n"); 909 + bitflips = -EIO; 910 + } else { 911 + bitflips = 0; 912 + if (!raw) { 913 + rc = 
mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); 914 + bitflips = rc < 0 ? -ETIMEDOUT : 915 + mtk_nfc_update_ecc_stats(mtd, buf, sectors); 916 + mtk_nfc_read_fdm(chip, start, sectors); 917 + } 918 + } 919 + 920 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); 921 + 922 + if (raw) 923 + goto done; 924 + 925 + mtk_ecc_disable(nfc->ecc); 926 + 927 + if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec) 928 + mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw); 929 + done: 930 + nfi_writel(nfc, 0, NFI_CON); 931 + 932 + return bitflips; 933 + } 934 + 935 + static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd, 936 + struct nand_chip *chip, u32 off, 937 + u32 len, u8 *p, int pg) 938 + { 939 + return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0); 940 + } 941 + 942 + static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd, 943 + struct nand_chip *chip, u8 *p, 944 + int oob_on, int pg) 945 + { 946 + return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0); 947 + } 948 + 949 + static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 950 + u8 *buf, int oob_on, int page) 951 + { 952 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 953 + struct mtk_nfc *nfc = nand_get_controller_data(chip); 954 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 955 + int i, ret; 956 + 957 + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); 958 + ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer, 959 + page, 1); 960 + if (ret < 0) 961 + return ret; 962 + 963 + for (i = 0; i < chip->ecc.steps; i++) { 964 + memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size); 965 + 966 + if (i == mtk_nand->bad_mark.sec) 967 + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); 968 + 969 + if (buf) 970 + memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i), 971 + chip->ecc.size); 972 + } 973 + 974 + return ret; 975 + } 976 + 977 + static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 978 + int page) 
979 + { 980 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 981 + 982 + return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page); 983 + } 984 + 985 + static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc) 986 + { 987 + /* 988 + * ACCON: access timing control register 989 + * ------------------------------------- 990 + * 31:28: minimum required time for CS post pulling down after accessing 991 + * the device 992 + * 27:22: minimum required time for CS pre pulling down before accessing 993 + * the device 994 + * 21:16: minimum required time from NCEB low to NREB low 995 + * 15:12: minimum required time from NWEB high to NREB low. 996 + * 11:08: write enable hold time 997 + * 07:04: write wait states 998 + * 03:00: read wait states 999 + */ 1000 + nfi_writel(nfc, 0x10804211, NFI_ACCCON); 1001 + 1002 + /* 1003 + * CNRNB: nand ready/busy register 1004 + * ------------------------------- 1005 + * 7:4: timeout register for polling the NAND busy/ready signal 1006 + * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. 
1007 + */ 1008 + nfi_writew(nfc, 0xf1, NFI_CNRNB); 1009 + nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); 1010 + 1011 + mtk_nfc_hw_reset(nfc); 1012 + 1013 + nfi_readl(nfc, NFI_INTR_STA); 1014 + nfi_writel(nfc, 0, NFI_INTR_EN); 1015 + } 1016 + 1017 + static irqreturn_t mtk_nfc_irq(int irq, void *id) 1018 + { 1019 + struct mtk_nfc *nfc = id; 1020 + u16 sta, ien; 1021 + 1022 + sta = nfi_readw(nfc, NFI_INTR_STA); 1023 + ien = nfi_readw(nfc, NFI_INTR_EN); 1024 + 1025 + if (!(sta & ien)) 1026 + return IRQ_NONE; 1027 + 1028 + nfi_writew(nfc, ~sta & ien, NFI_INTR_EN); 1029 + complete(&nfc->done); 1030 + 1031 + return IRQ_HANDLED; 1032 + } 1033 + 1034 + static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk) 1035 + { 1036 + int ret; 1037 + 1038 + ret = clk_prepare_enable(clk->nfi_clk); 1039 + if (ret) { 1040 + dev_err(dev, "failed to enable nfi clk\n"); 1041 + return ret; 1042 + } 1043 + 1044 + ret = clk_prepare_enable(clk->pad_clk); 1045 + if (ret) { 1046 + dev_err(dev, "failed to enable pad clk\n"); 1047 + clk_disable_unprepare(clk->nfi_clk); 1048 + return ret; 1049 + } 1050 + 1051 + return 0; 1052 + } 1053 + 1054 + static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk) 1055 + { 1056 + clk_disable_unprepare(clk->nfi_clk); 1057 + clk_disable_unprepare(clk->pad_clk); 1058 + } 1059 + 1060 + static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section, 1061 + struct mtd_oob_region *oob_region) 1062 + { 1063 + struct nand_chip *chip = mtd_to_nand(mtd); 1064 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 1065 + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; 1066 + u32 eccsteps; 1067 + 1068 + eccsteps = mtd->writesize / chip->ecc.size; 1069 + 1070 + if (section >= eccsteps) 1071 + return -ERANGE; 1072 + 1073 + oob_region->length = fdm->reg_size - fdm->ecc_size; 1074 + oob_region->offset = section * fdm->reg_size + fdm->ecc_size; 1075 + 1076 + return 0; 1077 + } 1078 + 1079 + static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, 1080 
+ struct mtd_oob_region *oob_region) 1081 + { 1082 + struct nand_chip *chip = mtd_to_nand(mtd); 1083 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 1084 + u32 eccsteps; 1085 + 1086 + if (section) 1087 + return -ERANGE; 1088 + 1089 + eccsteps = mtd->writesize / chip->ecc.size; 1090 + oob_region->offset = mtk_nand->fdm.reg_size * eccsteps; 1091 + oob_region->length = mtd->oobsize - oob_region->offset; 1092 + 1093 + return 0; 1094 + } 1095 + 1096 + static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = { 1097 + .free = mtk_nfc_ooblayout_free, 1098 + .ecc = mtk_nfc_ooblayout_ecc, 1099 + }; 1100 + 1101 + static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd) 1102 + { 1103 + struct nand_chip *nand = mtd_to_nand(mtd); 1104 + struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand); 1105 + u32 ecc_bytes; 1106 + 1107 + ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); 1108 + 1109 + fdm->reg_size = chip->spare_per_sector - ecc_bytes; 1110 + if (fdm->reg_size > NFI_FDM_MAX_SIZE) 1111 + fdm->reg_size = NFI_FDM_MAX_SIZE; 1112 + 1113 + /* bad block mark storage */ 1114 + fdm->ecc_size = 1; 1115 + } 1116 + 1117 + static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl, 1118 + struct mtd_info *mtd) 1119 + { 1120 + struct nand_chip *nand = mtd_to_nand(mtd); 1121 + 1122 + if (mtd->writesize == 512) { 1123 + bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap; 1124 + } else { 1125 + bm_ctl->bm_swap = mtk_nfc_bad_mark_swap; 1126 + bm_ctl->sec = mtd->writesize / mtk_data_len(nand); 1127 + bm_ctl->pos = mtd->writesize % mtk_data_len(nand); 1128 + } 1129 + } 1130 + 1131 + static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) 1132 + { 1133 + struct nand_chip *nand = mtd_to_nand(mtd); 1134 + u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44, 1135 + 48, 49, 50, 51, 52, 62, 63, 64}; 1136 + u32 eccsteps, i; 1137 + 1138 + eccsteps = mtd->writesize / nand->ecc.size; 1139 + *sps = mtd->oobsize / eccsteps; 1140 + 1141 + if 
(nand->ecc.size == 1024) 1142 + *sps >>= 1; 1143 + 1144 + for (i = 0; i < ARRAY_SIZE(spare); i++) { 1145 + if (*sps <= spare[i]) { 1146 + if (!i) 1147 + *sps = spare[i]; 1148 + else if (*sps != spare[i]) 1149 + *sps = spare[i - 1]; 1150 + break; 1151 + } 1152 + } 1153 + 1154 + if (i >= ARRAY_SIZE(spare)) 1155 + *sps = spare[ARRAY_SIZE(spare) - 1]; 1156 + 1157 + if (nand->ecc.size == 1024) 1158 + *sps <<= 1; 1159 + } 1160 + 1161 + static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) 1162 + { 1163 + struct nand_chip *nand = mtd_to_nand(mtd); 1164 + u32 spare; 1165 + int free; 1166 + 1167 + /* support only ecc hw mode */ 1168 + if (nand->ecc.mode != NAND_ECC_HW) { 1169 + dev_err(dev, "ecc.mode not supported\n"); 1170 + return -EINVAL; 1171 + } 1172 + 1173 + /* if optional dt settings not present */ 1174 + if (!nand->ecc.size || !nand->ecc.strength) { 1175 + /* use datasheet requirements */ 1176 + nand->ecc.strength = nand->ecc_strength_ds; 1177 + nand->ecc.size = nand->ecc_step_ds; 1178 + 1179 + /* 1180 + * align eccstrength and eccsize 1181 + * this controller only supports 512 and 1024 sizes 1182 + */ 1183 + if (nand->ecc.size < 1024) { 1184 + if (mtd->writesize > 512) { 1185 + nand->ecc.size = 1024; 1186 + nand->ecc.strength <<= 1; 1187 + } else { 1188 + nand->ecc.size = 512; 1189 + } 1190 + } else { 1191 + nand->ecc.size = 1024; 1192 + } 1193 + 1194 + mtk_nfc_set_spare_per_sector(&spare, mtd); 1195 + 1196 + /* calculate oob bytes except ecc parity data */ 1197 + free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; 1198 + free = spare - free; 1199 + 1200 + /* 1201 + * enhance ecc strength if oob left is bigger than max FDM size 1202 + * or reduce ecc strength if oob size is not enough for ecc 1203 + * parity data. 
1204 + */ 1205 + if (free > NFI_FDM_MAX_SIZE) { 1206 + spare -= NFI_FDM_MAX_SIZE; 1207 + nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; 1208 + } else if (free < 0) { 1209 + spare -= NFI_FDM_MIN_SIZE; 1210 + nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; 1211 + } 1212 + } 1213 + 1214 + mtk_ecc_adjust_strength(&nand->ecc.strength); 1215 + 1216 + dev_info(dev, "eccsize %d eccstrength %d\n", 1217 + nand->ecc.size, nand->ecc.strength); 1218 + 1219 + return 0; 1220 + } 1221 + 1222 + static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, 1223 + struct device_node *np) 1224 + { 1225 + struct mtk_nfc_nand_chip *chip; 1226 + struct nand_chip *nand; 1227 + struct mtd_info *mtd; 1228 + int nsels, len; 1229 + u32 tmp; 1230 + int ret; 1231 + int i; 1232 + 1233 + if (!of_get_property(np, "reg", &nsels)) 1234 + return -ENODEV; 1235 + 1236 + nsels /= sizeof(u32); 1237 + if (!nsels || nsels > MTK_NAND_MAX_NSELS) { 1238 + dev_err(dev, "invalid reg property size %d\n", nsels); 1239 + return -EINVAL; 1240 + } 1241 + 1242 + chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8), 1243 + GFP_KERNEL); 1244 + if (!chip) 1245 + return -ENOMEM; 1246 + 1247 + chip->nsels = nsels; 1248 + for (i = 0; i < nsels; i++) { 1249 + ret = of_property_read_u32_index(np, "reg", i, &tmp); 1250 + if (ret) { 1251 + dev_err(dev, "reg property failure : %d\n", ret); 1252 + return ret; 1253 + } 1254 + chip->sels[i] = tmp; 1255 + } 1256 + 1257 + nand = &chip->nand; 1258 + nand->controller = &nfc->controller; 1259 + 1260 + nand_set_flash_node(nand, np); 1261 + nand_set_controller_data(nand, nfc); 1262 + 1263 + nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; 1264 + nand->dev_ready = mtk_nfc_dev_ready; 1265 + nand->select_chip = mtk_nfc_select_chip; 1266 + nand->write_byte = mtk_nfc_write_byte; 1267 + nand->write_buf = mtk_nfc_write_buf; 1268 + nand->read_byte = mtk_nfc_read_byte; 1269 + nand->read_buf = mtk_nfc_read_buf; 1270 + nand->cmd_ctrl = mtk_nfc_cmd_ctrl; 
1271 + 1272 + /* set default mode in case dt entry is missing */ 1273 + nand->ecc.mode = NAND_ECC_HW; 1274 + 1275 + nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc; 1276 + nand->ecc.write_page_raw = mtk_nfc_write_page_raw; 1277 + nand->ecc.write_page = mtk_nfc_write_page_hwecc; 1278 + nand->ecc.write_oob_raw = mtk_nfc_write_oob_std; 1279 + nand->ecc.write_oob = mtk_nfc_write_oob_std; 1280 + 1281 + nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc; 1282 + nand->ecc.read_page_raw = mtk_nfc_read_page_raw; 1283 + nand->ecc.read_page = mtk_nfc_read_page_hwecc; 1284 + nand->ecc.read_oob_raw = mtk_nfc_read_oob_std; 1285 + nand->ecc.read_oob = mtk_nfc_read_oob_std; 1286 + 1287 + mtd = nand_to_mtd(nand); 1288 + mtd->owner = THIS_MODULE; 1289 + mtd->dev.parent = dev; 1290 + mtd->name = MTK_NAME; 1291 + mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops); 1292 + 1293 + mtk_nfc_hw_init(nfc); 1294 + 1295 + ret = nand_scan_ident(mtd, nsels, NULL); 1296 + if (ret) 1297 + return -ENODEV; 1298 + 1299 + /* store bbt magic in page, cause OOB is not protected */ 1300 + if (nand->bbt_options & NAND_BBT_USE_FLASH) 1301 + nand->bbt_options |= NAND_BBT_NO_OOB; 1302 + 1303 + ret = mtk_nfc_ecc_init(dev, mtd); 1304 + if (ret) 1305 + return -EINVAL; 1306 + 1307 + if (nand->options & NAND_BUSWIDTH_16) { 1308 + dev_err(dev, "16bits buswidth not supported"); 1309 + return -EINVAL; 1310 + } 1311 + 1312 + mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); 1313 + mtk_nfc_set_fdm(&chip->fdm, mtd); 1314 + mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd); 1315 + 1316 + len = mtd->writesize + mtd->oobsize; 1317 + nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL); 1318 + if (!nfc->buffer) 1319 + return -ENOMEM; 1320 + 1321 + ret = nand_scan_tail(mtd); 1322 + if (ret) 1323 + return -ENODEV; 1324 + 1325 + ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); 1326 + if (ret) { 1327 + dev_err(dev, "mtd parse partition error\n"); 1328 + nand_release(mtd); 1329 + return ret; 1330 + } 1331 + 1332 
+ list_add_tail(&chip->node, &nfc->chips); 1333 + 1334 + return 0; 1335 + } 1336 + 1337 + static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) 1338 + { 1339 + struct device_node *np = dev->of_node; 1340 + struct device_node *nand_np; 1341 + int ret; 1342 + 1343 + for_each_child_of_node(np, nand_np) { 1344 + ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); 1345 + if (ret) { 1346 + of_node_put(nand_np); 1347 + return ret; 1348 + } 1349 + } 1350 + 1351 + return 0; 1352 + } 1353 + 1354 + static int mtk_nfc_probe(struct platform_device *pdev) 1355 + { 1356 + struct device *dev = &pdev->dev; 1357 + struct device_node *np = dev->of_node; 1358 + struct mtk_nfc *nfc; 1359 + struct resource *res; 1360 + int ret, irq; 1361 + 1362 + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); 1363 + if (!nfc) 1364 + return -ENOMEM; 1365 + 1366 + spin_lock_init(&nfc->controller.lock); 1367 + init_waitqueue_head(&nfc->controller.wq); 1368 + INIT_LIST_HEAD(&nfc->chips); 1369 + 1370 + /* probe defer if not ready */ 1371 + nfc->ecc = of_mtk_ecc_get(np); 1372 + if (IS_ERR(nfc->ecc)) 1373 + return PTR_ERR(nfc->ecc); 1374 + else if (!nfc->ecc) 1375 + return -ENODEV; 1376 + 1377 + nfc->dev = dev; 1378 + 1379 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1380 + nfc->regs = devm_ioremap_resource(dev, res); 1381 + if (IS_ERR(nfc->regs)) { 1382 + ret = PTR_ERR(nfc->regs); 1383 + dev_err(dev, "no nfi base\n"); 1384 + goto release_ecc; 1385 + } 1386 + 1387 + nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk"); 1388 + if (IS_ERR(nfc->clk.nfi_clk)) { 1389 + dev_err(dev, "no clk\n"); 1390 + ret = PTR_ERR(nfc->clk.nfi_clk); 1391 + goto release_ecc; 1392 + } 1393 + 1394 + nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk"); 1395 + if (IS_ERR(nfc->clk.pad_clk)) { 1396 + dev_err(dev, "no pad clk\n"); 1397 + ret = PTR_ERR(nfc->clk.pad_clk); 1398 + goto release_ecc; 1399 + } 1400 + 1401 + ret = mtk_nfc_enable_clk(dev, &nfc->clk); 1402 + if (ret) 1403 + goto release_ecc; 1404 + 1405 + 
irq = platform_get_irq(pdev, 0); 1406 + if (irq < 0) { 1407 + dev_err(dev, "no nfi irq resource\n"); 1408 + ret = -EINVAL; 1409 + goto clk_disable; 1410 + } 1411 + 1412 + ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc); 1413 + if (ret) { 1414 + dev_err(dev, "failed to request nfi irq\n"); 1415 + goto clk_disable; 1416 + } 1417 + 1418 + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); 1419 + if (ret) { 1420 + dev_err(dev, "failed to set dma mask\n"); 1421 + goto clk_disable; 1422 + } 1423 + 1424 + platform_set_drvdata(pdev, nfc); 1425 + 1426 + ret = mtk_nfc_nand_chips_init(dev, nfc); 1427 + if (ret) { 1428 + dev_err(dev, "failed to init nand chips\n"); 1429 + goto clk_disable; 1430 + } 1431 + 1432 + return 0; 1433 + 1434 + clk_disable: 1435 + mtk_nfc_disable_clk(&nfc->clk); 1436 + 1437 + release_ecc: 1438 + mtk_ecc_release(nfc->ecc); 1439 + 1440 + return ret; 1441 + } 1442 + 1443 + static int mtk_nfc_remove(struct platform_device *pdev) 1444 + { 1445 + struct mtk_nfc *nfc = platform_get_drvdata(pdev); 1446 + struct mtk_nfc_nand_chip *chip; 1447 + 1448 + while (!list_empty(&nfc->chips)) { 1449 + chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, 1450 + node); 1451 + nand_release(nand_to_mtd(&chip->nand)); 1452 + list_del(&chip->node); 1453 + } 1454 + 1455 + mtk_ecc_release(nfc->ecc); 1456 + mtk_nfc_disable_clk(&nfc->clk); 1457 + 1458 + return 0; 1459 + } 1460 + 1461 + #ifdef CONFIG_PM_SLEEP 1462 + static int mtk_nfc_suspend(struct device *dev) 1463 + { 1464 + struct mtk_nfc *nfc = dev_get_drvdata(dev); 1465 + 1466 + mtk_nfc_disable_clk(&nfc->clk); 1467 + 1468 + return 0; 1469 + } 1470 + 1471 + static int mtk_nfc_resume(struct device *dev) 1472 + { 1473 + struct mtk_nfc *nfc = dev_get_drvdata(dev); 1474 + struct mtk_nfc_nand_chip *chip; 1475 + struct nand_chip *nand; 1476 + struct mtd_info *mtd; 1477 + int ret; 1478 + u32 i; 1479 + 1480 + udelay(200); 1481 + 1482 + ret = mtk_nfc_enable_clk(dev, &nfc->clk); 1483 + if (ret) 1484 + return ret; 
1485 + 1486 + mtk_nfc_hw_init(nfc); 1487 + 1488 + /* reset NAND chip if VCC was powered off */ 1489 + list_for_each_entry(chip, &nfc->chips, node) { 1490 + nand = &chip->nand; 1491 + mtd = nand_to_mtd(nand); 1492 + for (i = 0; i < chip->nsels; i++) { 1493 + nand->select_chip(mtd, i); 1494 + nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 1495 + } 1496 + } 1497 + 1498 + return 0; 1499 + } 1500 + 1501 + static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); 1502 + #endif 1503 + 1504 + static const struct of_device_id mtk_nfc_id_table[] = { 1505 + { .compatible = "mediatek,mt2701-nfc" }, 1506 + {} 1507 + }; 1508 + MODULE_DEVICE_TABLE(of, mtk_nfc_id_table); 1509 + 1510 + static struct platform_driver mtk_nfc_driver = { 1511 + .probe = mtk_nfc_probe, 1512 + .remove = mtk_nfc_remove, 1513 + .driver = { 1514 + .name = MTK_NAME, 1515 + .of_match_table = mtk_nfc_id_table, 1516 + #ifdef CONFIG_PM_SLEEP 1517 + .pm = &mtk_nfc_pm_ops, 1518 + #endif 1519 + }, 1520 + }; 1521 + 1522 + module_platform_driver(mtk_nfc_driver); 1523 + 1524 + MODULE_LICENSE("GPL"); 1525 + MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); 1526 + MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
+1
drivers/mtd/nand/nand_ids.c
··· 168 168 /* Manufacturer IDs */ 169 169 struct nand_manufacturers nand_manuf_ids[] = { 170 170 {NAND_MFR_TOSHIBA, "Toshiba"}, 171 + {NAND_MFR_ESMT, "ESMT"}, 171 172 {NAND_MFR_SAMSUNG, "Samsung"}, 172 173 {NAND_MFR_FUJITSU, "Fujitsu"}, 173 174 {NAND_MFR_NATIONAL, "National"},
+2 -5
drivers/mtd/nand/omap2.c
··· 118 118 #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) 119 119 #define STATUS_BUFF_EMPTY 0x00000001 120 120 121 - #define OMAP24XX_DMA_GPMC 4 122 - 123 121 #define SECTOR_BYTES 512 124 122 /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ 125 123 #define BCH4_BIT_PAD 4 ··· 1806 1808 struct nand_chip *nand_chip; 1807 1809 int err; 1808 1810 dma_cap_mask_t mask; 1809 - unsigned sig; 1810 1811 struct resource *res; 1811 1812 struct device *dev = &pdev->dev; 1812 1813 int min_oobbytes = BADBLOCK_MARKER_LENGTH; ··· 1918 1921 case NAND_OMAP_PREFETCH_DMA: 1919 1922 dma_cap_zero(mask); 1920 1923 dma_cap_set(DMA_SLAVE, mask); 1921 - sig = OMAP24XX_DMA_GPMC; 1922 - info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1924 + info->dma = dma_request_chan(pdev->dev.parent, "rxtx"); 1925 + 1923 1926 if (!info->dma) { 1924 1927 dev_err(&pdev->dev, "DMA engine request failed\n"); 1925 1928 err = -ENXIO;
+388 -9
drivers/mtd/nand/sunxi_nand.c
··· 39 39 #include <linux/gpio.h> 40 40 #include <linux/interrupt.h> 41 41 #include <linux/iopoll.h> 42 + #include <linux/reset.h> 42 43 43 44 #define NFC_REG_CTL 0x0000 44 45 #define NFC_REG_ST 0x0004 ··· 154 153 155 154 /* define bit use in NFC_ECC_ST */ 156 155 #define NFC_ECC_ERR(x) BIT(x) 156 + #define NFC_ECC_ERR_MSK GENMASK(15, 0) 157 157 #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) 158 158 #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) 159 159 ··· 271 269 void __iomem *regs; 272 270 struct clk *ahb_clk; 273 271 struct clk *mod_clk; 272 + struct reset_control *reset; 274 273 unsigned long assigned_cs; 275 274 unsigned long clk_rate; 276 275 struct list_head chips; 277 276 struct completion complete; 277 + struct dma_chan *dmac; 278 278 }; 279 279 280 280 static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) ··· 367 363 dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); 368 364 369 365 return ret; 366 + } 367 + 368 + static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, 369 + int chunksize, int nchunks, 370 + enum dma_data_direction ddir, 371 + struct scatterlist *sg) 372 + { 373 + struct nand_chip *nand = mtd_to_nand(mtd); 374 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 375 + struct dma_async_tx_descriptor *dmad; 376 + enum dma_transfer_direction tdir; 377 + dma_cookie_t dmat; 378 + int ret; 379 + 380 + if (ddir == DMA_FROM_DEVICE) 381 + tdir = DMA_DEV_TO_MEM; 382 + else 383 + tdir = DMA_MEM_TO_DEV; 384 + 385 + sg_init_one(sg, buf, nchunks * chunksize); 386 + ret = dma_map_sg(nfc->dev, sg, 1, ddir); 387 + if (!ret) 388 + return -ENOMEM; 389 + 390 + dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); 391 + if (!dmad) { 392 + ret = -EINVAL; 393 + goto err_unmap_buf; 394 + } 395 + 396 + writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, 397 + nfc->regs + NFC_REG_CTL); 398 + writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); 399 + writel(chunksize, nfc->regs + 
NFC_REG_CNT); 400 + dmat = dmaengine_submit(dmad); 401 + 402 + ret = dma_submit_error(dmat); 403 + if (ret) 404 + goto err_clr_dma_flag; 405 + 406 + return 0; 407 + 408 + err_clr_dma_flag: 409 + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, 410 + nfc->regs + NFC_REG_CTL); 411 + 412 + err_unmap_buf: 413 + dma_unmap_sg(nfc->dev, sg, 1, ddir); 414 + return ret; 415 + } 416 + 417 + static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, 418 + enum dma_data_direction ddir, 419 + struct scatterlist *sg) 420 + { 421 + struct nand_chip *nand = mtd_to_nand(mtd); 422 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 423 + 424 + dma_unmap_sg(nfc->dev, sg, 1, ddir); 425 + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, 426 + nfc->regs + NFC_REG_CTL); 370 427 } 371 428 372 429 static int sunxi_nfc_dev_ready(struct mtd_info *mtd) ··· 887 822 } 888 823 889 824 static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, 890 - int step, bool *erased) 825 + int step, u32 status, bool *erased) 891 826 { 892 827 struct nand_chip *nand = mtd_to_nand(mtd); 893 828 struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 894 829 struct nand_ecc_ctrl *ecc = &nand->ecc; 895 - u32 status, tmp; 830 + u32 tmp; 896 831 897 832 *erased = false; 898 - 899 - status = readl(nfc->regs + NFC_REG_ECC_ST); 900 833 901 834 if (status & NFC_ECC_ERR(step)) 902 835 return -EBADMSG; ··· 961 898 *cur_off = oob_off + ecc->bytes + 4; 962 899 963 900 ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? 
oob : NULL, 0, 901 + readl(nfc->regs + NFC_REG_ECC_ST), 964 902 &erased); 965 903 if (erased) 966 904 return 1; ··· 1029 965 1030 966 if (cur_off) 1031 967 *cur_off = mtd->oobsize + mtd->writesize; 968 + } 969 + 970 + static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, 971 + int oob_required, int page, 972 + int nchunks) 973 + { 974 + struct nand_chip *nand = mtd_to_nand(mtd); 975 + bool randomized = nand->options & NAND_NEED_SCRAMBLING; 976 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 977 + struct nand_ecc_ctrl *ecc = &nand->ecc; 978 + unsigned int max_bitflips = 0; 979 + int ret, i, raw_mode = 0; 980 + struct scatterlist sg; 981 + u32 status; 982 + 983 + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); 984 + if (ret) 985 + return ret; 986 + 987 + ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, 988 + DMA_FROM_DEVICE, &sg); 989 + if (ret) 990 + return ret; 991 + 992 + sunxi_nfc_hw_ecc_enable(mtd); 993 + sunxi_nfc_randomizer_config(mtd, page, false); 994 + sunxi_nfc_randomizer_enable(mtd); 995 + 996 + writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | 997 + NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); 998 + 999 + dma_async_issue_pending(nfc->dmac); 1000 + 1001 + writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, 1002 + nfc->regs + NFC_REG_CMD); 1003 + 1004 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 1005 + if (ret) 1006 + dmaengine_terminate_all(nfc->dmac); 1007 + 1008 + sunxi_nfc_randomizer_disable(mtd); 1009 + sunxi_nfc_hw_ecc_disable(mtd); 1010 + 1011 + sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); 1012 + 1013 + if (ret) 1014 + return ret; 1015 + 1016 + status = readl(nfc->regs + NFC_REG_ECC_ST); 1017 + 1018 + for (i = 0; i < nchunks; i++) { 1019 + int data_off = i * ecc->size; 1020 + int oob_off = i * (ecc->bytes + 4); 1021 + u8 *data = buf + data_off; 1022 + u8 *oob = nand->oob_poi + oob_off; 1023 + bool erased; 1024 + 1025 + ret = sunxi_nfc_hw_ecc_correct(mtd, 
randomized ? data : NULL, 1026 + oob_required ? oob : NULL, 1027 + i, status, &erased); 1028 + 1029 + /* ECC errors are handled in the second loop. */ 1030 + if (ret < 0) 1031 + continue; 1032 + 1033 + if (oob_required && !erased) { 1034 + /* TODO: use DMA to retrieve OOB */ 1035 + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1036 + mtd->writesize + oob_off, -1); 1037 + nand->read_buf(mtd, oob, ecc->bytes + 4); 1038 + 1039 + sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, 1040 + !i, page); 1041 + } 1042 + 1043 + if (erased) 1044 + raw_mode = 1; 1045 + 1046 + sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); 1047 + } 1048 + 1049 + if (status & NFC_ECC_ERR_MSK) { 1050 + for (i = 0; i < nchunks; i++) { 1051 + int data_off = i * ecc->size; 1052 + int oob_off = i * (ecc->bytes + 4); 1053 + u8 *data = buf + data_off; 1054 + u8 *oob = nand->oob_poi + oob_off; 1055 + 1056 + if (!(status & NFC_ECC_ERR(i))) 1057 + continue; 1058 + 1059 + /* 1060 + * Re-read the data with the randomizer disabled to 1061 + * identify bitflips in erased pages. 
1062 + */ 1063 + if (randomized) { 1064 + /* TODO: use DMA to read page in raw mode */ 1065 + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1066 + data_off, -1); 1067 + nand->read_buf(mtd, data, ecc->size); 1068 + } 1069 + 1070 + /* TODO: use DMA to retrieve OOB */ 1071 + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, 1072 + mtd->writesize + oob_off, -1); 1073 + nand->read_buf(mtd, oob, ecc->bytes + 4); 1074 + 1075 + ret = nand_check_erased_ecc_chunk(data, ecc->size, 1076 + oob, ecc->bytes + 4, 1077 + NULL, 0, 1078 + ecc->strength); 1079 + if (ret >= 0) 1080 + raw_mode = 1; 1081 + 1082 + sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); 1083 + } 1084 + } 1085 + 1086 + if (oob_required) 1087 + sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, 1088 + NULL, !raw_mode, 1089 + page); 1090 + 1091 + return max_bitflips; 1032 1092 } 1033 1093 1034 1094 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, ··· 1253 1065 return max_bitflips; 1254 1066 } 1255 1067 1068 + static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd, 1069 + struct nand_chip *chip, u8 *buf, 1070 + int oob_required, int page) 1071 + { 1072 + int ret; 1073 + 1074 + ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, 1075 + chip->ecc.steps); 1076 + if (ret >= 0) 1077 + return ret; 1078 + 1079 + /* Fallback to PIO mode */ 1080 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); 1081 + 1082 + return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page); 1083 + } 1084 + 1256 1085 static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, 1257 1086 struct nand_chip *chip, 1258 1087 u32 data_offs, u32 readlen, ··· 1303 1098 return max_bitflips; 1304 1099 } 1305 1100 1101 + static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd, 1102 + struct nand_chip *chip, 1103 + u32 data_offs, u32 readlen, 1104 + u8 *buf, int page) 1105 + { 1106 + int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); 1107 + int ret; 1108 + 1109 + ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, 
buf, false, page, nchunks); 1110 + if (ret >= 0) 1111 + return ret; 1112 + 1113 + /* Fallback to PIO mode */ 1114 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); 1115 + 1116 + return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen, 1117 + buf, page); 1118 + } 1119 + 1306 1120 static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, 1307 1121 struct nand_chip *chip, 1308 1122 const uint8_t *buf, int oob_required, ··· 1352 1128 sunxi_nfc_hw_ecc_disable(mtd); 1353 1129 1354 1130 return 0; 1131 + } 1132 + 1133 + static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, 1134 + struct nand_chip *chip, 1135 + u32 data_offs, u32 data_len, 1136 + const u8 *buf, int oob_required, 1137 + int page) 1138 + { 1139 + struct nand_ecc_ctrl *ecc = &chip->ecc; 1140 + int ret, i, cur_off = 0; 1141 + 1142 + sunxi_nfc_hw_ecc_enable(mtd); 1143 + 1144 + for (i = data_offs / ecc->size; 1145 + i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { 1146 + int data_off = i * ecc->size; 1147 + int oob_off = i * (ecc->bytes + 4); 1148 + const u8 *data = buf + data_off; 1149 + const u8 *oob = chip->oob_poi + oob_off; 1150 + 1151 + ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, 1152 + oob_off + mtd->writesize, 1153 + &cur_off, !i, page); 1154 + if (ret) 1155 + return ret; 1156 + } 1157 + 1158 + sunxi_nfc_hw_ecc_disable(mtd); 1159 + 1160 + return 0; 1161 + } 1162 + 1163 + static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, 1164 + struct nand_chip *chip, 1165 + const u8 *buf, 1166 + int oob_required, 1167 + int page) 1168 + { 1169 + struct nand_chip *nand = mtd_to_nand(mtd); 1170 + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); 1171 + struct nand_ecc_ctrl *ecc = &nand->ecc; 1172 + struct scatterlist sg; 1173 + int ret, i; 1174 + 1175 + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); 1176 + if (ret) 1177 + return ret; 1178 + 1179 + ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, 1180 + DMA_TO_DEVICE, &sg); 1181 + if (ret) 1182 + 
goto pio_fallback; 1183 + 1184 + for (i = 0; i < ecc->steps; i++) { 1185 + const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4)); 1186 + 1187 + sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); 1188 + } 1189 + 1190 + sunxi_nfc_hw_ecc_enable(mtd); 1191 + sunxi_nfc_randomizer_config(mtd, page, false); 1192 + sunxi_nfc_randomizer_enable(mtd); 1193 + 1194 + writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, 1195 + nfc->regs + NFC_REG_RCMD_SET); 1196 + 1197 + dma_async_issue_pending(nfc->dmac); 1198 + 1199 + writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | 1200 + NFC_DATA_TRANS | NFC_ACCESS_DIR, 1201 + nfc->regs + NFC_REG_CMD); 1202 + 1203 + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); 1204 + if (ret) 1205 + dmaengine_terminate_all(nfc->dmac); 1206 + 1207 + sunxi_nfc_randomizer_disable(mtd); 1208 + sunxi_nfc_hw_ecc_disable(mtd); 1209 + 1210 + sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); 1211 + 1212 + if (ret) 1213 + return ret; 1214 + 1215 + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) 1216 + /* TODO: use DMA to transfer extra OOB bytes ? 
*/ 1217 + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, 1218 + NULL, page); 1219 + 1220 + return 0; 1221 + 1222 + pio_fallback: 1223 + return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page); 1355 1224 } 1356 1225 1357 1226 static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, ··· 1814 1497 int ret; 1815 1498 int i; 1816 1499 1500 + if (ecc->size != 512 && ecc->size != 1024) 1501 + return -EINVAL; 1502 + 1817 1503 data = kzalloc(sizeof(*data), GFP_KERNEL); 1818 1504 if (!data) 1819 1505 return -ENOMEM; 1506 + 1507 + /* Prefer 1k ECC chunk over 512 ones */ 1508 + if (ecc->size == 512 && mtd->writesize > 512) { 1509 + ecc->size = 1024; 1510 + ecc->strength *= 2; 1511 + } 1820 1512 1821 1513 /* Add ECC info retrieval from DT */ 1822 1514 for (i = 0; i < ARRAY_SIZE(strengths); i++) { ··· 1876 1550 struct nand_ecc_ctrl *ecc, 1877 1551 struct device_node *np) 1878 1552 { 1553 + struct nand_chip *nand = mtd_to_nand(mtd); 1554 + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); 1555 + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); 1879 1556 int ret; 1880 1557 1881 1558 ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); 1882 1559 if (ret) 1883 1560 return ret; 1884 1561 1885 - ecc->read_page = sunxi_nfc_hw_ecc_read_page; 1886 - ecc->write_page = sunxi_nfc_hw_ecc_write_page; 1562 + if (nfc->dmac) { 1563 + ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; 1564 + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; 1565 + ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; 1566 + nand->options |= NAND_USE_BOUNCE_BUFFER; 1567 + } else { 1568 + ecc->read_page = sunxi_nfc_hw_ecc_read_page; 1569 + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; 1570 + ecc->write_page = sunxi_nfc_hw_ecc_write_page; 1571 + } 1572 + 1573 + /* TODO: support DMA for raw accesses and subpage write */ 1574 + ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; 1887 1575 ecc->read_oob_raw = nand_read_oob_std; 1888 1576 
ecc->write_oob_raw = nand_write_oob_std; 1889 1577 ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; ··· 2211 1871 if (ret) 2212 1872 goto out_ahb_clk_unprepare; 2213 1873 1874 + nfc->reset = devm_reset_control_get_optional(dev, "ahb"); 1875 + if (!IS_ERR(nfc->reset)) { 1876 + ret = reset_control_deassert(nfc->reset); 1877 + if (ret) { 1878 + dev_err(dev, "reset err %d\n", ret); 1879 + goto out_mod_clk_unprepare; 1880 + } 1881 + } else if (PTR_ERR(nfc->reset) != -ENOENT) { 1882 + ret = PTR_ERR(nfc->reset); 1883 + goto out_mod_clk_unprepare; 1884 + } 1885 + 2214 1886 ret = sunxi_nfc_rst(nfc); 2215 1887 if (ret) 2216 - goto out_mod_clk_unprepare; 1888 + goto out_ahb_reset_reassert; 2217 1889 2218 1890 writel(0, nfc->regs + NFC_REG_INT); 2219 1891 ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, 2220 1892 0, "sunxi-nand", nfc); 2221 1893 if (ret) 2222 - goto out_mod_clk_unprepare; 1894 + goto out_ahb_reset_reassert; 1895 + 1896 + nfc->dmac = dma_request_slave_channel(dev, "rxtx"); 1897 + if (nfc->dmac) { 1898 + struct dma_slave_config dmac_cfg = { }; 1899 + 1900 + dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA; 1901 + dmac_cfg.dst_addr = dmac_cfg.src_addr; 1902 + dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1903 + dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; 1904 + dmac_cfg.src_maxburst = 4; 1905 + dmac_cfg.dst_maxburst = 4; 1906 + dmaengine_slave_config(nfc->dmac, &dmac_cfg); 1907 + } else { 1908 + dev_warn(dev, "failed to request rxtx DMA channel\n"); 1909 + } 2223 1910 2224 1911 platform_set_drvdata(pdev, nfc); 2225 1912 2226 1913 ret = sunxi_nand_chips_init(dev, nfc); 2227 1914 if (ret) { 2228 1915 dev_err(dev, "failed to init nand chips\n"); 2229 - goto out_mod_clk_unprepare; 1916 + goto out_release_dmac; 2230 1917 } 2231 1918 2232 1919 return 0; 2233 1920 1921 + out_release_dmac: 1922 + if (nfc->dmac) 1923 + dma_release_channel(nfc->dmac); 1924 + out_ahb_reset_reassert: 1925 + if (!IS_ERR(nfc->reset)) 1926 + reset_control_assert(nfc->reset); 
2234 1927 out_mod_clk_unprepare: 2235 1928 clk_disable_unprepare(nfc->mod_clk); 2236 1929 out_ahb_clk_unprepare: ··· 2277 1904 struct sunxi_nfc *nfc = platform_get_drvdata(pdev); 2278 1905 2279 1906 sunxi_nand_chips_cleanup(nfc); 1907 + 1908 + if (!IS_ERR(nfc->reset)) 1909 + reset_control_assert(nfc->reset); 1910 + 1911 + if (nfc->dmac) 1912 + dma_release_channel(nfc->dmac); 2280 1913 clk_disable_unprepare(nfc->mod_clk); 2281 1914 clk_disable_unprepare(nfc->ahb_clk); 2282 1915
+142 -91
drivers/mtd/nand/xway_nand.c
··· 4 4 * by the Free Software Foundation. 5 5 * 6 6 * Copyright © 2012 John Crispin <blogic@openwrt.org> 7 + * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de> 7 8 */ 8 9 9 10 #include <linux/mtd/nand.h> ··· 17 16 #define EBU_ADDSEL1 0x24 18 17 #define EBU_NAND_CON 0xB0 19 18 #define EBU_NAND_WAIT 0xB4 19 + #define NAND_WAIT_RD BIT(0) /* NAND flash status output */ 20 + #define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */ 20 21 #define EBU_NAND_ECC0 0xB8 21 22 #define EBU_NAND_ECC_AC 0xBC 22 23 23 - /* nand commands */ 24 - #define NAND_CMD_ALE (1 << 2) 25 - #define NAND_CMD_CLE (1 << 3) 26 - #define NAND_CMD_CS (1 << 4) 27 - #define NAND_WRITE_CMD_RESET 0xff 24 + /* 25 + * nand commands 26 + * The pins of the NAND chip are selected based on the address bits of the 27 + * "register" read and write. There are no special registers, but an 28 + * address range and the lower address bits are used to activate the 29 + * correct line. For example when the bit (1 << 2) is set in the address 30 + * the ALE pin will be activated. 
31 + */ 32 + #define NAND_CMD_ALE BIT(2) /* address latch enable */ 33 + #define NAND_CMD_CLE BIT(3) /* command latch enable */ 34 + #define NAND_CMD_CS BIT(4) /* chip select */ 35 + #define NAND_CMD_SE BIT(5) /* spare area access latch */ 36 + #define NAND_CMD_WP BIT(6) /* write protect */ 28 37 #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) 29 38 #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) 30 39 #define NAND_WRITE_DATA (NAND_CMD_CS) 31 40 #define NAND_READ_DATA (NAND_CMD_CS) 32 - #define NAND_WAIT_WR_C (1 << 3) 33 - #define NAND_WAIT_RD (0x1) 34 41 35 42 /* we need to tel the ebu which addr we mapped the nand to */ 36 43 #define ADDSEL1_MASK(x) (x << 4) ··· 63 54 #define NAND_CON_CSMUX (1 << 1) 64 55 #define NAND_CON_NANDM 1 65 56 66 - static void xway_reset_chip(struct nand_chip *chip) 57 + struct xway_nand_data { 58 + struct nand_chip chip; 59 + unsigned long csflags; 60 + void __iomem *nandaddr; 61 + }; 62 + 63 + static u8 xway_readb(struct mtd_info *mtd, int op) 67 64 { 68 - unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; 69 - unsigned long flags; 65 + struct nand_chip *chip = mtd_to_nand(mtd); 66 + struct xway_nand_data *data = nand_get_controller_data(chip); 70 67 71 - nandaddr &= ~NAND_WRITE_ADDR; 72 - nandaddr |= NAND_WRITE_CMD; 73 - 74 - /* finish with a reset */ 75 - spin_lock_irqsave(&ebu_lock, flags); 76 - writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); 77 - while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) 78 - ; 79 - spin_unlock_irqrestore(&ebu_lock, flags); 68 + return readb(data->nandaddr + op); 80 69 } 81 70 82 - static void xway_select_chip(struct mtd_info *mtd, int chip) 71 + static void xway_writeb(struct mtd_info *mtd, int op, u8 value) 83 72 { 73 + struct nand_chip *chip = mtd_to_nand(mtd); 74 + struct xway_nand_data *data = nand_get_controller_data(chip); 84 75 85 - switch (chip) { 76 + writeb(value, data->nandaddr + op); 77 + } 78 + 79 + static void xway_select_chip(struct mtd_info *mtd, int 
select) 80 + { 81 + struct nand_chip *chip = mtd_to_nand(mtd); 82 + struct xway_nand_data *data = nand_get_controller_data(chip); 83 + 84 + switch (select) { 86 85 case -1: 87 86 ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); 88 87 ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); 88 + spin_unlock_irqrestore(&ebu_lock, data->csflags); 89 89 break; 90 90 case 0: 91 + spin_lock_irqsave(&ebu_lock, data->csflags); 91 92 ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); 92 93 ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); 93 94 break; ··· 108 89 109 90 static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 110 91 { 111 - struct nand_chip *this = mtd_to_nand(mtd); 112 - unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; 113 - unsigned long flags; 92 + if (cmd == NAND_CMD_NONE) 93 + return; 114 94 115 - if (ctrl & NAND_CTRL_CHANGE) { 116 - nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); 117 - if (ctrl & NAND_CLE) 118 - nandaddr |= NAND_WRITE_CMD; 119 - else 120 - nandaddr |= NAND_WRITE_ADDR; 121 - this->IO_ADDR_W = (void __iomem *) nandaddr; 122 - } 95 + if (ctrl & NAND_CLE) 96 + xway_writeb(mtd, NAND_WRITE_CMD, cmd); 97 + else if (ctrl & NAND_ALE) 98 + xway_writeb(mtd, NAND_WRITE_ADDR, cmd); 123 99 124 - if (cmd != NAND_CMD_NONE) { 125 - spin_lock_irqsave(&ebu_lock, flags); 126 - writeb(cmd, this->IO_ADDR_W); 127 - while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) 128 - ; 129 - spin_unlock_irqrestore(&ebu_lock, flags); 130 - } 100 + while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) 101 + ; 131 102 } 132 103 133 104 static int xway_dev_ready(struct mtd_info *mtd) ··· 127 118 128 119 static unsigned char xway_read_byte(struct mtd_info *mtd) 129 120 { 130 - struct nand_chip *this = mtd_to_nand(mtd); 131 - unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; 132 - unsigned long flags; 133 - int ret; 134 - 135 - spin_lock_irqsave(&ebu_lock, flags); 136 - ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); 137 - 
spin_unlock_irqrestore(&ebu_lock, flags); 138 - 139 - return ret; 121 + return xway_readb(mtd, NAND_READ_DATA); 140 122 } 141 123 124 + static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len) 125 + { 126 + int i; 127 + 128 + for (i = 0; i < len; i++) 129 + buf[i] = xway_readb(mtd, NAND_WRITE_DATA); 130 + } 131 + 132 + static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 133 + { 134 + int i; 135 + 136 + for (i = 0; i < len; i++) 137 + xway_writeb(mtd, NAND_WRITE_DATA, buf[i]); 138 + } 139 + 140 + /* 141 + * Probe for the NAND device. 142 + */ 142 143 static int xway_nand_probe(struct platform_device *pdev) 143 144 { 144 - struct nand_chip *this = platform_get_drvdata(pdev); 145 - unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; 146 - const __be32 *cs = of_get_property(pdev->dev.of_node, 147 - "lantiq,cs", NULL); 145 + struct xway_nand_data *data; 146 + struct mtd_info *mtd; 147 + struct resource *res; 148 + int err; 149 + u32 cs; 148 150 u32 cs_flag = 0; 149 151 152 + /* Allocate memory for the device structure (and zero it) */ 153 + data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data), 154 + GFP_KERNEL); 155 + if (!data) 156 + return -ENOMEM; 157 + 158 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 159 + data->nandaddr = devm_ioremap_resource(&pdev->dev, res); 160 + if (IS_ERR(data->nandaddr)) 161 + return PTR_ERR(data->nandaddr); 162 + 163 + nand_set_flash_node(&data->chip, pdev->dev.of_node); 164 + mtd = nand_to_mtd(&data->chip); 165 + mtd->dev.parent = &pdev->dev; 166 + 167 + data->chip.cmd_ctrl = xway_cmd_ctrl; 168 + data->chip.dev_ready = xway_dev_ready; 169 + data->chip.select_chip = xway_select_chip; 170 + data->chip.write_buf = xway_write_buf; 171 + data->chip.read_buf = xway_read_buf; 172 + data->chip.read_byte = xway_read_byte; 173 + data->chip.chip_delay = 30; 174 + 175 + data->chip.ecc.mode = NAND_ECC_SOFT; 176 + data->chip.ecc.algo = NAND_ECC_HAMMING; 177 + 178 + 
platform_set_drvdata(pdev, data); 179 + nand_set_controller_data(&data->chip, data); 180 + 150 181 /* load our CS from the DT. Either we find a valid 1 or default to 0 */ 151 - if (cs && (*cs == 1)) 182 + err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs); 183 + if (!err && cs == 1) 152 184 cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; 153 185 154 186 /* setup the EBU to run in NAND mode on our base addr */ 155 - ltq_ebu_w32(CPHYSADDR(nandaddr) 156 - | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); 187 + ltq_ebu_w32(CPHYSADDR(data->nandaddr) 188 + | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); 157 189 158 190 ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 159 - | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 160 - | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); 191 + | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 192 + | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); 161 193 162 194 ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P 163 - | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P 164 - | cs_flag, EBU_NAND_CON); 195 + | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P 196 + | cs_flag, EBU_NAND_CON); 165 197 166 - /* finish with a reset */ 167 - xway_reset_chip(this); 198 + /* Scan to find existence of the device */ 199 + err = nand_scan(mtd, 1); 200 + if (err) 201 + return err; 168 202 169 - return 0; 203 + err = mtd_device_register(mtd, NULL, 0); 204 + if (err) 205 + nand_release(mtd); 206 + 207 + return err; 170 208 } 171 - 172 - static struct platform_nand_data xway_nand_data = { 173 - .chip = { 174 - .nr_chips = 1, 175 - .chip_delay = 30, 176 - }, 177 - .ctrl = { 178 - .probe = xway_nand_probe, 179 - .cmd_ctrl = xway_cmd_ctrl, 180 - .dev_ready = xway_dev_ready, 181 - .select_chip = xway_select_chip, 182 - .read_byte = xway_read_byte, 183 - } 184 - }; 185 209 186 210 /* 187 - * Try to find the node inside the DT. If it is available attach out 188 - * platform_nand_data 211 + * Remove a NAND device. 
189 212 */ 190 - static int __init xway_register_nand(void) 213 + static int xway_nand_remove(struct platform_device *pdev) 191 214 { 192 - struct device_node *node; 193 - struct platform_device *pdev; 215 + struct xway_nand_data *data = platform_get_drvdata(pdev); 194 216 195 - node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway"); 196 - if (!node) 197 - return -ENOENT; 198 - pdev = of_find_device_by_node(node); 199 - if (!pdev) 200 - return -EINVAL; 201 - pdev->dev.platform_data = &xway_nand_data; 202 - of_node_put(node); 217 + nand_release(nand_to_mtd(&data->chip)); 218 + 203 219 return 0; 204 220 } 205 221 206 - subsys_initcall(xway_register_nand); 222 + static const struct of_device_id xway_nand_match[] = { 223 + { .compatible = "lantiq,nand-xway" }, 224 + {}, 225 + }; 226 + MODULE_DEVICE_TABLE(of, xway_nand_match); 227 + 228 + static struct platform_driver xway_nand_driver = { 229 + .probe = xway_nand_probe, 230 + .remove = xway_nand_remove, 231 + .driver = { 232 + .name = "lantiq,nand-xway", 233 + .of_match_table = xway_nand_match, 234 + }, 235 + }; 236 + 237 + module_platform_driver(xway_nand_driver); 238 + 239 + MODULE_LICENSE("GPL");
+1 -1
drivers/mtd/tests/nandbiterrs.c
··· 290 290 291 291 while (opno < max_overwrite) { 292 292 293 - err = rewrite_page(0); 293 + err = write_page(0); 294 294 if (err) 295 295 break; 296 296
+1
include/linux/mtd/nand.h
··· 783 783 * NAND Flash Manufacturer ID Codes 784 784 */ 785 785 #define NAND_MFR_TOSHIBA 0x98 786 + #define NAND_MFR_ESMT 0xc8 786 787 #define NAND_MFR_SAMSUNG 0xec 787 788 #define NAND_MFR_FUJITSU 0x04 788 789 #define NAND_MFR_NATIONAL 0x8f