Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'nand/for-5.5' into mtd/next

Raw NAND core:
* Useless extra checks dropped.
* Updated the detection of the bad block markers position.

Raw NAND controller drivers:
* Cadence: New driver.
* Brcmnand: Support for flash-dma v0 + fixes.
* Denali: Support for the legacy controller/chip DT representation dropped.
* Superfluous dev_err() calls removed.

+3132 -60
+53
Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
* Cadence NAND controller

Required properties:
  - compatible : "cdns,hp-nfc"
  - reg : Contains two entries, each of which is a tuple consisting of a
	  physical address and length. The first entry is the address and
	  length of the controller register set. The second entry is the
	  address and length of the Slave DMA data port.
  - reg-names: should contain "reg" and "sdma"
  - #address-cells: should be 1. The cell encodes the chip select connection.
  - #size-cells : should be 0.
  - interrupts : The interrupt number.
  - clocks: phandle of the controller core clock (nf_clk).

Optional properties:
  - dmas: shall reference DMA channel associated to the NAND controller
  - cdns,board-delay-ps : Estimated Board delay. The value includes the total
	  round trip delay for the signals and is used for deciding on values
	  associated with data read capture. The example formula for SDR mode is
	  the following:
	  board delay = RE#PAD delay + PCB trace to device + PCB trace from device
	  + DQ PAD delay

Child nodes represent the available NAND chips.

Required properties of NAND chips:
  - reg: shall contain the native Chip Select ids from 0 to max supported by
	  the Cadence NAND flash controller

See Documentation/devicetree/bindings/mtd/nand.txt for more details on
generic bindings.

Example:

nand_controller: nand-controller@60000000 {
	compatible = "cdns,hp-nfc";
	#address-cells = <1>;
	#size-cells = <0>;
	reg = <0x60000000 0x10000>, <0x80000000 0x10000>;
	reg-names = "reg", "sdma";
	clocks = <&nf_clk>;
	cdns,board-delay-ps = <4830>;
	interrupts = <2 0>;

	nand@0 {
		reg = <0>;
		label = "nand-1";
	};

	nand@1 {
		reg = <1>;
		label = "nand-2";
	};
};
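As a quick check of the board-delay formula (all four delay values below are hypothetical, chosen only to reproduce the example node): with an RE# PAD delay of 1180 ps, 1500 ps of PCB trace to the device, 1500 ps of PCB trace from the device, and a DQ PAD delay of 650 ps,

	board delay = 1180 + 1500 + 1500 + 650 = 4830 ps

which matches the cdns,board-delay-ps = <4830> property used in the example above.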
+7
MAINTAINERS
···
 F:	Documentation/devicetree/bindings/media/cdns,*.txt
 F:	drivers/media/platform/cadence/cdns-csi2*

+CADENCE NAND DRIVER
+M:	Piotr Sroka <piotrs@cadence.com>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/nand/raw/cadence-nand-controller.c
+F:	Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+
 CADET FM/AM RADIO RECEIVER DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
+7
drivers/mtd/nand/raw/Kconfig
···
 	  devices. You will need to provide platform-specific functions
 	  via platform_data.

+config MTD_NAND_CADENCE
+	tristate "Support Cadence NAND (HPNFC) controller"
+	depends on OF || COMPILE_TEST
+	help
+	  Enable the driver for NAND flash on platforms using a Cadence NAND
+	  controller.
+
 comment "Misc"

 config MTD_SM_COMMON
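For reference, enabling the new driver is the usual Kconfig selection. A .config fragment might look like this (assuming the raw NAND menu symbol is MTD_RAW_NAND, as in kernels of this vintage):

	CONFIG_MTD_RAW_NAND=y
	CONFIG_MTD_NAND_CADENCE=y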
+1
drivers/mtd/nand/raw/Makefile
···
 obj-$(CONFIG_MTD_NAND_TEGRA)		+= tegra_nand.o
 obj-$(CONFIG_MTD_NAND_STM32_FMC2)	+= stm32_fmc2_nand.o
 obj-$(CONFIG_MTD_NAND_MESON)		+= meson_nand.o
+obj-$(CONFIG_MTD_NAND_CADENCE)		+= cadence-nand-controller.o

 nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
 nand-objs += nand_onfi.o
+20 -3
drivers/mtd/nand/raw/brcmnand/brcmnand.c
···
 	FLASH_DMA_CURRENT_DESC_EXT,
 };

+/* flash_dma registers v0 */
+static const u16 flash_dma_regs_v0[] = {
+	[FLASH_DMA_REVISION]		= 0x00,
+	[FLASH_DMA_FIRST_DESC]		= 0x04,
+	[FLASH_DMA_CTRL]		= 0x08,
+	[FLASH_DMA_MODE]		= 0x0c,
+	[FLASH_DMA_STATUS]		= 0x10,
+	[FLASH_DMA_INTERRUPT_DESC]	= 0x14,
+	[FLASH_DMA_ERROR_STATUS]	= 0x18,
+	[FLASH_DMA_CURRENT_DESC]	= 0x1c,
+};
+
 /* flash_dma registers v1 */
 static const u16 flash_dma_regs_v1[] = {
 	[FLASH_DMA_REVISION]		= 0x00,
···
 	/* flash_dma register offsets */
 	if (ctrl->nand_version >= 0x0703)
 		ctrl->flash_dma_offsets = flash_dma_regs_v4;
+	else if (ctrl->nand_version == 0x0602)
+		ctrl->flash_dma_offsets = flash_dma_regs_v0;
 	else
 		ctrl->flash_dma_offsets = flash_dma_regs_v1;
 }
···
 		return;

 	if (has_flash_dma(ctrl)) {
-		ctrl->flash_dma_base = 0;
+		ctrl->flash_dma_base = NULL;
 		disable_irq(ctrl->dma_irq);
 	}
···
 	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
 	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
-	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
-	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+	if (ctrl->nand_version > 0x0602) {
+		flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
+				 upper_32_bits(desc));
+		(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+	}

 	/* Start FLASH_DMA engine */
 	ctrl->dma_pending = true;
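A note on the pattern above: brcmnand callers never hard-code flash-DMA register offsets; every access goes through the per-revision offset table selected at probe time, indexed by the logical register enum. A minimal sketch of such an accessor (illustrative only, not necessarily the driver's exact code):

	static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
					  enum flash_dma_reg dma_reg)
	{
		/* Translate the logical register into this revision's offset. */
		u16 offs = ctrl->flash_dma_offsets[dma_reg];

		return brcmnand_readl(ctrl->flash_dma_base + offs);
	}

This is why v0 support reduces to adding one offset table plus the version check: the v0 table has no FLASH_DMA_FIRST_DESC_EXT entry, so the 64-bit descriptor write is additionally guarded by the nand_version > 0x0602 test shown in the last hunk.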
+3030
drivers/mtd/nand/raw/cadence-nand-controller.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_device.h>
#include <linux/iopoll.h>

/*
 * HPNFC can work in 3 modes:
 * - PIO - can work in master or slave DMA
 * - CDMA - needs Master DMA for accessing command descriptors.
 * - Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. Driver uses CDMA mode for
 * block erasing, page reading, page programming.
 * Generic mode is used for executing the rest of the commands.
 */

#define MAX_OOB_SIZE_PER_SECTOR	32
#define MAX_ADDRESS_CYC		6
#define MAX_ERASE_ADDRESS_CYC	3
#define MAX_DATA_SIZE		0xFFFC
#define DMA_DATA_SIZE_ALIGN	8

/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0			0x0000
/* Command type field mask. */
#define CMD_REG0_CT			GENMASK(31, 30)
/* Command type CDMA. */
#define CMD_REG0_CT_CDMA		0uL
/* Command type generic. */
#define CMD_REG0_CT_GEN			3uL
/* Command thread number field mask. */
#define CMD_REG0_TN			GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2			0x0008
/* Command register 3. */
#define CMD_REG3			0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR			0x0010
/* Command status register for selected thread. */
#define CMD_STATUS			0x0014

/* Interrupt status register. */
#define INTR_STATUS			0x0110
#define INTR_STATUS_SDMA_ERR		BIT(22)
#define INTR_STATUS_SDMA_TRIGG		BIT(21)
#define INTR_STATUS_UNSUPP_CMD		BIT(19)
#define INTR_STATUS_DDMA_TERR		BIT(18)
#define INTR_STATUS_CDMA_TERR		BIT(17)
#define INTR_STATUS_CDMA_IDL		BIT(16)

/* Interrupt enable register. */
#define INTR_ENABLE			0x0114
#define INTR_ENABLE_INTR_EN		BIT(31)
#define INTR_ENABLE_SDMA_ERR_EN		BIT(22)
#define INTR_ENABLE_SDMA_TRIGG_EN	BIT(21)
#define INTR_ENABLE_UNSUPP_CMD_EN	BIT(19)
#define INTR_ENABLE_DDMA_TERR_EN	BIT(18)
#define INTR_ENABLE_CDMA_TERR_EN	BIT(17)
#define INTR_ENABLE_CDMA_IDLE_EN	BIT(16)

/* Controller internal state. */
#define CTRL_STATUS			0x0118
#define CTRL_STATUS_INIT_COMP		BIT(9)
#define CTRL_STATUS_CTRL_BUSY		BIT(8)

/* Command Engine threads state. */
#define TRD_STATUS			0x0120

/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS		0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN		0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS		0x0138

/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0			0x0400
/* Offset value from the beginning of the page. */
#define TRAN_CFG_0_OFFSET		GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define TRAN_CFG_0_SEC_CNT		GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1			0x0404
/* Size of last data sector. */
#define TRAN_CFG_1_LAST_SEC_SIZE	GENMASK(31, 16)
/* Size of not-last data sector. */
#define TRAN_CFG_1_SECTOR_SIZE		GENMASK(15, 0)

/* ECC engine configuration register 0. */
#define ECC_CONFIG_0			0x0428
/* Correction strength. */
#define ECC_CONFIG_0_CORR_STR		GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define ECC_CONFIG_0_ERASE_DET_EN	BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define ECC_CONFIG_0_ECC_EN		BIT(0)

/* ECC engine configuration register 1. */
#define ECC_CONFIG_1			0x042C

/* Multiplane settings register. */
#define MULTIPLANE_CFG			0x0434
/* Cache operation settings. */
#define CACHE_CFG			0x0438

/* DMA settings register. */
#define DMA_SETINGS			0x043C
/* Enable SDMA error report on access unprepared slave DMA interface. */
#define DMA_SETINGS_SDMA_ERR_RSP	BIT(17)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE			0x0440

/*
 * Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM			0x0444
/* Thread number mask. */
#define SDMA_TRD_NUM_SDMA_TRD		GENMASK(2, 0)

#define CONTROL_DATA_CTRL		0x0494
/* Control data size field mask. */
#define CONTROL_DATA_CTRL_SIZE		GENMASK(15, 0)

#define CTRL_VERSION			0x800
#define CTRL_VERSION_REV		GENMASK(7, 0)

/* Available hardware features of the controller. */
#define CTRL_FEATURES			0x804
/* Support for NV-DDR2/3 work mode. */
#define CTRL_FEATURES_NVDDR_2_3		BIT(28)
/* Support for NV-DDR work mode. */
#define CTRL_FEATURES_NVDDR		BIT(27)
/* Support for asynchronous work mode. */
#define CTRL_FEATURES_ASYNC		BIT(26)
/* Number of banks supported by hardware. */
#define CTRL_FEATURES_N_BANKS		GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define CTRL_FEATURES_DMA_DWITH64	BIT(21)
/* Availability of Control Data feature. */
#define CTRL_FEATURES_CONTROL_DATA	BIT(10)

/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0			0x838
#define BCH_CFG_0_CORR_CAP_0		GENMASK(7, 0)
#define BCH_CFG_0_CORR_CAP_1		GENMASK(15, 8)
#define BCH_CFG_0_CORR_CAP_2		GENMASK(23, 16)
#define BCH_CFG_0_CORR_CAP_3		GENMASK(31, 24)

/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1			0x83C
#define BCH_CFG_1_CORR_CAP_4		GENMASK(7, 0)
#define BCH_CFG_1_CORR_CAP_5		GENMASK(15, 8)
#define BCH_CFG_1_CORR_CAP_6		GENMASK(23, 16)
#define BCH_CFG_1_CORR_CAP_7		GENMASK(31, 24)

/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2			0x840
#define BCH_CFG_2_SECT_0		GENMASK(15, 0)
#define BCH_CFG_2_SECT_1		GENMASK(31, 16)

/* BCH Engine identification register 3. */
#define BCH_CFG_3			0x844

/* Ready/Busy# line status. */
#define RBN_SETINGS			0x1004

/* Common settings. */
#define COMMON_SET			0x1008
/* 16 bit device connected to the NAND Flash interface. */
#define COMMON_SET_DEVICE_16BIT		BIT(8)

/* Skip_bytes registers. */
#define SKIP_BYTES_CONF			0x100C
#define SKIP_BYTES_MARKER_VALUE		GENMASK(31, 16)
#define SKIP_BYTES_NUM_OF_BYTES		GENMASK(7, 0)

#define SKIP_BYTES_OFFSET		0x1010
#define SKIP_BYTES_OFFSET_VALUE		GENMASK(23, 0)

/* Timings configuration. */
#define ASYNC_TOGGLE_TIMINGS		0x101c
#define ASYNC_TOGGLE_TIMINGS_TRH	GENMASK(28, 24)
#define ASYNC_TOGGLE_TIMINGS_TRP	GENMASK(20, 16)
#define ASYNC_TOGGLE_TIMINGS_TWH	GENMASK(12, 8)
#define ASYNC_TOGGLE_TIMINGS_TWP	GENMASK(4, 0)

#define TIMINGS0			0x1024
#define TIMINGS0_TADL			GENMASK(31, 24)
#define TIMINGS0_TCCS			GENMASK(23, 16)
#define TIMINGS0_TWHR			GENMASK(15, 8)
#define TIMINGS0_TRHW			GENMASK(7, 0)

#define TIMINGS1			0x1028
#define TIMINGS1_TRHZ			GENMASK(31, 24)
#define TIMINGS1_TWB			GENMASK(23, 16)
#define TIMINGS1_TVDLY			GENMASK(7, 0)

#define TIMINGS2			0x102c
#define TIMINGS2_TFEAT			GENMASK(25, 16)
#define TIMINGS2_CS_HOLD_TIME		GENMASK(13, 8)
#define TIMINGS2_CS_SETUP_TIME		GENMASK(5, 0)

/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL			0x1034
#define DLL_PHY_CTRL_DLL_RST_N		BIT(24)
#define DLL_PHY_CTRL_EXTENDED_WR_MODE	BIT(17)
#define DLL_PHY_CTRL_EXTENDED_RD_MODE	BIT(16)
#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT	GENMASK(11, 8)
#define DLL_PHY_CTRL_RS_IDLE_CNT	GENMASK(7, 0)

/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING			0x2000
/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING			0x2004
#define PHY_DQS_TIMING_DQS_SEL_OE_END	GENMASK(3, 0)
#define PHY_DQS_TIMING_PHONY_DQS_SEL	BIT(16)
#define PHY_DQS_TIMING_USE_PHONY_DQS	BIT(20)

/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL		0x2008
#define PHY_GATE_LPBK_CTRL_RDS		GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL		0x200C
#define PHY_DLL_MASTER_CTRL_BYPASS_MODE	BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL		0x2010

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL			0x2080
#define PHY_CTRL_SDR_DQS		BIT(14)
#define PHY_CTRL_PHONY_DQS		GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL			0x2084

/* Generic command layout. */
#define GCMD_LAY_CS			GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller if it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB			BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR			GENMASK_ULL(5, 0)

/* Generic CMD sequence type. */
#define GCMD_LAY_INSTR_CMD		0
/* Generic ADDR sequence type. */
#define GCMD_LAY_INSTR_ADDR		1
/* Generic data transfer sequence type. */
#define GCMD_LAY_INSTR_DATA		2

/* Input part of generic command when the type of input is command. */
#define GCMD_LAY_INPUT_CMD		GENMASK_ULL(23, 16)

/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR		GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE	GENMASK_ULL(13, 11)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR			BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define GCMD_DIR_READ			0
/* Write transfer direction of generic command data sequence. */
#define GCMD_DIR_WRITE			1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN			BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE			GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT			GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE			GENMASK_ULL(55, 40)

/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE			0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR			0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD			0x2200

/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT		24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM			GENMASK(26, 24)

/*
 * Command DMA descriptor flags. If set, causes an interrupt to be issued
 * after the completion of descriptor processing.
 */
#define CDMA_CF_INT			BIT(8)
/*
 * Command DMA descriptor flags - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT			BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER		BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP			BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL			BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP			BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT			BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR			GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE			BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR			BIT(0)

/* Status of operation - OK. */
#define STAT_OK				0
/* Status of operation - FAIL. */
#define STAT_FAIL			2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR			3
/* Status of operation - page erased. */
#define STAT_ERASED			5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR			6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN			7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY			0xFF

#define BCH_MAX_NUM_CORR_CAPS		8
#define BCH_MAX_NUM_SECTOR_SIZES	2

struct cadence_nand_timings {
	u32 async_toggle_timings;
	u32 timings0;
	u32 timings1;
	u32 timings2;
	u32 dll_phy_ctrl;
	u32 phy_ctrl;
	u32 phy_dqs_timing;
	u32 phy_gate_lpbk_ctrl;
};

/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
	/* Next descriptor address. */
	u64 next_pointer;

	/* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */
	u32 flash_pointer;
	/* Field appears in HPNFC version 13. */
	u16 bank;
	u16 rsvd0;

	/* Operation the controller needs to perform. */
	u16 command_type;
	u16 rsvd1;
	/* Flags for operation of this command. */
	u16 command_flags;
	u16 rsvd2;

	/* System/host memory address required for data DMA commands. */
	u64 memory_pointer;

	/* Status of operation. */
	u32 status;
	u32 rsvd3;

	/* Address pointer to sync buffer location. */
	u64 sync_flag_pointer;

	/* Controls the buffer sync mechanism. */
	u32 sync_arguments;
	u32 rsvd4;

	/* Control data pointer. */
	u64 ctrl_data_ptr;
};

/* Interrupt status. */
struct cadence_nand_irq_status {
	/* Thread operation complete status. */
	u32 trd_status;
	/* Thread operation error. */
	u32 trd_error;
	/* Controller status. */
	u32 status;
};

/* Cadence NAND flash controller capabilities obtained from driver data. */
struct cadence_nand_dt_devdata {
	/* Skew value of the output signals of the NAND Flash interface. */
	u32 if_skew;
	/* It informs if slave DMA interface is connected to DMA engine. */
	unsigned int has_dma:1;
};

/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
	/* Maximum number of banks supported by hardware. */
	u8 max_banks;
	/* Slave and Master DMA data width in bytes (4 or 8). */
	u8 data_dma_width;
	/* Control Data feature supported. */
	bool data_control_supp;
	/* Is PHY type DLL. */
	bool is_phy_type_dll;
};

struct cdns_nand_ctrl {
	struct device *dev;
	struct nand_controller controller;
	struct cadence_nand_cdma_desc *cdma_desc;
	/* IP capability. */
	const struct cadence_nand_dt_devdata *caps1;
	struct cdns_nand_caps caps2;
	u8 ctrl_rev;
	dma_addr_t dma_cdma_desc;
	u8 *buf;
	u32 buf_size;
	u8 curr_corr_str_idx;

	/* Register interface. */
	void __iomem *reg;

	struct {
		void __iomem *virt;
		dma_addr_t dma;
	} io;

	int irq;
	/* Interrupts that have happened. */
	struct cadence_nand_irq_status irq_status;
	/* Interrupts we are waiting for. */
	struct cadence_nand_irq_status irq_mask;
	struct completion complete;
	/* Protect irq_mask and irq_status. */
	spinlock_t irq_lock;

	int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
	struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
	struct nand_ecc_caps ecc_caps;

	int curr_trans_type;

	struct dma_chan *dmac;

	u32 nf_clk_rate;
	/*
	 * Estimated Board delay. The value includes the total
	 * round trip delay for the signals and is used for deciding on values
	 * associated with data read capture.
	 */
	u32 board_delay;

	struct nand_chip *selected_chip;

	unsigned long assigned_cs;
	struct list_head chips;
};

struct cdns_nand_chip {
	struct cadence_nand_timings timings;
	struct nand_chip chip;
	u8 nsels;
	struct list_head node;

	/*
	 * Part of the oob area of the NAND flash memory page.
	 * This part is available for the user to read or write.
	 */
	u32 avail_oob_size;

	/* Sector size. There are a few sectors per mtd->writesize. */
	u32 sector_size;
	u32 sector_count;

	/* Offset of BBM. */
	u8 bbm_offs;
	/* Number of bytes reserved for BBM. */
	u8 bbm_len;
	/* ECC strength index. */
	u8 corr_str_idx;

	u8 cs[];
};

struct ecc_info {
	int (*calc_ecc_bytes)(int step_size, int strength);
	int max_step_size;
};

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
	return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
	return container_of(controller, struct cdns_nand_ctrl, controller);
}

static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
			u32 buf_len)
{
	u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

	return buf && virt_addr_valid(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
		likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}

static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
				       u32 reg_offset, u32 timeout_us,
				       u32 mask, bool is_clear)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
					 val, !(val & mask) == is_clear,
					 10, timeout_us);

	if (ret < 0) {
		dev_err(cdns_ctrl->dev,
			"Timeout while waiting for reg %x with mask %x is clear %d\n",
			reg_offset, mask, is_clear);
	}

	return ret;
}

static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
				       bool enable)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ECC_EN;
	else
		reg &= ~ECC_CONFIG_0_ECC_EN;

	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

	return 0;
}

static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
					  u8 corr_str_idx)
{
	u32 reg;

	if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
		return;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
	reg &= ~ECC_CONFIG_0_CORR_STR;
	reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

	cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}

static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
					     u8 strength)
{
	int i, corr_str_idx = -1;

	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
		if (cdns_ctrl->ecc_strengths[i] == strength) {
			corr_str_idx = i;
			break;
		}
	}

	return corr_str_idx;
}

static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
					    u16 marker_value)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_MARKER_VALUE;
	reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
			  marker_value);

	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);

	return 0;
}

static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
					    u8 num_of_bytes,
					    u32 offset_value,
					    int enable)
{
	u32 reg, skip_bytes_offset;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	if (!enable) {
		num_of_bytes = 0;
		offset_value = 0;
	}

	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_NUM_OF_BYTES;
	reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
			  num_of_bytes);
	skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
				       offset_value);

	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
	writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);

	return 0;
}

/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
					     bool enable,
					     u8 bitflips_threshold)
{
	u32 reg;

	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ERASE_DET_EN;
	else
		reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
	writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}

static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
					   bool bit_bus16)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);

	if (!bit_bus16)
		reg &= ~COMMON_SET_DEVICE_16BIT;
	else
		reg |= COMMON_SET_DEVICE_16BIT;
	writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);

	return 0;
}

static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
			     struct cadence_nand_irq_status *irq_status)
{
	writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
	writel_relaxed(irq_status->trd_status,
		       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
	writel_relaxed(irq_status->trd_error,
		       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
			     struct cadence_nand_irq_status *irq_status)
{
	irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
	irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
					       + TRD_COMP_INT_STATUS);
	irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
					      + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
			struct cadence_nand_irq_status *irq_status)
{
	cadence_nand_read_int_status(cdns_ctrl, irq_status);

	return irq_status->status || irq_status->trd_status ||
		irq_status->trd_error;
}

static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
	memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
	memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
	spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}

/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
	struct cdns_nand_ctrl *cdns_ctrl = dev_id;
	struct cadence_nand_irq_status irq_status;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&cdns_ctrl->irq_lock);

	if (irq_detected(cdns_ctrl, &irq_status)) {
		/* Handle interrupt. */
		/* First acknowledge it. */
		cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
		/* Status in the device context for someone to read. */
		cdns_ctrl->irq_status.status |= irq_status.status;
		cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
		cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
		/* Notify anyone who cares that it happened. */
		complete(&cdns_ctrl->complete);
		/* Tell the OS that we've handled this. */
		result = IRQ_HANDLED;
	}
	spin_unlock(&cdns_ctrl->irq_lock);

	return result;
}

static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
				      struct cadence_nand_irq_status *irq_mask)
{
	writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
		       cdns_ctrl->reg + INTR_ENABLE);

	writel_relaxed(irq_mask->trd_error,
		       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}

static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
			  struct cadence_nand_irq_status *irq_mask,
			  struct cadence_nand_irq_status *irq_status)
{
	unsigned long timeout = msecs_to_jiffies(10000);
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
						timeout);

	*irq_status = cdns_ctrl->irq_status;
	if (time_left == 0) {
		/* Timeout error. */
		dev_err(cdns_ctrl->dev, "timeout occurred:\n");
		dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
			irq_status->status, irq_mask->status);
		dev_err(cdns_ctrl->dev,
			"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
			irq_status->trd_status, irq_mask->trd_status);
		dev_err(cdns_ctrl->dev,
			"\t trd_error = 0x%x, trd_error mask = 0x%x\n",
			irq_status->trd_error, irq_mask->trd_error);
	}
}

/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
					 u8 chip_nr,
					 u64 mini_ctrl_cmd)
{
	u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
	mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
					1000000,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_reset_irq(cdns_ctrl);

	writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
	writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);

	/* Select generic command. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, 0);

	/* Issue command. */
	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

	return 0;
}

/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
				     u8 *out_sdma_trd,
				     u32 *out_sdma_size)
{
	struct cadence_nand_irq_status irq_mask, irq_status;

	irq_mask.trd_status = 0;
	irq_mask.trd_error = 0;
	irq_mask.status = INTR_STATUS_SDMA_TRIGG
		| INTR_STATUS_SDMA_ERR
		| INTR_STATUS_UNSUPP_CMD;

	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
	if (irq_status.status == 0) {
		dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
		return -ETIMEDOUT;
	}

	if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
		*out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
		*out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
		*out_sdma_trd =
			FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
	} else {
		dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
			irq_status.status);
		return -EIO;
	}

	return 0;
}

static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
	u32 reg;

	reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);

	cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

	if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
		cdns_ctrl->caps2.data_dma_width = 8;
	else
		cdns_ctrl->caps2.data_dma_width = 4;

	if (reg & CTRL_FEATURES_CONTROL_DATA)
		cdns_ctrl->caps2.data_control_supp = true;

	if (reg & (CTRL_FEATURES_NVDDR_2_3
		   | CTRL_FEATURES_NVDDR))
		cdns_ctrl->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
			       char nf_mem, u32 flash_ptr, char *mem_ptr,
			       char *ctrl_data_ptr, u16 ctype)
{
	struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;

	memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

	/* Set fields for one descriptor. */
	cdma_desc->flash_pointer = flash_ptr;
	if (cdns_ctrl->ctrl_rev >= 13)
		cdma_desc->bank = nf_mem;
	else
		cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

	cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
	cdma_desc->command_flags |= CDMA_CF_INT;

	cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
	cdma_desc->status = 0;
	cdma_desc->sync_flag_pointer = 0;
	cdma_desc->sync_arguments = 0;

	cdma_desc->command_type = ctype;
	cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
}

static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
					u32 desc_status)
{
	if (desc_status & CDMA_CS_ERP)
		return STAT_ERASED;

	if (desc_status & CDMA_CS_UNCE)
		return STAT_ECC_UNCORR;

	if (desc_status & CDMA_CS_ERR) {
		dev_err(cdns_ctrl->dev, "CDMA desc error flag detected.\n");
		return STAT_FAIL;
	}

	if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
		return STAT_ECC_CORR;

	return STAT_FAIL;
}

static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
	u8 status = STAT_BUSY;

	if (desc_ptr->status & CDMA_CS_FAIL) {
		status = cadence_nand_check_desc_error(cdns_ctrl,
						       desc_ptr->status);
		dev_err(cdns_ctrl->dev, "CDMA error %x\n", desc_ptr->status);
	} else if (desc_ptr->status & CDMA_CS_COMP) {
		/* Descriptor finished with no errors. */
		if (desc_ptr->command_flags & CDMA_CF_CONT) {
			dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
			status = STAT_UNKNOWN;
		} else {
			/* Last descriptor. */
			status = STAT_OK;
		}
	}

	return status;
}

static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
				  u8 thread)
{
	u32 reg;
	int status;

	/* Wait for thread ready. */
	status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
					     1000000,
					     BIT(thread), true);
	if (status)
		return status;

	cadence_nand_reset_irq(cdns_ctrl);

	writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
		       cdns_ctrl->reg + CMD_REG2);
	writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);

	/* Select CDMA mode. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, thread);
	/* Issue command. */
	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

	return 0;
}

/* Send CDMA command and wait for finish. */
static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
				u8 thread)
{
	struct cadence_nand_irq_status irq_mask, irq_status = {0};
	int status;

	irq_mask.trd_status = BIT(thread);
	irq_mask.trd_error = BIT(thread);
	irq_mask.status = INTR_STATUS_CDMA_TERR;

	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);

	status = cadence_nand_cdma_send(cdns_ctrl, thread);
	if (status)
		return status;

	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);

	if (irq_status.status == 0 && irq_status.trd_status == 0 &&
	    irq_status.trd_error == 0) {
		dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
		return -ETIMEDOUT;
	}
	if (irq_status.status & irq_mask.status) {
		dev_err(cdns_ctrl->dev, "CDMA command failed\n");
		return -EIO;
	}

	return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
	int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

	return ALIGN(nbytes, 2);
}

#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
	static int \
	cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
						    int strength)\
	{\
		return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
	}

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)
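/*
 * Worked example (illustrative, not part of the driver): for
 * max_step_size = 1024 bytes and strength = 8, fls(8 * 1024) =
 * fls(8192) = 14 parity bits are needed per corrected bit, so
 * nbytes = DIV_ROUND_UP(14 * 8, 8) = 14 and ALIGN(14, 2) = 14
 * bytes of ECC per sector.
 */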
*/ 1108 + cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths; 1109 + cdns_ctrl->ecc_stepinfos[i].strengths = 1110 + cdns_ctrl->ecc_strengths; 1111 + 1112 + if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0) 1113 + ecc_caps->nstepinfos++; 1114 + 1115 + if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size) 1116 + max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize; 1117 + } 1118 + ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0]; 1119 + 1120 + switch (max_step_size) { 1121 + case 256: 1122 + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256; 1123 + break; 1124 + case 512: 1125 + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512; 1126 + break; 1127 + case 1024: 1128 + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024; 1129 + break; 1130 + case 2048: 1131 + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048; 1132 + break; 1133 + case 4096: 1134 + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096; 1135 + break; 1136 + default: 1137 + dev_err(cdns_ctrl->dev, 1138 + "Unsupported sector size(ecc step size) %d\n", 1139 + max_step_size); 1140 + return -EIO; 1141 + } 1142 + 1143 + return 0; 1144 + } 1145 + 1146 + /* Hardware initialization. */ 1147 + static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl) 1148 + { 1149 + int status; 1150 + u32 reg; 1151 + 1152 + status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, 1153 + 1000000, 1154 + CTRL_STATUS_INIT_COMP, false); 1155 + if (status) 1156 + return status; 1157 + 1158 + reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION); 1159 + cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg); 1160 + 1161 + dev_info(cdns_ctrl->dev, 1162 + "%s: cadence nand controller version reg %x\n", 1163 + __func__, reg); 1164 + 1165 + /* Disable cache and multiplane. */ 1166 + writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG); 1167 + writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG); 1168 + 1169 + /* Clear all interrupts. */ 1170 + writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS); 1171 + 1172 + cadence_nand_get_caps(cdns_ctrl); 1173 + cadence_nand_read_bch_caps(cdns_ctrl); 1174 + 1175 + /* 1176 + * Set IO width access to 8. 1177 + * It is because during SW device discovering width access 1178 + * is expected to be 8. 1179 + */ 1180 + status = cadence_nand_set_access_width16(cdns_ctrl, false); 1181 + 1182 + return status; 1183 + } 1184 + 1185 + #define TT_MAIN_OOB_AREAS 2 1186 + #define TT_RAW_PAGE 3 1187 + #define TT_BBM 4 1188 + #define TT_MAIN_OOB_AREA_EXT 5 1189 + 1190 + /* Prepare size of data to transfer. 
*/ 1191 + static void 1192 + cadence_nand_prepare_data_size(struct nand_chip *chip, 1193 + int transfer_type) 1194 + { 1195 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1196 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1197 + struct mtd_info *mtd = nand_to_mtd(chip); 1198 + u32 sec_size = 0, offset = 0, sec_cnt = 1; 1199 + u32 last_sec_size = cdns_chip->sector_size; 1200 + u32 data_ctrl_size = 0; 1201 + u32 reg = 0; 1202 + 1203 + if (cdns_ctrl->curr_trans_type == transfer_type) 1204 + return; 1205 + 1206 + switch (transfer_type) { 1207 + case TT_MAIN_OOB_AREA_EXT: 1208 + sec_cnt = cdns_chip->sector_count; 1209 + sec_size = cdns_chip->sector_size; 1210 + data_ctrl_size = cdns_chip->avail_oob_size; 1211 + break; 1212 + case TT_MAIN_OOB_AREAS: 1213 + sec_cnt = cdns_chip->sector_count; 1214 + last_sec_size = cdns_chip->sector_size 1215 + + cdns_chip->avail_oob_size; 1216 + sec_size = cdns_chip->sector_size; 1217 + break; 1218 + case TT_RAW_PAGE: 1219 + last_sec_size = mtd->writesize + mtd->oobsize; 1220 + break; 1221 + case TT_BBM: 1222 + offset = mtd->writesize + cdns_chip->bbm_offs; 1223 + last_sec_size = 8; 1224 + break; 1225 + } 1226 + 1227 + reg = 0; 1228 + reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset); 1229 + reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt); 1230 + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0); 1231 + 1232 + reg = 0; 1233 + reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size); 1234 + reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size); 1235 + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1); 1236 + 1237 + if (cdns_ctrl->caps2.data_control_supp) { 1238 + reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL); 1239 + reg &= ~CONTROL_DATA_CTRL_SIZE; 1240 + reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size); 1241 + writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL); 1242 + } 1243 + 1244 + cdns_ctrl->curr_trans_type = transfer_type; 1245 + } 1246 + 1247 + static int 1248 + cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, 1249 + int page, void *buf, void *ctrl_dat, u32 buf_size, 1250 + u32 ctrl_dat_size, enum dma_data_direction dir, 1251 + bool with_ecc) 1252 + { 1253 + dma_addr_t dma_buf, dma_ctrl_dat = 0; 1254 + u8 thread_nr = chip_nr; 1255 + int status; 1256 + u16 ctype; 1257 + 1258 + if (dir == DMA_FROM_DEVICE) 1259 + ctype = CDMA_CT_RD; 1260 + else 1261 + ctype = CDMA_CT_WR; 1262 + 1263 + cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc); 1264 + 1265 + dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir); 1266 + if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) { 1267 + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); 1268 + return -EIO; 1269 + } 1270 + 1271 + if (ctrl_dat && ctrl_dat_size) { 1272 + dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat, 1273 + ctrl_dat_size, dir); 1274 + if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) { 1275 + dma_unmap_single(cdns_ctrl->dev, dma_buf, 1276 + buf_size, dir); 1277 + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); 1278 + return -EIO; 1279 + } 1280 + } 1281 + 1282 + cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, 1283 + (void *)dma_buf, (void *)dma_ctrl_dat, 1284 + ctype); 1285 + 1286 + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); 1287 + 1288 + dma_unmap_single(cdns_ctrl->dev, dma_buf, 1289 + buf_size, dir); 1290 + 1291 + if (ctrl_dat && ctrl_dat_size) 1292 + dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat, 1293 + ctrl_dat_size, dir); 1294 + if (status) 1295 + return status; 1296 + 1297 + 
return cadence_nand_cdma_finish(cdns_ctrl); 1298 + } 1299 + 1300 + static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl, 1301 + struct cadence_nand_timings *t) 1302 + { 1303 + writel_relaxed(t->async_toggle_timings, 1304 + cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS); 1305 + writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0); 1306 + writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1); 1307 + writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2); 1308 + 1309 + if (cdns_ctrl->caps2.is_phy_type_dll) 1310 + writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL); 1311 + 1312 + writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL); 1313 + 1314 + if (cdns_ctrl->caps2.is_phy_type_dll) { 1315 + writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL); 1316 + writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING); 1317 + writel_relaxed(t->phy_dqs_timing, 1318 + cdns_ctrl->reg + PHY_DQS_TIMING); 1319 + writel_relaxed(t->phy_gate_lpbk_ctrl, 1320 + cdns_ctrl->reg + PHY_GATE_LPBK_CTRL); 1321 + writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE, 1322 + cdns_ctrl->reg + PHY_DLL_MASTER_CTRL); 1323 + writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL); 1324 + } 1325 + } 1326 + 1327 + static int cadence_nand_select_target(struct nand_chip *chip) 1328 + { 1329 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1330 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1331 + 1332 + if (chip == cdns_ctrl->selected_chip) 1333 + return 0; 1334 + 1335 + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, 1336 + 1000000, 1337 + CTRL_STATUS_CTRL_BUSY, true)) 1338 + return -ETIMEDOUT; 1339 + 1340 + cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings); 1341 + 1342 + cadence_nand_set_ecc_strength(cdns_ctrl, 1343 + cdns_chip->corr_str_idx); 1344 + 1345 + cadence_nand_set_erase_detection(cdns_ctrl, true, 1346 + chip->ecc.strength); 1347 + 1348 + cdns_ctrl->curr_trans_type = -1; 1349 + cdns_ctrl->selected_chip = chip; 1350 + 1351 + return 0; 1352 + } 1353 + 1354 + static int cadence_nand_erase(struct nand_chip *chip, u32 page) 1355 + { 1356 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1357 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1358 + int status; 1359 + u8 thread_nr = cdns_chip->cs[chip->cur_cs]; 1360 + 1361 + cadence_nand_cdma_desc_prepare(cdns_ctrl, 1362 + cdns_chip->cs[chip->cur_cs], 1363 + page, NULL, NULL, 1364 + CDMA_CT_ERASE); 1365 + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); 1366 + if (status) { 1367 + dev_err(cdns_ctrl->dev, "erase operation failed\n"); 1368 + return -EIO; 1369 + } 1370 + 1371 + status = cadence_nand_cdma_finish(cdns_ctrl); 1372 + if (status) 1373 + return status; 1374 + 1375 + return 0; 1376 + } 1377 + 1378 + static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf) 1379 + { 1380 + int status; 1381 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1382 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1383 + struct mtd_info *mtd = nand_to_mtd(chip); 1384 + 1385 + cadence_nand_prepare_data_size(chip, TT_BBM); 1386 + 1387 + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); 1388 + 1389 + /* 1390 + * Read only bad block marker from offset 1391 + * defined by a memory manufacturer. 
1392 + */ 1393 + status = cadence_nand_cdma_transfer(cdns_ctrl, 1394 + cdns_chip->cs[chip->cur_cs], 1395 + page, cdns_ctrl->buf, NULL, 1396 + mtd->oobsize, 1397 + 0, DMA_FROM_DEVICE, false); 1398 + if (status) { 1399 + dev_err(cdns_ctrl->dev, "read BBM failed\n"); 1400 + return -EIO; 1401 + } 1402 + 1403 + memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len); 1404 + 1405 + return 0; 1406 + } 1407 + 1408 + static int cadence_nand_write_page(struct nand_chip *chip, 1409 + const u8 *buf, int oob_required, 1410 + int page) 1411 + { 1412 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1413 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1414 + struct mtd_info *mtd = nand_to_mtd(chip); 1415 + int status; 1416 + u16 marker_val = 0xFFFF; 1417 + 1418 + status = cadence_nand_select_target(chip); 1419 + if (status) 1420 + return status; 1421 + 1422 + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, 1423 + mtd->writesize 1424 + + cdns_chip->bbm_offs, 1425 + 1); 1426 + 1427 + if (oob_required) { 1428 + marker_val = *(u16 *)(chip->oob_poi 1429 + + cdns_chip->bbm_offs); 1430 + } else { 1431 + /* Set oob data to 0xFF. */ 1432 + memset(cdns_ctrl->buf + mtd->writesize, 0xFF, 1433 + cdns_chip->avail_oob_size); 1434 + } 1435 + 1436 + cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val); 1437 + 1438 + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); 1439 + 1440 + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && 1441 + cdns_ctrl->caps2.data_control_supp) { 1442 + u8 *oob; 1443 + 1444 + if (oob_required) 1445 + oob = chip->oob_poi; 1446 + else 1447 + oob = cdns_ctrl->buf + mtd->writesize; 1448 + 1449 + status = cadence_nand_cdma_transfer(cdns_ctrl, 1450 + cdns_chip->cs[chip->cur_cs], 1451 + page, (void *)buf, oob, 1452 + mtd->writesize, 1453 + cdns_chip->avail_oob_size, 1454 + DMA_TO_DEVICE, true); 1455 + if (status) { 1456 + dev_err(cdns_ctrl->dev, "write page failed\n"); 1457 + return -EIO; 1458 + } 1459 + 1460 + return 0; 1461 + } 1462 + 1463 + if (oob_required) { 1464 + /* Transfer the data to the oob area. 
*/ 1465 + memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi, 1466 + cdns_chip->avail_oob_size); 1467 + } 1468 + 1469 + memcpy(cdns_ctrl->buf, buf, mtd->writesize); 1470 + 1471 + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); 1472 + 1473 + return cadence_nand_cdma_transfer(cdns_ctrl, 1474 + cdns_chip->cs[chip->cur_cs], 1475 + page, cdns_ctrl->buf, NULL, 1476 + mtd->writesize 1477 + + cdns_chip->avail_oob_size, 1478 + 0, DMA_TO_DEVICE, true); 1479 + } 1480 + 1481 + static int cadence_nand_write_oob(struct nand_chip *chip, int page) 1482 + { 1483 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1484 + struct mtd_info *mtd = nand_to_mtd(chip); 1485 + 1486 + memset(cdns_ctrl->buf, 0xFF, mtd->writesize); 1487 + 1488 + return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page); 1489 + } 1490 + 1491 + static int cadence_nand_write_page_raw(struct nand_chip *chip, 1492 + const u8 *buf, int oob_required, 1493 + int page) 1494 + { 1495 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1496 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1497 + struct mtd_info *mtd = nand_to_mtd(chip); 1498 + int writesize = mtd->writesize; 1499 + int oobsize = mtd->oobsize; 1500 + int ecc_steps = chip->ecc.steps; 1501 + int ecc_size = chip->ecc.size; 1502 + int ecc_bytes = chip->ecc.bytes; 1503 + void *tmp_buf = cdns_ctrl->buf; 1504 + int oob_skip = cdns_chip->bbm_len; 1505 + size_t size = writesize + oobsize; 1506 + int i, pos, len; 1507 + int status = 0; 1508 + 1509 + status = cadence_nand_select_target(chip); 1510 + if (status) 1511 + return status; 1512 + 1513 + /* 1514 + * Fill the buffer with 0xff first except the full page transfer. 1515 + * This simplifies the logic. 1516 + */ 1517 + if (!buf || !oob_required) 1518 + memset(tmp_buf, 0xff, size); 1519 + 1520 + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); 1521 + 1522 + /* Arrange the buffer for syndrome payload/ecc layout. */ 1523 + if (buf) { 1524 + for (i = 0; i < ecc_steps; i++) { 1525 + pos = i * (ecc_size + ecc_bytes); 1526 + len = ecc_size; 1527 + 1528 + if (pos >= writesize) 1529 + pos += oob_skip; 1530 + else if (pos + len > writesize) 1531 + len = writesize - pos; 1532 + 1533 + memcpy(tmp_buf + pos, buf, len); 1534 + buf += len; 1535 + if (len < ecc_size) { 1536 + len = ecc_size - len; 1537 + memcpy(tmp_buf + writesize + oob_skip, buf, 1538 + len); 1539 + buf += len; 1540 + } 1541 + } 1542 + } 1543 + 1544 + if (oob_required) { 1545 + const u8 *oob = chip->oob_poi; 1546 + u32 oob_data_offset = (cdns_chip->sector_count - 1) * 1547 + (cdns_chip->sector_size + chip->ecc.bytes) 1548 + + cdns_chip->sector_size + oob_skip; 1549 + 1550 + /* BBM at the beginning of the OOB area. */ 1551 + memcpy(tmp_buf + writesize, oob, oob_skip); 1552 + 1553 + /* OOB free. */ 1554 + memcpy(tmp_buf + oob_data_offset, oob, 1555 + cdns_chip->avail_oob_size); 1556 + oob += cdns_chip->avail_oob_size; 1557 + 1558 + /* OOB ECC. 
*/ 1559 + for (i = 0; i < ecc_steps; i++) { 1560 + pos = ecc_size + i * (ecc_size + ecc_bytes); 1561 + if (i == (ecc_steps - 1)) 1562 + pos += cdns_chip->avail_oob_size; 1563 + 1564 + len = ecc_bytes; 1565 + 1566 + if (pos >= writesize) 1567 + pos += oob_skip; 1568 + else if (pos + len > writesize) 1569 + len = writesize - pos; 1570 + 1571 + memcpy(tmp_buf + pos, oob, len); 1572 + oob += len; 1573 + if (len < ecc_bytes) { 1574 + len = ecc_bytes - len; 1575 + memcpy(tmp_buf + writesize + oob_skip, oob, 1576 + len); 1577 + oob += len; 1578 + } 1579 + } 1580 + } 1581 + 1582 + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); 1583 + 1584 + return cadence_nand_cdma_transfer(cdns_ctrl, 1585 + cdns_chip->cs[chip->cur_cs], 1586 + page, cdns_ctrl->buf, NULL, 1587 + mtd->writesize + 1588 + mtd->oobsize, 1589 + 0, DMA_TO_DEVICE, false); 1590 + } 1591 + 1592 + static int cadence_nand_write_oob_raw(struct nand_chip *chip, 1593 + int page) 1594 + { 1595 + return cadence_nand_write_page_raw(chip, NULL, true, page); 1596 + } 1597 + 1598 + static int cadence_nand_read_page(struct nand_chip *chip, 1599 + u8 *buf, int oob_required, int page) 1600 + { 1601 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1602 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1603 + struct mtd_info *mtd = nand_to_mtd(chip); 1604 + int status = 0; 1605 + int ecc_err_count = 0; 1606 + 1607 + status = cadence_nand_select_target(chip); 1608 + if (status) 1609 + return status; 1610 + 1611 + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, 1612 + mtd->writesize 1613 + + cdns_chip->bbm_offs, 1); 1614 + 1615 + /* 1616 + * If data buffer can be accessed by DMA and data_control feature 1617 + * is supported then transfer data and oob directly. 1618 + */ 1619 + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && 1620 + cdns_ctrl->caps2.data_control_supp) { 1621 + u8 *oob; 1622 + 1623 + if (oob_required) 1624 + oob = chip->oob_poi; 1625 + else 1626 + oob = cdns_ctrl->buf + mtd->writesize; 1627 + 1628 + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); 1629 + status = cadence_nand_cdma_transfer(cdns_ctrl, 1630 + cdns_chip->cs[chip->cur_cs], 1631 + page, buf, oob, 1632 + mtd->writesize, 1633 + cdns_chip->avail_oob_size, 1634 + DMA_FROM_DEVICE, true); 1635 + /* Otherwise use bounce buffer. */ 1636 + } else { 1637 + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); 1638 + status = cadence_nand_cdma_transfer(cdns_ctrl, 1639 + cdns_chip->cs[chip->cur_cs], 1640 + page, cdns_ctrl->buf, 1641 + NULL, mtd->writesize 1642 + + cdns_chip->avail_oob_size, 1643 + 0, DMA_FROM_DEVICE, true); 1644 + 1645 + memcpy(buf, cdns_ctrl->buf, mtd->writesize); 1646 + if (oob_required) 1647 + memcpy(chip->oob_poi, 1648 + cdns_ctrl->buf + mtd->writesize, 1649 + mtd->oobsize); 1650 + } 1651 + 1652 + switch (status) { 1653 + case STAT_ECC_UNCORR: 1654 + mtd->ecc_stats.failed++; 1655 + ecc_err_count++; 1656 + break; 1657 + case STAT_ECC_CORR: 1658 + ecc_err_count = FIELD_GET(CDMA_CS_MAXERR, 1659 + cdns_ctrl->cdma_desc->status); 1660 + mtd->ecc_stats.corrected += ecc_err_count; 1661 + break; 1662 + case STAT_ERASED: 1663 + case STAT_OK: 1664 + break; 1665 + default: 1666 + dev_err(cdns_ctrl->dev, "read page failed\n"); 1667 + return -EIO; 1668 + } 1669 + 1670 + if (oob_required) 1671 + if (cadence_nand_read_bbm(chip, page, chip->oob_poi)) 1672 + return -EIO; 1673 + 1674 + return ecc_err_count; 1675 + } 1676 + 1677 + /* Reads OOB data from the device. 
*/ 1678 + static int cadence_nand_read_oob(struct nand_chip *chip, int page) 1679 + { 1680 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1681 + 1682 + return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page); 1683 + } 1684 + 1685 + static int cadence_nand_read_page_raw(struct nand_chip *chip, 1686 + u8 *buf, int oob_required, int page) 1687 + { 1688 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1689 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1690 + struct mtd_info *mtd = nand_to_mtd(chip); 1691 + int oob_skip = cdns_chip->bbm_len; 1692 + int writesize = mtd->writesize; 1693 + int ecc_steps = chip->ecc.steps; 1694 + int ecc_size = chip->ecc.size; 1695 + int ecc_bytes = chip->ecc.bytes; 1696 + void *tmp_buf = cdns_ctrl->buf; 1697 + int i, pos, len; 1698 + int status = 0; 1699 + 1700 + status = cadence_nand_select_target(chip); 1701 + if (status) 1702 + return status; 1703 + 1704 + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); 1705 + 1706 + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); 1707 + status = cadence_nand_cdma_transfer(cdns_ctrl, 1708 + cdns_chip->cs[chip->cur_cs], 1709 + page, cdns_ctrl->buf, NULL, 1710 + mtd->writesize 1711 + + mtd->oobsize, 1712 + 0, DMA_FROM_DEVICE, false); 1713 + 1714 + switch (status) { 1715 + case STAT_ERASED: 1716 + case STAT_OK: 1717 + break; 1718 + default: 1719 + dev_err(cdns_ctrl->dev, "read raw page failed\n"); 1720 + return -EIO; 1721 + } 1722 + 1723 + /* Arrange the buffer for syndrome payload/ecc layout. */ 1724 + if (buf) { 1725 + for (i = 0; i < ecc_steps; i++) { 1726 + pos = i * (ecc_size + ecc_bytes); 1727 + len = ecc_size; 1728 + 1729 + if (pos >= writesize) 1730 + pos += oob_skip; 1731 + else if (pos + len > writesize) 1732 + len = writesize - pos; 1733 + 1734 + memcpy(buf, tmp_buf + pos, len); 1735 + buf += len; 1736 + if (len < ecc_size) { 1737 + len = ecc_size - len; 1738 + memcpy(buf, tmp_buf + writesize + oob_skip, 1739 + len); 1740 + buf += len; 1741 + } 1742 + } 1743 + } 1744 + 1745 + if (oob_required) { 1746 + u8 *oob = chip->oob_poi; 1747 + u32 oob_data_offset = (cdns_chip->sector_count - 1) * 1748 + (cdns_chip->sector_size + chip->ecc.bytes) 1749 + + cdns_chip->sector_size + oob_skip; 1750 + 1751 + /* OOB free. */ 1752 + memcpy(oob, tmp_buf + oob_data_offset, 1753 + cdns_chip->avail_oob_size); 1754 + 1755 + /* BBM at the beginning of the OOB area. 
*/ 1756 + memcpy(oob, tmp_buf + writesize, oob_skip); 1757 + 1758 + oob += cdns_chip->avail_oob_size; 1759 + 1760 + /* OOB ECC */ 1761 + for (i = 0; i < ecc_steps; i++) { 1762 + pos = ecc_size + i * (ecc_size + ecc_bytes); 1763 + len = ecc_bytes; 1764 + 1765 + if (i == (ecc_steps - 1)) 1766 + pos += cdns_chip->avail_oob_size; 1767 + 1768 + if (pos >= writesize) 1769 + pos += oob_skip; 1770 + else if (pos + len > writesize) 1771 + len = writesize - pos; 1772 + 1773 + memcpy(oob, tmp_buf + pos, len); 1774 + oob += len; 1775 + if (len < ecc_bytes) { 1776 + len = ecc_bytes - len; 1777 + memcpy(oob, tmp_buf + writesize + oob_skip, 1778 + len); 1779 + oob += len; 1780 + } 1781 + } 1782 + } 1783 + 1784 + return 0; 1785 + } 1786 + 1787 + static int cadence_nand_read_oob_raw(struct nand_chip *chip, 1788 + int page) 1789 + { 1790 + return cadence_nand_read_page_raw(chip, NULL, true, page); 1791 + } 1792 + 1793 + static void cadence_nand_slave_dma_transfer_finished(void *data) 1794 + { 1795 + struct completion *finished = data; 1796 + 1797 + complete(finished); 1798 + } 1799 + 1800 + static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, 1801 + void *buf, 1802 + dma_addr_t dev_dma, size_t len, 1803 + enum dma_data_direction dir) 1804 + { 1805 + DECLARE_COMPLETION_ONSTACK(finished); 1806 + struct dma_chan *chan; 1807 + struct dma_device *dma_dev; 1808 + dma_addr_t src_dma, dst_dma, buf_dma; 1809 + struct dma_async_tx_descriptor *tx; 1810 + dma_cookie_t cookie; 1811 + 1812 + chan = cdns_ctrl->dmac; 1813 + dma_dev = chan->device; 1814 + 1815 + buf_dma = dma_map_single(dma_dev->dev, buf, len, dir); 1816 + if (dma_mapping_error(dma_dev->dev, buf_dma)) { 1817 + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); 1818 + goto err; 1819 + } 1820 + 1821 + if (dir == DMA_FROM_DEVICE) { 1822 + src_dma = cdns_ctrl->io.dma; 1823 + dst_dma = buf_dma; 1824 + } else { 1825 + src_dma = buf_dma; 1826 + dst_dma = cdns_ctrl->io.dma; 1827 + } 1828 + 1829 + tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len, 1830 + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); 1831 + if (!tx) { 1832 + dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n"); 1833 + goto err_unmap; 1834 + } 1835 + 1836 + tx->callback = cadence_nand_slave_dma_transfer_finished; 1837 + tx->callback_param = &finished; 1838 + 1839 + cookie = dmaengine_submit(tx); 1840 + if (dma_submit_error(cookie)) { 1841 + dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n"); 1842 + goto err_unmap; 1843 + } 1844 + 1845 + dma_async_issue_pending(cdns_ctrl->dmac); 1846 + wait_for_completion(&finished); 1847 + 1848 + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); 1849 + 1850 + return 0; 1851 + 1852 + err_unmap: 1853 + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); 1854 + 1855 + err: 1856 + dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n"); 1857 + 1858 + return -EIO; 1859 + } 1860 + 1861 + static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl, 1862 + u8 *buf, int len) 1863 + { 1864 + u8 thread_nr = 0; 1865 + u32 sdma_size; 1866 + int status; 1867 + 1868 + /* Wait until the slave DMA interface is ready for data transfer. 
*/ 1869 + status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size); 1870 + if (status) 1871 + return status; 1872 + 1873 + if (!cdns_ctrl->caps1->has_dma) { 1874 + int len_in_words = len >> 2; 1875 + 1876 + /* read the aligned part of the data */ 1877 + ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words); 1878 + if (sdma_size > len) { 1879 + /* read the remaining data from the slave DMA interface, if any */ 1880 + ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf, 1881 + sdma_size / 4 - len_in_words); 1882 + /* copy the remaining data */ 1883 + memcpy(buf + (len_in_words << 2), cdns_ctrl->buf, 1884 + len - (len_in_words << 2)); 1885 + } 1886 + return 0; 1887 + } 1888 + 1889 + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) { 1890 + status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf, 1891 + cdns_ctrl->io.dma, 1892 + len, DMA_FROM_DEVICE); 1893 + if (status == 0) 1894 + return 0; 1895 + 1896 + dev_warn(cdns_ctrl->dev, 1897 + "Slave DMA transfer failed. Try again using bounce buffer."); 1898 + } 1899 + 1900 + /* If DMA transfer is not possible or failed then use bounce buffer. */ 1901 + status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf, 1902 + cdns_ctrl->io.dma, 1903 + sdma_size, DMA_FROM_DEVICE); 1904 + 1905 + if (status) { 1906 + dev_err(cdns_ctrl->dev, "Slave DMA transfer failed"); 1907 + return status; 1908 + } 1909 + 1910 + memcpy(buf, cdns_ctrl->buf, len); 1911 + 1912 + return 0; 1913 + } 1914 + 1915 + static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl, 1916 + const u8 *buf, int len) 1917 + { 1918 + u8 thread_nr = 0; 1919 + u32 sdma_size; 1920 + int status; 1921 + 1922 + /* Wait until the slave DMA interface is ready for data transfer. */ 1923 + status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size); 1924 + if (status) 1925 + return status; 1926 + 1927 + if (!cdns_ctrl->caps1->has_dma) { 1928 + int len_in_words = len >> 2; 1929 + 1930 + iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words); 1931 + if (sdma_size > len) { 1932 + /* copy the remaining data */ 1933 + memcpy(cdns_ctrl->buf, buf + (len_in_words << 2), 1934 + len - (len_in_words << 2)); 1935 + /* write all the data the NAND controller expects */ 1936 + iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf, 1937 + sdma_size / 4 - len_in_words); 1938 + } 1939 + 1940 + return 0; 1941 + } 1942 + 1943 + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) { 1944 + status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf, 1945 + cdns_ctrl->io.dma, 1946 + len, DMA_TO_DEVICE); 1947 + if (status == 0) 1948 + return 0; 1949 + 1950 + dev_warn(cdns_ctrl->dev, 1951 + "Slave DMA transfer failed. Try again using bounce buffer."); 1952 + } 1953 + 1954 + /* If DMA transfer is not possible or failed then use bounce buffer. */ 1955 + memcpy(cdns_ctrl->buf, buf, len); 1956 + 1957 + status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf, 1958 + cdns_ctrl->io.dma, 1959 + sdma_size, DMA_TO_DEVICE); 1960 + 1961 + if (status) 1962 + dev_err(cdns_ctrl->dev, "Slave DMA transfer failed"); 1963 + 1964 + return status; 1965 + } 1966 + 1967 + static int cadence_nand_force_byte_access(struct nand_chip *chip, 1968 + bool force_8bit) 1969 + { 1970 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1971 + int status; 1972 + 1973 + /* 1974 + * Callers of this function do not verify if the NAND is using a 16-bit 1975 + * or an 8-bit bus for normal operations, so we need to take care of that 1976 + * here by leaving the configuration unchanged if the NAND does not have 1977 + * the NAND_BUSWIDTH_16 flag set. 
1978 + */ 1979 + if (!(chip->options & NAND_BUSWIDTH_16)) 1980 + return 0; 1981 + 1982 + status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit); 1983 + 1984 + return status; 1985 + } 1986 + 1987 + static int cadence_nand_cmd_opcode(struct nand_chip *chip, 1988 + const struct nand_subop *subop) 1989 + { 1990 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 1991 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 1992 + const struct nand_op_instr *instr; 1993 + unsigned int op_id = 0; 1994 + u64 mini_ctrl_cmd = 0; 1995 + int ret; 1996 + 1997 + instr = &subop->instrs[op_id]; 1998 + 1999 + if (instr->delay_ns > 0) 2000 + mini_ctrl_cmd |= GCMD_LAY_TWB; 2001 + 2002 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, 2003 + GCMD_LAY_INSTR_CMD); 2004 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, 2005 + instr->ctx.cmd.opcode); 2006 + 2007 + ret = cadence_nand_generic_cmd_send(cdns_ctrl, 2008 + cdns_chip->cs[chip->cur_cs], 2009 + mini_ctrl_cmd); 2010 + if (ret) 2011 + dev_err(cdns_ctrl->dev, "send cmd %x failed\n", 2012 + instr->ctx.cmd.opcode); 2013 + 2014 + return ret; 2015 + } 2016 + 2017 + static int cadence_nand_cmd_address(struct nand_chip *chip, 2018 + const struct nand_subop *subop) 2019 + { 2020 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 2021 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2022 + const struct nand_op_instr *instr; 2023 + unsigned int op_id = 0; 2024 + u64 mini_ctrl_cmd = 0; 2025 + unsigned int offset, naddrs; 2026 + u64 address = 0; 2027 + const u8 *addrs; 2028 + int ret; 2029 + int i; 2030 + 2031 + instr = &subop->instrs[op_id]; 2032 + 2033 + if (instr->delay_ns > 0) 2034 + mini_ctrl_cmd |= GCMD_LAY_TWB; 2035 + 2036 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, 2037 + GCMD_LAY_INSTR_ADDR); 2038 + 2039 + offset = nand_subop_get_addr_start_off(subop, op_id); 2040 + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); 2041 + addrs = &instr->ctx.addr.addrs[offset]; 2042 + 2043 + for (i = 0; i < naddrs; i++) 2044 + address |= (u64)addrs[i] << (8 * i); 2045 + 2046 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR, 2047 + address); 2048 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE, 2049 + naddrs - 1); 2050 + 2051 + ret = cadence_nand_generic_cmd_send(cdns_ctrl, 2052 + cdns_chip->cs[chip->cur_cs], 2053 + mini_ctrl_cmd); 2054 + if (ret) 2055 + dev_err(cdns_ctrl->dev, "send address %llx failed\n", address); 2056 + 2057 + return ret; 2058 + } 2059 + 2060 + static int cadence_nand_cmd_erase(struct nand_chip *chip, 2061 + const struct nand_subop *subop) 2062 + { 2063 + unsigned int op_id; 2064 + 2065 + if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) { 2066 + int i; 2067 + const struct nand_op_instr *instr = NULL; 2068 + unsigned int offset, naddrs; 2069 + const u8 *addrs; 2070 + u32 page = 0; 2071 + 2072 + instr = &subop->instrs[1]; 2073 + offset = nand_subop_get_addr_start_off(subop, 1); 2074 + naddrs = nand_subop_get_num_addr_cyc(subop, 1); 2075 + addrs = &instr->ctx.addr.addrs[offset]; 2076 + 2077 + for (i = 0; i < naddrs; i++) 2078 + page |= (u32)addrs[i] << (8 * i); 2079 + 2080 + return cadence_nand_erase(chip, page); 2081 + } 2082 + 2083 + /* 2084 + * If it is not an erase operation then handle operation 2085 + * by calling exec_op function. 
2086 + */ 2087 + for (op_id = 0; op_id < subop->ninstrs; op_id++) { 2088 + int ret; 2089 + const struct nand_operation nand_op = { 2090 + .cs = chip->cur_cs, 2091 + .instrs = &subop->instrs[op_id], 2092 + .ninstrs = 1}; 2093 + ret = chip->controller->ops->exec_op(chip, &nand_op, false); 2094 + if (ret) 2095 + return ret; 2096 + } 2097 + 2098 + return 0; 2099 + } 2100 + 2101 + static int cadence_nand_cmd_data(struct nand_chip *chip, 2102 + const struct nand_subop *subop) 2103 + { 2104 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 2105 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2106 + const struct nand_op_instr *instr; 2107 + unsigned int offset, op_id = 0; 2108 + u64 mini_ctrl_cmd = 0; 2109 + int len = 0; 2110 + int ret; 2111 + 2112 + instr = &subop->instrs[op_id]; 2113 + 2114 + if (instr->delay_ns > 0) 2115 + mini_ctrl_cmd |= GCMD_LAY_TWB; 2116 + 2117 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, 2118 + GCMD_LAY_INSTR_DATA); 2119 + 2120 + if (instr->type == NAND_OP_DATA_OUT_INSTR) 2121 + mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR, 2122 + GCMD_DIR_WRITE); 2123 + 2124 + len = nand_subop_get_data_len(subop, op_id); 2125 + offset = nand_subop_get_data_start_off(subop, op_id); 2126 + mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1); 2127 + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len); 2128 + if (instr->ctx.data.force_8bit) { 2129 + ret = cadence_nand_force_byte_access(chip, true); 2130 + if (ret) { 2131 + dev_err(cdns_ctrl->dev, 2132 + "cannot change byte access generic data cmd failed\n"); 2133 + return ret; 2134 + } 2135 + } 2136 + 2137 + ret = cadence_nand_generic_cmd_send(cdns_ctrl, 2138 + cdns_chip->cs[chip->cur_cs], 2139 + mini_ctrl_cmd); 2140 + if (ret) { 2141 + dev_err(cdns_ctrl->dev, "send generic data cmd failed\n"); 2142 + return ret; 2143 + } 2144 + 2145 + if (instr->type == NAND_OP_DATA_IN_INSTR) { 2146 + void *buf = instr->ctx.data.buf.in + offset; 2147 + 2148 + ret = cadence_nand_read_buf(cdns_ctrl, buf, len); 2149 + } else { 2150 + const void *buf = instr->ctx.data.buf.out + offset; 2151 + 2152 + ret = cadence_nand_write_buf(cdns_ctrl, buf, len); 2153 + } 2154 + 2155 + if (ret) { 2156 + dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n"); 2157 + return ret; 2158 + } 2159 + 2160 + if (instr->ctx.data.force_8bit) { 2161 + ret = cadence_nand_force_byte_access(chip, false); 2162 + if (ret) { 2163 + dev_err(cdns_ctrl->dev, 2164 + "cannot change byte access generic data cmd failed\n"); 2165 + } 2166 + } 2167 + 2168 + return ret; 2169 + } 2170 + 2171 + static int cadence_nand_cmd_waitrdy(struct nand_chip *chip, 2172 + const struct nand_subop *subop) 2173 + { 2174 + int status; 2175 + unsigned int op_id = 0; 2176 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 2177 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2178 + const struct nand_op_instr *instr = &subop->instrs[op_id]; 2179 + u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; 2180 + 2181 + status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS, 2182 + timeout_us, 2183 + BIT(cdns_chip->cs[chip->cur_cs]), 2184 + false); 2185 + return status; 2186 + } 2187 + 2188 + static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER( 2189 + NAND_OP_PARSER_PATTERN( 2190 + cadence_nand_cmd_erase, 2191 + NAND_OP_PARSER_PAT_CMD_ELEM(false), 2192 + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC), 2193 + NAND_OP_PARSER_PAT_CMD_ELEM(false), 2194 + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), 2195 + 
NAND_OP_PARSER_PATTERN( 2196 + cadence_nand_cmd_opcode, 2197 + NAND_OP_PARSER_PAT_CMD_ELEM(false)), 2198 + NAND_OP_PARSER_PATTERN( 2199 + cadence_nand_cmd_address, 2200 + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)), 2201 + NAND_OP_PARSER_PATTERN( 2202 + cadence_nand_cmd_data, 2203 + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)), 2204 + NAND_OP_PARSER_PATTERN( 2205 + cadence_nand_cmd_data, 2206 + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)), 2207 + NAND_OP_PARSER_PATTERN( 2208 + cadence_nand_cmd_waitrdy, 2209 + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)) 2210 + ); 2211 + 2212 + static int cadence_nand_exec_op(struct nand_chip *chip, 2213 + const struct nand_operation *op, 2214 + bool check_only) 2215 + { 2216 + int status = cadence_nand_select_target(chip); 2217 + 2218 + if (status) 2219 + return status; 2220 + 2221 + return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op, 2222 + check_only); 2223 + } 2224 + 2225 + static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section, 2226 + struct mtd_oob_region *oobregion) 2227 + { 2228 + struct nand_chip *chip = mtd_to_nand(mtd); 2229 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2230 + 2231 + if (section) 2232 + return -ERANGE; 2233 + 2234 + oobregion->offset = cdns_chip->bbm_len; 2235 + oobregion->length = cdns_chip->avail_oob_size 2236 + - cdns_chip->bbm_len; 2237 + 2238 + return 0; 2239 + } 2240 + 2241 + static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section, 2242 + struct mtd_oob_region *oobregion) 2243 + { 2244 + struct nand_chip *chip = mtd_to_nand(mtd); 2245 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2246 + 2247 + if (section) 2248 + return -ERANGE; 2249 + 2250 + oobregion->offset = cdns_chip->avail_oob_size; 2251 + oobregion->length = chip->ecc.total; 2252 + 2253 + return 0; 2254 + } 2255 + 2256 + static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = { 2257 + .free = cadence_nand_ooblayout_free, 2258 + .ecc = cadence_nand_ooblayout_ecc, 2259 + }; 2260 + 2261 + static int calc_cycl(u32 timing, u32 clock) 2262 + { 2263 + if (timing == 0 || clock == 0) 2264 + return 0; 2265 + 2266 + if ((timing % clock) > 0) 2267 + return timing / clock; 2268 + else 2269 + return timing / clock - 1; 2270 + } 2271 + 2272 + /* Calculate max data valid window. */ 2273 + static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min, 2274 + u32 board_delay_skew_min, u32 ext_mode) 2275 + { 2276 + if (ext_mode == 0) 2277 + clk_period /= 2; 2278 + 2279 + return (trp_cnt + 1) * clk_period + trhoh_min + 2280 + board_delay_skew_min; 2281 + } 2282 + 2283 + /* Calculate data valid window. 
*/ 2284 + static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, 2285 + u32 trea_max, u32 ext_mode) 2286 + { 2287 + if (ext_mode == 0) 2288 + clk_period /= 2; 2289 + 2290 + return (trp_cnt + 1) * clk_period + trhoh_min - trea_max; 2291 + } 2292 + 2293 + static int 2294 + cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, 2295 + const struct nand_data_interface *conf) 2296 + { 2297 + const struct nand_sdr_timings *sdr; 2298 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 2299 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2300 + struct cadence_nand_timings *t = &cdns_chip->timings; 2301 + u32 reg; 2302 + u32 board_delay = cdns_ctrl->board_delay; 2303 + u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL, 2304 + cdns_ctrl->nf_clk_rate); 2305 + u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt; 2306 + u32 tfeat_cnt, trhz_cnt, tvdly_cnt; 2307 + u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt; 2308 + u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0; 2309 + u32 if_skew = cdns_ctrl->caps1->if_skew; 2310 + u32 board_delay_skew_min = board_delay - if_skew; 2311 + u32 board_delay_skew_max = board_delay + if_skew; 2312 + u32 dqs_sampl_res, phony_dqs_mod; 2313 + u32 tdvw, tdvw_min, tdvw_max; 2314 + u32 ext_rd_mode, ext_wr_mode; 2315 + u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0; 2316 + u32 sampling_point; 2317 + 2318 + sdr = nand_get_sdr_timings(conf); 2319 + if (IS_ERR(sdr)) 2320 + return PTR_ERR(sdr); 2321 + 2322 + memset(t, 0, sizeof(*t)); 2323 + /* Sampling point calculation. */ 2324 + 2325 + if (cdns_ctrl->caps2.is_phy_type_dll) 2326 + phony_dqs_mod = 2; 2327 + else 2328 + phony_dqs_mod = 1; 2329 + 2330 + dqs_sampl_res = clk_period / phony_dqs_mod; 2331 + 2332 + tdvw_min = sdr->tREA_max + board_delay_skew_max; 2333 + /* 2334 + * The idea of these calculations is to get the optimum values 2335 + * for the tRP and tRH timings. If it is NOT possible to sample data 2336 + * with optimal tRP/tRH settings, the parameters will be extended. 2337 + * If clk_period is 50ns (the lowest value) this condition is met 2338 + * for asynchronous timing modes 1, 2, 3, 4 and 5. 2339 + * If clk_period is 20ns the condition is met only 2340 + * for asynchronous timing mode 5. 2341 + */ 2342 + if (sdr->tRC_min <= clk_period && 2343 + sdr->tRP_min <= (clk_period / 2) && 2344 + sdr->tREH_min <= (clk_period / 2)) { 2345 + /* Performance mode. */ 2346 + ext_rd_mode = 0; 2347 + tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min, 2348 + sdr->tREA_max, ext_rd_mode); 2349 + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min, 2350 + board_delay_skew_min, 2351 + ext_rd_mode); 2352 + /* 2353 + * Check if the data valid window and a sampling point can be found 2354 + * and the point is not on the edge (i.e. we have hold margin). 2355 + * If not, extend the tRP timings. 2356 + */ 2357 + if (tdvw > 0) { 2358 + if (tdvw_max <= tdvw_min || 2359 + (tdvw_max % dqs_sampl_res) == 0) { 2360 + /* 2361 + * No valid sampling point, so the RE pulse needs 2362 + * to be widened by half a clock cycle. 2363 + */ 2364 + ext_rd_mode = 1; 2365 + } 2366 + } else { 2367 + /* 2368 + * There is no valid window 2369 + * in which to sample the data, so tRP needs to be widened. 2370 + * Very conservative calculations are performed here. 2371 + */ 2372 + trp_cnt = (sdr->tREA_max + board_delay_skew_max 2373 + + dqs_sampl_res) / clk_period; 2374 + ext_rd_mode = 1; 2375 + } 2376 + 2377 + } else { 2378 + /* Extended read mode. 
*/ 2379 + u32 trh; 2380 + 2381 + ext_rd_mode = 1; 2382 + trp_cnt = calc_cycl(sdr->tRP_min, clk_period); 2383 + trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period); 2384 + if (sdr->tREH_min >= trh) 2385 + trh_cnt = calc_cycl(sdr->tREH_min, clk_period); 2386 + else 2387 + trh_cnt = calc_cycl(trh, clk_period); 2388 + 2389 + tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min, 2390 + sdr->tREA_max, ext_rd_mode); 2391 + /* 2392 + * Check if the data valid window and a sampling point can be found, 2393 + * or, if the point is at the edge, whether the previous one is valid 2394 + * - if not, extend the tRP timings. 2395 + */ 2396 + if (tdvw > 0) { 2397 + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, 2398 + sdr->tRHOH_min, 2399 + board_delay_skew_min, 2400 + ext_rd_mode); 2401 + 2402 + if ((((tdvw_max / dqs_sampl_res) 2403 + * dqs_sampl_res) <= tdvw_min) || 2404 + (((tdvw_max % dqs_sampl_res) == 0) && 2405 + (((tdvw_max / dqs_sampl_res - 1) 2406 + * dqs_sampl_res) <= tdvw_min))) { 2407 + /* 2408 + * The data valid window is narrower than the 2409 + * sampling resolution and does not hit any 2410 + * sampling point. To be sure a sampling point 2411 + * will be found, the RE low pulse width is 2412 + * extended by one clock cycle. 2413 + */ 2414 + trp_cnt = trp_cnt + 1; 2415 + } 2416 + } else { 2417 + /* 2418 + * There is no valid window in which to sample the data. 2419 + * tRP needs to be widened. 2420 + * Very conservative calculations are performed here. 2421 + */ 2422 + trp_cnt = (sdr->tREA_max + board_delay_skew_max 2423 + + dqs_sampl_res) / clk_period; 2424 + } 2425 + } 2426 + 2427 + tdvw_max = calc_tdvw_max(trp_cnt, clk_period, 2428 + sdr->tRHOH_min, 2429 + board_delay_skew_min, ext_rd_mode); 2430 + 2431 + if (sdr->tWC_min <= clk_period && 2432 + (sdr->tWP_min + if_skew) <= (clk_period / 2) && 2433 + (sdr->tWH_min + if_skew) <= (clk_period / 2)) { 2434 + ext_wr_mode = 0; 2435 + } else { 2436 + u32 twh; 2437 + 2438 + ext_wr_mode = 1; 2439 + twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period); 2440 + if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew)) 2441 + twp_cnt = calc_cycl(sdr->tALS_min + if_skew, 2442 + clk_period); 2443 + 2444 + twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period); 2445 + if (sdr->tWH_min >= twh) 2446 + twh = sdr->tWH_min; 2447 + 2448 + twh_cnt = calc_cycl(twh + if_skew, clk_period); 2449 + } 2450 + 2451 + reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt); 2452 + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt); 2453 + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt); 2454 + reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt); 2455 + t->async_toggle_timings = reg; 2456 + dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg); 2457 + 2458 + tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period); 2459 + tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period); 2460 + twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period); 2461 + trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period); 2462 + reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt); 2463 + 2464 + /* 2465 + * If the timing exceeds the delay field in the timing register, 2466 + * use the maximum value. 2467 + */ 2468 + if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt)) 2469 + reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt); 2470 + else 2471 + reg |= TIMINGS0_TCCS; 2472 + 2473 + reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt); 2474 + reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt); 2475 + t->timings0 = reg; 2476 + dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg); 2477 + 2478 + /* The following is related to a single signal, so skew is not needed. 
*/ 2479 + trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period); 2480 + trhz_cnt = trhz_cnt + 1; 2481 + twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period); 2482 + /* 2483 + * Because of the two-stage syncflop the value must be increased by 3; 2484 + * the first increment is related to the synchronization, the second 2485 + * to the output interface delay. 2486 + */ 2487 + twb_cnt = twb_cnt + 3 + 5; 2488 + /* 2489 + * The following is related to the WE# edge of the random data input 2490 + * sequence, so skew is not needed. 2491 + */ 2492 + tvdly_cnt = calc_cycl(500000 + if_skew, clk_period); 2493 + reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt); 2494 + reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt); 2495 + reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt); 2496 + t->timings1 = reg; 2497 + dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg); 2498 + 2499 + tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period); 2500 + if (tfeat_cnt < twb_cnt) 2501 + tfeat_cnt = twb_cnt; 2502 + 2503 + tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period); 2504 + tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period); 2505 + 2506 + reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt); 2507 + reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt); 2508 + reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt); 2509 + t->timings2 = reg; 2510 + dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg); 2511 + 2512 + if (cdns_ctrl->caps2.is_phy_type_dll) { 2513 + reg = DLL_PHY_CTRL_DLL_RST_N; 2514 + if (ext_wr_mode) 2515 + reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE; 2516 + if (ext_rd_mode) 2517 + reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE; 2518 + 2519 + reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7); 2520 + reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7); 2521 + t->dll_phy_ctrl = reg; 2522 + dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg); 2523 + } 2524 + 2525 + /* Sampling point calculation. */ 2526 + if ((tdvw_max % dqs_sampl_res) > 0) 2527 + sampling_point = tdvw_max / dqs_sampl_res; 2528 + else 2529 + sampling_point = (tdvw_max / dqs_sampl_res - 1); 2530 + 2531 + if (sampling_point * dqs_sampl_res > tdvw_min) { 2532 + dll_phy_dqs_timing = 2533 + FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4); 2534 + dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS; 2535 + phony_dqs_timing = sampling_point / phony_dqs_mod; 2536 + 2537 + if ((sampling_point % 2) > 0) { 2538 + dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL; 2539 + if ((tdvw_max % dqs_sampl_res) == 0) 2540 + /* 2541 + * Calculation for a sampling point at the edge 2542 + * of the data window that is an odd number. 
2543 + */ 2544 + phony_dqs_timing = (tdvw_max / dqs_sampl_res) 2545 + / phony_dqs_mod - 1; 2546 + 2547 + if (!cdns_ctrl->caps2.is_phy_type_dll) 2548 + phony_dqs_timing--; 2549 + 2550 + } else { 2551 + phony_dqs_timing--; 2552 + } 2553 + rd_del_sel = phony_dqs_timing + 3; 2554 + } else { 2555 + dev_warn(cdns_ctrl->dev, 2556 + "ERROR : cannot find valid sampling point\n"); 2557 + } 2558 + 2559 + reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing); 2560 + if (cdns_ctrl->caps2.is_phy_type_dll) 2561 + reg |= PHY_CTRL_SDR_DQS; 2562 + t->phy_ctrl = reg; 2563 + dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg); 2564 + 2565 + if (cdns_ctrl->caps2.is_phy_type_dll) { 2566 + dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0); 2567 + dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2); 2568 + dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n", 2569 + dll_phy_dqs_timing); 2570 + t->phy_dqs_timing = dll_phy_dqs_timing; 2571 + 2572 + reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel); 2573 + dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n", 2574 + reg); 2575 + t->phy_gate_lpbk_ctrl = reg; 2576 + 2577 + dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n", 2578 + PHY_DLL_MASTER_CTRL_BYPASS_MODE); 2579 + dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0); 2580 + } 2581 + 2582 + return 0; 2583 + } 2584 + 2585 + int cadence_nand_attach_chip(struct nand_chip *chip) 2586 + { 2587 + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); 2588 + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); 2589 + u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes; 2590 + struct mtd_info *mtd = nand_to_mtd(chip); 2591 + u32 max_oob_data_size; 2592 + int ret; 2593 + 2594 + if (chip->options & NAND_BUSWIDTH_16) { 2595 + ret = cadence_nand_set_access_width16(cdns_ctrl, true); 2596 + if (ret) 2597 + return ret; 2598 + } 2599 + 2600 + chip->bbt_options |= NAND_BBT_USE_FLASH; 2601 + chip->bbt_options |= NAND_BBT_NO_OOB; 2602 + chip->ecc.mode = NAND_ECC_HW; 2603 + 2604 + chip->options |= NAND_NO_SUBPAGE_WRITE; 2605 + 2606 + cdns_chip->bbm_offs = chip->badblockpos; 2607 + if (chip->options & NAND_BUSWIDTH_16) { 2608 + cdns_chip->bbm_offs &= ~0x01; 2609 + cdns_chip->bbm_len = 2; 2610 + } else { 2611 + cdns_chip->bbm_len = 1; 2612 + } 2613 + 2614 + ret = nand_ecc_choose_conf(chip, 2615 + &cdns_ctrl->ecc_caps, 2616 + mtd->oobsize - cdns_chip->bbm_len); 2617 + if (ret) { 2618 + dev_err(cdns_ctrl->dev, "ECC configuration failed\n"); 2619 + return ret; 2620 + } 2621 + 2622 + dev_dbg(cdns_ctrl->dev, 2623 + "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 2624 + chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 2625 + 2626 + /* Error correction configuration. 
*/ 2627 + cdns_chip->sector_size = chip->ecc.size; 2628 + cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size; 2629 + 2630 + cdns_chip->avail_oob_size = mtd->oobsize - ecc_size; 2631 + 2632 + max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR; 2633 + 2634 + if (cdns_chip->avail_oob_size > max_oob_data_size) 2635 + cdns_chip->avail_oob_size = max_oob_data_size; 2636 + 2637 + if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size) 2638 + > mtd->oobsize) 2639 + cdns_chip->avail_oob_size -= 4; 2640 + 2641 + ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength); 2642 + if (ret < 0) 2643 + return -EINVAL; 2644 + 2645 + cdns_chip->corr_str_idx = (u8)ret; 2646 + 2647 + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, 2648 + 1000000, 2649 + CTRL_STATUS_CTRL_BUSY, true)) 2650 + return -ETIMEDOUT; 2651 + 2652 + cadence_nand_set_ecc_strength(cdns_ctrl, 2653 + cdns_chip->corr_str_idx); 2654 + 2655 + cadence_nand_set_erase_detection(cdns_ctrl, true, 2656 + chip->ecc.strength); 2657 + 2658 + /* Override the default read operations. */ 2659 + chip->ecc.read_page = cadence_nand_read_page; 2660 + chip->ecc.read_page_raw = cadence_nand_read_page_raw; 2661 + chip->ecc.write_page = cadence_nand_write_page; 2662 + chip->ecc.write_page_raw = cadence_nand_write_page_raw; 2663 + chip->ecc.read_oob = cadence_nand_read_oob; 2664 + chip->ecc.write_oob = cadence_nand_write_oob; 2665 + chip->ecc.read_oob_raw = cadence_nand_read_oob_raw; 2666 + chip->ecc.write_oob_raw = cadence_nand_write_oob_raw; 2667 + 2668 + if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size) 2669 + cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize; 2670 + 2671 + /* Is 32-bit DMA supported? */ 2672 + ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32)); 2673 + if (ret) { 2674 + dev_err(cdns_ctrl->dev, "no usable DMA configuration\n"); 2675 + return ret; 2676 + } 2677 + 2678 + mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops); 2679 + 2680 + return 0; 2681 + } 2682 + 2683 + static const struct nand_controller_ops cadence_nand_controller_ops = { 2684 + .attach_chip = cadence_nand_attach_chip, 2685 + .exec_op = cadence_nand_exec_op, 2686 + .setup_data_interface = cadence_nand_setup_data_interface, 2687 + }; 2688 + 2689 + static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, 2690 + struct device_node *np) 2691 + { 2692 + struct cdns_nand_chip *cdns_chip; 2693 + struct mtd_info *mtd; 2694 + struct nand_chip *chip; 2695 + int nsels, ret, i; 2696 + u32 cs; 2697 + 2698 + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); 2699 + if (nsels <= 0) { 2700 + dev_err(cdns_ctrl->dev, "missing/invalid reg property\n"); 2701 + return -EINVAL; 2702 + } 2703 + 2704 + /* Allocate the nand chip structure. */ 2705 + cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) + 2706 + (nsels * sizeof(u8)), 2707 + GFP_KERNEL); 2708 + if (!cdns_chip) { 2709 + dev_err(cdns_ctrl->dev, "could not allocate chip structure\n"); 2710 + return -ENOMEM; 2711 + } 2712 + 2713 + cdns_chip->nsels = nsels; 2714 + 2715 + for (i = 0; i < nsels; i++) { 2716 + /* Retrieve CS id. 
*/ 2717 + ret = of_property_read_u32_index(np, "reg", i, &cs); 2718 + if (ret) { 2719 + dev_err(cdns_ctrl->dev, 2720 + "could not retrieve reg property: %d\n", 2721 + ret); 2722 + return ret; 2723 + } 2724 + 2725 + if (cs >= cdns_ctrl->caps2.max_banks) { 2726 + dev_err(cdns_ctrl->dev, 2727 + "invalid reg value: %u (max CS = %d)\n", 2728 + cs, cdns_ctrl->caps2.max_banks); 2729 + return -EINVAL; 2730 + } 2731 + 2732 + if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) { 2733 + dev_err(cdns_ctrl->dev, 2734 + "CS %d already assigned\n", cs); 2735 + return -EINVAL; 2736 + } 2737 + 2738 + cdns_chip->cs[i] = cs; 2739 + } 2740 + 2741 + chip = &cdns_chip->chip; 2742 + chip->controller = &cdns_ctrl->controller; 2743 + nand_set_flash_node(chip, np); 2744 + 2745 + mtd = nand_to_mtd(chip); 2746 + mtd->dev.parent = cdns_ctrl->dev; 2747 + 2748 + /* 2749 + * Default to HW ECC engine mode. If the nand-ecc-mode property is given 2750 + * in the DT node, this entry will be overwritten in nand_scan_ident(). 2751 + */ 2752 + chip->ecc.mode = NAND_ECC_HW; 2753 + 2754 + ret = nand_scan(chip, cdns_chip->nsels); 2755 + if (ret) { 2756 + dev_err(cdns_ctrl->dev, "could not scan the nand chip\n"); 2757 + return ret; 2758 + } 2759 + 2760 + ret = mtd_device_register(mtd, NULL, 0); 2761 + if (ret) { 2762 + dev_err(cdns_ctrl->dev, 2763 + "failed to register mtd device: %d\n", ret); 2764 + nand_cleanup(chip); 2765 + return ret; 2766 + } 2767 + 2768 + list_add_tail(&cdns_chip->node, &cdns_ctrl->chips); 2769 + 2770 + return 0; 2771 + } 2772 + 2773 + static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) 2774 + { 2775 + struct cdns_nand_chip *entry, *temp; 2776 + 2777 + list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) { 2778 + nand_release(&entry->chip); 2779 + list_del(&entry->node); 2780 + } 2781 + } 2782 + 2783 + static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl) 2784 + { 2785 + struct device_node *np = cdns_ctrl->dev->of_node; 2786 + struct device_node *nand_np; 2787 + int max_cs = cdns_ctrl->caps2.max_banks; 2788 + int nchips, ret; 2789 + 2790 + nchips = of_get_child_count(np); 2791 + 2792 + if (nchips > max_cs) { 2793 + dev_err(cdns_ctrl->dev, 2794 + "too many NAND chips: %d (max = %d CS)\n", 2795 + nchips, max_cs); 2796 + return -EINVAL; 2797 + } 2798 + 2799 + for_each_child_of_node(np, nand_np) { 2800 + ret = cadence_nand_chip_init(cdns_ctrl, nand_np); 2801 + if (ret) { 2802 + of_node_put(nand_np); 2803 + cadence_nand_chips_cleanup(cdns_ctrl); 2804 + return ret; 2805 + } 2806 + } 2807 + 2808 + return 0; 2809 + } 2810 + 2811 + static void 2812 + cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) 2813 + { 2814 + /* Disable interrupts. 
*/ 2815 + writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE); 2816 + } 2817 + 2818 + static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) 2819 + { 2820 + dma_cap_mask_t mask; 2821 + int ret; 2822 + 2823 + cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, 2824 + sizeof(*cdns_ctrl->cdma_desc), 2825 + &cdns_ctrl->dma_cdma_desc, 2826 + GFP_KERNEL); 2827 + if (!cdns_ctrl->dma_cdma_desc) 2828 + return -ENOMEM; 2829 + 2830 + cdns_ctrl->buf_size = SZ_16K; 2831 + cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL); 2832 + if (!cdns_ctrl->buf) { 2833 + ret = -ENOMEM; 2834 + goto free_buf_desc; 2835 + } 2836 + 2837 + if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr, 2838 + IRQF_SHARED, "cadence-nand-controller", 2839 + cdns_ctrl)) { 2840 + dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n"); 2841 + ret = -ENODEV; 2842 + goto free_buf; 2843 + } 2844 + 2845 + spin_lock_init(&cdns_ctrl->irq_lock); 2846 + init_completion(&cdns_ctrl->complete); 2847 + 2848 + ret = cadence_nand_hw_init(cdns_ctrl); 2849 + if (ret) 2850 + goto disable_irq; 2851 + 2852 + dma_cap_zero(mask); 2853 + dma_cap_set(DMA_MEMCPY, mask); 2854 + 2855 + if (cdns_ctrl->caps1->has_dma) { 2856 + cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); 2857 + if (!cdns_ctrl->dmac) { 2858 + dev_err(cdns_ctrl->dev, 2859 + "Unable to get a DMA channel\n"); 2860 + ret = -EBUSY; 2861 + goto disable_irq; 2862 + } 2863 + } 2864 + 2865 + nand_controller_init(&cdns_ctrl->controller); 2866 + INIT_LIST_HEAD(&cdns_ctrl->chips); 2867 + 2868 + cdns_ctrl->controller.ops = &cadence_nand_controller_ops; 2869 + cdns_ctrl->curr_corr_str_idx = 0xFF; 2870 + 2871 + ret = cadence_nand_chips_init(cdns_ctrl); 2872 + if (ret) { 2873 + dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", 2874 + ret); 2875 + goto dma_release_chnl; 2876 + } 2877 + 2878 + kfree(cdns_ctrl->buf); 2879 + cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); 2880 + if (!cdns_ctrl->buf) { 2881 + ret = -ENOMEM; 2882 + goto dma_release_chnl; 2883 + } 2884 + 2885 + return 0; 2886 + 2887 + dma_release_chnl: 2888 + if (cdns_ctrl->dmac) 2889 + dma_release_channel(cdns_ctrl->dmac); 2890 + 2891 + disable_irq: 2892 + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); 2893 + 2894 + free_buf: 2895 + kfree(cdns_ctrl->buf); 2896 + 2897 + free_buf_desc: 2898 + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), 2899 + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); 2900 + 2901 + return ret; 2902 + } 2903 + 2904 + /* Driver exit point. 
*/ 2905 + static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) 2906 + { 2907 + cadence_nand_chips_cleanup(cdns_ctrl); 2908 + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); 2909 + kfree(cdns_ctrl->buf); 2910 + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), 2911 + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); 2912 + 2913 + if (cdns_ctrl->dmac) 2914 + dma_release_channel(cdns_ctrl->dmac); 2915 + } 2916 + 2917 + struct cadence_nand_dt { 2918 + struct cdns_nand_ctrl cdns_ctrl; 2919 + struct clk *clk; 2920 + }; 2921 + 2922 + static const struct cadence_nand_dt_devdata cadence_nand_default = { 2923 + .if_skew = 0, 2924 + .has_dma = 1, 2925 + }; 2926 + 2927 + static const struct of_device_id cadence_nand_dt_ids[] = { 2928 + { 2929 + .compatible = "cdns,hp-nfc", 2930 + .data = &cadence_nand_default 2931 + }, {} 2932 + }; 2933 + 2934 + MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids); 2935 + 2936 + static int cadence_nand_dt_probe(struct platform_device *ofdev) 2937 + { 2938 + struct resource *res; 2939 + struct cadence_nand_dt *dt; 2940 + struct cdns_nand_ctrl *cdns_ctrl; 2941 + int ret; 2942 + const struct of_device_id *of_id; 2943 + const struct cadence_nand_dt_devdata *devdata; 2944 + u32 val; 2945 + 2946 + of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev); 2947 + if (of_id) { 2948 + ofdev->id_entry = of_id->data; 2949 + devdata = of_id->data; 2950 + } else { 2951 + pr_err("Failed to find the right device id.\n"); 2952 + return -ENODEV; 2953 + } 2954 + 2955 + dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); 2956 + if (!dt) 2957 + return -ENOMEM; 2958 + 2959 + cdns_ctrl = &dt->cdns_ctrl; 2960 + cdns_ctrl->caps1 = devdata; 2961 + 2962 + cdns_ctrl->dev = &ofdev->dev; 2963 + cdns_ctrl->irq = platform_get_irq(ofdev, 0); 2964 + if (cdns_ctrl->irq < 0) 2965 + return cdns_ctrl->irq; 2966 + 2967 + dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq); 2968 + 2969 + cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0); 2970 + if (IS_ERR(cdns_ctrl->reg)) { 2971 + dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n"); 2972 + return PTR_ERR(cdns_ctrl->reg); 2973 + } 2974 + 2975 + res = platform_get_resource(ofdev, IORESOURCE_MEM, 1); 2976 + cdns_ctrl->io.dma = res->start; 2977 + cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res); 2978 + if (IS_ERR(cdns_ctrl->io.virt)) { 2979 + dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n"); 2980 + return PTR_ERR(cdns_ctrl->io.virt); 2981 + } 2982 + 2983 + dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); 2984 + if (IS_ERR(dt->clk)) 2985 + return PTR_ERR(dt->clk); 2986 + 2987 + cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk); 2988 + 2989 + ret = of_property_read_u32(ofdev->dev.of_node, 2990 + "cdns,board-delay-ps", &val); 2991 + if (ret) { 2992 + val = 4830; 2993 + dev_info(cdns_ctrl->dev, 2994 + "missing cdns,board-delay-ps property, %d was set\n", 2995 + val); 2996 + } 2997 + cdns_ctrl->board_delay = val; 2998 + 2999 + ret = cadence_nand_init(cdns_ctrl); 3000 + if (ret) 3001 + return ret; 3002 + 3003 + platform_set_drvdata(ofdev, dt); 3004 + return 0; 3005 + } 3006 + 3007 + static int cadence_nand_dt_remove(struct platform_device *ofdev) 3008 + { 3009 + struct cadence_nand_dt *dt = platform_get_drvdata(ofdev); 3010 + 3011 + cadence_nand_remove(&dt->cdns_ctrl); 3012 + 3013 + return 0; 3014 + } 3015 + 3016 + static struct platform_driver cadence_nand_dt_driver = { 3017 + .probe = cadence_nand_dt_probe, 3018 + .remove = cadence_nand_dt_remove, 3019 + .driver = { 3020 + .name = 
"cadence-nand-controller", 3021 + .of_match_table = cadence_nand_dt_ids, 3022 + }, 3023 + }; 3024 + 3025 + module_platform_driver(cadence_nand_dt_driver); 3026 + 3027 + MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>"); 3028 + MODULE_LICENSE("GPL v2"); 3029 + MODULE_DESCRIPTION("Driver for Cadence NAND flash controller"); 3030 +
+4 -51
drivers/mtd/nand/raw/denali_dt.c
··· 102 102 return denali_chip_init(denali, dchip); 103 103 } 104 104 105 - /* Backward compatibility for old platforms */ 106 - static int denali_dt_legacy_chip_init(struct denali_controller *denali) 107 - { 108 - struct denali_chip *dchip; 109 - int nsels, i; 110 - 111 - nsels = denali->nbanks; 112 - 113 - dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels), 114 - GFP_KERNEL); 115 - if (!dchip) 116 - return -ENOMEM; 117 - 118 - dchip->nsels = nsels; 119 - 120 - for (i = 0; i < nsels; i++) 121 - dchip->sels[i].bank = i; 122 - 123 - nand_set_flash_node(&dchip->chip, denali->dev->of_node); 124 - 125 - return denali_chip_init(denali, dchip); 126 - } 127 - 128 - /* 129 - * Check the DT binding. 130 - * The new binding expects chip subnodes in the controller node. 131 - * So, #address-cells = <1>; #size-cells = <0>; are required. 132 - * Check the #size-cells to distinguish the binding. 133 - */ 134 - static bool denali_dt_is_legacy_binding(struct device_node *np) 135 - { 136 - u32 cells; 137 - int ret; 138 - 139 - ret = of_property_read_u32(np, "#size-cells", &cells); 140 - if (ret) 141 - return true; 142 - 143 - return cells != 0; 144 - } 145 - 146 105 static int denali_dt_probe(struct platform_device *pdev) 147 106 { 148 107 struct device *dev = &pdev->dev; ··· 170 211 if (ret) 171 212 goto out_disable_clk_ecc; 172 213 173 - if (denali_dt_is_legacy_binding(dev->of_node)) { 174 - ret = denali_dt_legacy_chip_init(denali); 175 - if (ret) 214 + for_each_child_of_node(dev->of_node, np) { 215 + ret = denali_dt_chip_init(denali, np); 216 + if (ret) { 217 + of_node_put(np); 176 218 goto out_remove_denali; 177 - } else { 178 - for_each_child_of_node(dev->of_node, np) { 179 - ret = denali_dt_chip_init(denali, np); 180 - if (ret) { 181 - of_node_put(np); 182 - goto out_remove_denali; 183 - } 184 219 } 185 220 } 186 221
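With the legacy chip representation gone, denali_dt_probe() only walks chip subnodes, so the controller node must use #size-cells = <0> and describe every chip as a child. A minimal sketch of the expected shape (the compatible string, addresses and reg-names below are illustrative only; the Denali binding document remains authoritative):

	nand-controller@ff900000 {
		compatible = "altr,socfpga-denali-nand";
		reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
		reg-names = "denali_reg", "nand_data";
		#address-cells = <1>;
		#size-cells = <0>;

		nand@0 {
			reg = <0>;
		};
	};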
+1 -3
drivers/mtd/nand/raw/mxic_nand.c
··· 524 524 nand_chip->controller = &nfc->controller; 525 525 526 526 irq = platform_get_irq(pdev, 0); 527 - if (irq < 0) { 528 - dev_err(&pdev->dev, "failed to retrieve irq\n"); 527 + if (irq < 0) 529 528 return irq; 530 - } 531 529 532 530 mxic_nfc_hw_init(nfc); 533 531
+6 -2
drivers/mtd/nand/raw/nand_base.c
··· 292 292 struct mtd_info *mtd = nand_to_mtd(chip); 293 293 int last_page = ((mtd->erasesize - mtd->writesize) >> 294 294 chip->page_shift) & chip->pagemask; 295 + unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE 296 + | NAND_BBM_LASTPAGE; 295 297 298 + if (page == 0 && !(chip->options & bbm_flags)) 299 + return 0; 296 300 if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) 297 301 return 0; 298 - else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) 302 + if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) 299 303 return 1; 300 - else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) 304 + if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) 301 305 return last_page; 302 306 303 307 return -EINVAL;
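The nand_bbm_get_next_page() hunk above adds an early exit: a chip with none of the BBM page flags set now reports page 0 as its only marker page instead of failing with -EINVAL. A standalone sketch of the resulting lookup, iterated the way callers such as nand_block_bad() walk the marker pages (the flag values are invented for this illustration; in the kernel they are distinct chip->options bits):

	#include <stdio.h>

	#define NAND_BBM_FIRSTPAGE  0x1	/* illustrative values only */
	#define NAND_BBM_SECONDPAGE 0x2
	#define NAND_BBM_LASTPAGE   0x4

	/* mirrors the patched nand_bbm_get_next_page() logic */
	static int bbm_get_next_page(unsigned int options, int page, int last_page)
	{
		unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
					 | NAND_BBM_LASTPAGE;

		if (page == 0 && !(options & bbm_flags))
			return 0;
		if (page == 0 && options & NAND_BBM_FIRSTPAGE)
			return 0;
		if (page <= 1 && options & NAND_BBM_SECONDPAGE)
			return 1;
		if (page <= last_page && options & NAND_BBM_LASTPAGE)
			return last_page;

		return -1;	/* stands in for -EINVAL */
	}

	int main(void)
	{
		/* e.g. a chip marking its first two pages, 64 pages per block */
		unsigned int options = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
		int page;

		for (page = bbm_get_next_page(options, 0, 63);
		     page >= 0;
		     page = bbm_get_next_page(options, page + 1, 63))
			printf("check bad-block marker in page %d\n", page);

		return 0;
	}

This prints pages 0 and 1; with options = 0 it now prints page 0 once, where the old code would have returned -EINVAL immediately.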
+3 -1
drivers/mtd/nand/raw/nand_micron.c
··· 446 446 if (ret) 447 447 goto err_free_manuf_data; 448 448 449 + chip->options |= NAND_BBM_FIRSTPAGE; 450 + 449 451 if (mtd->writesize == 2048) 450 - chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; 452 + chip->options |= NAND_BBM_SECONDPAGE; 451 453 452 454 ondie = micron_supports_on_die_ecc(chip); 453 455