Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: add LPC32xx MLC NAND driver

This patch adds a driver for the MLC NAND controller of the LPC32xx SoC.

[dwmw2: 21st century pedantry]

Signed-off-by: Roland Stigge <stigge@antcom.de>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

Authored by Roland Stigge; committed by David Woodhouse.
70f7cb78 d5842ab7

+998
+50
Documentation/devicetree/bindings/mtd/lpc32xx-mlc.txt
NXP LPC32xx SoC NAND MLC controller

Required properties:
- compatible: "nxp,lpc3220-mlc"
- reg: Address and size of the controller
- interrupts: The NAND interrupt specification
- gpios: GPIO specification for NAND write protect

The following required properties are very controller specific. See the LPC32xx
User Manual 7.5.14 MLC NAND Timing Register (the values here are specified in
Hz, to make them independent of actual clock speed and to provide for good
accuracy):
- nxp,tcea-delay: TCEA_DELAY
- nxp,busy-delay: BUSY_DELAY
- nxp,nand-ta: NAND_TA
- nxp,rd-high: RD_HIGH
- nxp,rd-low: RD_LOW
- nxp,wr-high: WR_HIGH
- nxp,wr-low: WR_LOW

Optional subnodes:
- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt

Example:

	mlc: flash@200A8000 {
		compatible = "nxp,lpc3220-mlc";
		reg = <0x200A8000 0x11000>;
		interrupts = <11 0>;
		#address-cells = <1>;
		#size-cells = <1>;

		nxp,tcea-delay = <333333333>;
		nxp,busy-delay = <10000000>;
		nxp,nand-ta = <18181818>;
		nxp,rd-high = <31250000>;
		nxp,rd-low = <45454545>;
		nxp,wr-high = <40000000>;
		nxp,wr-low = <83333333>;
		gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */

		mtd0@00000000 {
			label = "boot";
			reg = <0x00000000 0x00064000>;
			read-only;
		};

		...

	};
+11
drivers/mtd/nand/Kconfig
··· 425 425 Please check the actual NAND chip connected and its support 426 426 by the SLC NAND controller. 427 427 428 + config MTD_NAND_MLC_LPC32XX 429 + tristate "NXP LPC32xx MLC Controller" 430 + depends on ARCH_LPC32XX 431 + help 432 + Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND 433 + controller. This is the default for the WORK92105 controller 434 + board. 435 + 436 + Please check the actual NAND chip connected and its support 437 + by the MLC NAND controller. 438 + 428 439 config MTD_NAND_CM_X270 429 440 tristate "Support for NAND Flash on CM-X270 modules" 430 441 depends on MACH_ARMCORE
+1
drivers/mtd/nand/Makefile
··· 41 41 obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o 42 42 obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o 43 43 obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o 44 + obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o 44 45 obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o 45 46 obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 46 47 obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+936
drivers/mtd/nand/lpc32xx_mlc.c
··· 1 + /* 2 + * Driver for NAND MLC Controller in LPC32xx 3 + * 4 + * Author: Roland Stigge <stigge@antcom.de> 5 + * 6 + * Copyright © 2011 WORK Microwave GmbH 7 + * Copyright © 2011, 2012 Roland Stigge 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or 12 + * (at your option) any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + * 19 + * 20 + * NAND Flash Controller Operation: 21 + * - Read: Auto Decode 22 + * - Write: Auto Encode 23 + * - Tested Page Sizes: 2048, 4096 24 + */ 25 + 26 + #include <linux/slab.h> 27 + #include <linux/module.h> 28 + #include <linux/platform_device.h> 29 + #include <linux/mtd/mtd.h> 30 + #include <linux/mtd/nand.h> 31 + #include <linux/mtd/partitions.h> 32 + #include <linux/clk.h> 33 + #include <linux/err.h> 34 + #include <linux/delay.h> 35 + #include <linux/completion.h> 36 + #include <linux/interrupt.h> 37 + #include <linux/of.h> 38 + #include <linux/of_mtd.h> 39 + #include <linux/of_gpio.h> 40 + #include <linux/amba/pl08x.h> 41 + #include <linux/io.h> 42 + #include <linux/mm.h> 43 + #include <linux/dma-mapping.h> 44 + #include <linux/dmaengine.h> 45 + #include <linux/mtd/nand_ecc.h> 46 + 47 + #define DRV_NAME "lpc32xx_mlc" 48 + 49 + /********************************************************************** 50 + * MLC NAND controller register offsets 51 + **********************************************************************/ 52 + 53 + #define MLC_BUFF(x) (x + 0x00000) 54 + #define MLC_DATA(x) (x + 0x08000) 55 + #define MLC_CMD(x) (x + 0x10000) 56 + #define MLC_ADDR(x) (x + 0x10004) 57 + #define MLC_ECC_ENC_REG(x) (x + 
0x10008) 58 + #define MLC_ECC_DEC_REG(x) (x + 0x1000C) 59 + #define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010) 60 + #define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014) 61 + #define MLC_RPR(x) (x + 0x10018) 62 + #define MLC_WPR(x) (x + 0x1001C) 63 + #define MLC_RUBP(x) (x + 0x10020) 64 + #define MLC_ROBP(x) (x + 0x10024) 65 + #define MLC_SW_WP_ADD_LOW(x) (x + 0x10028) 66 + #define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C) 67 + #define MLC_ICR(x) (x + 0x10030) 68 + #define MLC_TIME_REG(x) (x + 0x10034) 69 + #define MLC_IRQ_MR(x) (x + 0x10038) 70 + #define MLC_IRQ_SR(x) (x + 0x1003C) 71 + #define MLC_LOCK_PR(x) (x + 0x10044) 72 + #define MLC_ISR(x) (x + 0x10048) 73 + #define MLC_CEH(x) (x + 0x1004C) 74 + 75 + /********************************************************************** 76 + * MLC_CMD bit definitions 77 + **********************************************************************/ 78 + #define MLCCMD_RESET 0xFF 79 + 80 + /********************************************************************** 81 + * MLC_ICR bit definitions 82 + **********************************************************************/ 83 + #define MLCICR_WPROT (1 << 3) 84 + #define MLCICR_LARGEBLOCK (1 << 2) 85 + #define MLCICR_LONGADDR (1 << 1) 86 + #define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! 
*/ 87 + 88 + /********************************************************************** 89 + * MLC_TIME_REG bit definitions 90 + **********************************************************************/ 91 + #define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24) 92 + #define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19) 93 + #define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16) 94 + #define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12) 95 + #define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8) 96 + #define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4) 97 + #define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0) 98 + 99 + /********************************************************************** 100 + * MLC_IRQ_MR and MLC_IRQ_SR bit definitions 101 + **********************************************************************/ 102 + #define MLCIRQ_NAND_READY (1 << 5) 103 + #define MLCIRQ_CONTROLLER_READY (1 << 4) 104 + #define MLCIRQ_DECODE_FAILURE (1 << 3) 105 + #define MLCIRQ_DECODE_ERROR (1 << 2) 106 + #define MLCIRQ_ECC_READY (1 << 1) 107 + #define MLCIRQ_WRPROT_FAULT (1 << 0) 108 + 109 + /********************************************************************** 110 + * MLC_LOCK_PR bit definitions 111 + **********************************************************************/ 112 + #define MLCLOCKPR_MAGIC 0xA25E 113 + 114 + /********************************************************************** 115 + * MLC_ISR bit definitions 116 + **********************************************************************/ 117 + #define MLCISR_DECODER_FAILURE (1 << 6) 118 + #define MLCISR_ERRORS ((1 << 4) | (1 << 5)) 119 + #define MLCISR_ERRORS_DETECTED (1 << 3) 120 + #define MLCISR_ECC_READY (1 << 2) 121 + #define MLCISR_CONTROLLER_READY (1 << 1) 122 + #define MLCISR_NAND_READY (1 << 0) 123 + 124 + /********************************************************************** 125 + * MLC_CEH bit definitions 126 + **********************************************************************/ 127 + #define MLCCEH_NORMAL (1 << 0) 128 + 129 
+ struct lpc32xx_nand_cfg_mlc { 130 + uint32_t tcea_delay; 131 + uint32_t busy_delay; 132 + uint32_t nand_ta; 133 + uint32_t rd_high; 134 + uint32_t rd_low; 135 + uint32_t wr_high; 136 + uint32_t wr_low; 137 + int wp_gpio; 138 + struct mtd_partition *parts; 139 + unsigned num_parts; 140 + }; 141 + 142 + static struct nand_ecclayout lpc32xx_nand_oob = { 143 + .eccbytes = 40, 144 + .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 145 + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 146 + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 147 + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, 148 + .oobfree = { 149 + { .offset = 0, 150 + .length = 6, }, 151 + { .offset = 16, 152 + .length = 6, }, 153 + { .offset = 32, 154 + .length = 6, }, 155 + { .offset = 48, 156 + .length = 6, }, 157 + }, 158 + }; 159 + 160 + static struct nand_bbt_descr lpc32xx_nand_bbt = { 161 + .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB | 162 + NAND_BBT_WRITE, 163 + .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 }, 164 + }; 165 + 166 + static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = { 167 + .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB | 168 + NAND_BBT_WRITE, 169 + .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 }, 170 + }; 171 + 172 + struct lpc32xx_nand_host { 173 + struct nand_chip nand_chip; 174 + struct clk *clk; 175 + struct mtd_info mtd; 176 + void __iomem *io_base; 177 + int irq; 178 + struct lpc32xx_nand_cfg_mlc *ncfg; 179 + struct completion comp_nand; 180 + struct completion comp_controller; 181 + uint32_t llptr; 182 + /* 183 + * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer 184 + */ 185 + dma_addr_t oob_buf_phy; 186 + /* 187 + * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer 188 + */ 189 + uint8_t *oob_buf; 190 + /* Physical address of DMA base address */ 191 + dma_addr_t io_base_phy; 192 + 193 + struct completion comp_dma; 194 + struct dma_chan *dma_chan; 195 + struct dma_slave_config dma_slave_config; 196 + struct scatterlist sgl; 197 + 
uint8_t *dma_buf; 198 + uint8_t *dummy_buf; 199 + int mlcsubpages; /* number of 512bytes-subpages */ 200 + }; 201 + 202 + /* 203 + * Activate/Deactivate DMA Operation: 204 + * 205 + * Using the PL080 DMA Controller for transferring the 512 byte subpages 206 + * instead of doing readl() / writel() in a loop slows it down significantly. 207 + * Measurements via getnstimeofday() upon 512 byte subpage reads reveal: 208 + * 209 + * - readl() of 128 x 32 bits in a loop: ~20us 210 + * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us 211 + * - DMA read of 512 bytes (32 bit, no bursts): ~100us 212 + * 213 + * This applies to the transfer itself. In the DMA case: only the 214 + * wait_for_completion() (DMA setup _not_ included). 215 + * 216 + * Note that the 512 bytes subpage transfer is done directly from/to a 217 + * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a 218 + * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND 219 + * controller transferring data between its internal buffer to/from the NAND 220 + * chip.) 221 + * 222 + * Therefore, using the PL080 DMA is disabled by default, for now. 
223 + * 224 + */ 225 + static int use_dma; 226 + 227 + static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host) 228 + { 229 + uint32_t clkrate, tmp; 230 + 231 + /* Reset MLC controller */ 232 + writel(MLCCMD_RESET, MLC_CMD(host->io_base)); 233 + udelay(1000); 234 + 235 + /* Get base clock for MLC block */ 236 + clkrate = clk_get_rate(host->clk); 237 + if (clkrate == 0) 238 + clkrate = 104000000; 239 + 240 + /* Unlock MLC_ICR 241 + * (among others, will be locked again automatically) */ 242 + writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); 243 + 244 + /* Configure MLC Controller: Large Block, 5 Byte Address */ 245 + tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR; 246 + writel(tmp, MLC_ICR(host->io_base)); 247 + 248 + /* Unlock MLC_TIME_REG 249 + * (among others, will be locked again automatically) */ 250 + writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base)); 251 + 252 + /* Compute clock setup values, see LPC and NAND manual */ 253 + tmp = 0; 254 + tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1); 255 + tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1); 256 + tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1); 257 + tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1); 258 + tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low); 259 + tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1); 260 + tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low); 261 + writel(tmp, MLC_TIME_REG(host->io_base)); 262 + 263 + /* Enable IRQ for CONTROLLER_READY and NAND_READY */ 264 + writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY, 265 + MLC_IRQ_MR(host->io_base)); 266 + 267 + /* Normal nCE operation: nCE controlled by controller */ 268 + writel(MLCCEH_NORMAL, MLC_CEH(host->io_base)); 269 + } 270 + 271 + /* 272 + * Hardware specific access to control lines 273 + */ 274 + static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, 275 + unsigned int ctrl) 276 + { 277 + struct nand_chip *nand_chip = mtd->priv; 
278 + struct lpc32xx_nand_host *host = nand_chip->priv; 279 + 280 + if (cmd != NAND_CMD_NONE) { 281 + if (ctrl & NAND_CLE) 282 + writel(cmd, MLC_CMD(host->io_base)); 283 + else 284 + writel(cmd, MLC_ADDR(host->io_base)); 285 + } 286 + } 287 + 288 + /* 289 + * Read Device Ready (NAND device _and_ controller ready) 290 + */ 291 + static int lpc32xx_nand_device_ready(struct mtd_info *mtd) 292 + { 293 + struct nand_chip *nand_chip = mtd->priv; 294 + struct lpc32xx_nand_host *host = nand_chip->priv; 295 + 296 + if ((readb(MLC_ISR(host->io_base)) & 297 + (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) == 298 + (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) 299 + return 1; 300 + 301 + return 0; 302 + } 303 + 304 + static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host) 305 + { 306 + uint8_t sr; 307 + 308 + /* Clear interrupt flag by reading status */ 309 + sr = readb(MLC_IRQ_SR(host->io_base)); 310 + if (sr & MLCIRQ_NAND_READY) 311 + complete(&host->comp_nand); 312 + if (sr & MLCIRQ_CONTROLLER_READY) 313 + complete(&host->comp_controller); 314 + 315 + return IRQ_HANDLED; 316 + } 317 + 318 + static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip) 319 + { 320 + struct lpc32xx_nand_host *host = chip->priv; 321 + 322 + if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY) 323 + goto exit; 324 + 325 + wait_for_completion(&host->comp_nand); 326 + 327 + while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) { 328 + /* Seems to be delayed sometimes by controller */ 329 + dev_dbg(&mtd->dev, "Warning: NAND not ready.\n"); 330 + cpu_relax(); 331 + } 332 + 333 + exit: 334 + return NAND_STATUS_READY; 335 + } 336 + 337 + static int lpc32xx_waitfunc_controller(struct mtd_info *mtd, 338 + struct nand_chip *chip) 339 + { 340 + struct lpc32xx_nand_host *host = chip->priv; 341 + 342 + if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY) 343 + goto exit; 344 + 345 + wait_for_completion(&host->comp_controller); 346 + 347 + while 
(!(readb(MLC_ISR(host->io_base)) & 348 + MLCISR_CONTROLLER_READY)) { 349 + dev_dbg(&mtd->dev, "Warning: Controller not ready.\n"); 350 + cpu_relax(); 351 + } 352 + 353 + exit: 354 + return NAND_STATUS_READY; 355 + } 356 + 357 + static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) 358 + { 359 + lpc32xx_waitfunc_nand(mtd, chip); 360 + lpc32xx_waitfunc_controller(mtd, chip); 361 + 362 + return NAND_STATUS_READY; 363 + } 364 + 365 + /* 366 + * Enable NAND write protect 367 + */ 368 + static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host) 369 + { 370 + if (gpio_is_valid(host->ncfg->wp_gpio)) 371 + gpio_set_value(host->ncfg->wp_gpio, 0); 372 + } 373 + 374 + /* 375 + * Disable NAND write protect 376 + */ 377 + static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host) 378 + { 379 + if (gpio_is_valid(host->ncfg->wp_gpio)) 380 + gpio_set_value(host->ncfg->wp_gpio, 1); 381 + } 382 + 383 + static void lpc32xx_dma_complete_func(void *completion) 384 + { 385 + complete(completion); 386 + } 387 + 388 + static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len, 389 + enum dma_transfer_direction dir) 390 + { 391 + struct nand_chip *chip = mtd->priv; 392 + struct lpc32xx_nand_host *host = chip->priv; 393 + struct dma_async_tx_descriptor *desc; 394 + int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 395 + int res; 396 + 397 + sg_init_one(&host->sgl, mem, len); 398 + 399 + res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1, 400 + DMA_BIDIRECTIONAL); 401 + if (res != 1) { 402 + dev_err(mtd->dev.parent, "Failed to map sg list\n"); 403 + return -ENXIO; 404 + } 405 + desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir, 406 + flags); 407 + if (!desc) { 408 + dev_err(mtd->dev.parent, "Failed to prepare slave sg\n"); 409 + goto out1; 410 + } 411 + 412 + init_completion(&host->comp_dma); 413 + desc->callback = lpc32xx_dma_complete_func; 414 + desc->callback_param = &host->comp_dma; 415 + 416 + dmaengine_submit(desc); 417 + 
dma_async_issue_pending(host->dma_chan); 418 + 419 + wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000)); 420 + 421 + dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, 422 + DMA_BIDIRECTIONAL); 423 + return 0; 424 + out1: 425 + dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1, 426 + DMA_BIDIRECTIONAL); 427 + return -ENXIO; 428 + } 429 + 430 + static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip, 431 + uint8_t *buf, int oob_required, int page) 432 + { 433 + struct lpc32xx_nand_host *host = chip->priv; 434 + int i, j; 435 + uint8_t *oobbuf = chip->oob_poi; 436 + uint32_t mlc_isr; 437 + int res; 438 + uint8_t *dma_buf; 439 + bool dma_mapped; 440 + 441 + if ((void *)buf <= high_memory) { 442 + dma_buf = buf; 443 + dma_mapped = true; 444 + } else { 445 + dma_buf = host->dma_buf; 446 + dma_mapped = false; 447 + } 448 + 449 + /* Writing Command and Address */ 450 + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 451 + 452 + /* For all sub-pages */ 453 + for (i = 0; i < host->mlcsubpages; i++) { 454 + /* Start Auto Decode Command */ 455 + writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base)); 456 + 457 + /* Wait for Controller Ready */ 458 + lpc32xx_waitfunc_controller(mtd, chip); 459 + 460 + /* Check ECC Error status */ 461 + mlc_isr = readl(MLC_ISR(host->io_base)); 462 + if (mlc_isr & MLCISR_DECODER_FAILURE) { 463 + mtd->ecc_stats.failed++; 464 + dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__); 465 + } else if (mlc_isr & MLCISR_ERRORS_DETECTED) { 466 + mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1; 467 + } 468 + 469 + /* Read 512 + 16 Bytes */ 470 + if (use_dma) { 471 + res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, 472 + DMA_DEV_TO_MEM); 473 + if (res) 474 + return res; 475 + } else { 476 + for (j = 0; j < (512 >> 2); j++) { 477 + *((uint32_t *)(buf)) = 478 + readl(MLC_BUFF(host->io_base)); 479 + buf += 4; 480 + } 481 + } 482 + for (j = 0; j < (16 >> 2); j++) { 483 + *((uint32_t *)(oobbuf)) = 484 + 
readl(MLC_BUFF(host->io_base)); 485 + oobbuf += 4; 486 + } 487 + } 488 + 489 + if (use_dma && !dma_mapped) 490 + memcpy(buf, dma_buf, mtd->writesize); 491 + 492 + return 0; 493 + } 494 + 495 + static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd, 496 + struct nand_chip *chip, 497 + const uint8_t *buf, int oob_required) 498 + { 499 + struct lpc32xx_nand_host *host = chip->priv; 500 + const uint8_t *oobbuf = chip->oob_poi; 501 + uint8_t *dma_buf = (uint8_t *)buf; 502 + int res; 503 + int i, j; 504 + 505 + if (use_dma && (void *)buf >= high_memory) { 506 + dma_buf = host->dma_buf; 507 + memcpy(dma_buf, buf, mtd->writesize); 508 + } 509 + 510 + for (i = 0; i < host->mlcsubpages; i++) { 511 + /* Start Encode */ 512 + writeb(0x00, MLC_ECC_ENC_REG(host->io_base)); 513 + 514 + /* Write 512 + 6 Bytes to Buffer */ 515 + if (use_dma) { 516 + res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512, 517 + DMA_MEM_TO_DEV); 518 + if (res) 519 + return res; 520 + } else { 521 + for (j = 0; j < (512 >> 2); j++) { 522 + writel(*((uint32_t *)(buf)), 523 + MLC_BUFF(host->io_base)); 524 + buf += 4; 525 + } 526 + } 527 + writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base)); 528 + oobbuf += 4; 529 + writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base)); 530 + oobbuf += 12; 531 + 532 + /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */ 533 + writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base)); 534 + 535 + /* Wait for Controller Ready */ 536 + lpc32xx_waitfunc_controller(mtd, chip); 537 + } 538 + return 0; 539 + } 540 + 541 + static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip, 542 + const uint8_t *buf, int oob_required, int page, 543 + int cached, int raw) 544 + { 545 + int res; 546 + 547 + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 548 + res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required); 549 + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 550 + lpc32xx_waitfunc(mtd, chip); 551 + 552 + return res; 553 + } 554 + 555 + static int 
lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 556 + int page) 557 + { 558 + struct lpc32xx_nand_host *host = chip->priv; 559 + 560 + /* Read whole page - necessary with MLC controller! */ 561 + lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page); 562 + 563 + return 0; 564 + } 565 + 566 + static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 567 + int page) 568 + { 569 + /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */ 570 + return 0; 571 + } 572 + 573 + /* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */ 574 + static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode) 575 + { 576 + /* Always enabled! */ 577 + } 578 + 579 + static bool lpc32xx_dma_filter(struct dma_chan *chan, void *param) 580 + { 581 + struct pl08x_dma_chan *ch = 582 + container_of(chan, struct pl08x_dma_chan, chan); 583 + 584 + /* In LPC32xx's PL080 DMA wiring, the MLC NAND DMA signal is #12 */ 585 + if (ch->cd->min_signal == 12) 586 + return true; 587 + return false; 588 + } 589 + 590 + static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host) 591 + { 592 + struct mtd_info *mtd = &host->mtd; 593 + dma_cap_mask_t mask; 594 + 595 + dma_cap_zero(mask); 596 + dma_cap_set(DMA_SLAVE, mask); 597 + host->dma_chan = dma_request_channel(mask, lpc32xx_dma_filter, NULL); 598 + if (!host->dma_chan) { 599 + dev_err(mtd->dev.parent, "Failed to request DMA channel\n"); 600 + return -EBUSY; 601 + } 602 + 603 + /* 604 + * Set direction to a sensible value even if the dmaengine driver 605 + * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x 606 + * driver criticizes it as "alien transfer direction". 
607 + */ 608 + host->dma_slave_config.direction = DMA_DEV_TO_MEM; 609 + host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 610 + host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 611 + host->dma_slave_config.src_maxburst = 128; 612 + host->dma_slave_config.dst_maxburst = 128; 613 + /* DMA controller does flow control: */ 614 + host->dma_slave_config.device_fc = false; 615 + host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy); 616 + host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy); 617 + if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) { 618 + dev_err(mtd->dev.parent, "Failed to setup DMA slave\n"); 619 + goto out1; 620 + } 621 + 622 + return 0; 623 + out1: 624 + dma_release_channel(host->dma_chan); 625 + return -ENXIO; 626 + } 627 + 628 + #ifdef CONFIG_OF 629 + static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev) 630 + { 631 + struct lpc32xx_nand_cfg_mlc *pdata; 632 + struct device_node *np = dev->of_node; 633 + 634 + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 635 + if (!pdata) { 636 + dev_err(dev, "could not allocate memory for platform data\n"); 637 + return NULL; 638 + } 639 + 640 + of_property_read_u32(np, "nxp,tcea-delay", &pdata->tcea_delay); 641 + of_property_read_u32(np, "nxp,busy-delay", &pdata->busy_delay); 642 + of_property_read_u32(np, "nxp,nand-ta", &pdata->nand_ta); 643 + of_property_read_u32(np, "nxp,rd-high", &pdata->rd_high); 644 + of_property_read_u32(np, "nxp,rd-low", &pdata->rd_low); 645 + of_property_read_u32(np, "nxp,wr-high", &pdata->wr_high); 646 + of_property_read_u32(np, "nxp,wr-low", &pdata->wr_low); 647 + 648 + if (!pdata->tcea_delay || !pdata->busy_delay || !pdata->nand_ta || 649 + !pdata->rd_high || !pdata->rd_low || !pdata->wr_high || 650 + !pdata->wr_low) { 651 + dev_err(dev, "chip parameters not specified correctly\n"); 652 + return NULL; 653 + } 654 + 655 + pdata->wp_gpio = of_get_named_gpio(np, "gpios", 0); 656 + 657 + 
return pdata; 658 + } 659 + #else 660 + static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev) 661 + { 662 + return NULL; 663 + } 664 + #endif 665 + 666 + /* 667 + * Probe for NAND controller 668 + */ 669 + static int __devinit lpc32xx_nand_probe(struct platform_device *pdev) 670 + { 671 + struct lpc32xx_nand_host *host; 672 + struct mtd_info *mtd; 673 + struct nand_chip *nand_chip; 674 + struct resource *rc; 675 + int res; 676 + struct mtd_part_parser_data ppdata = {}; 677 + 678 + /* Allocate memory for the device structure (and zero it) */ 679 + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); 680 + if (!host) { 681 + dev_err(&pdev->dev, "failed to allocate device structure.\n"); 682 + return -ENOMEM; 683 + } 684 + 685 + rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); 686 + if (rc == NULL) { 687 + dev_err(&pdev->dev, "No memory resource found for device!\r\n"); 688 + return -ENXIO; 689 + } 690 + 691 + host->io_base = devm_request_and_ioremap(&pdev->dev, rc); 692 + if (host->io_base == NULL) { 693 + dev_err(&pdev->dev, "ioremap failed\n"); 694 + return -EIO; 695 + } 696 + host->io_base_phy = rc->start; 697 + 698 + mtd = &host->mtd; 699 + nand_chip = &host->nand_chip; 700 + if (pdev->dev.of_node) 701 + host->ncfg = lpc32xx_parse_dt(&pdev->dev); 702 + else 703 + host->ncfg = pdev->dev.platform_data; 704 + if (!host->ncfg) { 705 + dev_err(&pdev->dev, "Missing platform data\n"); 706 + return -ENOENT; 707 + } 708 + if (host->ncfg->wp_gpio == -EPROBE_DEFER) 709 + return -EPROBE_DEFER; 710 + if (gpio_is_valid(host->ncfg->wp_gpio) && 711 + gpio_request(host->ncfg->wp_gpio, "NAND WP")) { 712 + dev_err(&pdev->dev, "GPIO not available\n"); 713 + return -EBUSY; 714 + } 715 + lpc32xx_wp_disable(host); 716 + 717 + nand_chip->priv = host; /* link the private data structures */ 718 + mtd->priv = nand_chip; 719 + mtd->owner = THIS_MODULE; 720 + mtd->dev.parent = &pdev->dev; 721 + 722 + /* Get NAND clock */ 723 + host->clk = clk_get(&pdev->dev, 
NULL); 724 + if (IS_ERR(host->clk)) { 725 + dev_err(&pdev->dev, "Clock initialization failure\n"); 726 + res = -ENOENT; 727 + goto err_exit1; 728 + } 729 + clk_enable(host->clk); 730 + 731 + nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; 732 + nand_chip->dev_ready = lpc32xx_nand_device_ready; 733 + nand_chip->chip_delay = 25; /* us */ 734 + nand_chip->IO_ADDR_R = MLC_DATA(host->io_base); 735 + nand_chip->IO_ADDR_W = MLC_DATA(host->io_base); 736 + 737 + /* Init NAND controller */ 738 + lpc32xx_nand_setup(host); 739 + 740 + platform_set_drvdata(pdev, host); 741 + 742 + /* Initialize function pointers */ 743 + nand_chip->ecc.hwctl = lpc32xx_ecc_enable; 744 + nand_chip->ecc.read_page_raw = lpc32xx_read_page; 745 + nand_chip->ecc.read_page = lpc32xx_read_page; 746 + nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel; 747 + nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel; 748 + nand_chip->ecc.write_oob = lpc32xx_write_oob; 749 + nand_chip->ecc.read_oob = lpc32xx_read_oob; 750 + nand_chip->ecc.strength = 4; 751 + nand_chip->write_page = lpc32xx_write_page; 752 + nand_chip->waitfunc = lpc32xx_waitfunc; 753 + 754 + nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 755 + nand_chip->bbt_td = &lpc32xx_nand_bbt; 756 + nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror; 757 + 758 + /* bitflip_threshold's default is defined as ecc_strength anyway. 759 + * Unfortunately, it is set only later at add_mtd_device(). Meanwhile 760 + * being 0, it causes bad block table scanning errors in 761 + * nand_scan_tail(), so preparing it here. 
*/ 762 + mtd->bitflip_threshold = nand_chip->ecc.strength; 763 + 764 + if (use_dma) { 765 + res = lpc32xx_dma_setup(host); 766 + if (res) { 767 + res = -EIO; 768 + goto err_exit2; 769 + } 770 + } 771 + 772 + /* 773 + * Scan to find existance of the device and 774 + * Get the type of NAND device SMALL block or LARGE block 775 + */ 776 + if (nand_scan_ident(mtd, 1, NULL)) { 777 + res = -ENXIO; 778 + goto err_exit3; 779 + } 780 + 781 + host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); 782 + if (!host->dma_buf) { 783 + dev_err(&pdev->dev, "Error allocating dma_buf memory\n"); 784 + res = -ENOMEM; 785 + goto err_exit3; 786 + } 787 + 788 + host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL); 789 + if (!host->dummy_buf) { 790 + dev_err(&pdev->dev, "Error allocating dummy_buf memory\n"); 791 + res = -ENOMEM; 792 + goto err_exit3; 793 + } 794 + 795 + nand_chip->ecc.mode = NAND_ECC_HW; 796 + nand_chip->ecc.size = mtd->writesize; 797 + nand_chip->ecc.layout = &lpc32xx_nand_oob; 798 + host->mlcsubpages = mtd->writesize / 512; 799 + 800 + /* initially clear interrupt status */ 801 + readb(MLC_IRQ_SR(host->io_base)); 802 + 803 + init_completion(&host->comp_nand); 804 + init_completion(&host->comp_controller); 805 + 806 + host->irq = platform_get_irq(pdev, 0); 807 + if ((host->irq < 0) || (host->irq >= NR_IRQS)) { 808 + dev_err(&pdev->dev, "failed to get platform irq\n"); 809 + res = -EINVAL; 810 + goto err_exit3; 811 + } 812 + 813 + if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, 814 + IRQF_TRIGGER_HIGH, DRV_NAME, host)) { 815 + dev_err(&pdev->dev, "Error requesting NAND IRQ\n"); 816 + res = -ENXIO; 817 + goto err_exit3; 818 + } 819 + 820 + /* 821 + * Fills out all the uninitialized function pointers with the defaults 822 + * And scans for a bad block table if appropriate. 
823 + */ 824 + if (nand_scan_tail(mtd)) { 825 + res = -ENXIO; 826 + goto err_exit4; 827 + } 828 + 829 + mtd->name = DRV_NAME; 830 + 831 + ppdata.of_node = pdev->dev.of_node; 832 + res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts, 833 + host->ncfg->num_parts); 834 + if (!res) 835 + return res; 836 + 837 + nand_release(mtd); 838 + 839 + err_exit4: 840 + free_irq(host->irq, host); 841 + err_exit3: 842 + if (use_dma) 843 + dma_release_channel(host->dma_chan); 844 + err_exit2: 845 + clk_disable(host->clk); 846 + clk_put(host->clk); 847 + platform_set_drvdata(pdev, NULL); 848 + err_exit1: 849 + lpc32xx_wp_enable(host); 850 + gpio_free(host->ncfg->wp_gpio); 851 + 852 + return res; 853 + } 854 + 855 + /* 856 + * Remove NAND device 857 + */ 858 + static int __devexit lpc32xx_nand_remove(struct platform_device *pdev) 859 + { 860 + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 861 + struct mtd_info *mtd = &host->mtd; 862 + 863 + nand_release(mtd); 864 + free_irq(host->irq, host); 865 + if (use_dma) 866 + dma_release_channel(host->dma_chan); 867 + 868 + clk_disable(host->clk); 869 + clk_put(host->clk); 870 + platform_set_drvdata(pdev, NULL); 871 + 872 + lpc32xx_wp_enable(host); 873 + gpio_free(host->ncfg->wp_gpio); 874 + 875 + return 0; 876 + } 877 + 878 + #ifdef CONFIG_PM 879 + static int lpc32xx_nand_resume(struct platform_device *pdev) 880 + { 881 + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 882 + 883 + /* Re-enable NAND clock */ 884 + clk_enable(host->clk); 885 + 886 + /* Fresh init of NAND controller */ 887 + lpc32xx_nand_setup(host); 888 + 889 + /* Disable write protect */ 890 + lpc32xx_wp_disable(host); 891 + 892 + return 0; 893 + } 894 + 895 + static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm) 896 + { 897 + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); 898 + 899 + /* Enable write protect for safety */ 900 + lpc32xx_wp_enable(host); 901 + 902 + /* Disable clock */ 903 + 
clk_disable(host->clk); 904 + return 0; 905 + } 906 + 907 + #else 908 + #define lpc32xx_nand_resume NULL 909 + #define lpc32xx_nand_suspend NULL 910 + #endif 911 + 912 + #if defined(CONFIG_OF) 913 + static const struct of_device_id lpc32xx_nand_match[] = { 914 + { .compatible = "nxp,lpc3220-mlc" }, 915 + { /* sentinel */ }, 916 + }; 917 + MODULE_DEVICE_TABLE(of, lpc32xx_nand_match); 918 + #endif 919 + 920 + static struct platform_driver lpc32xx_nand_driver = { 921 + .probe = lpc32xx_nand_probe, 922 + .remove = __devexit_p(lpc32xx_nand_remove), 923 + .resume = lpc32xx_nand_resume, 924 + .suspend = lpc32xx_nand_suspend, 925 + .driver = { 926 + .name = DRV_NAME, 927 + .owner = THIS_MODULE, 928 + .of_match_table = of_match_ptr(lpc32xx_nand_match), 929 + }, 930 + }; 931 + 932 + module_platform_driver(lpc32xx_nand_driver); 933 + 934 + MODULE_LICENSE("GPL"); 935 + MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); 936 + MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");