Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'nand/for-4.15' of git://git.infradead.org/l2-mtd

From Boris:
"
Core changes:
* Add a flag to mark NANDs that require 3 address cycles to encode a
page address
* Set a default ECC/free layout when NAND_ECC_NONE is requested
* Fix a bug in panic_nand_write()

Driver changes:
* Another batch of cleanups for the denali driver
* Fix PM support in the atmel driver
* Remove support for platform data in the omap driver
* Fix subpage write in the omap driver
* Fix irq handling in the mtk driver
* Change link order of mtk_ecc and mtk_nand drivers to speed up boot
time
* Change log level of ECC error messages in the mxc driver
* Patch the pxa3xx driver to support Armada 8k platforms
* Add BAM DMA support to the qcom driver
* Convert gpio-nand to the GPIO desc API
* Fix ECC handling in the mt29f driver
"

+681 -488
+1 -1
Documentation/devicetree/bindings/mtd/denali-nand.txt
··· 29 29 #address-cells = <1>; 30 30 #size-cells = <1>; 31 31 compatible = "altr,socfpga-denali-nand"; 32 - reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; 32 + reg = <0xff900000 0x20>, <0xffb80000 0x1000>; 33 33 reg-names = "nand_data", "denali_reg"; 34 34 interrupts = <0 144 4>; 35 35 };
+4
Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
··· 5 5 - compatible: Should be set to one of the following: 6 6 marvell,pxa3xx-nand 7 7 marvell,armada370-nand 8 + marvell,armada-8k-nand 8 9 - reg: The register base for the controller 9 10 - interrupts: The interrupt to map 10 11 - #address-cells: Set to <1> if the node includes partitions 12 + - marvell,system-controller: Set to retrieve the syscon node that handles 13 + NAND controller related registers (only required 14 + with marvell,armada-8k-nand compatible). 11 15 12 16 Optional properties: 13 17
+13 -6
arch/arm/mach-pxa/cm-x255.c
··· 14 14 #include <linux/mtd/partitions.h> 15 15 #include <linux/mtd/physmap.h> 16 16 #include <linux/mtd/nand-gpio.h> 17 - 17 + #include <linux/gpio/machine.h> 18 18 #include <linux/spi/spi.h> 19 19 #include <linux/spi/pxa2xx_spi.h> 20 20 ··· 176 176 #endif 177 177 178 178 #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) 179 + 180 + static struct gpiod_lookup_table cmx255_nand_gpiod_table = { 181 + .dev_id = "gpio-nand", 182 + .table = { 183 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH), 184 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH), 185 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH), 186 + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH), 187 + }, 188 + }; 189 + 179 190 static struct resource cmx255_nand_resource[] = { 180 191 [0] = { 181 192 .start = PXA_CS1_PHYS, ··· 209 198 }; 210 199 211 200 static struct gpio_nand_platdata cmx255_nand_platdata = { 212 - .gpio_nce = GPIO_NAND_CS, 213 - .gpio_cle = GPIO_NAND_CLE, 214 - .gpio_ale = GPIO_NAND_ALE, 215 - .gpio_rdy = GPIO_NAND_RB, 216 - .gpio_nwp = -1, 217 201 .parts = cmx255_nand_parts, 218 202 .num_parts = ARRAY_SIZE(cmx255_nand_parts), 219 203 .chip_delay = 25, ··· 226 220 227 221 static void __init cmx255_init_nand(void) 228 222 { 223 + gpiod_add_lookup_table(&cmx255_nand_gpiod_table); 229 224 platform_device_register(&cmx255_nand); 230 225 } 231 226 #else
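
The hunk above is the board-file half of the gpio-nand conversion: instead of passing raw GPIO numbers through platform data, the board registers a lookup table that binds (gpio chip label, hardware offset) pairs to function names the driver will request. A minimal sketch of the shape, with a hypothetical offset:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_nand_gpiod_table = {
        .dev_id = "gpio-nand",          /* scopes the table to that device */
        .table = {
                /* (chip label, hw offset, function name, polarity) */
                GPIO_LOOKUP("gpio-pxa", 5, "nce", GPIO_ACTIVE_HIGH),
                { },                    /* sentinel terminates the table */
        },
};
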
+4 -1
drivers/mtd/nand/Kconfig
··· 317 317 tristate "NAND support on PXA3xx and Armada 370/XP" 318 318 depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU 319 319 help 320 + 320 321 This enables the driver for the NAND flash device found on 321 - PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). 322 + PXA3xx processors (NFCv1) and also on 32-bit Armada 323 + platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada 324 + platforms (7K, 8K) (NFCv2). 322 325 323 326 config MTD_NAND_SLC_LPC32XX 324 327 tristate "NXP LPC32xx SLC Controller"
+1 -1
drivers/mtd/nand/Makefile
··· 58 58 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o 59 59 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ 60 60 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o 61 - obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o 61 + obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o 62 62 63 63 nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o 64 64 nand-objs += nand_amd.o
+5 -2
drivers/mtd/nand/atmel/nand-controller.c
··· 718 718 nc->op.addrs[nc->op.naddrs++] = page; 719 719 nc->op.addrs[nc->op.naddrs++] = page >> 8; 720 720 721 - if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || 722 - (mtd->writesize <= 512 && chip->chipsize > SZ_32M)) 721 + if (chip->options & NAND_ROW_ADDR_3) 723 722 nc->op.addrs[nc->op.naddrs++] = page >> 16; 724 723 } 725 724 } ··· 2529 2530 struct atmel_nand_controller *nc = dev_get_drvdata(dev); 2530 2531 struct atmel_nand *nand; 2531 2532 2533 + if (nc->pmecc) 2534 + atmel_pmecc_reset(nc->pmecc); 2535 + 2532 2536 list_for_each_entry(nand, &nc->chips, node) { 2533 2537 int i; 2534 2538 ··· 2549 2547 .driver = { 2550 2548 .name = "atmel-nand-controller", 2551 2549 .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), 2550 + .pm = &atmel_nand_controller_pm_ops, 2552 2551 }, 2553 2552 .probe = atmel_nand_controller_probe, 2554 2553 .remove = atmel_nand_controller_remove,
+9 -8
drivers/mtd/nand/atmel/pmecc.c
··· 765 765 } 766 766 EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); 767 767 768 + void atmel_pmecc_reset(struct atmel_pmecc *pmecc) 769 + { 770 + writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 771 + writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 772 + } 773 + EXPORT_SYMBOL_GPL(atmel_pmecc_reset); 774 + 768 775 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) 769 776 { 770 777 struct atmel_pmecc *pmecc = user->pmecc; ··· 804 797 805 798 void atmel_pmecc_disable(struct atmel_pmecc_user *user) 806 799 { 807 - struct atmel_pmecc *pmecc = user->pmecc; 808 - 809 - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 810 - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 800 + atmel_pmecc_reset(user->pmecc); 811 801 mutex_unlock(&user->pmecc->lock); 812 802 } 813 803 EXPORT_SYMBOL_GPL(atmel_pmecc_disable); ··· 859 855 860 856 /* Disable all interrupts before registering the PMECC handler. */ 861 857 writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); 862 - 863 - /* Reset the ECC engine */ 864 - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); 865 - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); 858 + atmel_pmecc_reset(pmecc); 866 859 867 860 return pmecc; 868 861 }
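
Together these two atmel hunks implement the PM fix: the PMECC engine loses its configuration across suspend, so the resume path resets it via the new atmel_pmecc_reset() helper before the per-chip reconfiguration loop. How atmel_nand_controller_pm_ops itself is defined falls outside the hunks shown; a plausible resume-only wiring would be:

#include <linux/pm.h>

/* Assumption: system-sleep-only pm_ops with no suspend hook. */
static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops,
                         NULL, atmel_nand_controller_resume);
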
+1
drivers/mtd/nand/atmel/pmecc.h
··· 61 61 struct atmel_pmecc_user_req *req); 62 62 void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); 63 63 64 + void atmel_pmecc_reset(struct atmel_pmecc *pmecc); 64 65 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); 65 66 void atmel_pmecc_disable(struct atmel_pmecc_user *user); 66 67 int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
+1 -2
drivers/mtd/nand/au1550nd.c
··· 331 331 332 332 ctx->write_byte(mtd, (u8)(page_addr >> 8)); 333 333 334 - /* One more address cycle for devices > 32MiB */ 335 - if (this->chipsize > (32 << 20)) 334 + if (this->options & NAND_ROW_ADDR_3) 336 335 ctx->write_byte(mtd, 337 336 ((page_addr >> 16) & 0x0f)); 338 337 }
+126 -165
drivers/mtd/nand/denali.c
··· 10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program; if not, write to the Free Software Foundation, Inc., 16 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 - * 18 13 */ 19 - #include <linux/interrupt.h> 20 - #include <linux/delay.h> 14 + 15 + #include <linux/bitfield.h> 16 + #include <linux/completion.h> 21 17 #include <linux/dma-mapping.h> 22 - #include <linux/wait.h> 23 - #include <linux/mutex.h> 24 - #include <linux/mtd/mtd.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/io.h> 25 20 #include <linux/module.h> 21 + #include <linux/mtd/mtd.h> 22 + #include <linux/mtd/rawnand.h> 26 23 #include <linux/slab.h> 24 + #include <linux/spinlock.h> 27 25 28 26 #include "denali.h" 29 27 ··· 29 31 30 32 #define DENALI_NAND_NAME "denali-nand" 31 33 32 - /* Host Data/Command Interface */ 33 - #define DENALI_HOST_ADDR 0x00 34 - #define DENALI_HOST_DATA 0x10 34 + /* for Indexed Addressing */ 35 + #define DENALI_INDEXED_CTRL 0x00 36 + #define DENALI_INDEXED_DATA 0x10 35 37 36 38 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 37 39 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ ··· 59 61 */ 60 62 #define DENALI_CLK_X_MULT 6 61 63 62 - /* 63 - * this macro allows us to convert from an MTD structure to our own 64 - * device context (denali) structure. 65 - */ 66 64 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 67 65 { 68 66 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 69 67 } 70 68 71 - static void denali_host_write(struct denali_nand_info *denali, 72 - uint32_t addr, uint32_t data) 69 + /* 70 + * Direct Addressing - the slave address forms the control information (command 71 + * type, bank, block, and page address). The slave data is the actual data to 72 + * be transferred. This mode requires 28 bits of address region allocated. 73 + */ 74 + static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 73 75 { 74 - iowrite32(addr, denali->host + DENALI_HOST_ADDR); 75 - iowrite32(data, denali->host + DENALI_HOST_DATA); 76 + return ioread32(denali->host + addr); 77 + } 78 + 79 + static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 80 + u32 data) 81 + { 82 + iowrite32(data, denali->host + addr); 83 + } 84 + 85 + /* 86 + * Indexed Addressing - address translation module intervenes in passing the 87 + * control information. This mode reduces the required address range. The 88 + * control information and transferred data are latched by the registers in 89 + * the translation module. 90 + */ 91 + static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 92 + { 93 + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 94 + return ioread32(denali->host + DENALI_INDEXED_DATA); 95 + } 96 + 97 + static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 98 + u32 data) 99 + { 100 + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 101 + iowrite32(data, denali->host + DENALI_INDEXED_DATA); 76 102 } 77 103 78 104 /* 79 105 * Use the configuration feature register to determine the maximum number of 80 106 * banks that the hardware supports. 
81 107 */ 82 - static void detect_max_banks(struct denali_nand_info *denali) 108 + static void denali_detect_max_banks(struct denali_nand_info *denali) 83 109 { 84 110 uint32_t features = ioread32(denali->reg + FEATURES); 85 111 86 - denali->max_banks = 1 << (features & FEATURES__N_BANKS); 112 + denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 87 113 88 114 /* the encoding changed from rev 5.0 to 5.1 */ 89 115 if (denali->revision < 0x0501) ··· 211 189 msecs_to_jiffies(1000)); 212 190 if (!time_left) { 213 191 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 214 - denali->irq_mask); 192 + irq_mask); 215 193 return 0; 216 194 } 217 195 ··· 230 208 return irq_status; 231 209 } 232 210 233 - /* 234 - * This helper function setups the registers for ECC and whether or not 235 - * the spare area will be transferred. 236 - */ 237 - static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, 238 - bool transfer_spare) 239 - { 240 - int ecc_en_flag, transfer_spare_flag; 241 - 242 - /* set ECC, transfer spare bits if needed */ 243 - ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; 244 - transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; 245 - 246 - /* Enable spare area/ECC per user's request. */ 247 - iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE); 248 - iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG); 249 - } 250 - 251 211 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 252 212 { 253 213 struct denali_nand_info *denali = mtd_to_denali(mtd); 214 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 254 215 int i; 255 216 256 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 257 - denali->host + DENALI_HOST_ADDR); 258 - 259 217 for (i = 0; i < len; i++) 260 - buf[i] = ioread32(denali->host + DENALI_HOST_DATA); 218 + buf[i] = denali->host_read(denali, addr); 261 219 } 262 220 263 221 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 264 222 { 265 223 struct denali_nand_info *denali = mtd_to_denali(mtd); 224 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 266 225 int i; 267 226 268 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 269 - denali->host + DENALI_HOST_ADDR); 270 - 271 227 for (i = 0; i < len; i++) 272 - iowrite32(buf[i], denali->host + DENALI_HOST_DATA); 228 + denali->host_write(denali, addr, buf[i]); 273 229 } 274 230 275 231 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 276 232 { 277 233 struct denali_nand_info *denali = mtd_to_denali(mtd); 234 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 278 235 uint16_t *buf16 = (uint16_t *)buf; 279 236 int i; 280 237 281 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 282 - denali->host + DENALI_HOST_ADDR); 283 - 284 238 for (i = 0; i < len / 2; i++) 285 - buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); 239 + buf16[i] = denali->host_read(denali, addr); 286 240 } 287 241 288 242 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, 289 243 int len) 290 244 { 291 245 struct denali_nand_info *denali = mtd_to_denali(mtd); 246 + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 292 247 const uint16_t *buf16 = (const uint16_t *)buf; 293 248 int i; 294 249 295 - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), 296 - denali->host + DENALI_HOST_ADDR); 297 - 298 250 for (i = 0; i < len / 2; i++) 299 - iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); 251 + denali->host_write(denali, addr, buf16[i]); 300 252 } 301 253 302 254 static uint8_t 
denali_read_byte(struct mtd_info *mtd) ··· 315 319 if (ctrl & NAND_CTRL_CHANGE) 316 320 denali_reset_irq(denali); 317 321 318 - denali_host_write(denali, DENALI_BANK(denali) | type, dat); 322 + denali->host_write(denali, DENALI_BANK(denali) | type, dat); 319 323 } 320 324 321 325 static int denali_dev_ready(struct mtd_info *mtd) ··· 385 389 return 0; 386 390 } 387 391 388 - max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; 392 + max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 389 393 390 394 /* 391 395 * The register holds the maximum of per-sector corrected bitflips. ··· 397 401 398 402 return max_bitflips; 399 403 } 400 - 401 - #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) 402 - #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) 403 - #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) 404 - #define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE) 405 - #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) 406 - #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 407 404 408 405 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 409 406 struct denali_nand_info *denali, ··· 415 426 416 427 do { 417 428 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 418 - err_sector = ECC_SECTOR(err_addr); 419 - err_byte = ECC_BYTE(err_addr); 429 + err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 430 + err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 420 431 421 432 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 422 - err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); 423 - err_device = ECC_ERR_DEVICE(err_cor_info); 433 + err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 434 + err_cor_info); 435 + err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 436 + err_cor_info); 424 437 425 438 /* reset the bitflip counter when crossing ECC sector */ 426 439 if (err_sector != prev_sector) 427 440 bitflips = 0; 428 441 429 - if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { 442 + if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 430 443 /* 431 444 * Check later if this is a real ECC error, or 432 445 * an erased sector. ··· 458 467 } 459 468 460 469 prev_sector = err_sector; 461 - } while (!ECC_LAST_ERR(err_cor_info)); 470 + } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 462 471 463 472 /* 464 - * Once handle all ecc errors, controller will trigger a 465 - * ECC_TRANSACTION_DONE interrupt, so here just wait for 466 - * a while for this interrupt 473 + * Once handle all ECC errors, controller will trigger an 474 + * ECC_TRANSACTION_DONE interrupt. 467 475 */ 468 476 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 469 477 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 470 478 return -EIO; 471 479 472 480 return max_bitflips; 473 - } 474 - 475 - /* programs the controller to either enable/disable DMA transfers */ 476 - static void denali_enable_dma(struct denali_nand_info *denali, bool en) 477 - { 478 - iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE); 479 - ioread32(denali->reg + DMA_ENABLE); 480 481 } 481 482 482 483 static void denali_setup_dma64(struct denali_nand_info *denali, ··· 485 502 * 1. setup transfer type, interrupt when complete, 486 503 * burst len = 64 bytes, the number of pages 487 504 */ 488 - denali_host_write(denali, mode, 489 - 0x01002000 | (64 << 16) | (write << 8) | page_count); 505 + denali->host_write(denali, mode, 506 + 0x01002000 | (64 << 16) | (write << 8) | page_count); 490 507 491 508 /* 2. 
set memory low address */ 492 - denali_host_write(denali, mode, dma_addr); 509 + denali->host_write(denali, mode, lower_32_bits(dma_addr)); 493 510 494 511 /* 3. set memory high address */ 495 - denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); 512 + denali->host_write(denali, mode, upper_32_bits(dma_addr)); 496 513 } 497 514 498 515 static void denali_setup_dma32(struct denali_nand_info *denali, ··· 506 523 /* DMA is a four step process */ 507 524 508 525 /* 1. setup transfer type and # of pages */ 509 - denali_host_write(denali, mode | page, 510 - 0x2000 | (write << 8) | page_count); 526 + denali->host_write(denali, mode | page, 527 + 0x2000 | (write << 8) | page_count); 511 528 512 529 /* 2. set memory high address bits 23:8 */ 513 - denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 530 + denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 514 531 515 532 /* 3. set memory low address bits 23:8 */ 516 - denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 533 + denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 517 534 518 535 /* 4. interrupt when complete, burst len = 64 bytes */ 519 - denali_host_write(denali, mode | 0x14000, 0x2400); 520 - } 521 - 522 - static void denali_setup_dma(struct denali_nand_info *denali, 523 - dma_addr_t dma_addr, int page, int write) 524 - { 525 - if (denali->caps & DENALI_CAP_DMA_64BIT) 526 - denali_setup_dma64(denali, dma_addr, page, write); 527 - else 528 - denali_setup_dma32(denali, dma_addr, page, write); 536 + denali->host_write(denali, mode | 0x14000, 0x2400); 529 537 } 530 538 531 539 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 532 540 size_t size, int page, int raw) 533 541 { 534 - uint32_t addr = DENALI_BANK(denali) | page; 542 + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 535 543 uint32_t *buf32 = (uint32_t *)buf; 536 544 uint32_t irq_status, ecc_err_mask; 537 545 int i; ··· 534 560 535 561 denali_reset_irq(denali); 536 562 537 - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); 538 563 for (i = 0; i < size / 4; i++) 539 - *buf32++ = ioread32(denali->host + DENALI_HOST_DATA); 564 + *buf32++ = denali->host_read(denali, addr); 540 565 541 566 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 542 567 if (!(irq_status & INTR__PAGE_XFER_INC)) ··· 550 577 static int denali_pio_write(struct denali_nand_info *denali, 551 578 const void *buf, size_t size, int page, int raw) 552 579 { 553 - uint32_t addr = DENALI_BANK(denali) | page; 580 + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 554 581 const uint32_t *buf32 = (uint32_t *)buf; 555 582 uint32_t irq_status; 556 583 int i; 557 584 558 585 denali_reset_irq(denali); 559 586 560 - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); 561 587 for (i = 0; i < size / 4; i++) 562 - iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); 588 + denali->host_write(denali, addr, *buf32++); 563 589 564 590 irq_status = denali_wait_for_irq(denali, 565 591 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); ··· 607 635 ecc_err_mask = INTR__ECC_ERR; 608 636 } 609 637 610 - denali_enable_dma(denali, true); 638 + iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 611 639 612 640 denali_reset_irq(denali); 613 - denali_setup_dma(denali, dma_addr, page, write); 641 + denali->setup_dma(denali, dma_addr, page, write); 614 642 615 - /* wait for operation to complete */ 616 643 irq_status = denali_wait_for_irq(denali, irq_mask); 617 644 if (!(irq_status & 
INTR__DMA_CMD_COMP)) 618 645 ret = -EIO; 619 646 else if (irq_status & ecc_err_mask) 620 647 ret = -EBADMSG; 621 648 622 - denali_enable_dma(denali, false); 649 + iowrite32(0, denali->reg + DMA_ENABLE); 650 + 623 651 dma_unmap_single(denali->dev, dma_addr, size, dir); 624 652 625 653 if (irq_status & INTR__ERASED_PAGE) ··· 631 659 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 632 660 size_t size, int page, int raw, int write) 633 661 { 634 - setup_ecc_for_xfer(denali, !raw, raw); 662 + iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 663 + iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, 664 + denali->reg + TRANSFER_SPARE_REG); 635 665 636 666 if (denali->dma_avail) 637 667 return denali_dma_xfer(denali, buf, size, page, raw, write); ··· 944 970 945 971 denali_reset_irq(denali); 946 972 947 - denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 948 - DENALI_ERASE); 973 + denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 974 + DENALI_ERASE); 949 975 950 976 /* wait for erase to complete or failure to occur */ 951 977 irq_status = denali_wait_for_irq(denali, ··· 983 1009 984 1010 tmp = ioread32(denali->reg + ACC_CLKS); 985 1011 tmp &= ~ACC_CLKS__VALUE; 986 - tmp |= acc_clks; 1012 + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 987 1013 iowrite32(tmp, denali->reg + ACC_CLKS); 988 1014 989 1015 /* tRWH -> RE_2_WE */ ··· 992 1018 993 1019 tmp = ioread32(denali->reg + RE_2_WE); 994 1020 tmp &= ~RE_2_WE__VALUE; 995 - tmp |= re_2_we; 1021 + tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 996 1022 iowrite32(tmp, denali->reg + RE_2_WE); 997 1023 998 1024 /* tRHZ -> RE_2_RE */ ··· 1001 1027 1002 1028 tmp = ioread32(denali->reg + RE_2_RE); 1003 1029 tmp &= ~RE_2_RE__VALUE; 1004 - tmp |= re_2_re; 1030 + tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 1005 1031 iowrite32(tmp, denali->reg + RE_2_RE); 1006 1032 1007 - /* tWHR -> WE_2_RE */ 1008 - we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); 1033 + /* 1034 + * tCCS, tWHR -> WE_2_RE 1035 + * 1036 + * With WE_2_RE properly set, the Denali controller automatically takes 1037 + * care of the delay; the driver need not set NAND_WAIT_TCCS. 
1038 + */ 1039 + we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), 1040 + t_clk); 1009 1041 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 1010 1042 1011 1043 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 1012 1044 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 1013 - tmp |= we_2_re; 1045 + tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 1014 1046 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 1015 1047 1016 1048 /* tADL -> ADDR_2_DATA */ ··· 1030 1050 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 1031 1051 1032 1052 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 1033 - tmp &= ~addr_2_data_mask; 1034 - tmp |= addr_2_data; 1053 + tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1054 + tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 1035 1055 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 1036 1056 1037 1057 /* tREH, tWH -> RDWR_EN_HI_CNT */ ··· 1041 1061 1042 1062 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 1043 1063 tmp &= ~RDWR_EN_HI_CNT__VALUE; 1044 - tmp |= rdwr_en_hi; 1064 + tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 1045 1065 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 1046 1066 1047 1067 /* tRP, tWP -> RDWR_EN_LO_CNT */ ··· 1055 1075 1056 1076 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1057 1077 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1058 - tmp |= rdwr_en_lo; 1078 + tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1059 1079 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1060 1080 1061 1081 /* tCS, tCEA -> CS_SETUP_CNT */ ··· 1066 1086 1067 1087 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1068 1088 tmp &= ~CS_SETUP_CNT__VALUE; 1069 - tmp |= cs_setup; 1089 + tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1070 1090 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1071 1091 1072 1092 return 0; ··· 1111 1131 * if this value is 0, just let it be. 
1112 1132 */ 1113 1133 denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); 1114 - detect_max_banks(denali); 1134 + denali_detect_max_banks(denali); 1115 1135 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1116 1136 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1117 1137 1118 1138 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1119 - 1120 - /* Should set value for these registers when init */ 1121 - iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES); 1122 - iowrite32(1, denali->reg + ECC_ENABLE); 1123 1139 } 1124 1140 1125 1141 int denali_calc_ecc_bytes(int step_size, int strength) ··· 1187 1211 .free = denali_ooblayout_free, 1188 1212 }; 1189 1213 1190 - /* initialize driver data structures */ 1191 - static void denali_drv_init(struct denali_nand_info *denali) 1192 - { 1193 - /* 1194 - * the completion object will be used to notify 1195 - * the callee that the interrupt is done 1196 - */ 1197 - init_completion(&denali->complete); 1198 - 1199 - /* 1200 - * the spinlock will be used to synchronize the ISR with any 1201 - * element that might be access shared data (interrupt status) 1202 - */ 1203 - spin_lock_init(&denali->irq_lock); 1204 - } 1205 - 1206 1214 static int denali_multidev_fixup(struct denali_nand_info *denali) 1207 1215 { 1208 1216 struct nand_chip *chip = &denali->nand; ··· 1242 1282 { 1243 1283 struct nand_chip *chip = &denali->nand; 1244 1284 struct mtd_info *mtd = nand_to_mtd(chip); 1285 + u32 features = ioread32(denali->reg + FEATURES); 1245 1286 int ret; 1246 1287 1247 1288 mtd->dev.parent = denali->dev; 1248 1289 denali_hw_init(denali); 1249 - denali_drv_init(denali); 1290 + 1291 + init_completion(&denali->complete); 1292 + spin_lock_init(&denali->irq_lock); 1250 1293 1251 1294 denali_clear_irq_all(denali); 1252 1295 1253 - /* Request IRQ after all the hardware initialization is finished */ 1254 1296 ret = devm_request_irq(denali->dev, denali->irq, denali_isr, 1255 1297 IRQF_SHARED, DENALI_NAND_NAME, denali); 1256 1298 if (ret) { ··· 1270 1308 if (!mtd->name) 1271 1309 mtd->name = "denali-nand"; 1272 1310 1273 - /* register the driver with the NAND core subsystem */ 1274 1311 chip->select_chip = denali_select_chip; 1275 1312 chip->read_byte = denali_read_byte; 1276 1313 chip->write_byte = denali_write_byte; ··· 1278 1317 chip->dev_ready = denali_dev_ready; 1279 1318 chip->waitfunc = denali_waitfunc; 1280 1319 1320 + if (features & FEATURES__INDEX_ADDR) { 1321 + denali->host_read = denali_indexed_read; 1322 + denali->host_write = denali_indexed_write; 1323 + } else { 1324 + denali->host_read = denali_direct_read; 1325 + denali->host_write = denali_direct_write; 1326 + } 1327 + 1281 1328 /* clk rate info is needed for setup_data_interface */ 1282 1329 if (denali->clk_x_rate) 1283 1330 chip->setup_data_interface = denali_setup_data_interface; 1284 1331 1285 - /* 1286 - * scan for NAND devices attached to the controller 1287 - * this is the first stage in a two step process to register 1288 - * with the nand subsystem 1289 - */ 1290 1332 ret = nand_scan_ident(mtd, denali->max_banks, NULL); 1291 1333 if (ret) 1292 1334 goto disable_irq; ··· 1311 1347 if (denali->dma_avail) { 1312 1348 chip->options |= NAND_USE_BOUNCE_BUFFER; 1313 1349 chip->buf_align = 16; 1350 + if (denali->caps & DENALI_CAP_DMA_64BIT) 1351 + denali->setup_dma = denali_setup_dma64; 1352 + else 1353 + denali->setup_dma = denali_setup_dma32; 1314 1354 } 1315 - 1316 - /* 1317 - * second stage of the NAND scan 1318 - * this stage requires information regarding ECC 
and 1319 - * bad block management. 1320 - */ 1321 1355 1322 1356 chip->bbt_options |= NAND_BBT_USE_FLASH; 1323 1357 chip->bbt_options |= NAND_BBT_NO_OOB; 1324 - 1325 1358 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 1326 - 1327 - /* no subpage writes on denali */ 1328 1359 chip->options |= NAND_NO_SUBPAGE_WRITE; 1329 1360 1330 1361 ret = denali_ecc_setup(mtd, chip, denali); ··· 1332 1373 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1333 1374 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1334 1375 1335 - iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), 1376 + iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1377 + FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1336 1378 denali->reg + ECC_CORRECTION); 1337 1379 iowrite32(mtd->erasesize / mtd->writesize, 1338 1380 denali->reg + PAGES_PER_BLOCK); 1339 1381 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1340 1382 denali->reg + DEVICE_WIDTH); 1383 + iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1384 + denali->reg + TWO_ROW_ADDR_CYCLES); 1341 1385 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1342 1386 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1343 1387 ··· 1403 1441 } 1404 1442 EXPORT_SYMBOL(denali_init); 1405 1443 1406 - /* driver exit point */ 1407 1444 void denali_remove(struct denali_nand_info *denali) 1408 1445 { 1409 1446 struct mtd_info *mtd = nand_to_mtd(&denali->nand);
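
A large share of the denali churn swaps hand-written shift/mask pairs for the <linux/bitfield.h> helpers, which derive the shift from the mask constant itself, so each register field is defined exactly once. A self-contained illustration using a mask of the same shape as ECC_ERROR_ADDRESS__SECTOR (bits 15:12):

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define EXAMPLE_FIELD   GENMASK(15, 12)

static u32 example_roundtrip(u32 reg)
{
        /* FIELD_GET: (reg & EXAMPLE_FIELD) >> 12, shift inferred from mask */
        u32 sector = FIELD_GET(EXAMPLE_FIELD, reg);

        /* FIELD_PREP: (sector << 12) & EXAMPLE_FIELD */
        return FIELD_PREP(EXAMPLE_FIELD, sector);
}
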
+20 -24
drivers/mtd/nand/denali.h
··· 10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program; if not, write to the Free Software Foundation, Inc., 16 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 - * 18 13 */ 19 14 20 15 #ifndef __DENALI_H__ 21 16 #define __DENALI_H__ 22 17 23 18 #include <linux/bitops.h> 19 + #include <linux/completion.h> 24 20 #include <linux/mtd/rawnand.h> 21 + #include <linux/spinlock_types.h> 22 + #include <linux/types.h> 25 23 26 24 #define DEVICE_RESET 0x0 27 25 #define DEVICE_RESET__BANK(bank) BIT(bank) ··· 109 111 #define ECC_CORRECTION 0x1b0 110 112 #define ECC_CORRECTION__VALUE GENMASK(4, 0) 111 113 #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) 112 - #define MAKE_ECC_CORRECTION(val, thresh) \ 113 - (((val) & (ECC_CORRECTION__VALUE)) | \ 114 - (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD))) 115 114 116 115 #define READ_MODE 0x1c0 117 116 #define READ_MODE__VALUE GENMASK(3, 0) ··· 250 255 251 256 #define ECC_ERROR_ADDRESS 0x630 252 257 #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) 253 - #define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) 258 + #define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12) 254 259 255 260 #define ERR_CORRECTION_INFO 0x640 256 - #define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) 257 - #define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) 258 - #define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) 259 - #define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) 261 + #define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0) 262 + #define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8) 263 + #define ERR_CORRECTION_INFO__UNCOR BIT(14) 264 + #define ERR_CORRECTION_INFO__LAST_ERR BIT(15) 260 265 261 266 #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) 262 267 #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) ··· 305 310 struct device *dev; 306 311 void __iomem *reg; /* Register Interface */ 307 312 void __iomem *host; /* Host Data/Command Interface */ 308 - 309 - /* elements used by ISR */ 310 313 struct completion complete; 311 - spinlock_t irq_lock; 312 - uint32_t irq_mask; 313 - uint32_t irq_status; 314 + spinlock_t irq_lock; /* protect irq_mask and irq_status */ 315 + u32 irq_mask; /* interrupts we are waiting for */ 316 + u32 irq_status; /* interrupts that have happened */ 314 317 int irq; 315 - 316 - void *buf; 318 + void *buf; /* for syndrome layout conversion */ 317 319 dma_addr_t dma_addr; 318 - int dma_avail; 320 + int dma_avail; /* can support DMA? */ 319 321 int devs_per_cs; /* devices connected in parallel */ 320 - int oob_skip_bytes; 322 + int oob_skip_bytes; /* number of bytes reserved for BBM */ 321 323 int max_banks; 322 - unsigned int revision; 323 - unsigned int caps; 324 + unsigned int revision; /* IP revision */ 325 + unsigned int caps; /* IP capability (or quirk) */ 324 326 const struct nand_ecc_caps *ecc_caps; 327 + u32 (*host_read)(struct denali_nand_info *denali, u32 addr); 328 + void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data); 329 + void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr, 330 + int page, int write); 325 331 }; 326 332 327 333 #define DENALI_CAP_HW_ECC_FIXUP BIT(0)
+2 -2
drivers/mtd/nand/denali_dt.c
··· 12 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 13 * more details. 14 14 */ 15 + 15 16 #include <linux/clk.h> 16 17 #include <linux/err.h> 17 18 #include <linux/io.h> 18 19 #include <linux/ioport.h> 19 20 #include <linux/kernel.h> 20 21 #include <linux/module.h> 21 - #include <linux/platform_device.h> 22 22 #include <linux/of.h> 23 23 #include <linux/of_device.h> 24 + #include <linux/platform_device.h> 24 25 25 26 #include "denali.h" 26 27 ··· 156 155 .of_match_table = denali_nand_dt_ids, 157 156 }, 158 157 }; 159 - 160 158 module_platform_driver(denali_dt_driver); 161 159 162 160 MODULE_LICENSE("GPL");
+3 -2
drivers/mtd/nand/denali_pci.c
··· 11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 12 * more details. 13 13 */ 14 + 15 + #include <linux/errno.h> 16 + #include <linux/io.h> 14 17 #include <linux/kernel.h> 15 18 #include <linux/module.h> 16 19 #include <linux/pci.h> ··· 109 106 return ret; 110 107 } 111 108 112 - /* driver exit point */ 113 109 static void denali_pci_remove(struct pci_dev *dev) 114 110 { 115 111 struct denali_nand_info *denali = pci_get_drvdata(dev); ··· 124 122 .probe = denali_pci_probe, 125 123 .remove = denali_pci_remove, 126 124 }; 127 - 128 125 module_pci_driver(denali_pci_driver);
+1 -2
drivers/mtd/nand/diskonchip.c
··· 705 705 if (page_addr != -1) { 706 706 WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); 707 707 WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); 708 - /* One more address cycle for higher density devices */ 709 - if (this->chipsize & 0x0c000000) { 708 + if (this->options & NAND_ROW_ADDR_3) { 710 709 WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); 711 710 printk("high density\n"); 712 711 }
+57 -55
drivers/mtd/nand/gpio.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/module.h> 25 25 #include <linux/platform_device.h> 26 - #include <linux/gpio.h> 26 + #include <linux/gpio/consumer.h> 27 27 #include <linux/io.h> 28 28 #include <linux/mtd/mtd.h> 29 29 #include <linux/mtd/rawnand.h> ··· 31 31 #include <linux/mtd/nand-gpio.h> 32 32 #include <linux/of.h> 33 33 #include <linux/of_address.h> 34 - #include <linux/of_gpio.h> 35 34 36 35 struct gpiomtd { 37 36 void __iomem *io_sync; 38 37 struct nand_chip nand_chip; 39 38 struct gpio_nand_platdata plat; 39 + struct gpio_desc *nce; /* Optional chip enable */ 40 + struct gpio_desc *cle; 41 + struct gpio_desc *ale; 42 + struct gpio_desc *rdy; 43 + struct gpio_desc *nwp; /* Optional write protection */ 40 44 }; 41 45 42 46 static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) ··· 82 78 gpio_nand_dosync(gpiomtd); 83 79 84 80 if (ctrl & NAND_CTRL_CHANGE) { 85 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) 86 - gpio_set_value(gpiomtd->plat.gpio_nce, 87 - !(ctrl & NAND_NCE)); 88 - gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); 89 - gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); 81 + if (gpiomtd->nce) 82 + gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE)); 83 + gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE)); 84 + gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE)); 90 85 gpio_nand_dosync(gpiomtd); 91 86 } 92 87 if (cmd == NAND_CMD_NONE) ··· 99 96 { 100 97 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); 101 98 102 - return gpio_get_value(gpiomtd->plat.gpio_rdy); 99 + return gpiod_get_value(gpiomtd->rdy); 103 100 } 104 101 105 102 #ifdef CONFIG_OF ··· 125 122 return -EINVAL; 126 123 } 127 124 } 128 - 129 - plat->gpio_rdy = of_get_gpio(dev->of_node, 0); 130 - plat->gpio_nce = of_get_gpio(dev->of_node, 1); 131 - plat->gpio_ale = of_get_gpio(dev->of_node, 2); 132 - plat->gpio_cle = of_get_gpio(dev->of_node, 3); 133 - plat->gpio_nwp = of_get_gpio(dev->of_node, 4); 134 125 135 126 if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) 136 127 plat->chip_delay = val; ··· 198 201 199 202 nand_release(nand_to_mtd(&gpiomtd->nand_chip)); 200 203 201 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 202 - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 203 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) 204 - gpio_set_value(gpiomtd->plat.gpio_nce, 1); 204 + /* Enable write protection and disable the chip */ 205 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 206 + gpiod_set_value(gpiomtd->nwp, 0); 207 + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) 208 + gpiod_set_value(gpiomtd->nce, 0); 205 209 206 210 return 0; 207 211 } ··· 213 215 struct nand_chip *chip; 214 216 struct mtd_info *mtd; 215 217 struct resource *res; 218 + struct device *dev = &pdev->dev; 216 219 int ret = 0; 217 220 218 - if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev)) 221 + if (!dev->of_node && !dev_get_platdata(dev)) 219 222 return -EINVAL; 220 223 221 - gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); 224 + gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL); 222 225 if (!gpiomtd) 223 226 return -ENOMEM; 224 227 225 228 chip = &gpiomtd->nand_chip; 226 229 227 230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 228 - chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); 231 + chip->IO_ADDR_R = devm_ioremap_resource(dev, res); 229 232 if (IS_ERR(chip->IO_ADDR_R)) 230 233 return PTR_ERR(chip->IO_ADDR_R); 231 234 232 235 res = gpio_nand_get_io_sync(pdev); 233 236 if (res) { 234 - gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res); 
237 + gpiomtd->io_sync = devm_ioremap_resource(dev, res); 235 238 if (IS_ERR(gpiomtd->io_sync)) 236 239 return PTR_ERR(gpiomtd->io_sync); 237 240 } 238 241 239 - ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat); 242 + ret = gpio_nand_get_config(dev, &gpiomtd->plat); 240 243 if (ret) 241 244 return ret; 242 245 243 - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { 244 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, 245 - "NAND NCE"); 246 - if (ret) 247 - return ret; 248 - gpio_direction_output(gpiomtd->plat.gpio_nce, 1); 246 + /* Just enable the chip */ 247 + gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH); 248 + if (IS_ERR(gpiomtd->nce)) 249 + return PTR_ERR(gpiomtd->nce); 250 + 251 + /* We disable write protection once we know probe() will succeed */ 252 + gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW); 253 + if (IS_ERR(gpiomtd->nwp)) { 254 + ret = PTR_ERR(gpiomtd->nwp); 255 + goto out_ce; 249 256 } 250 257 251 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { 252 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, 253 - "NAND NWP"); 254 - if (ret) 255 - return ret; 258 + gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW); 259 + if (IS_ERR(gpiomtd->nwp)) { 260 + ret = PTR_ERR(gpiomtd->nwp); 261 + goto out_ce; 256 262 } 257 263 258 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE"); 259 - if (ret) 260 - return ret; 261 - gpio_direction_output(gpiomtd->plat.gpio_ale, 0); 264 + gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW); 265 + if (IS_ERR(gpiomtd->cle)) { 266 + ret = PTR_ERR(gpiomtd->cle); 267 + goto out_ce; 268 + } 262 269 263 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE"); 264 - if (ret) 265 - return ret; 266 - gpio_direction_output(gpiomtd->plat.gpio_cle, 0); 267 - 268 - if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) { 269 - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy, 270 - "NAND RDY"); 271 - if (ret) 272 - return ret; 273 - gpio_direction_input(gpiomtd->plat.gpio_rdy); 270 + gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN); 271 + if (IS_ERR(gpiomtd->rdy)) { 272 + ret = PTR_ERR(gpiomtd->rdy); 273 + goto out_ce; 274 + } 275 + /* Using RDY pin */ 276 + if (gpiomtd->rdy) 274 277 chip->dev_ready = gpio_nand_devready; 275 - } 276 278 277 279 nand_set_flash_node(chip, pdev->dev.of_node); 278 280 chip->IO_ADDR_W = chip->IO_ADDR_R; ··· 283 285 chip->cmd_ctrl = gpio_nand_cmd_ctrl; 284 286 285 287 mtd = nand_to_mtd(chip); 286 - mtd->dev.parent = &pdev->dev; 288 + mtd->dev.parent = dev; 287 289 288 290 platform_set_drvdata(pdev, gpiomtd); 289 291 290 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 291 - gpio_direction_output(gpiomtd->plat.gpio_nwp, 1); 292 + /* Disable write protection, if wired up */ 293 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 294 + gpiod_direction_output(gpiomtd->nwp, 1); 292 295 293 296 ret = nand_scan(mtd, 1); 294 297 if (ret) ··· 304 305 return 0; 305 306 306 307 err_wp: 307 - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 308 - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 308 + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) 309 + gpiod_set_value(gpiomtd->nwp, 0); 310 + out_ce: 311 + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) 312 + gpiod_set_value(gpiomtd->nce, 0); 309 313 310 314 return ret; 311 315 }
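
The consumer half of the gpio-nand conversion requests descriptors by function name and lets the gpiod core handle polarity and optional lines; note in passing that the hunk above stores the "ale" descriptor into gpiomtd->nwp rather than gpiomtd->ale, a slip visible in the diff itself. The request pattern reduces to this sketch (struct gpiomtd as defined above):

#include <linux/gpio/consumer.h>

static int request_nand_gpios(struct device *dev, struct gpiomtd *gpiomtd)
{
        /* optional line: returns NULL (not an error) when absent */
        gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
        if (IS_ERR(gpiomtd->nce))
                return PTR_ERR(gpiomtd->nce);

        /* mandatory line: failure is fatal */
        gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
        if (IS_ERR(gpiomtd->cle))
                return PTR_ERR(gpiomtd->cle);

        return 0;
}
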
+1 -2
drivers/mtd/nand/hisi504_nand.c
··· 432 432 host->addr_value[0] |= (page_addr & 0xffff) 433 433 << (host->addr_cycle * 8); 434 434 host->addr_cycle += 2; 435 - /* One more address cycle for devices > 128MiB */ 436 - if (chip->chipsize > (128 << 20)) { 435 + if (chip->options & NAND_ROW_ADDR_3) { 437 436 host->addr_cycle += 1; 438 437 if (host->command == NAND_CMD_ERASE1) 439 438 host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+11 -2
drivers/mtd/nand/mtk_ecc.c
··· 115 115 op = ECC_DECODE; 116 116 dec = readw(ecc->regs + ECC_DECDONE); 117 117 if (dec & ecc->sectors) { 118 + /* 119 + * Clear decode IRQ status once again to ensure that 120 + * there will be no extra IRQ. 121 + */ 122 + readw(ecc->regs + ECC_DECIRQ_STA); 118 123 ecc->sectors = 0; 119 124 complete(&ecc->done); 120 125 } else { ··· 134 129 return IRQ_NONE; 135 130 } 136 131 } 137 - 138 - writel(0, ecc->regs + ECC_IRQ_REG(op)); 139 132 140 133 return IRQ_HANDLED; 141 134 } ··· 310 307 311 308 /* disable it */ 312 309 mtk_ecc_wait_idle(ecc, op); 310 + if (op == ECC_DECODE) 311 + /* 312 + * Clear decode IRQ status in case there is a timeout to wait 313 + * decode IRQ. 314 + */ 315 + readw(ecc->regs + ECC_DECIRQ_STA); 313 316 writew(0, ecc->regs + ECC_IRQ_REG(op)); 314 317 writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); 315 318
+9 -10
drivers/mtd/nand/mxc_nand.c
··· 415 415 * waits for completion. */ 416 416 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 417 417 { 418 - pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); 418 + dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); 419 419 420 420 writew(cmd, NFC_V1_V2_FLASH_CMD); 421 421 writew(NFC_CMD, NFC_V1_V2_CONFIG2); ··· 431 431 udelay(1); 432 432 } 433 433 if (max_retries < 0) 434 - pr_debug("%s: RESET failed\n", __func__); 434 + dev_dbg(host->dev, "%s: RESET failed\n", __func__); 435 435 } else { 436 436 /* Wait for operation to complete */ 437 437 wait_op_done(host, useirq); ··· 454 454 * a NAND command. */ 455 455 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 456 456 { 457 - pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); 457 + dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast); 458 458 459 459 writew(addr, NFC_V1_V2_FLASH_ADDR); 460 460 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); ··· 607 607 uint16_t ecc_status = get_ecc_status_v1(host); 608 608 609 609 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 610 - pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 610 + dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n"); 611 611 return -EBADMSG; 612 612 } 613 613 ··· 634 634 do { 635 635 err = ecc_stat & ecc_bit_mask; 636 636 if (err > err_limit) { 637 - printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); 637 + dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n"); 638 638 return -EBADMSG; 639 639 } else { 640 640 ret += err; ··· 642 642 ecc_stat >>= 4; 643 643 } while (--no_subpages); 644 644 645 - pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); 645 + dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret); 646 646 647 647 return ret; 648 648 } ··· 673 673 host->buf_start++; 674 674 } 675 675 676 - pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); 676 + dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); 677 677 return ret; 678 678 } 679 679 ··· 859 859 host->devtype_data->send_addr(host, 860 860 (page_addr >> 8) & 0xff, true); 861 861 } else { 862 - /* One more address cycle for higher density devices */ 863 - if (mtd->size >= 0x4000000) { 862 + if (nand_chip->options & NAND_ROW_ADDR_3) { 864 863 /* paddr_8 - paddr_15 */ 865 864 host->devtype_data->send_addr(host, 866 865 (page_addr >> 8) & 0xff, ··· 1211 1212 struct nand_chip *nand_chip = mtd_to_nand(mtd); 1212 1213 struct mxc_nand_host *host = nand_get_controller_data(nand_chip); 1213 1214 1214 - pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 1215 + dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 1215 1216 command, column, page_addr); 1216 1217 1217 1218 /* Reset command state information */
+26 -8
drivers/mtd/nand/nand_base.c
··· 115 115 struct nand_chip *chip = mtd_to_nand(mtd); 116 116 struct nand_ecc_ctrl *ecc = &chip->ecc; 117 117 118 - if (section) 118 + if (section || !ecc->total) 119 119 return -ERANGE; 120 120 121 121 oobregion->length = ecc->total; ··· 727 727 chip->cmd_ctrl(mtd, page_addr, ctrl); 728 728 ctrl &= ~NAND_CTRL_CHANGE; 729 729 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); 730 - /* One more address cycle for devices > 32MiB */ 731 - if (chip->chipsize > (32 << 20)) 730 + if (chip->options & NAND_ROW_ADDR_3) 732 731 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); 733 732 } 734 733 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); ··· 853 854 chip->cmd_ctrl(mtd, page_addr, ctrl); 854 855 chip->cmd_ctrl(mtd, page_addr >> 8, 855 856 NAND_NCE | NAND_ALE); 856 - /* One more address cycle for devices > 128MiB */ 857 - if (chip->chipsize > (128 << 20)) 857 + if (chip->options & NAND_ROW_ADDR_3) 858 858 chip->cmd_ctrl(mtd, page_addr >> 16, 859 859 NAND_NCE | NAND_ALE); 860 860 } ··· 1244 1246 1245 1247 return 0; 1246 1248 } 1249 + EXPORT_SYMBOL_GPL(nand_reset); 1247 1250 1248 1251 /** 1249 1252 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data ··· 2798 2799 size_t *retlen, const uint8_t *buf) 2799 2800 { 2800 2801 struct nand_chip *chip = mtd_to_nand(mtd); 2802 + int chipnr = (int)(to >> chip->chip_shift); 2801 2803 struct mtd_oob_ops ops; 2802 2804 int ret; 2803 2805 2804 - /* Wait for the device to get ready */ 2805 - panic_nand_wait(mtd, chip, 400); 2806 - 2807 2806 /* Grab the device */ 2808 2807 panic_nand_get_device(chip, mtd, FL_WRITING); 2808 + 2809 + chip->select_chip(mtd, chipnr); 2810 + 2811 + /* Wait for the device to get ready */ 2812 + panic_nand_wait(mtd, chip, 400); 2809 2813 2810 2814 memset(&ops, 0, sizeof(ops)); 2811 2815 ops.len = len; ··· 4001 3999 chip->chip_shift += 32 - 1; 4002 4000 } 4003 4001 4002 + if (chip->chip_shift - chip->page_shift > 16) 4003 + chip->options |= NAND_ROW_ADDR_3; 4004 + 4004 4005 chip->badblockbits = 8; 4005 4006 chip->erase = single_erase; 4006 4007 ··· 4705 4700 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); 4706 4701 break; 4707 4702 default: 4703 + /* 4704 + * Expose the whole OOB area to users if ECC_NONE 4705 + * is passed. We could do that for all kind of 4706 + * ->oobsize, but we must keep the old large/small 4707 + * page with ECC layout when ->oobsize <= 128 for 4708 + * compatibility reasons. 4709 + */ 4710 + if (ecc->mode == NAND_ECC_NONE) { 4711 + mtd_set_ooblayout(mtd, 4712 + &nand_ooblayout_lp_ops); 4713 + break; 4714 + } 4715 + 4708 4716 WARN(1, "No oob scheme defined for oobsize %d\n", 4709 4717 mtd->oobsize); 4710 4718 ret = -EINVAL;
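
The core now derives the third-row-address-cycle flag from geometry: chip_shift - page_shift is the number of bits needed to index a page, and anything above 16 cannot fit in two 8-bit address cycles. A worked example with a hypothetical but typical chip:

/*
 * 256 MiB device, 2 KiB pages:
 *   chip_shift = log2(256 MiB) = 28, page_shift = log2(2 KiB) = 11
 *   row-address bits = 28 - 11 = 17 > 16  =>  NAND_ROW_ADDR_3 is set
 * A 128 MiB device with 2 KiB pages gives 27 - 11 = 16: two cycles suffice.
 */
if (chip->chip_shift - chip->page_shift > 16)
        chip->options |= NAND_ROW_ADDR_3;
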
+1 -1
drivers/mtd/nand/nuc900_nand.c
··· 154 154 if (page_addr != -1) { 155 155 write_addr_reg(nand, page_addr); 156 156 157 - if (chip->chipsize > (128 << 20)) { 157 + if (chip->options & NAND_ROW_ADDR_3) { 158 158 write_addr_reg(nand, page_addr >> 8); 159 159 write_addr_reg(nand, page_addr >> 16 | ENDADDR); 160 160 } else {
+232 -145
drivers/mtd/nand/omap2.c
··· 1133 1133 0x97, 0x79, 0xe5, 0x24, 0xb5}; 1134 1134 1135 1135 /** 1136 - * omap_calculate_ecc_bch - Generate bytes of ECC bytes 1136 + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector 1137 1137 * @mtd: MTD device structure 1138 1138 * @dat: The pointer to data on which ecc is computed 1139 1139 * @ecc_code: The ecc_code buffer 1140 + * @i: The sector number (for a multi sector page) 1140 1141 * 1141 - * Support calculating of BCH4/8 ecc vectors for the page 1142 + * Support calculating of BCH4/8/16 ECC vectors for one sector 1143 + * within a page. Sector number is in @i. 1142 1144 */ 1143 - static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, 1144 - const u_char *dat, u_char *ecc_calc) 1145 + static int _omap_calculate_ecc_bch(struct mtd_info *mtd, 1146 + const u_char *dat, u_char *ecc_calc, int i) 1145 1147 { 1146 1148 struct omap_nand_info *info = mtd_to_omap(mtd); 1147 1149 int eccbytes = info->nand.ecc.bytes; 1148 1150 struct gpmc_nand_regs *gpmc_regs = &info->reg; 1149 1151 u8 *ecc_code; 1150 - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; 1152 + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; 1151 1153 u32 val; 1152 - int i, j; 1154 + int j; 1155 + 1156 + ecc_code = ecc_calc; 1157 + switch (info->ecc_opt) { 1158 + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: 1159 + case OMAP_ECC_BCH8_CODE_HW: 1160 + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1161 + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1162 + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); 1163 + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); 1164 + *ecc_code++ = (bch_val4 & 0xFF); 1165 + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); 1166 + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); 1167 + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); 1168 + *ecc_code++ = (bch_val3 & 0xFF); 1169 + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); 1170 + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); 1171 + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); 1172 + *ecc_code++ = (bch_val2 & 0xFF); 1173 + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); 1174 + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); 1175 + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); 1176 + *ecc_code++ = (bch_val1 & 0xFF); 1177 + break; 1178 + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1179 + case OMAP_ECC_BCH4_CODE_HW: 1180 + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); 1181 + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); 1182 + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); 1183 + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); 1184 + *ecc_code++ = ((bch_val2 & 0xF) << 4) | 1185 + ((bch_val1 >> 28) & 0xF); 1186 + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); 1187 + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); 1188 + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); 1189 + *ecc_code++ = ((bch_val1 & 0xF) << 4); 1190 + break; 1191 + case OMAP_ECC_BCH16_CODE_HW: 1192 + val = readl(gpmc_regs->gpmc_bch_result6[i]); 1193 + ecc_code[0] = ((val >> 8) & 0xFF); 1194 + ecc_code[1] = ((val >> 0) & 0xFF); 1195 + val = readl(gpmc_regs->gpmc_bch_result5[i]); 1196 + ecc_code[2] = ((val >> 24) & 0xFF); 1197 + ecc_code[3] = ((val >> 16) & 0xFF); 1198 + ecc_code[4] = ((val >> 8) & 0xFF); 1199 + ecc_code[5] = ((val >> 0) & 0xFF); 1200 + val = readl(gpmc_regs->gpmc_bch_result4[i]); 1201 + ecc_code[6] = ((val >> 24) & 0xFF); 1202 + ecc_code[7] = ((val >> 16) & 0xFF); 1203 + ecc_code[8] = ((val >> 8) & 0xFF); 1204 + ecc_code[9] = ((val >> 0) & 0xFF); 1205 + val = readl(gpmc_regs->gpmc_bch_result3[i]); 1206 + ecc_code[10] = ((val >> 24) & 0xFF); 1207 + ecc_code[11] = ((val >> 16) & 0xFF); 1208 + 
ecc_code[12] = ((val >> 8) & 0xFF);
1209 + ecc_code[13] = ((val >> 0) & 0xFF);
1210 + val = readl(gpmc_regs->gpmc_bch_result2[i]);
1211 + ecc_code[14] = ((val >> 24) & 0xFF);
1212 + ecc_code[15] = ((val >> 16) & 0xFF);
1213 + ecc_code[16] = ((val >> 8) & 0xFF);
1214 + ecc_code[17] = ((val >> 0) & 0xFF);
1215 + val = readl(gpmc_regs->gpmc_bch_result1[i]);
1216 + ecc_code[18] = ((val >> 24) & 0xFF);
1217 + ecc_code[19] = ((val >> 16) & 0xFF);
1218 + ecc_code[20] = ((val >> 8) & 0xFF);
1219 + ecc_code[21] = ((val >> 0) & 0xFF);
1220 + val = readl(gpmc_regs->gpmc_bch_result0[i]);
1221 + ecc_code[22] = ((val >> 24) & 0xFF);
1222 + ecc_code[23] = ((val >> 16) & 0xFF);
1223 + ecc_code[24] = ((val >> 8) & 0xFF);
1224 + ecc_code[25] = ((val >> 0) & 0xFF);
1225 + break;
1226 + default:
1227 + return -EINVAL;
1228 + }
1229 + 
1230 + /* ECC scheme specific syndrome customizations */
1231 + switch (info->ecc_opt) {
1232 + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1233 + /* Add constant polynomial to remainder, so that
1234 + * ECC of blank pages results in 0x0 on reading back
1235 + */
1236 + for (j = 0; j < eccbytes; j++)
1237 + ecc_calc[j] ^= bch4_polynomial[j];
1238 + break;
1239 + case OMAP_ECC_BCH4_CODE_HW:
1240 + /* Set 8th ECC byte as 0x0 for ROM compatibility */
1241 + ecc_calc[eccbytes - 1] = 0x0;
1242 + break;
1243 + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1244 + /* Add constant polynomial to remainder, so that
1245 + * ECC of blank pages results in 0x0 on reading back
1246 + */
1247 + for (j = 0; j < eccbytes; j++)
1248 + ecc_calc[j] ^= bch8_polynomial[j];
1249 + break;
1250 + case OMAP_ECC_BCH8_CODE_HW:
1251 + /* Set 14th ECC byte as 0x0 for ROM compatibility */
1252 + ecc_calc[eccbytes - 1] = 0x0;
1253 + break;
1254 + case OMAP_ECC_BCH16_CODE_HW:
1255 + break;
1256 + default:
1257 + return -EINVAL;
1258 + }
1259 + 
1260 + return 0;
1261 + }
1262 + 
1263 + /**
1264 + * omap_calculate_ecc_bch_sw - ECC generator for one sector, for SW-based correction
1265 + * @mtd: MTD device structure
1266 + * @dat: Pointer to the data on which the ECC is computed
1267 + * @ecc_calc: Buffer storing the calculated ECC bytes
1268 + * 
1269 + * Supports calculation of BCH4/8/16 ECC vectors for one sector. This is
1270 + * used when SW-based correction is required, since ECC is then needed for
1271 + * one sector at a time.
1272 + */
1273 + static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
1274 + const u_char *dat, u_char *ecc_calc)
1275 + {
1276 + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
1277 + }
1278 + 
1279 + /**
1280 + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1281 + * @mtd: MTD device structure
1282 + * @dat: Pointer to the data on which the ECC is computed
1283 + * @ecc_calc: Buffer storing the calculated ECC bytes
1284 + * 
1285 + * Supports calculation of BCH4/8/16 ECC vectors for the entire page in one go.
1286 + */
1287 + static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1288 + const u_char *dat, u_char *ecc_calc)
1289 + {
1290 + struct omap_nand_info *info = mtd_to_omap(mtd);
1291 + int eccbytes = info->nand.ecc.bytes;
1292 + unsigned long nsectors;
1293 + int i, ret;
1153 1294 
1154 1295 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1155 1296 for (i = 0; i < nsectors; i++) {
1156 - ecc_code = ecc_calc;
1157 - switch (info->ecc_opt) {
1158 - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1159 - case OMAP_ECC_BCH8_CODE_HW:
1160 - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1161 - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1162 - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1163 - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1164 - *ecc_code++ = (bch_val4 & 0xFF);
1165 - *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1166 - *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1167 - *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1168 - *ecc_code++ = (bch_val3 & 0xFF);
1169 - *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1170 - *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1171 - *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1172 - *ecc_code++ = (bch_val2 & 0xFF);
1173 - *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1174 - *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1175 - *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1176 - *ecc_code++ = (bch_val1 & 0xFF);
1177 - break;
1178 - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1179 - case OMAP_ECC_BCH4_CODE_HW:
1180 - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1181 - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1182 - *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1183 - *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1184 - *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1185 - ((bch_val1 >> 28) & 0xF);
1186 - *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1187 - *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1188 - *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1189 - *ecc_code++ = ((bch_val1 & 0xF) << 4);
1190 - break;
1191 - case OMAP_ECC_BCH16_CODE_HW:
1192 - val = readl(gpmc_regs->gpmc_bch_result6[i]);
1193 - ecc_code[0] = ((val >> 8) & 0xFF);
1194 - ecc_code[1] = ((val >> 0) & 0xFF);
1195 - val = readl(gpmc_regs->gpmc_bch_result5[i]);
1196 - ecc_code[2] = ((val >> 24) & 0xFF);
1197 - ecc_code[3] = ((val >> 16) & 0xFF);
1198 - ecc_code[4] = ((val >> 8) & 0xFF);
1199 - ecc_code[5] = ((val >> 0) & 0xFF);
1200 - val = readl(gpmc_regs->gpmc_bch_result4[i]);
1201 - ecc_code[6] = ((val >> 24) & 0xFF);
1202 - ecc_code[7] = ((val >> 16) & 0xFF);
1203 - ecc_code[8] = ((val >> 8) & 0xFF);
1204 - ecc_code[9] = ((val >> 0) & 0xFF);
1205 - val = readl(gpmc_regs->gpmc_bch_result3[i]);
1206 - ecc_code[10] = ((val >> 24) & 0xFF);
1207 - ecc_code[11] = ((val >> 16) & 0xFF);
1208 - ecc_code[12] = ((val >> 8) & 0xFF);
1209 - ecc_code[13] = ((val >> 0) & 0xFF);
1210 - val = readl(gpmc_regs->gpmc_bch_result2[i]);
1211 - ecc_code[14] = ((val >> 24) & 0xFF);
1212 - ecc_code[15] = ((val >> 16) & 0xFF);
1213 - ecc_code[16] = ((val >> 8) & 0xFF);
1214 - ecc_code[17] = ((val >> 0) & 0xFF);
1215 - val = readl(gpmc_regs->gpmc_bch_result1[i]);
1216 - ecc_code[18] = ((val >> 24) & 0xFF);
1217 - ecc_code[19] = ((val >> 16) & 0xFF);
1218 - ecc_code[20] = ((val >> 8) & 0xFF);
1219 - ecc_code[21] = ((val >> 0) & 0xFF);
1220 - val = readl(gpmc_regs->gpmc_bch_result0[i]);
1221 - ecc_code[22] = ((val >> 24) & 0xFF);
1222 - ecc_code[23] = ((val >> 16) & 0xFF);
1223 - ecc_code[24] = ((val >> 8) & 0xFF);
1224 - ecc_code[25] = ((val >> 0) & 0xFF);
1225 - break;
1226 - default:
1227 - return -EINVAL;
1228 - }
1297 + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1298 + if (ret)
1299 + return ret;
1229 1300 
1230 - /* ECC scheme specific syndrome customizations */
1231 - switch (info->ecc_opt) {
1232 - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1233 - /* Add constant polynomial to remainder, so that
1234 - * ECC of blank pages results in 0x0 on reading back */
1235 - for (j = 0; j < eccbytes; j++)
1236 - ecc_calc[j] ^= bch4_polynomial[j];
1237 - break;
1238 - case OMAP_ECC_BCH4_CODE_HW:
1239 - /* Set 8th ECC byte as 0x0 for ROM compatibility */
1240 - ecc_calc[eccbytes - 1] = 0x0;
1241 - break;
1242 - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1243 - /* Add constant polynomial to remainder, so that
1244 - * ECC of blank pages results in 0x0 on reading back */
1245 - for (j = 0; j < eccbytes; j++)
1246 - ecc_calc[j] ^= bch8_polynomial[j];
1247 - break;
1248 - case OMAP_ECC_BCH8_CODE_HW:
1249 - /* Set 14th ECC byte as 0x0 for ROM compatibility */
1250 - ecc_calc[eccbytes - 1] = 0x0;
1251 - break;
1252 - case OMAP_ECC_BCH16_CODE_HW:
1253 - break;
1254 - default:
1255 - return -EINVAL;
1256 - }
1257 - 
1258 - ecc_calc += eccbytes;
1301 + ecc_calc += eccbytes;
1259 1302 }
1260 1303 
1261 1304 return 0;
··· 1539 1496 chip->write_buf(mtd, buf, mtd->writesize);
1540 1497 
1541 1498 /* Update ecc vector from GPMC result registers */
1542 - chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
1499 + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
1543 1500 
1544 1501 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1545 1502 chip->ecc.total);
··· 1548 1505 
1549 1506 /* Write ecc vector to OOB area */
1550 1507 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1508 + return 0;
1509 + }
1510 + 
1511 + /**
1512 + * omap_write_subpage_bch - BCH hardware ECC based subpage write
1513 + * @mtd: mtd info structure
1514 + * @chip: nand chip info structure
1515 + * @offset: column address of subpage within the page
1516 + * @data_len: data length
1517 + * @buf: data buffer
1518 + * @oob_required: must write chip->oob_poi to OOB
1519 + * @page: page number to write
1520 + * 
1521 + * OMAP optimized subpage write method.
1522 + */
1523 + static int omap_write_subpage_bch(struct mtd_info *mtd,
1524 + struct nand_chip *chip, u32 offset,
1525 + u32 data_len, const u8 *buf,
1526 + int oob_required, int page)
1527 + {
1528 + u8 *ecc_calc = chip->buffers->ecccalc;
1529 + int ecc_size = chip->ecc.size;
1530 + int ecc_bytes = chip->ecc.bytes;
1531 + int ecc_steps = chip->ecc.steps;
1532 + u32 start_step = offset / ecc_size;
1533 + u32 end_step = (offset + data_len - 1) / ecc_size;
1534 + int step, ret = 0;
1535 + 
1536 + /*
1537 + * Write the entire page in one go, which is optimal since
1538 + * the ECC is calculated by hardware.
1539 + * ECC is calculated for all subpages, but we keep
1540 + * only what we need.
1541 + */
1542 + 
1543 + /* Enable GPMC ECC engine */
1544 + chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1545 + 
1546 + /* Write data */
1547 + chip->write_buf(mtd, buf, mtd->writesize);
1548 + 
1549 + for (step = 0; step < ecc_steps; step++) {
1550 + /* mask ECC of untouched subpages by padding 0xFF */
1551 + if (step < start_step || step > end_step)
1552 + memset(ecc_calc, 0xff, ecc_bytes);
1553 + else
1554 + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
1555 + 
1556 + if (ret)
1557 + return ret;
1558 + 
1559 + buf += ecc_size;
1560 + ecc_calc += ecc_bytes;
1561 + }
1562 + 
1563 + /* Copy the calculated ECC for the whole page to chip->oob_poi; */
1564 + /* this includes the masked value (0xFF) for unwritten subpages. */
1565 + ecc_calc = chip->buffers->ecccalc;
1566 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1567 + chip->ecc.total);
1568 + if (ret)
1569 + return ret;
1570 + 
1571 + /* write OOB buffer to NAND device */
1572 + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1573 + 
1551 1574 return 0;
1552 1575 }
1553 1576 
··· 1653 1544 chip->ecc.total);
1654 1545 
1655 1546 /* Calculate ecc bytes */
1656 - chip->ecc.calculate(mtd, buf, ecc_calc);
1547 + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
1657 1548 
1658 1549 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1659 1550 chip->ecc.total);
··· 1697 1588 return true;
1698 1589 }
1699 1590 
1700 - static bool omap2_nand_ecc_check(struct omap_nand_info *info,
1701 - struct omap_nand_platform_data *pdata)
1591 + static bool omap2_nand_ecc_check(struct omap_nand_info *info)
1702 1592 {
1703 1593 bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
1704 1594 
··· 1912 1804 static int omap_nand_probe(struct platform_device *pdev)
1913 1805 {
1914 1806 struct omap_nand_info *info;
1915 - struct omap_nand_platform_data *pdata = NULL;
1916 1807 struct mtd_info *mtd;
1917 1808 struct nand_chip *nand_chip;
1918 1809 int err;
··· 1928 1821 
1929 1822 info->pdev = pdev;
1930 1823 
1931 - if (dev->of_node) {
1932 - if (omap_get_dt_info(dev, info))
1933 - return -EINVAL;
1934 - } else {
1935 - pdata = dev_get_platdata(&pdev->dev);
1936 - if (!pdata) {
1937 - dev_err(&pdev->dev, "platform data missing\n");
1938 - return -EINVAL;
1939 - }
1824 + err = omap_get_dt_info(dev, info);
1825 + if (err)
1826 + return err;
1940 1827 
1941 - info->gpmc_cs = pdata->cs;
1942 - info->reg = pdata->reg;
1943 - info->ecc_opt = pdata->ecc_opt;
1944 - if (pdata->dev_ready)
1945 - dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
1946 - 
1947 - info->xfer_type = pdata->xfer_type;
1948 - info->devsize = pdata->devsize;
1949 - info->elm_of_node = pdata->elm_of_node;
1950 - info->flash_bbt = pdata->flash_bbt;
1951 - }
1952 - 
1953 - platform_set_drvdata(pdev, info);
1954 1828 info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
1955 1829 if (!info->ops) {
1956 1830 dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
··· 2090 2002 goto return_error;
2091 2003 }
2092 2004 
2093 - if (!omap2_nand_ecc_check(info, pdata)) {
2005 + if (!omap2_nand_ecc_check(info)) {
2094 2006 err = -EINVAL;
2095 2007 goto return_error;
2096 2008 }
··· 2132 2044 nand_chip->ecc.strength = 4;
2133 2045 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
2134 2046 nand_chip->ecc.correct = nand_bch_correct_data;
2135 - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
2047 + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
2136 2048 mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
2137 2049 /* Reserve one byte for the OMAP marker */
2138 2050 oobbytes_per_step = nand_chip->ecc.bytes + 1;
··· 2154 2066 nand_chip->ecc.strength = 4;
2155 2067 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
2156 2068 nand_chip->ecc.correct = omap_elm_correct_data;
2157 - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
2158 2069 nand_chip->ecc.read_page = omap_read_page_bch;
2159 2070 nand_chip->ecc.write_page = omap_write_page_bch;
2071 + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
2160 2072 mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2161 2073 oobbytes_per_step = nand_chip->ecc.bytes;
2162 2074 
··· 2175 2087 nand_chip->ecc.strength = 8;
2176 2088 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
2177 2089 nand_chip->ecc.correct = nand_bch_correct_data;
2178 - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
2090 + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
2179 2091 mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
2180 2092 /* Reserve one byte for the OMAP marker */
2181 2093 oobbytes_per_step = nand_chip->ecc.bytes + 1;
··· 2197 2109 nand_chip->ecc.strength = 8;
2198 2110 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
2199 2111 nand_chip->ecc.correct = omap_elm_correct_data;
2200 - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
2201 2112 nand_chip->ecc.read_page = omap_read_page_bch;
2202 2113 nand_chip->ecc.write_page = omap_write_page_bch;
2114 + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
2203 2115 mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2204 2116 oobbytes_per_step = nand_chip->ecc.bytes;
2205 2117 
··· 2219 2131 nand_chip->ecc.strength = 16;
2220 2132 nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
2221 2133 nand_chip->ecc.correct = omap_elm_correct_data;
2222 - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
2223 2134 nand_chip->ecc.read_page = omap_read_page_bch;
2224 2135 nand_chip->ecc.write_page = omap_write_page_bch;
2136 + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
2225 2137 mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2226 2138 oobbytes_per_step = nand_chip->ecc.bytes;
2227 2139 
··· 2255 2167 if (err)
2256 2168 goto return_error;
2257 2169 
2258 - if (dev->of_node)
2259 - mtd_device_register(mtd, NULL, 0);
2260 - else
2261 - mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
2170 + err = mtd_device_register(mtd, NULL, 0);
2171 + if (err)
2172 + goto return_error;
2262 2173 
2263 2174 platform_set_drvdata(pdev, mtd);
2264 2175 
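The subpage write above leans on a basic NAND property: programming can only clear bits from 1 to 0, and an erased page (OOB included) reads back as all 0xFF. Padding the ECC slots of untouched subpages with 0xFF therefore programs nothing into their OOB bytes, leaving them erased for a later subpage write. A minimal stand-alone sketch of just that masking step (hypothetical helper, not part of the driver):

#include <stdint.h>
#include <string.h>

/* ECC slots belonging to subpages outside [start_step, end_step] are
 * filled with 0xFF, so writing them clears no bits on the flash and the
 * corresponding OOB bytes stay in their erased state. */
static void mask_untouched_ecc(uint8_t *ecc, unsigned int ecc_bytes,
                               unsigned int ecc_steps,
                               unsigned int start_step, unsigned int end_step)
{
        unsigned int step;

        for (step = 0; step < ecc_steps; step++, ecc += ecc_bytes) {
                if (step < start_step || step > end_step)
                        memset(ecc, 0xff, ecc_bytes);
        }
}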
+37 -4
drivers/mtd/nand/pxa3xx_nand.c
··· 30 30 #include <linux/of.h>
31 31 #include <linux/of_device.h>
32 32 #include <linux/platform_data/mtd-nand-pxa3xx.h>
33 + #include <linux/mfd/syscon.h>
34 + #include <linux/regmap.h>
33 35 
34 36 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
35 37 #define NAND_STOP_DELAY msecs_to_jiffies(40)
··· 46 44 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
47 45 */
48 46 #define INIT_BUFFER_SIZE 2048
47 + 
48 + /* System control register and bit to enable NAND on some SoCs */
49 + #define GENCONF_SOC_DEVICE_MUX 0x208
50 + #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
49 51 
50 52 /* registers and bit definitions */
51 53 #define NDCR (0x00) /* Control register */
··· 180 174 enum pxa3xx_nand_variant {
181 175 PXA3XX_NAND_VARIANT_PXA,
182 176 PXA3XX_NAND_VARIANT_ARMADA370,
177 + PXA3XX_NAND_VARIANT_ARMADA_8K,
183 178 };
184 179 
185 180 struct pxa3xx_nand_host {
··· 431 424 {
432 425 .compatible = "marvell,armada370-nand",
433 426 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
427 + },
428 + {
429 + .compatible = "marvell,armada-8k-nand",
430 + .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
434 431 },
435 432 {}
436 433 };
··· 836 825 info->retcode = ERR_UNCORERR;
837 826 if (status & NDSR_CORERR) {
838 827 info->retcode = ERR_CORERR;
839 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
828 + if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
829 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
840 830 info->ecc_bch)
841 831 info->ecc_err_cnt = NDSR_ERR_CNT(status);
842 832 else
··· 900 888 nand_writel(info, NDCB0, info->ndcb2);
901 889 
902 890 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
903 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
891 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
892 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
904 893 nand_writel(info, NDCB0, info->ndcb3);
905 894 }
906 895 
··· 1684 1671 chip->options |= NAND_BUSWIDTH_16;
1685 1672 
1686 1673 /* Device detection must be done with ECC disabled */
1687 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1674 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1675 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
1688 1676 nand_writel(info, NDECCCTRL, 0x0);
1689 1677 
1690 1678 if (pdata->flash_bbt)
··· 1723 1709 * (aka splitted) command handling,
1724 1710 */
1725 1711 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1726 - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1712 + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
1713 + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1727 1714 chip->cmdfunc = nand_cmdfunc_extended;
1728 1715 } else {
1729 1716 dev_err(&info->pdev->dev,
··· 1942 1927 
1943 1928 if (!of_id)
1944 1929 return 0;
1930 + 
1931 + /*
1932 + * Some SoCs, such as the A7k/A8k, need the NAND controller to be
1933 + * enabled manually, so as not to depend on the bootloader. This
1934 + * is done with a single bit in the System Functions registers.
1935 + */
1936 + if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1937 + struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1938 + pdev->dev.of_node, "marvell,system-controller");
1939 + u32 reg;
1940 + 
1941 + if (IS_ERR(sysctrl_base))
1942 + return PTR_ERR(sysctrl_base);
1943 + 
1944 + regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
1945 + reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1946 + regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1947 + }
1945 1948 
1946 1949 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1947 1950 if (!pdata)
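The read-modify-write of GENCONF_SOC_DEVICE_MUX above can also be expressed with regmap_update_bits(), which performs the same sequence under the regmap's lock. A sketch, reusing the register macros defined in the driver above (the function name is hypothetical):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Set the NFC enable bit atomically with respect to other users of the
 * system-controller regmap; otherwise equivalent to the merged code. */
static int armada_8k_nand_enable(struct platform_device *pdev)
{
        struct regmap *sysctrl_base;

        sysctrl_base = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                        "marvell,system-controller");
        if (IS_ERR(sysctrl_base))
                return PTR_ERR(sysctrl_base);

        return regmap_update_bits(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
                                  GENCONF_SOC_DEVICE_MUX_NFC_EN,
                                  GENCONF_SOC_DEVICE_MUX_NFC_EN);
}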
+110 -17
drivers/mtd/nand/qcom_nandc.c
··· 22 22 #include <linux/of.h>
23 23 #include <linux/of_device.h>
24 24 #include <linux/delay.h>
25 + #include <linux/dma/qcom_bam_dma.h>
25 26 
26 27 /* NANDc reg offsets */
27 28 #define NAND_FLASH_CMD 0x00
··· 200 199 */
201 200 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
202 201 
202 + /* Returns the NAND register physical address */
203 + #define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
204 + 
205 + /* Returns the dma address for reg read buffer */
206 + #define reg_buf_dma_addr(chip, vaddr) \
207 + ((chip)->reg_read_dma + \
208 + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
209 + 
210 + #define QPIC_PER_CW_CMD_ELEMENTS 32
203 211 #define QPIC_PER_CW_CMD_SGL 32
204 212 #define QPIC_PER_CW_DATA_SGL 8
205 213 
··· 231 221 /*
232 222 * This data type corresponds to the BAM transaction which will be used for all
233 223 * NAND transfers.
224 + * @bam_ce - the array of BAM command elements
234 225 * @cmd_sgl - sgl for NAND BAM command pipe
235 226 * @data_sgl - sgl for NAND BAM consumer/producer pipe
227 + * @bam_ce_pos - the index in bam_ce which is available for the next sgl
228 + * @bam_ce_start - the index in bam_ce which marks the first command
229 + * element of the current sgl; used for size
230 + * calculation of the current sgl
236 231 * @cmd_sgl_pos - current index in command sgl.
237 232 * @cmd_sgl_start - start index in command sgl.
238 233 * @tx_sgl_pos - current index in data sgl for tx.
··· 246 231 * @rx_sgl_start - start index in data sgl for rx.
247 232 */
248 233 struct bam_transaction {
234 + struct bam_cmd_element *bam_ce;
249 235 struct scatterlist *cmd_sgl;
250 236 struct scatterlist *data_sgl;
237 + u32 bam_ce_pos;
238 + u32 bam_ce_start;
251 239 u32 cmd_sgl_pos;
252 240 u32 cmd_sgl_start;
253 241 u32 tx_sgl_pos;
··· 325 307 * controller
326 308 * @dev: parent device
327 309 * @base: MMIO base
328 - * @base_dma: physical base address of controller registers
310 + * @base_phys: physical base address of controller registers
311 + * @base_dma: dma base address of controller registers
329 312 * @core_clk: controller clock
330 313 * @aon_clk: another controller clock
331 314 *
··· 359 340 struct device *dev;
360 341 
361 342 void __iomem *base;
343 + phys_addr_t base_phys;
362 344 dma_addr_t base_dma;
363 345 
364 346 struct clk *core_clk;
··· 482 462 
483 463 bam_txn_size =
484 464 sizeof(*bam_txn) + num_cw *
485 - ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
465 + ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
466 + (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
486 467 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
487 468 
488 469 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
··· 492 471 
493 472 bam_txn = bam_txn_buf;
494 473 bam_txn_buf += sizeof(*bam_txn);
474 + 
475 + bam_txn->bam_ce = bam_txn_buf;
476 + bam_txn_buf +=
477 + sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
495 478 
496 479 bam_txn->cmd_sgl = bam_txn_buf;
497 480 bam_txn_buf +=
··· 514 489 if (!nandc->props->is_bam)
515 490 return;
516 491 
492 + bam_txn->bam_ce_pos = 0;
493 + bam_txn->bam_ce_start = 0;
517 494 bam_txn->cmd_sgl_pos = 0;
518 495 bam_txn->cmd_sgl_start = 0;
519 496 bam_txn->tx_sgl_pos = 0;
··· 761 734 }
762 735 
763 736 /*
737 + * Prepares the command descriptor for BAM DMA, which will be used for NAND
738 + * register reads and writes. The command descriptor requires the commands
739 + * to be formed as command elements, so this function takes command
740 + * elements from the bam transaction ce array and fills them with the
741 + * required data. A single SGL can contain multiple command elements, so
742 + * NAND_BAM_NEXT_SGL is used to start a separate SGL
743 + * after the current command element.
744 + */
745 + static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
746 + int reg_off, const void *vaddr,
747 + int size, unsigned int flags)
748 + {
749 + int bam_ce_size;
750 + int i, ret;
751 + struct bam_cmd_element *bam_ce_buffer;
752 + struct bam_transaction *bam_txn = nandc->bam_txn;
753 + 
754 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
755 + 
756 + /* fill the command desc */
757 + for (i = 0; i < size; i++) {
758 + if (read)
759 + bam_prep_ce(&bam_ce_buffer[i],
760 + nandc_reg_phys(nandc, reg_off + 4 * i),
761 + BAM_READ_COMMAND,
762 + reg_buf_dma_addr(nandc,
763 + (__le32 *)vaddr + i));
764 + else
765 + bam_prep_ce_le32(&bam_ce_buffer[i],
766 + nandc_reg_phys(nandc, reg_off + 4 * i),
767 + BAM_WRITE_COMMAND,
768 + *((__le32 *)vaddr + i));
769 + }
770 + 
771 + bam_txn->bam_ce_pos += size;
772 + 
773 + /* use a separate sgl after this command */
774 + if (flags & NAND_BAM_NEXT_SGL) {
775 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
776 + bam_ce_size = (bam_txn->bam_ce_pos -
777 + bam_txn->bam_ce_start) *
778 + sizeof(struct bam_cmd_element);
779 + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
780 + bam_ce_buffer, bam_ce_size);
781 + bam_txn->cmd_sgl_pos++;
782 + bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
783 + 
784 + if (flags & NAND_BAM_NWD) {
785 + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
786 + DMA_PREP_FENCE |
787 + DMA_PREP_CMD);
788 + if (ret)
789 + return ret;
790 + }
791 + }
792 + 
793 + return 0;
794 + }
795 + 
796 + /*
764 797 * Prepares the data descriptor for BAM DMA which will be used for NAND
765 798 * data reads and writes.
766 799 */
··· 938 851 {
939 852 bool flow_control = false;
940 853 void *vaddr;
941 - int size;
942 854 
943 - if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
944 - flow_control = true;
855 + vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
856 + nandc->reg_read_pos += num_regs;
945 857 
946 858 if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
947 859 first = dev_cmd_reg_addr(nandc, first);
948 860 
949 - size = num_regs * sizeof(u32);
950 - vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
951 - nandc->reg_read_pos += num_regs;
861 + if (nandc->props->is_bam)
862 + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
863 + num_regs, flags);
952 864 
953 - return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
865 + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
866 + flow_control = true;
867 + 
868 + return prep_adm_dma_desc(nandc, true, first, vaddr,
869 + num_regs * sizeof(u32), flow_control);
954 870 }
955 871 
956 872 /*
··· 970 880 bool flow_control = false;
971 881 struct nandc_regs *regs = nandc->regs;
972 882 void *vaddr;
973 - int size;
974 883 
975 884 vaddr = offset_to_nandc_reg(regs, first);
976 885 
977 - if (first == NAND_FLASH_CMD)
978 - flow_control = true;
979 - 
980 886 if (first == NAND_ERASED_CW_DETECT_CFG) {
981 887 if (flags & NAND_ERASED_CW_SET)
··· 989 903 if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
990 904 first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
991 905 
992 - size = num_regs * sizeof(u32);
906 + if (nandc->props->is_bam)
907 + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
908 + num_regs, flags);
993 909 
994 - return prep_adm_dma_desc(nandc, false, first, vaddr, size,
995 - flow_control);
910 + if (first == NAND_FLASH_CMD)
911 + flow_control = true;
912 + 
913 + return prep_adm_dma_desc(nandc, false, first, vaddr,
914 + num_regs * sizeof(u32), flow_control);
996 915 }
997 916 
998 917 /*
··· 1261 1170 }
1262 1171 
1263 1172 if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
1264 - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
1173 + r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
1174 + DMA_PREP_CMD);
1265 1175 if (r)
1266 1176 return r;
1267 1177 }
··· 2797 2705 if (IS_ERR(nandc->base))
2798 2706 return PTR_ERR(nandc->base);
2799 2707 
2708 + nandc->base_phys = res->start;
2800 2709 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
2801 2710 
2802 2711 nandc->core_clk = devm_clk_get(dev, "core");
+3 -6
drivers/mtd/nand/sh_flctl.c
··· 1094 1094 
1095 1095 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1096 1096 {
1097 - const struct of_device_id *match;
1098 - struct flctl_soc_config *config;
1097 + const struct flctl_soc_config *config;
1099 1098 struct sh_flctl_platform_data *pdata;
1100 1099 
1101 - match = of_match_device(of_flctl_match, dev);
1102 - if (match)
1103 - config = (struct flctl_soc_config *)match->data;
1104 - else {
1100 + config = of_device_get_match_data(dev);
1101 + if (!config) {
1105 1102 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1106 1103 return NULL;
1107 1104 }
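The sh_flctl change collapses the of_match_device() plus match->data dance into a single of_device_get_match_data() call, which returns the matching table entry's .data pointer, or NULL when the device has no OF node or nothing matches; that NULL is exactly what the new error path keys off. The generic shape of the pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/of_device.h>

/* Hypothetical per-SoC configuration fetched through the OF match
 * table's .data pointer; flctl_parse_dt() above follows this shape. */
struct my_soc_config {
        unsigned long quirks;
};

static const struct my_soc_config *my_get_config(struct device *dev)
{
        /* NULL when there is no OF node or no matching table entry */
        return of_device_get_match_data(dev);
}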
-5
include/linux/mtd/nand-gpio.h
··· 4 4 #include <linux/mtd/rawnand.h>
5 5 
6 6 struct gpio_nand_platdata {
7 - int gpio_nce;
8 - int gpio_nwp;
9 - int gpio_cle;
10 - int gpio_ale;
11 - int gpio_rdy;
12 7 void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
13 8 struct mtd_partition *parts;
14 9 unsigned int num_parts;
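With the integer GPIO numbers gone from gpio_nand_platdata, the gpio-nand driver is expected to request its lines through the GPIO descriptor API, with the board code or device tree supplying the mapping under the function names "nce", "cle", "ale" and "rdy". A sketch of the consumer side (helper name and error handling are illustrative, not the driver's exact code):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Lines are requested by function name; a per-board gpiod_lookup_table
 * or DT properties provide the actual pins and polarities. */
static int gpio_nand_get_lines(struct device *dev)
{
        struct gpio_desc *nce, *rdy;

        nce = devm_gpiod_get(dev, "nce", GPIOD_OUT_HIGH);
        if (IS_ERR(nce))
                return PTR_ERR(nce);

        rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
        if (IS_ERR(rdy))
                return PTR_ERR(rdy);

        /* "cle" and "ale" would be requested the same way as "nce" */
        return 0;
}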
+3
include/linux/mtd/rawnand.h
··· 177 177 */
178 178 #define NAND_NEED_SCRAMBLING 0x00002000
179 179 
180 + /* Device needs 3rd row address cycle */
181 + #define NAND_ROW_ADDR_3 0x00004000
182 + 
180 183 /* Options valid for Samsung large page devices */
181 184 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
182 185 
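NAND_ROW_ADDR_3 lets the core record, at detection time, that a chip wants three row-address cycles, so command dispatch afterwards only has to test the flag rather than re-derive the cycle count from the chip size. Roughly, as a sketch rather than the exact nand_base code:

#include <linux/mtd/rawnand.h>

/* Row-address emission keyed off NAND_ROW_ADDR_3; the cmd_ctrl flag
 * handling is simplified compared to nand_command(). */
static void sketch_send_row_address(struct mtd_info *mtd,
                                    struct nand_chip *chip, int page_addr)
{
        chip->cmd_ctrl(mtd, page_addr & 0xff, NAND_NCE | NAND_ALE);
        chip->cmd_ctrl(mtd, (page_addr >> 8) & 0xff, NAND_NCE | NAND_ALE);

        /* third row cycle only when detection set the flag */
        if (chip->options & NAND_ROW_ADDR_3)
                chip->cmd_ctrl(mtd, (page_addr >> 16) & 0xff,
                               NAND_NCE | NAND_ALE);
}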
-17
include/linux/platform_data/mtd-nand-omap2.h
··· 66 66 /* Deprecated. Do not use */
67 67 void __iomem *gpmc_status;
68 68 };
69 - 
70 - struct omap_nand_platform_data {
71 - int cs;
72 - struct mtd_partition *parts;
73 - int nr_parts;
74 - bool flash_bbt;
75 - enum nand_io xfer_type;
76 - int devsize;
77 - enum omap_ecc ecc_opt;
78 - 
79 - struct device_node *elm_of_node;
80 - 
81 - /* deprecated */
82 - struct gpmc_nand_regs reg;
83 - struct device_node *of_node;
84 - bool dev_ready;
85 - };
86 69 #endif