Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20130301' of git://git.infradead.org/linux-mtd

Pull MTD update from David Woodhouse:
"Fairly unexciting MTD merge for 3.9:

- misc clean-ups in the MTD command-line partitioning parser
(cmdlinepart)
- add flash locking support for STMicro serial flash chips, as
well as for CFI command set 2 chips.
- new driver for the ELM error correction HW module found in various
TI chips, enable the OMAP NAND driver to use the ELM HW error
correction
- added a number of new serial flash IDs
- various fixes and improvements in the gpmi NAND driver
- bcm47xx NAND driver improvements
- make the mtdpart module actually removable"

* tag 'for-linus-20130301' of git://git.infradead.org/linux-mtd: (45 commits)
mtd: map: BUG() in non handled cases
mtd: bcm47xxnflash: use pr_fmt for module prefix in messages
mtd: davinci_nand: Use managed resources
mtd: mtd_torturetest can cause stack overflows
mtd: physmap_of: Convert device allocation to managed devm_kzalloc()
mtd: at91: atmel_nand: for PMECC, add code to check the ONFI parameter ECC requirement.
mtd: atmel_nand: make pmecc-cap, pmecc-sector-size in dts is optional.
mtd: atmel_nand: avoid to report an error when lookup table offset is 0.
mtd: bcm47xxsflash: adjust names of bus-specific functions
mtd: bcm47xxpart: improve probing of nvram partition
mtd: bcm47xxpart: add support for other erase sizes
mtd: bcm47xxnflash: register this as normal driver
mtd: bcm47xxnflash: fix message
mtd: bcm47xxsflash: register this as normal driver
mtd: bcm47xxsflash: write number of written bytes
mtd: gpmi: add sanity check for the ECC
mtd: gpmi: set the Golois Field bit for mx6q's BCH
mtd: devices: elm: Removes <xx> literals in elm DT node
mtd: gpmi: fix a dereferencing freed memory error
mtd: fix the wrong timeo for panic_nand_wait()
...

+1881 -331
+16
Documentation/devicetree/bindings/mtd/elm.txt
··· 1 + Error location module 2 + 3 + Required properties: 4 + - compatible: Must be "ti,am33xx-elm" 5 + - reg: physical base address and size of the registers map. 6 + - interrupts: Interrupt number for the elm. 7 + 8 + Optional properties: 9 + - ti,hwmods: Name of the hwmod associated to the elm 10 + 11 + Example: 12 + elm: elm@0 { 13 + compatible = "ti,am3352-elm"; 14 + reg = <0x48080000 0x2000>; 15 + interrupts = <4>; 16 + };
+3
Documentation/devicetree/bindings/mtd/mtd-physmap.txt
··· 26 26 - linux,mtd-name: allow to specify the mtd name for retro capability with 27 27 physmap-flash drivers as boot loader pass the mtd partition via the old 28 28 device name physmap-flash. 29 + - use-advanced-sector-protection: boolean to enable support for the 30 + advanced sector protection (Spansion: PPB - Persistent Protection 31 + Bits) locking. 29 32 30 33 For JEDEC compatible devices, the following additional properties 31 34 are defined:
+2 -2
drivers/mtd/Kconfig
··· 74 74 endif # MTD_REDBOOT_PARTS 75 75 76 76 config MTD_CMDLINE_PARTS 77 - bool "Command line partition table parsing" 78 - depends on MTD = "y" 77 + tristate "Command line partition table parsing" 78 + depends on MTD 79 79 ---help--- 80 80 Allow generic configuration of the MTD partition tables via the kernel 81 81 command line. Multiple flash resources are supported for hardware where
+6
drivers/mtd/ar7part.c
··· 142 142 return register_mtd_parser(&ar7_parser); 143 143 } 144 144 145 + static void __exit ar7_parser_exit(void) 146 + { 147 + deregister_mtd_parser(&ar7_parser); 148 + } 149 + 145 150 module_init(ar7_parser_init); 151 + module_exit(ar7_parser_exit); 146 152 147 153 MODULE_LICENSE("GPL"); 148 154 MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
+34 -15
drivers/mtd/bcm47xxpart.c
··· 19 19 /* 10 parts were found on sflash on Netgear WNDR4500 */ 20 20 #define BCM47XXPART_MAX_PARTS 12 21 21 22 - /* 23 - * Amount of bytes we read when analyzing each block of flash memory. 24 - * Set it big enough to allow detecting partition and reading important data. 25 - */ 26 - #define BCM47XXPART_BYTES_TO_READ 0x404 27 - 28 22 /* Magics */ 29 23 #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ 30 24 #define POT_MAGIC1 0x54544f50 /* POTT */ ··· 53 59 uint32_t *buf; 54 60 size_t bytes_read; 55 61 uint32_t offset; 56 - uint32_t blocksize = 0x10000; 62 + uint32_t blocksize = master->erasesize; 57 63 struct trx_header *trx; 64 + int trx_part = -1; 65 + int last_trx_part = -1; 66 + int max_bytes_to_read = 0x8004; 67 + 68 + if (blocksize <= 0x10000) 69 + blocksize = 0x10000; 70 + if (blocksize == 0x20000) 71 + max_bytes_to_read = 0x18004; 58 72 59 73 /* Alloc */ 60 74 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, 61 75 GFP_KERNEL); 62 - buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); 76 + buf = kzalloc(max_bytes_to_read, GFP_KERNEL); 63 77 64 78 /* Parse block by block looking for magics */ 65 79 for (offset = 0; offset <= master->size - blocksize; ··· 82 80 } 83 81 84 82 /* Read beginning of the block */ 85 - if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, 83 + if (mtd_read(master, offset, max_bytes_to_read, 86 84 &bytes_read, (uint8_t *)buf) < 0) { 87 85 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 88 86 offset); ··· 97 95 } 98 96 99 97 /* Standard NVRAM */ 100 - if (buf[0x000 / 4] == NVRAM_HEADER) { 98 + if (buf[0x000 / 4] == NVRAM_HEADER || 99 + buf[0x1000 / 4] == NVRAM_HEADER || 100 + buf[0x8000 / 4] == NVRAM_HEADER || 101 + (blocksize == 0x20000 && ( 102 + buf[0x10000 / 4] == NVRAM_HEADER || 103 + buf[0x11000 / 4] == NVRAM_HEADER || 104 + buf[0x18000 / 4] == NVRAM_HEADER))) { 101 105 bcm47xxpart_add_part(&parts[curr_part++], "nvram", 102 106 offset, 0); 107 + offset = rounddown(offset, blocksize); 103 108 
continue; 104 109 } 105 110 ··· 140 131 if (buf[0x000 / 4] == TRX_MAGIC) { 141 132 trx = (struct trx_header *)buf; 142 133 134 + trx_part = curr_part; 135 + bcm47xxpart_add_part(&parts[curr_part++], "firmware", 136 + offset, 0); 137 + 143 138 i = 0; 144 139 /* We have LZMA loader if offset[2] points to sth */ 145 140 if (trx->offset[2]) { ··· 167 154 offset + trx->offset[i], 0); 168 155 i++; 169 156 157 + last_trx_part = curr_part - 1; 158 + 170 159 /* 171 160 * We have whole TRX scanned, skip to the next part. Use 172 161 * roundown (not roundup), as the loop will increase ··· 184 169 * Assume that partitions end at the beginning of the one they are 185 170 * followed by. 186 171 */ 187 - for (i = 0; i < curr_part - 1; i++) 188 - parts[i].size = parts[i + 1].offset - parts[i].offset; 189 - if (curr_part > 0) 190 - parts[curr_part - 1].size = 191 - master->size - parts[curr_part - 1].offset; 172 + for (i = 0; i < curr_part; i++) { 173 + u64 next_part_offset = (i < curr_part - 1) ? 174 + parts[i + 1].offset : master->size; 175 + 176 + parts[i].size = next_part_offset - parts[i].offset; 177 + if (i == last_trx_part && trx_part >= 0) 178 + parts[trx_part].size = next_part_offset - 179 + parts[trx_part].offset; 180 + } 192 181 193 182 *pparts = parts; 194 183 return curr_part;
+217
drivers/mtd/chips/cfi_cmdset_0002.c
··· 33 33 #include <linux/delay.h> 34 34 #include <linux/interrupt.h> 35 35 #include <linux/reboot.h> 36 + #include <linux/of.h> 37 + #include <linux/of_platform.h> 36 38 #include <linux/mtd/map.h> 37 39 #include <linux/mtd/mtd.h> 38 40 #include <linux/mtd/cfi.h> ··· 75 73 76 74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 77 75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 76 + 77 + static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 78 + static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 79 + static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); 78 80 79 81 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 80 82 .probe = NULL, /* Not usable directly */ ··· 502 496 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 503 497 { 504 498 struct cfi_private *cfi = map->fldrv_priv; 499 + struct device_node __maybe_unused *np = map->device_node; 505 500 struct mtd_info *mtd; 506 501 int i; 507 502 ··· 575 568 #ifdef DEBUG_CFI_FEATURES 576 569 /* Tell the user about it in lots of lovely detail */ 577 570 cfi_tell_features(extp); 571 + #endif 572 + 573 + #ifdef CONFIG_OF 574 + if (np && of_property_read_bool( 575 + np, "use-advanced-sector-protection") 576 + && extp->BlkProtUnprot == 8) { 577 + printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); 578 + mtd->_lock = cfi_ppb_lock; 579 + mtd->_unlock = cfi_ppb_unlock; 580 + mtd->_is_locked = cfi_ppb_is_locked; 581 + } 578 582 #endif 579 583 580 584 bootloc = extp->TopBottom; ··· 2190 2172 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2191 2173 } 2192 2174 2175 + /* 2176 + * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2177 + */ 2178 + 2179 + struct ppb_lock { 2180 + struct flchip *chip; 2181 + loff_t offset; 2182 + int locked; 2183 + }; 2184 + 2185 + #define MAX_SECTORS 512 2186 + 2187 + #define 
DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2188 + #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2189 + #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2190 + 2191 + static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2192 + struct flchip *chip, 2193 + unsigned long adr, int len, void *thunk) 2194 + { 2195 + struct cfi_private *cfi = map->fldrv_priv; 2196 + unsigned long timeo; 2197 + int ret; 2198 + 2199 + mutex_lock(&chip->mutex); 2200 + ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2201 + if (ret) { 2202 + mutex_unlock(&chip->mutex); 2203 + return ret; 2204 + } 2205 + 2206 + pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2207 + 2208 + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2209 + cfi->device_type, NULL); 2210 + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2211 + cfi->device_type, NULL); 2212 + /* PPB entry command */ 2213 + cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2214 + cfi->device_type, NULL); 2215 + 2216 + if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2217 + chip->state = FL_LOCKING; 2218 + map_write(map, CMD(0xA0), chip->start + adr); 2219 + map_write(map, CMD(0x00), chip->start + adr); 2220 + } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2221 + /* 2222 + * Unlocking of one specific sector is not supported, so we 2223 + * have to unlock all sectors of this device instead 2224 + */ 2225 + chip->state = FL_UNLOCKING; 2226 + map_write(map, CMD(0x80), chip->start); 2227 + map_write(map, CMD(0x30), chip->start); 2228 + } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2229 + chip->state = FL_JEDEC_QUERY; 2230 + /* Return locked status: 0->locked, 1->unlocked */ 2231 + ret = !cfi_read_query(map, adr); 2232 + } else 2233 + BUG(); 2234 + 2235 + /* 2236 + * Wait for some time as unlocking of all sectors takes quite long 2237 + */ 2238 + timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2239 + for (;;) { 2240 + if (chip_ready(map, adr)) 2241 + break; 
2242 + 2243 + if (time_after(jiffies, timeo)) { 2244 + printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2245 + ret = -EIO; 2246 + break; 2247 + } 2248 + 2249 + UDELAY(map, chip, adr, 1); 2250 + } 2251 + 2252 + /* Exit BC commands */ 2253 + map_write(map, CMD(0x90), chip->start); 2254 + map_write(map, CMD(0x00), chip->start); 2255 + 2256 + chip->state = FL_READY; 2257 + put_chip(map, chip, adr + chip->start); 2258 + mutex_unlock(&chip->mutex); 2259 + 2260 + return ret; 2261 + } 2262 + 2263 + static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2264 + uint64_t len) 2265 + { 2266 + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2267 + DO_XXLOCK_ONEBLOCK_LOCK); 2268 + } 2269 + 2270 + static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2271 + uint64_t len) 2272 + { 2273 + struct mtd_erase_region_info *regions = mtd->eraseregions; 2274 + struct map_info *map = mtd->priv; 2275 + struct cfi_private *cfi = map->fldrv_priv; 2276 + struct ppb_lock *sect; 2277 + unsigned long adr; 2278 + loff_t offset; 2279 + uint64_t length; 2280 + int chipnum; 2281 + int i; 2282 + int sectors; 2283 + int ret; 2284 + 2285 + /* 2286 + * PPB unlocking always unlocks all sectors of the flash chip. 2287 + * We need to re-lock all previously locked sectors. So lets 2288 + * first check the locking status of all sectors and save 2289 + * it for future use. 2290 + */ 2291 + sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); 2292 + if (!sect) 2293 + return -ENOMEM; 2294 + 2295 + /* 2296 + * This code to walk all sectors is a slightly modified version 2297 + * of the cfi_varsize_frob() code. 2298 + */ 2299 + i = 0; 2300 + chipnum = 0; 2301 + adr = 0; 2302 + sectors = 0; 2303 + offset = 0; 2304 + length = mtd->size; 2305 + 2306 + while (length) { 2307 + int size = regions[i].erasesize; 2308 + 2309 + /* 2310 + * Only test sectors that shall not be unlocked. 
The other 2311 + * sectors shall be unlocked, so lets keep their locking 2312 + * status at "unlocked" (locked=0) for the final re-locking. 2313 + */ 2314 + if ((adr < ofs) || (adr >= (ofs + len))) { 2315 + sect[sectors].chip = &cfi->chips[chipnum]; 2316 + sect[sectors].offset = offset; 2317 + sect[sectors].locked = do_ppb_xxlock( 2318 + map, &cfi->chips[chipnum], adr, 0, 2319 + DO_XXLOCK_ONEBLOCK_GETLOCK); 2320 + } 2321 + 2322 + adr += size; 2323 + offset += size; 2324 + length -= size; 2325 + 2326 + if (offset == regions[i].offset + size * regions[i].numblocks) 2327 + i++; 2328 + 2329 + if (adr >> cfi->chipshift) { 2330 + adr = 0; 2331 + chipnum++; 2332 + 2333 + if (chipnum >= cfi->numchips) 2334 + break; 2335 + } 2336 + 2337 + sectors++; 2338 + if (sectors >= MAX_SECTORS) { 2339 + printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2340 + MAX_SECTORS); 2341 + kfree(sect); 2342 + return -EINVAL; 2343 + } 2344 + } 2345 + 2346 + /* Now unlock the whole chip */ 2347 + ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2348 + DO_XXLOCK_ONEBLOCK_UNLOCK); 2349 + if (ret) { 2350 + kfree(sect); 2351 + return ret; 2352 + } 2353 + 2354 + /* 2355 + * PPB unlocking always unlocks all sectors of the flash chip. 2356 + * We need to re-lock all previously locked sectors. 2357 + */ 2358 + for (i = 0; i < sectors; i++) { 2359 + if (sect[i].locked) 2360 + do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2361 + DO_XXLOCK_ONEBLOCK_LOCK); 2362 + } 2363 + 2364 + kfree(sect); 2365 + return ret; 2366 + } 2367 + 2368 + static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2369 + uint64_t len) 2370 + { 2371 + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2372 + DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2373 + } 2193 2374 2194 2375 static void cfi_amdstd_sync (struct mtd_info *mtd) 2195 2376 {
+36 -13
drivers/mtd/cmdlinepart.c
··· 22 22 * 23 23 * mtdparts=<mtddef>[;<mtddef] 24 24 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 25 - * where <mtd-id> is the name from the "cat /proc/mtd" command 26 - * <partdef> := <size>[@offset][<name>][ro][lk] 25 + * <partdef> := <size>[@<offset>][<name>][ro][lk] 27 26 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 28 27 * <size> := standard linux memsize OR "-" to denote all remaining space 28 + * size is automatically truncated at end of device 29 + * if specified or trucated size is 0 the part is skipped 30 + * <offset> := standard linux memsize 31 + * if omitted the part will immediately follow the previous part 32 + * or 0 if the first part 29 33 * <name> := '(' NAME ')' 34 + * NAME will appear in /proc/mtd 35 + * 36 + * <size> and <offset> can be specified such that the parts are out of order 37 + * in physical memory and may even overlap. 38 + * 39 + * The parts are assigned MTD numbers in the order they are specified in the 40 + * command line regardless of their order in physical memory. 
30 41 * 31 42 * Examples: 32 43 * ··· 81 70 static struct cmdline_mtd_partition *partitions; 82 71 83 72 /* the command line passed to mtdpart_setup() */ 73 + static char *mtdparts; 84 74 static char *cmdline; 85 75 static int cmdline_parsed; 86 76 ··· 342 330 if (part->parts[i].size == SIZE_REMAINING) 343 331 part->parts[i].size = master->size - offset; 344 332 345 - if (part->parts[i].size == 0) { 346 - printk(KERN_WARNING ERRP 347 - "%s: skipping zero sized partition\n", 348 - part->mtd_id); 349 - part->num_parts--; 350 - memmove(&part->parts[i], &part->parts[i + 1], 351 - sizeof(*part->parts) * (part->num_parts - i)); 352 - continue; 353 - } 354 - 355 333 if (offset + part->parts[i].size > master->size) { 356 334 printk(KERN_WARNING ERRP 357 335 "%s: partitioning exceeds flash size, truncating\n", ··· 349 347 part->parts[i].size = master->size - offset; 350 348 } 351 349 offset += part->parts[i].size; 350 + 351 + if (part->parts[i].size == 0) { 352 + printk(KERN_WARNING ERRP 353 + "%s: skipping zero sized partition\n", 354 + part->mtd_id); 355 + part->num_parts--; 356 + memmove(&part->parts[i], &part->parts[i + 1], 357 + sizeof(*part->parts) * (part->num_parts - i)); 358 + i--; 359 + } 352 360 } 353 361 354 362 *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts, ··· 377 365 * 378 366 * This function needs to be visible for bootloaders. 
379 367 */ 380 - static int mtdpart_setup(char *s) 368 + static int __init mtdpart_setup(char *s) 381 369 { 382 370 cmdline = s; 383 371 return 1; ··· 393 381 394 382 static int __init cmdline_parser_init(void) 395 383 { 384 + if (mtdparts) 385 + mtdpart_setup(mtdparts); 396 386 return register_mtd_parser(&cmdline_parser); 397 387 } 398 388 389 + static void __exit cmdline_parser_exit(void) 390 + { 391 + deregister_mtd_parser(&cmdline_parser); 392 + } 393 + 399 394 module_init(cmdline_parser_init); 395 + module_exit(cmdline_parser_exit); 396 + 397 + MODULE_PARM_DESC(mtdparts, "Partitioning specification"); 398 + module_param(mtdparts, charp, 0); 400 399 401 400 MODULE_LICENSE("GPL"); 402 401 MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+2
drivers/mtd/devices/Makefile
··· 17 17 obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 18 18 obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 19 19 obj-$(CONFIG_MTD_M25P80) += m25p80.o 20 + obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o 20 21 obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o 21 22 obj-$(CONFIG_MTD_SST25L) += sst25l.o 22 23 obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o 24 + 23 25 24 26 CFLAGS_docg3.o += -I$(src)
+38 -17
drivers/mtd/devices/bcm47xxsflash.c
··· 5 5 #include <linux/platform_device.h> 6 6 #include <linux/bcma/bcma.h> 7 7 8 + #include "bcm47xxsflash.h" 9 + 8 10 MODULE_LICENSE("GPL"); 9 11 MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); 10 12 ··· 15 13 static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, 16 14 size_t *retlen, u_char *buf) 17 15 { 18 - struct bcma_sflash *sflash = mtd->priv; 16 + struct bcm47xxsflash *b47s = mtd->priv; 19 17 20 18 /* Check address range */ 21 19 if ((from + len) > mtd->size) 22 20 return -EINVAL; 23 21 24 - memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from), 22 + memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from), 25 23 len); 24 + *retlen = len; 26 25 27 26 return len; 28 27 } 29 28 30 - static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash, 31 - struct mtd_info *mtd) 29 + static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s) 32 30 { 33 - mtd->priv = sflash; 31 + struct mtd_info *mtd = &b47s->mtd; 32 + 33 + mtd->priv = b47s; 34 34 mtd->name = "bcm47xxsflash"; 35 35 mtd->owner = THIS_MODULE; 36 36 mtd->type = MTD_ROM; 37 - mtd->size = sflash->size; 37 + mtd->size = b47s->size; 38 38 mtd->_read = bcm47xxsflash_read; 39 39 40 40 /* TODO: implement writing support and verify/change following code */ ··· 44 40 mtd->writebufsize = mtd->writesize = 1; 45 41 } 46 42 47 - static int bcm47xxsflash_probe(struct platform_device *pdev) 43 + /************************************************** 44 + * BCMA 45 + **************************************************/ 46 + 47 + static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) 48 48 { 49 49 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); 50 + struct bcm47xxsflash *b47s; 50 51 int err; 51 52 52 - sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); 53 - if (!sflash->mtd) { 53 + b47s = kzalloc(sizeof(*b47s), GFP_KERNEL); 54 + if (!b47s) { 54 55 err = -ENOMEM; 55 56 goto out; 56 57 } 57 - bcm47xxsflash_fill_mtd(sflash, sflash->mtd); 
58 + sflash->priv = b47s; 58 59 59 - err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0); 60 + b47s->window = sflash->window; 61 + b47s->blocksize = sflash->blocksize; 62 + b47s->numblocks = sflash->numblocks; 63 + b47s->size = sflash->size; 64 + bcm47xxsflash_fill_mtd(b47s); 65 + 66 + err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0); 60 67 if (err) { 61 68 pr_err("Failed to register MTD device: %d\n", err); 62 69 goto err_dev_reg; ··· 76 61 return 0; 77 62 78 63 err_dev_reg: 79 - kfree(sflash->mtd); 64 + kfree(&b47s->mtd); 80 65 out: 81 66 return err; 82 67 } 83 68 84 - static int bcm47xxsflash_remove(struct platform_device *pdev) 69 + static int bcm47xxsflash_bcma_remove(struct platform_device *pdev) 85 70 { 86 71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); 72 + struct bcm47xxsflash *b47s = sflash->priv; 87 73 88 - mtd_device_unregister(sflash->mtd); 89 - kfree(sflash->mtd); 74 + mtd_device_unregister(&b47s->mtd); 75 + kfree(b47s); 90 76 91 77 return 0; 92 78 } 93 79 94 80 static struct platform_driver bcma_sflash_driver = { 95 - .remove = bcm47xxsflash_remove, 81 + .probe = bcm47xxsflash_bcma_probe, 82 + .remove = bcm47xxsflash_bcma_remove, 96 83 .driver = { 97 84 .name = "bcma_sflash", 98 85 .owner = THIS_MODULE, 99 86 }, 100 87 }; 101 88 89 + /************************************************** 90 + * Init 91 + **************************************************/ 92 + 102 93 static int __init bcm47xxsflash_init(void) 103 94 { 104 95 int err; 105 96 106 - err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe); 97 + err = platform_driver_register(&bcma_sflash_driver); 107 98 if (err) 108 99 pr_err("Failed to register BCMA serial flash driver: %d\n", 109 100 err);
+15
drivers/mtd/devices/bcm47xxsflash.h
··· 1 + #ifndef __BCM47XXSFLASH_H 2 + #define __BCM47XXSFLASH_H 3 + 4 + #include <linux/mtd/mtd.h> 5 + 6 + struct bcm47xxsflash { 7 + u32 window; 8 + u32 blocksize; 9 + u16 numblocks; 10 + u32 size; 11 + 12 + struct mtd_info mtd; 13 + }; 14 + 15 + #endif /* BCM47XXSFLASH */
+404
drivers/mtd/devices/elm.c
··· 1 + /* 2 + * Error Location Module 3 + * 4 + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + */ 17 + 18 + #include <linux/platform_device.h> 19 + #include <linux/module.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/io.h> 22 + #include <linux/of.h> 23 + #include <linux/pm_runtime.h> 24 + #include <linux/platform_data/elm.h> 25 + 26 + #define ELM_IRQSTATUS 0x018 27 + #define ELM_IRQENABLE 0x01c 28 + #define ELM_LOCATION_CONFIG 0x020 29 + #define ELM_PAGE_CTRL 0x080 30 + #define ELM_SYNDROME_FRAGMENT_0 0x400 31 + #define ELM_SYNDROME_FRAGMENT_6 0x418 32 + #define ELM_LOCATION_STATUS 0x800 33 + #define ELM_ERROR_LOCATION_0 0x880 34 + 35 + /* ELM Interrupt Status Register */ 36 + #define INTR_STATUS_PAGE_VALID BIT(8) 37 + 38 + /* ELM Interrupt Enable Register */ 39 + #define INTR_EN_PAGE_MASK BIT(8) 40 + 41 + /* ELM Location Configuration Register */ 42 + #define ECC_BCH_LEVEL_MASK 0x3 43 + 44 + /* ELM syndrome */ 45 + #define ELM_SYNDROME_VALID BIT(16) 46 + 47 + /* ELM_LOCATION_STATUS Register */ 48 + #define ECC_CORRECTABLE_MASK BIT(8) 49 + #define ECC_NB_ERRORS_MASK 0x1f 50 + 51 + /* ELM_ERROR_LOCATION_0-15 Registers */ 52 + #define ECC_ERROR_LOCATION_MASK 0x1fff 53 + 54 + #define ELM_ECC_SIZE 0x7ff 55 + 56 + #define SYNDROME_FRAGMENT_REG_SIZE 0x40 57 + #define ERROR_LOCATION_SIZE 0x100 58 + 59 + struct elm_info { 60 + struct device *dev; 61 + void __iomem *elm_base; 62 + struct 
completion elm_completion; 63 + struct list_head list; 64 + enum bch_ecc bch_type; 65 + }; 66 + 67 + static LIST_HEAD(elm_devices); 68 + 69 + static void elm_write_reg(struct elm_info *info, int offset, u32 val) 70 + { 71 + writel(val, info->elm_base + offset); 72 + } 73 + 74 + static u32 elm_read_reg(struct elm_info *info, int offset) 75 + { 76 + return readl(info->elm_base + offset); 77 + } 78 + 79 + /** 80 + * elm_config - Configure ELM module 81 + * @dev: ELM device 82 + * @bch_type: Type of BCH ecc 83 + */ 84 + void elm_config(struct device *dev, enum bch_ecc bch_type) 85 + { 86 + u32 reg_val; 87 + struct elm_info *info = dev_get_drvdata(dev); 88 + 89 + reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16); 90 + elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val); 91 + info->bch_type = bch_type; 92 + } 93 + EXPORT_SYMBOL(elm_config); 94 + 95 + /** 96 + * elm_configure_page_mode - Enable/Disable page mode 97 + * @info: elm info 98 + * @index: index number of syndrome fragment vector 99 + * @enable: enable/disable flag for page mode 100 + * 101 + * Enable page mode for syndrome fragment index 102 + */ 103 + static void elm_configure_page_mode(struct elm_info *info, int index, 104 + bool enable) 105 + { 106 + u32 reg_val; 107 + 108 + reg_val = elm_read_reg(info, ELM_PAGE_CTRL); 109 + if (enable) 110 + reg_val |= BIT(index); /* enable page mode */ 111 + else 112 + reg_val &= ~BIT(index); /* disable page mode */ 113 + 114 + elm_write_reg(info, ELM_PAGE_CTRL, reg_val); 115 + } 116 + 117 + /** 118 + * elm_load_syndrome - Load ELM syndrome reg 119 + * @info: elm info 120 + * @err_vec: elm error vectors 121 + * @ecc: buffer with calculated ecc 122 + * 123 + * Load syndrome fragment registers with calculated ecc in reverse order. 
124 + */ 125 + static void elm_load_syndrome(struct elm_info *info, 126 + struct elm_errorvec *err_vec, u8 *ecc) 127 + { 128 + int i, offset; 129 + u32 val; 130 + 131 + for (i = 0; i < ERROR_VECTOR_MAX; i++) { 132 + 133 + /* Check error reported */ 134 + if (err_vec[i].error_reported) { 135 + elm_configure_page_mode(info, i, true); 136 + offset = ELM_SYNDROME_FRAGMENT_0 + 137 + SYNDROME_FRAGMENT_REG_SIZE * i; 138 + 139 + /* BCH8 */ 140 + if (info->bch_type) { 141 + 142 + /* syndrome fragment 0 = ecc[9-12B] */ 143 + val = cpu_to_be32(*(u32 *) &ecc[9]); 144 + elm_write_reg(info, offset, val); 145 + 146 + /* syndrome fragment 1 = ecc[5-8B] */ 147 + offset += 4; 148 + val = cpu_to_be32(*(u32 *) &ecc[5]); 149 + elm_write_reg(info, offset, val); 150 + 151 + /* syndrome fragment 2 = ecc[1-4B] */ 152 + offset += 4; 153 + val = cpu_to_be32(*(u32 *) &ecc[1]); 154 + elm_write_reg(info, offset, val); 155 + 156 + /* syndrome fragment 3 = ecc[0B] */ 157 + offset += 4; 158 + val = ecc[0]; 159 + elm_write_reg(info, offset, val); 160 + } else { 161 + /* syndrome fragment 0 = ecc[20-52b] bits */ 162 + val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | 163 + ((ecc[2] & 0xf) << 28); 164 + elm_write_reg(info, offset, val); 165 + 166 + /* syndrome fragment 1 = ecc[0-20b] bits */ 167 + offset += 4; 168 + val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12; 169 + elm_write_reg(info, offset, val); 170 + } 171 + } 172 + 173 + /* Update ecc pointer with ecc byte size */ 174 + ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE; 175 + } 176 + } 177 + 178 + /** 179 + * elm_start_processing - start elm syndrome processing 180 + * @info: elm info 181 + * @err_vec: elm error vectors 182 + * 183 + * Set syndrome valid bit for syndrome fragment registers for which 184 + * elm syndrome fragment registers are loaded. This enables elm module 185 + * to start processing syndrome vectors. 
186 + */ 187 + static void elm_start_processing(struct elm_info *info, 188 + struct elm_errorvec *err_vec) 189 + { 190 + int i, offset; 191 + u32 reg_val; 192 + 193 + /* 194 + * Set syndrome vector valid, so that ELM module 195 + * will process it for vectors error is reported 196 + */ 197 + for (i = 0; i < ERROR_VECTOR_MAX; i++) { 198 + if (err_vec[i].error_reported) { 199 + offset = ELM_SYNDROME_FRAGMENT_6 + 200 + SYNDROME_FRAGMENT_REG_SIZE * i; 201 + reg_val = elm_read_reg(info, offset); 202 + reg_val |= ELM_SYNDROME_VALID; 203 + elm_write_reg(info, offset, reg_val); 204 + } 205 + } 206 + } 207 + 208 + /** 209 + * elm_error_correction - locate correctable error position 210 + * @info: elm info 211 + * @err_vec: elm error vectors 212 + * 213 + * On completion of processing by elm module, error location status 214 + * register updated with correctable/uncorrectable error information. 215 + * In case of correctable errors, number of errors located from 216 + * elm location status register & read the positions from 217 + * elm error location register. 
218 + */ 219 + static void elm_error_correction(struct elm_info *info, 220 + struct elm_errorvec *err_vec) 221 + { 222 + int i, j, errors = 0; 223 + int offset; 224 + u32 reg_val; 225 + 226 + for (i = 0; i < ERROR_VECTOR_MAX; i++) { 227 + 228 + /* Check error reported */ 229 + if (err_vec[i].error_reported) { 230 + offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i; 231 + reg_val = elm_read_reg(info, offset); 232 + 233 + /* Check correctable error or not */ 234 + if (reg_val & ECC_CORRECTABLE_MASK) { 235 + offset = ELM_ERROR_LOCATION_0 + 236 + ERROR_LOCATION_SIZE * i; 237 + 238 + /* Read count of correctable errors */ 239 + err_vec[i].error_count = reg_val & 240 + ECC_NB_ERRORS_MASK; 241 + 242 + /* Update the error locations in error vector */ 243 + for (j = 0; j < err_vec[i].error_count; j++) { 244 + 245 + reg_val = elm_read_reg(info, offset); 246 + err_vec[i].error_loc[j] = reg_val & 247 + ECC_ERROR_LOCATION_MASK; 248 + 249 + /* Update error location register */ 250 + offset += 4; 251 + } 252 + 253 + errors += err_vec[i].error_count; 254 + } else { 255 + err_vec[i].error_uncorrectable = true; 256 + } 257 + 258 + /* Clearing interrupts for processed error vectors */ 259 + elm_write_reg(info, ELM_IRQSTATUS, BIT(i)); 260 + 261 + /* Disable page mode */ 262 + elm_configure_page_mode(info, i, false); 263 + } 264 + } 265 + } 266 + 267 + /** 268 + * elm_decode_bch_error_page - Locate error position 269 + * @dev: device pointer 270 + * @ecc_calc: calculated ECC bytes from GPMC 271 + * @err_vec: elm error vectors 272 + * 273 + * Called with one or more error reported vectors & vectors with 274 + * error reported is updated in err_vec[].error_reported 275 + */ 276 + void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, 277 + struct elm_errorvec *err_vec) 278 + { 279 + struct elm_info *info = dev_get_drvdata(dev); 280 + u32 reg_val; 281 + 282 + /* Enable page mode interrupt */ 283 + reg_val = elm_read_reg(info, ELM_IRQSTATUS); 284 + elm_write_reg(info, 
ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID); 285 + elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK); 286 + 287 + /* Load valid ecc byte to syndrome fragment register */ 288 + elm_load_syndrome(info, err_vec, ecc_calc); 289 + 290 + /* Enable syndrome processing for which syndrome fragment is updated */ 291 + elm_start_processing(info, err_vec); 292 + 293 + /* Wait for ELM module to finish locating error correction */ 294 + wait_for_completion(&info->elm_completion); 295 + 296 + /* Disable page mode interrupt */ 297 + reg_val = elm_read_reg(info, ELM_IRQENABLE); 298 + elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK); 299 + elm_error_correction(info, err_vec); 300 + } 301 + EXPORT_SYMBOL(elm_decode_bch_error_page); 302 + 303 + static irqreturn_t elm_isr(int this_irq, void *dev_id) 304 + { 305 + u32 reg_val; 306 + struct elm_info *info = dev_id; 307 + 308 + reg_val = elm_read_reg(info, ELM_IRQSTATUS); 309 + 310 + /* All error vectors processed */ 311 + if (reg_val & INTR_STATUS_PAGE_VALID) { 312 + elm_write_reg(info, ELM_IRQSTATUS, 313 + reg_val & INTR_STATUS_PAGE_VALID); 314 + complete(&info->elm_completion); 315 + return IRQ_HANDLED; 316 + } 317 + 318 + return IRQ_NONE; 319 + } 320 + 321 + static int elm_probe(struct platform_device *pdev) 322 + { 323 + int ret = 0; 324 + struct resource *res, *irq; 325 + struct elm_info *info; 326 + 327 + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 328 + if (!info) { 329 + dev_err(&pdev->dev, "failed to allocate memory\n"); 330 + return -ENOMEM; 331 + } 332 + 333 + info->dev = &pdev->dev; 334 + 335 + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 336 + if (!irq) { 337 + dev_err(&pdev->dev, "no irq resource defined\n"); 338 + return -ENODEV; 339 + } 340 + 341 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 342 + if (!res) { 343 + dev_err(&pdev->dev, "no memory resource defined\n"); 344 + return -ENODEV; 345 + } 346 + 347 + info->elm_base = devm_request_and_ioremap(&pdev->dev, 
res); 348 + if (!info->elm_base) 349 + return -EADDRNOTAVAIL; 350 + 351 + ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0, 352 + pdev->name, info); 353 + if (ret) { 354 + dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start); 355 + return ret; 356 + } 357 + 358 + pm_runtime_enable(&pdev->dev); 359 + if (pm_runtime_get_sync(&pdev->dev)) { 360 + ret = -EINVAL; 361 + pm_runtime_disable(&pdev->dev); 362 + dev_err(&pdev->dev, "can't enable clock\n"); 363 + return ret; 364 + } 365 + 366 + init_completion(&info->elm_completion); 367 + INIT_LIST_HEAD(&info->list); 368 + list_add(&info->list, &elm_devices); 369 + platform_set_drvdata(pdev, info); 370 + return ret; 371 + } 372 + 373 + static int elm_remove(struct platform_device *pdev) 374 + { 375 + pm_runtime_put_sync(&pdev->dev); 376 + pm_runtime_disable(&pdev->dev); 377 + platform_set_drvdata(pdev, NULL); 378 + return 0; 379 + } 380 + 381 + #ifdef CONFIG_OF 382 + static const struct of_device_id elm_of_match[] = { 383 + { .compatible = "ti,am3352-elm" }, 384 + {}, 385 + }; 386 + MODULE_DEVICE_TABLE(of, elm_of_match); 387 + #endif 388 + 389 + static struct platform_driver elm_driver = { 390 + .driver = { 391 + .name = "elm", 392 + .owner = THIS_MODULE, 393 + .of_match_table = of_match_ptr(elm_of_match), 394 + }, 395 + .probe = elm_probe, 396 + .remove = elm_remove, 397 + }; 398 + 399 + module_platform_driver(elm_driver); 400 + 401 + MODULE_DESCRIPTION("ELM driver for BCH error correction"); 402 + MODULE_AUTHOR("Texas Instruments"); 403 + MODULE_ALIAS("platform: elm"); 404 + MODULE_LICENSE("GPL v2");
+100
drivers/mtd/devices/m25p80.c
··· 565 565 return ret; 566 566 } 567 567 568 + static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 569 + { 570 + struct m25p *flash = mtd_to_m25p(mtd); 571 + uint32_t offset = ofs; 572 + uint8_t status_old, status_new; 573 + int res = 0; 574 + 575 + mutex_lock(&flash->lock); 576 + /* Wait until finished previous command */ 577 + if (wait_till_ready(flash)) { 578 + res = 1; 579 + goto err; 580 + } 581 + 582 + status_old = read_sr(flash); 583 + 584 + if (offset < flash->mtd.size-(flash->mtd.size/2)) 585 + status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0; 586 + else if (offset < flash->mtd.size-(flash->mtd.size/4)) 587 + status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; 588 + else if (offset < flash->mtd.size-(flash->mtd.size/8)) 589 + status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; 590 + else if (offset < flash->mtd.size-(flash->mtd.size/16)) 591 + status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; 592 + else if (offset < flash->mtd.size-(flash->mtd.size/32)) 593 + status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; 594 + else if (offset < flash->mtd.size-(flash->mtd.size/64)) 595 + status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; 596 + else 597 + status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; 598 + 599 + /* Only modify protection if it will not unlock other areas */ 600 + if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) > 601 + (status_old&(SR_BP2|SR_BP1|SR_BP0))) { 602 + write_enable(flash); 603 + if (write_sr(flash, status_new) < 0) { 604 + res = 1; 605 + goto err; 606 + } 607 + } 608 + 609 + err: mutex_unlock(&flash->lock); 610 + return res; 611 + } 612 + 613 + static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 614 + { 615 + struct m25p *flash = mtd_to_m25p(mtd); 616 + uint32_t offset = ofs; 617 + uint8_t status_old, status_new; 618 + int res = 0; 619 + 620 + mutex_lock(&flash->lock); 621 + /* Wait until finished previous command */ 622 + if (wait_till_ready(flash)) { 623 + res = 1; 624 + goto 
err; 625 + } 626 + 627 + status_old = read_sr(flash); 628 + 629 + if (offset+len > flash->mtd.size-(flash->mtd.size/64)) 630 + status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0); 631 + else if (offset+len > flash->mtd.size-(flash->mtd.size/32)) 632 + status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; 633 + else if (offset+len > flash->mtd.size-(flash->mtd.size/16)) 634 + status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; 635 + else if (offset+len > flash->mtd.size-(flash->mtd.size/8)) 636 + status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; 637 + else if (offset+len > flash->mtd.size-(flash->mtd.size/4)) 638 + status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; 639 + else if (offset+len > flash->mtd.size-(flash->mtd.size/2)) 640 + status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; 641 + else 642 + status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; 643 + 644 + /* Only modify protection if it will not lock other areas */ 645 + if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) < 646 + (status_old&(SR_BP2|SR_BP1|SR_BP0))) { 647 + write_enable(flash); 648 + if (write_sr(flash, status_new) < 0) { 649 + res = 1; 650 + goto err; 651 + } 652 + } 653 + 654 + err: mutex_unlock(&flash->lock); 655 + return res; 656 + } 657 + 568 658 /****************************************************************************/ 569 659 570 660 /* ··· 731 641 732 642 /* Everspin */ 733 643 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, 644 + 645 + /* GigaDevice */ 646 + { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, 647 + { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, 734 648 735 649 /* Intel/Numonyx -- xxxs33b */ 736 650 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, ··· 992 898 flash->mtd.size = info->sector_size * info->n_sectors; 993 899 flash->mtd._erase = m25p80_erase; 994 900 flash->mtd._read = m25p80_read; 901 + 902 + /* flash protection support for STmicro chips */ 903 + if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) { 904 + flash->mtd._lock = 
m25p80_lock; 905 + flash->mtd._unlock = m25p80_unlock; 906 + } 995 907 996 908 /* sst flash chips use AAI word program */ 997 909 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
+1 -1
drivers/mtd/maps/Kconfig
··· 429 429 430 430 config MTD_UCLINUX 431 431 bool "Generic uClinux RAM/ROM filesystem support" 432 - depends on MTD_RAM=y && (!MMU || COLDFIRE) 432 + depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE) 433 433 help 434 434 Map driver to support image based filesystems for uClinux. 435 435
+4 -5
drivers/mtd/maps/physmap_of.c
··· 68 68 kfree(info->list[i].res); 69 69 } 70 70 } 71 - 72 - kfree(info); 73 - 74 71 return 0; 75 72 } 76 73 ··· 196 199 map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access"); 197 200 198 201 err = -ENOMEM; 199 - info = kzalloc(sizeof(struct of_flash) + 200 - sizeof(struct of_flash_list) * count, GFP_KERNEL); 202 + info = devm_kzalloc(&dev->dev, 203 + sizeof(struct of_flash) + 204 + sizeof(struct of_flash_list) * count, GFP_KERNEL); 201 205 if (!info) 202 206 goto err_flash_remove; 203 207 ··· 239 241 info->list[i].map.phys = res.start; 240 242 info->list[i].map.size = res_size; 241 243 info->list[i].map.bankwidth = be32_to_cpup(width); 244 + info->list[i].map.device_node = dp; 242 245 243 246 err = -ENOMEM; 244 247 info->list[i].map.virt = ioremap(info->list[i].map.phys,
+25 -5
drivers/mtd/maps/uclinux.c
··· 23 23 24 24 /****************************************************************************/ 25 25 26 + #ifdef CONFIG_MTD_ROM 27 + #define MAP_NAME "rom" 28 + #else 29 + #define MAP_NAME "ram" 30 + #endif 31 + 32 + /* 33 + * Blackfin uses uclinux_ram_map during startup, so it must not be static. 34 + * Provide a dummy declaration to make sparse happy. 35 + */ 36 + extern struct map_info uclinux_ram_map; 37 + 26 38 struct map_info uclinux_ram_map = { 27 - .name = "RAM", 28 - .phys = (unsigned long)__bss_stop, 39 + .name = MAP_NAME, 29 40 .size = 0, 30 41 }; 42 + 43 + static unsigned long physaddr = -1; 44 + module_param(physaddr, ulong, S_IRUGO); 31 45 32 46 static struct mtd_info *uclinux_ram_mtdinfo; 33 47 ··· 74 60 struct map_info *mapp; 75 61 76 62 mapp = &uclinux_ram_map; 63 + 64 + if (physaddr == -1) 65 + mapp->phys = (resource_size_t)__bss_stop; 66 + else 67 + mapp->phys = physaddr; 68 + 77 69 if (!mapp->size) 78 70 mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8)))); 79 71 mapp->bankwidth = 4; 80 72 81 - printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n", 73 + printk("uclinux[mtd]: probe address=0x%x size=0x%x\n", 82 74 (int) mapp->phys, (int) mapp->size); 83 75 84 76 /* ··· 102 82 103 83 simple_map_init(mapp); 104 84 105 - mtd = do_map_probe("map_ram", mapp); 85 + mtd = do_map_probe("map_" MAP_NAME, mapp); 106 86 if (!mtd) { 107 87 printk("uclinux[mtd]: failed to find a mapping?\n"); 108 88 return(-ENXIO); ··· 138 118 139 119 MODULE_LICENSE("GPL"); 140 120 MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>"); 141 - MODULE_DESCRIPTION("Generic RAM based MTD for uClinux"); 121 + MODULE_DESCRIPTION("Generic MTD for uClinux"); 142 122 143 123 /****************************************************************************/
+117 -24
drivers/mtd/nand/atmel_nand.c
··· 101 101 u8 pmecc_corr_cap; 102 102 u16 pmecc_sector_size; 103 103 u32 pmecc_lookup_table_offset; 104 + u32 pmecc_lookup_table_offset_512; 105 + u32 pmecc_lookup_table_offset_1024; 104 106 105 107 int pmecc_bytes_per_sector; 106 108 int pmecc_sector_number; ··· 910 908 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE); 911 909 } 912 910 911 + /* 912 + * Get ECC requirement in ONFI parameters, returns -1 if ONFI 913 + * parameters is not supported. 914 + * return 0 if success to get the ECC requirement. 915 + */ 916 + static int get_onfi_ecc_param(struct nand_chip *chip, 917 + int *ecc_bits, int *sector_size) 918 + { 919 + *ecc_bits = *sector_size = 0; 920 + 921 + if (chip->onfi_params.ecc_bits == 0xff) 922 + /* TODO: the sector_size and ecc_bits need to be find in 923 + * extended ecc parameter, currently we don't support it. 924 + */ 925 + return -1; 926 + 927 + *ecc_bits = chip->onfi_params.ecc_bits; 928 + 929 + /* The default sector size (ecc codeword size) is 512 */ 930 + *sector_size = 512; 931 + 932 + return 0; 933 + } 934 + 935 + /* 936 + * Get ecc requirement from ONFI parameters ecc requirement. 937 + * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function 938 + * will set them according to ONFI ecc requirement. Otherwise, use the 939 + * value in DTS file. 940 + * return 0 if success. otherwise return error code. 
941 + */ 942 + static int pmecc_choose_ecc(struct atmel_nand_host *host, 943 + int *cap, int *sector_size) 944 + { 945 + /* Get ECC requirement from ONFI parameters */ 946 + *cap = *sector_size = 0; 947 + if (host->nand_chip.onfi_version) { 948 + if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size)) 949 + dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n", 950 + *cap, *sector_size); 951 + else 952 + dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n"); 953 + } else { 954 + dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes"); 955 + } 956 + if (*cap == 0 && *sector_size == 0) { 957 + *cap = 2; 958 + *sector_size = 512; 959 + } 960 + 961 + /* If dts file doesn't specify then use the one in ONFI parameters */ 962 + if (host->pmecc_corr_cap == 0) { 963 + /* use the most fitable ecc bits (the near bigger one ) */ 964 + if (*cap <= 2) 965 + host->pmecc_corr_cap = 2; 966 + else if (*cap <= 4) 967 + host->pmecc_corr_cap = 4; 968 + else if (*cap < 8) 969 + host->pmecc_corr_cap = 8; 970 + else if (*cap < 12) 971 + host->pmecc_corr_cap = 12; 972 + else if (*cap < 24) 973 + host->pmecc_corr_cap = 24; 974 + else 975 + return -EINVAL; 976 + } 977 + if (host->pmecc_sector_size == 0) { 978 + /* use the most fitable sector size (the near smaller one ) */ 979 + if (*sector_size >= 1024) 980 + host->pmecc_sector_size = 1024; 981 + else if (*sector_size >= 512) 982 + host->pmecc_sector_size = 512; 983 + else 984 + return -EINVAL; 985 + } 986 + return 0; 987 + } 988 + 913 989 static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev, 914 990 struct atmel_nand_host *host) 915 991 { ··· 996 916 struct resource *regs, *regs_pmerr, *regs_rom; 997 917 int cap, sector_size, err_no; 998 918 919 + err_no = pmecc_choose_ecc(host, &cap, &sector_size); 920 + if (err_no) { 921 + dev_err(host->dev, "The NAND flash's ECC requirement are not support!"); 922 + 
return err_no; 923 + } 924 + 925 + if (cap != host->pmecc_corr_cap || 926 + sector_size != host->pmecc_sector_size) 927 + dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n"); 928 + 999 929 cap = host->pmecc_corr_cap; 1000 930 sector_size = host->pmecc_sector_size; 931 + host->pmecc_lookup_table_offset = (sector_size == 512) ? 932 + host->pmecc_lookup_table_offset_512 : 933 + host->pmecc_lookup_table_offset_1024; 934 + 1001 935 dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n", 1002 936 cap, sector_size); 1003 937 ··· 1309 1215 static int atmel_of_init_port(struct atmel_nand_host *host, 1310 1216 struct device_node *np) 1311 1217 { 1312 - u32 val, table_offset; 1218 + u32 val; 1313 1219 u32 offset[2]; 1314 1220 int ecc_mode; 1315 1221 struct atmel_nand_data *board = &host->board; ··· 1353 1259 1354 1260 /* use PMECC, get correction capability, sector size and lookup 1355 1261 * table offset. 1262 + * If correction bits and sector size are not specified, then find 1263 + * them from NAND ONFI parameters. 
1356 1264 */ 1357 - if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) { 1358 - dev_err(host->dev, "Cannot decide PMECC Capability\n"); 1359 - return -EINVAL; 1360 - } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && 1361 - (val != 24)) { 1362 - dev_err(host->dev, 1363 - "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", 1364 - val); 1365 - return -EINVAL; 1265 + if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) { 1266 + if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && 1267 + (val != 24)) { 1268 + dev_err(host->dev, 1269 + "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", 1270 + val); 1271 + return -EINVAL; 1272 + } 1273 + host->pmecc_corr_cap = (u8)val; 1366 1274 } 1367 - host->pmecc_corr_cap = (u8)val; 1368 1275 1369 - if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) { 1370 - dev_err(host->dev, "Cannot decide PMECC Sector Size\n"); 1371 - return -EINVAL; 1372 - } else if ((val != 512) && (val != 1024)) { 1373 - dev_err(host->dev, 1374 - "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", 1375 - val); 1376 - return -EINVAL; 1276 + if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) { 1277 + if ((val != 512) && (val != 1024)) { 1278 + dev_err(host->dev, 1279 + "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", 1280 + val); 1281 + return -EINVAL; 1282 + } 1283 + host->pmecc_sector_size = (u16)val; 1377 1284 } 1378 - host->pmecc_sector_size = (u16)val; 1379 1285 1380 1286 if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset", 1381 1287 offset, 2) != 0) { 1382 1288 dev_err(host->dev, "Cannot get PMECC lookup table offset\n"); 1383 1289 return -EINVAL; 1384 1290 } 1385 - table_offset = host->pmecc_sector_size == 512 ? 
offset[0] : offset[1]; 1386 - 1387 - if (!table_offset) { 1291 + if (!offset[0] && !offset[1]) { 1388 1292 dev_err(host->dev, "Invalid PMECC lookup table offset\n"); 1389 1293 return -EINVAL; 1390 1294 } 1391 - host->pmecc_lookup_table_offset = table_offset; 1295 + host->pmecc_lookup_table_offset_512 = offset[0]; 1296 + host->pmecc_lookup_table_offset_1024 = offset[1]; 1392 1297 1393 1298 return 0; 1394 1299 }
+4
drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
··· 1 1 #ifndef __BCM47XXNFLASH_H 2 2 #define __BCM47XXNFLASH_H 3 3 4 + #ifndef pr_fmt 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 + #endif 7 + 4 8 #include <linux/mtd/mtd.h> 5 9 #include <linux/mtd/nand.h> 6 10
+6 -8
drivers/mtd/nand/bcm47xxnflash/main.c
··· 9 9 * 10 10 */ 11 11 12 + #include "bcm47xxnflash.h" 13 + 12 14 #include <linux/module.h> 13 15 #include <linux/kernel.h> 14 16 #include <linux/slab.h> 15 17 #include <linux/platform_device.h> 16 18 #include <linux/bcma/bcma.h> 17 - 18 - #include "bcm47xxnflash.h" 19 19 20 20 MODULE_DESCRIPTION("NAND flash driver for BCMA bus"); 21 21 MODULE_LICENSE("GPL"); ··· 77 77 } 78 78 79 79 static struct platform_driver bcm47xxnflash_driver = { 80 + .probe = bcm47xxnflash_probe, 80 81 .remove = bcm47xxnflash_remove, 81 82 .driver = { 82 83 .name = "bcma_nflash", ··· 89 88 { 90 89 int err; 91 90 92 - /* 93 - * Platform device "bcma_nflash" exists on SoCs and is registered very 94 - * early, it won't be added during runtime (use platform_driver_probe). 95 - */ 96 - err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe); 91 + err = platform_driver_register(&bcm47xxnflash_driver); 97 92 if (err) 98 - pr_err("Failed to register serial flash driver: %d\n", err); 93 + pr_err("Failed to register bcm47xx nand flash driver: %d\n", 94 + err); 99 95 100 96 return err; 101 97 }
+2 -2
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
··· 9 9 * 10 10 */ 11 11 12 + #include "bcm47xxnflash.h" 13 + 12 14 #include <linux/module.h> 13 15 #include <linux/kernel.h> 14 16 #include <linux/slab.h> 15 17 #include <linux/bcma/bcma.h> 16 - 17 - #include "bcm47xxnflash.h" 18 18 19 19 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has 20 20 * shown ~1000 retries as maxiumum. */
+5 -19
drivers/mtd/nand/davinci_nand.c
··· 606 606 if (pdev->id < 0 || pdev->id > 3) 607 607 return -ENODEV; 608 608 609 - info = kzalloc(sizeof(*info), GFP_KERNEL); 609 + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 610 610 if (!info) { 611 611 dev_err(&pdev->dev, "unable to allocate memory\n"); 612 612 ret = -ENOMEM; ··· 623 623 goto err_nomem; 624 624 } 625 625 626 - vaddr = ioremap(res1->start, resource_size(res1)); 627 - base = ioremap(res2->start, resource_size(res2)); 626 + vaddr = devm_request_and_ioremap(&pdev->dev, res1); 627 + base = devm_request_and_ioremap(&pdev->dev, res2); 628 628 if (!vaddr || !base) { 629 629 dev_err(&pdev->dev, "ioremap failed\n"); 630 - ret = -EINVAL; 630 + ret = -EADDRNOTAVAIL; 631 631 goto err_ioremap; 632 632 } 633 633 ··· 717 717 } 718 718 info->chip.ecc.mode = ecc_mode; 719 719 720 - info->clk = clk_get(&pdev->dev, "aemif"); 720 + info->clk = devm_clk_get(&pdev->dev, "aemif"); 721 721 if (IS_ERR(info->clk)) { 722 722 ret = PTR_ERR(info->clk); 723 723 dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); ··· 845 845 clk_disable_unprepare(info->clk); 846 846 847 847 err_clk_enable: 848 - clk_put(info->clk); 849 - 850 848 spin_lock_irq(&davinci_nand_lock); 851 849 if (ecc_mode == NAND_ECC_HW_SYNDROME) 852 850 ecc4_busy = false; ··· 853 855 err_ecc: 854 856 err_clk: 855 857 err_ioremap: 856 - if (base) 857 - iounmap(base); 858 - if (vaddr) 859 - iounmap(vaddr); 860 - 861 858 err_nomem: 862 - kfree(info); 863 859 return ret; 864 860 } 865 861 ··· 866 874 ecc4_busy = false; 867 875 spin_unlock_irq(&davinci_nand_lock); 868 876 869 - iounmap(info->base); 870 - iounmap(info->vaddr); 871 - 872 877 nand_release(&info->mtd); 873 878 874 879 clk_disable_unprepare(info->clk); 875 - clk_put(info->clk); 876 - 877 - kfree(info); 878 880 879 881 return 0; 880 882 }
+118 -113
drivers/mtd/nand/fsl_ifc_nand.c
··· 176 176 177 177 ifc_nand_ctrl->page = page_addr; 178 178 /* Program ROW0/COL0 */ 179 - out_be32(&ifc->ifc_nand.row0, page_addr); 180 - out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column); 179 + iowrite32be(page_addr, &ifc->ifc_nand.row0); 180 + iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); 181 181 182 182 buf_num = page_addr & priv->bufnum_mask; 183 183 ··· 239 239 int i; 240 240 241 241 /* set the chip select for NAND Transaction */ 242 - out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT); 242 + iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT, 243 + &ifc->ifc_nand.nand_csel); 243 244 244 245 dev_vdbg(priv->dev, 245 246 "%s: fir0=%08x fcr0=%08x\n", 246 247 __func__, 247 - in_be32(&ifc->ifc_nand.nand_fir0), 248 - in_be32(&ifc->ifc_nand.nand_fcr0)); 248 + ioread32be(&ifc->ifc_nand.nand_fir0), 249 + ioread32be(&ifc->ifc_nand.nand_fcr0)); 249 250 250 251 ctrl->nand_stat = 0; 251 252 252 253 /* start read/write seq */ 253 - out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); 254 + iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); 254 255 255 256 /* wait for command complete flag or timeout */ 256 257 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, ··· 274 273 int sector_end = sector + chip->ecc.steps - 1; 275 274 276 275 for (i = sector / 4; i <= sector_end / 4; i++) 277 - eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]); 276 + eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]); 278 277 279 278 for (i = sector; i <= sector_end; i++) { 280 279 errors = check_read_ecc(mtd, ctrl, eccstat, i); ··· 314 313 315 314 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ 316 315 if (mtd->writesize > 512) { 317 - out_be32(&ifc->ifc_nand.nand_fir0, 318 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 319 - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 320 - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 321 - (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | 322 - 
(IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT)); 323 - out_be32(&ifc->ifc_nand.nand_fir1, 0x0); 316 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 317 + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 318 + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 319 + (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | 320 + (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), 321 + &ifc->ifc_nand.nand_fir0); 322 + iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); 324 323 325 - out_be32(&ifc->ifc_nand.nand_fcr0, 326 - (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | 327 - (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT)); 324 + iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | 325 + (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), 326 + &ifc->ifc_nand.nand_fcr0); 328 327 } else { 329 - out_be32(&ifc->ifc_nand.nand_fir0, 330 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 331 - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 332 - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 333 - (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT)); 334 - out_be32(&ifc->ifc_nand.nand_fir1, 0x0); 328 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 329 + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 330 + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 331 + (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), 332 + &ifc->ifc_nand.nand_fir0); 333 + iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); 335 334 336 335 if (oob) 337 - out_be32(&ifc->ifc_nand.nand_fcr0, 338 - NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT); 336 + iowrite32be(NAND_CMD_READOOB << 337 + IFC_NAND_FCR0_CMD0_SHIFT, 338 + &ifc->ifc_nand.nand_fcr0); 339 339 else 340 - out_be32(&ifc->ifc_nand.nand_fcr0, 341 - NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT); 340 + iowrite32be(NAND_CMD_READ0 << 341 + IFC_NAND_FCR0_CMD0_SHIFT, 342 + &ifc->ifc_nand.nand_fcr0); 342 343 } 343 344 } 344 345 ··· 360 357 switch (command) { 361 358 /* READ0 read the entire buffer to use hardware ECC. 
*/ 362 359 case NAND_CMD_READ0: 363 - out_be32(&ifc->ifc_nand.nand_fbcr, 0); 360 + iowrite32be(0, &ifc->ifc_nand.nand_fbcr); 364 361 set_addr(mtd, 0, page_addr, 0); 365 362 366 363 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; ··· 375 372 376 373 /* READOOB reads only the OOB because no ECC is performed. */ 377 374 case NAND_CMD_READOOB: 378 - out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column); 375 + iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); 379 376 set_addr(mtd, column, page_addr, 1); 380 377 381 378 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; ··· 391 388 if (command == NAND_CMD_PARAM) 392 389 timing = IFC_FIR_OP_RBCD; 393 390 394 - out_be32(&ifc->ifc_nand.nand_fir0, 395 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 396 - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 397 - (timing << IFC_NAND_FIR0_OP2_SHIFT)); 398 - out_be32(&ifc->ifc_nand.nand_fcr0, 399 - command << IFC_NAND_FCR0_CMD0_SHIFT); 400 - out_be32(&ifc->ifc_nand.row3, column); 391 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 392 + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 393 + (timing << IFC_NAND_FIR0_OP2_SHIFT), 394 + &ifc->ifc_nand.nand_fir0); 395 + iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT, 396 + &ifc->ifc_nand.nand_fcr0); 397 + iowrite32be(column, &ifc->ifc_nand.row3); 401 398 402 399 /* 403 400 * although currently it's 8 bytes for READID, we always read 404 401 * the maximum 256 bytes(for PARAM) 405 402 */ 406 - out_be32(&ifc->ifc_nand.nand_fbcr, 256); 403 + iowrite32be(256, &ifc->ifc_nand.nand_fbcr); 407 404 ifc_nand_ctrl->read_bytes = 256; 408 405 409 406 set_addr(mtd, 0, 0, 0); ··· 418 415 419 416 /* ERASE2 uses the block and page address from ERASE1 */ 420 417 case NAND_CMD_ERASE2: 421 - out_be32(&ifc->ifc_nand.nand_fir0, 422 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 423 - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | 424 - (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT)); 418 + iowrite32be((IFC_FIR_OP_CW0 << 
IFC_NAND_FIR0_OP0_SHIFT) | 419 + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | 420 + (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), 421 + &ifc->ifc_nand.nand_fir0); 425 422 426 - out_be32(&ifc->ifc_nand.nand_fcr0, 427 - (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | 428 - (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT)); 423 + iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | 424 + (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), 425 + &ifc->ifc_nand.nand_fcr0); 429 426 430 - out_be32(&ifc->ifc_nand.nand_fbcr, 0); 427 + iowrite32be(0, &ifc->ifc_nand.nand_fbcr); 431 428 ifc_nand_ctrl->read_bytes = 0; 432 429 fsl_ifc_run_command(mtd); 433 430 return; ··· 443 440 (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) | 444 441 (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT); 445 442 446 - out_be32(&ifc->ifc_nand.nand_fir0, 447 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 448 - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 449 - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 450 - (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | 451 - (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT)); 443 + iowrite32be( 444 + (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 445 + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | 446 + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | 447 + (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | 448 + (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT), 449 + &ifc->ifc_nand.nand_fir0); 452 450 } else { 453 451 nand_fcr0 = ((NAND_CMD_PAGEPROG << 454 452 IFC_NAND_FCR0_CMD1_SHIFT) | 455 453 (NAND_CMD_SEQIN << 456 454 IFC_NAND_FCR0_CMD2_SHIFT)); 457 455 458 - out_be32(&ifc->ifc_nand.nand_fir0, 459 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 460 - (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | 461 - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | 462 - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | 463 - (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT)); 464 - out_be32(&ifc->ifc_nand.nand_fir1, 465 - (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT)); 456 + iowrite32be( 457 + 
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 458 + (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | 459 + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | 460 + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | 461 + (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT), 462 + &ifc->ifc_nand.nand_fir0); 463 + iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT, 464 + &ifc->ifc_nand.nand_fir1); 466 465 467 466 if (column >= mtd->writesize) 468 467 nand_fcr0 |= ··· 479 474 column -= mtd->writesize; 480 475 ifc_nand_ctrl->oob = 1; 481 476 } 482 - out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0); 477 + iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0); 483 478 set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); 484 479 return; 485 480 } ··· 487 482 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 488 483 case NAND_CMD_PAGEPROG: { 489 484 if (ifc_nand_ctrl->oob) { 490 - out_be32(&ifc->ifc_nand.nand_fbcr, 491 - ifc_nand_ctrl->index - ifc_nand_ctrl->column); 485 + iowrite32be(ifc_nand_ctrl->index - 486 + ifc_nand_ctrl->column, 487 + &ifc->ifc_nand.nand_fbcr); 492 488 } else { 493 - out_be32(&ifc->ifc_nand.nand_fbcr, 0); 489 + iowrite32be(0, &ifc->ifc_nand.nand_fbcr); 494 490 } 495 491 496 492 fsl_ifc_run_command(mtd); ··· 499 493 } 500 494 501 495 case NAND_CMD_STATUS: 502 - out_be32(&ifc->ifc_nand.nand_fir0, 503 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 504 - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT)); 505 - out_be32(&ifc->ifc_nand.nand_fcr0, 506 - NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT); 507 - out_be32(&ifc->ifc_nand.nand_fbcr, 1); 496 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 497 + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), 498 + &ifc->ifc_nand.nand_fir0); 499 + iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, 500 + &ifc->ifc_nand.nand_fcr0); 501 + iowrite32be(1, &ifc->ifc_nand.nand_fbcr); 508 502 set_addr(mtd, 0, 0, 0); 509 503 ifc_nand_ctrl->read_bytes = 1; 510 504 ··· 518 512 return; 519 513 520 514 case NAND_CMD_RESET: 521 
- out_be32(&ifc->ifc_nand.nand_fir0, 522 - IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT); 523 - out_be32(&ifc->ifc_nand.nand_fcr0, 524 - NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT); 515 + iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, 516 + &ifc->ifc_nand.nand_fir0); 517 + iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, 518 + &ifc->ifc_nand.nand_fcr0); 525 519 fsl_ifc_run_command(mtd); 526 520 return; 527 521 ··· 645 639 u32 nand_fsr; 646 640 647 641 /* Use READ_STATUS command, but wait for the device to be ready */ 648 - out_be32(&ifc->ifc_nand.nand_fir0, 649 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 650 - (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT)); 651 - out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS << 652 - IFC_NAND_FCR0_CMD0_SHIFT); 653 - out_be32(&ifc->ifc_nand.nand_fbcr, 1); 642 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 643 + (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), 644 + &ifc->ifc_nand.nand_fir0); 645 + iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, 646 + &ifc->ifc_nand.nand_fcr0); 647 + iowrite32be(1, &ifc->ifc_nand.nand_fbcr); 654 648 set_addr(mtd, 0, 0, 0); 655 649 ifc_nand_ctrl->read_bytes = 1; 656 650 657 651 fsl_ifc_run_command(mtd); 658 652 659 - nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr); 653 + nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr); 660 654 661 655 /* 662 656 * The chip always seems to report that it is ··· 750 744 uint32_t cs = priv->bank; 751 745 752 746 /* Save CSOR and CSOR_ext */ 753 - csor = in_be32(&ifc->csor_cs[cs].csor); 754 - csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext); 747 + csor = ioread32be(&ifc->csor_cs[cs].csor); 748 + csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext); 755 749 756 750 /* chage PageSize 8K and SpareSize 1K*/ 757 751 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; 758 - out_be32(&ifc->csor_cs[cs].csor, csor_8k); 759 - out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400); 752 + iowrite32be(csor_8k, &ifc->csor_cs[cs].csor); 753 + 
iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext); 760 754 761 755 /* READID */ 762 - out_be32(&ifc->ifc_nand.nand_fir0, 763 - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 764 - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 765 - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 766 - out_be32(&ifc->ifc_nand.nand_fcr0, 767 - NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); 768 - out_be32(&ifc->ifc_nand.row3, 0x0); 756 + iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 757 + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 758 + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), 759 + &ifc->ifc_nand.nand_fir0); 760 + iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, 761 + &ifc->ifc_nand.nand_fcr0); 762 + iowrite32be(0x0, &ifc->ifc_nand.row3); 769 763 770 - out_be32(&ifc->ifc_nand.nand_fbcr, 0x0); 764 + iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr); 771 765 772 766 /* Program ROW0/COL0 */ 773 - out_be32(&ifc->ifc_nand.row0, 0x0); 774 - out_be32(&ifc->ifc_nand.col0, 0x0); 767 + iowrite32be(0x0, &ifc->ifc_nand.row0); 768 + iowrite32be(0x0, &ifc->ifc_nand.col0); 775 769 776 770 /* set the chip select for NAND Transaction */ 777 - out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT); 771 + iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); 778 772 779 773 /* start read seq */ 780 - out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); 774 + iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); 781 775 782 776 /* wait for command complete flag or timeout */ 783 777 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, ··· 787 781 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); 788 782 789 783 /* Restore CSOR and CSOR_ext */ 790 - out_be32(&ifc->csor_cs[cs].csor, csor); 791 - out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext); 784 + iowrite32be(csor, &ifc->csor_cs[cs].csor); 785 + iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext); 792 786 } 793 787 794 788 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) ··· 805 799 
806 800 /* fill in nand_chip structure */ 807 801 /* set up function call table */ 808 - if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) 802 + if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) 809 803 chip->read_byte = fsl_ifc_read_byte16; 810 804 else 811 805 chip->read_byte = fsl_ifc_read_byte; ··· 819 813 chip->bbt_td = &bbt_main_descr; 820 814 chip->bbt_md = &bbt_mirror_descr; 821 815 822 - out_be32(&ifc->ifc_nand.ncfgr, 0x0); 816 + iowrite32be(0x0, &ifc->ifc_nand.ncfgr); 823 817 824 818 /* set up nand options */ 825 819 chip->bbt_options = NAND_BBT_USE_FLASH; 826 820 827 821 828 - if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { 822 + if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { 829 823 chip->read_byte = fsl_ifc_read_byte16; 830 824 chip->options |= NAND_BUSWIDTH_16; 831 825 } else { ··· 838 832 chip->ecc.read_page = fsl_ifc_read_page; 839 833 chip->ecc.write_page = fsl_ifc_write_page; 840 834 841 - csor = in_be32(&ifc->csor_cs[priv->bank].csor); 835 + csor = ioread32be(&ifc->csor_cs[priv->bank].csor); 842 836 843 837 /* Hardware generates ECC per 512 Bytes */ 844 838 chip->ecc.size = 512; ··· 890 884 chip->ecc.mode = NAND_ECC_SOFT; 891 885 } 892 886 893 - ver = in_be32(&ifc->ifc_rev); 887 + ver = ioread32be(&ifc->ifc_rev); 894 888 if (ver == FSL_IFC_V1_1_0) 895 889 fsl_ifc_sram_init(priv); 896 890 ··· 916 910 static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, 917 911 phys_addr_t addr) 918 912 { 919 - u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr); 913 + u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr); 920 914 921 915 if (!(cspr & CSPR_V)) 922 916 return 0; ··· 1003 997 1004 998 dev_set_drvdata(priv->dev, priv); 1005 999 1006 - out_be32(&ifc->ifc_nand.nand_evter_en, 1007 - IFC_NAND_EVTER_EN_OPC_EN | 1008 - IFC_NAND_EVTER_EN_FTOER_EN | 1009 - IFC_NAND_EVTER_EN_WPER_EN); 1000 + iowrite32be(IFC_NAND_EVTER_EN_OPC_EN | 1001 + IFC_NAND_EVTER_EN_FTOER_EN | 1002 + 
IFC_NAND_EVTER_EN_WPER_EN, 1003 + &ifc->ifc_nand.nand_evter_en); 1010 1004 1011 1005 /* enable NAND Machine Interrupts */ 1012 - out_be32(&ifc->ifc_nand.nand_evter_intr_en, 1013 - IFC_NAND_EVTER_INTR_OPCIR_EN | 1014 - IFC_NAND_EVTER_INTR_FTOERIR_EN | 1015 - IFC_NAND_EVTER_INTR_WPERIR_EN); 1016 - 1006 + iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN | 1007 + IFC_NAND_EVTER_INTR_FTOERIR_EN | 1008 + IFC_NAND_EVTER_INTR_WPERIR_EN, 1009 + &ifc->ifc_nand.nand_evter_intr_en); 1017 1010 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); 1018 1011 if (!priv->mtd.name) { 1019 1012 ret = -ENOMEM;
+22
drivers/mtd/nand/gpmi-nand/bch-regs.h
··· 61 61 & BM_BCH_FLASH0LAYOUT0_ECC0) \ 62 62 ) 63 63 64 + #define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10 65 + #define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \ 66 + (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) 67 + #define BF_BCH_FLASH0LAYOUT0_GF(v, x) \ 68 + ((GPMI_IS_MX6Q(x) && ((v) == 14)) \ 69 + ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \ 70 + & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \ 71 + : 0 \ 72 + ) 73 + 64 74 #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 65 75 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 66 76 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) ··· 103 93 & BM_BCH_FLASH0LAYOUT1_ECCN) \ 104 94 ) 105 95 96 + #define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10 97 + #define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \ 98 + (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) 99 + #define BF_BCH_FLASH0LAYOUT1_GF(v, x) \ 100 + ((GPMI_IS_MX6Q(x) && ((v) == 14)) \ 101 + ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \ 102 + & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \ 103 + : 0 \ 104 + ) 105 + 106 106 #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 107 107 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 108 108 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) ··· 123 103 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 124 104 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 125 105 ) 106 + 107 + #define HW_BCH_VERSION 0x00000160 126 108 #endif
+9
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
··· 208 208 } 209 209 210 210 /* start to print out the BCH info */ 211 + pr_err("Show BCH registers :\n"); 212 + for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) { 213 + reg = readl(r->bch_regs + i * 0x10); 214 + pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); 215 + } 211 216 pr_err("BCH Geometry :\n"); 212 217 pr_err("GF length : %u\n", geo->gf_len); 213 218 pr_err("ECC Strength : %u\n", geo->ecc_strength); ··· 237 232 unsigned int metadata_size; 238 233 unsigned int ecc_strength; 239 234 unsigned int page_size; 235 + unsigned int gf_len; 240 236 int ret; 241 237 242 238 if (common_nfc_set_geometry(this)) ··· 248 242 metadata_size = bch_geo->metadata_size; 249 243 ecc_strength = bch_geo->ecc_strength >> 1; 250 244 page_size = bch_geo->page_size; 245 + gf_len = bch_geo->gf_len; 251 246 252 247 ret = gpmi_enable_clk(this); 253 248 if (ret) ··· 270 263 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) 271 264 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) 272 265 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) 266 + | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) 273 267 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), 274 268 r->bch_regs + HW_BCH_FLASH0LAYOUT0); 275 269 276 270 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) 277 271 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) 272 + | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) 278 273 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), 279 274 r->bch_regs + HW_BCH_FLASH0LAYOUT1); 280 275
+39 -24
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
··· 94 94 return round_down(ecc_strength, 2); 95 95 } 96 96 97 + static inline bool gpmi_check_ecc(struct gpmi_nand_data *this) 98 + { 99 + struct bch_geometry *geo = &this->bch_geometry; 100 + 101 + /* Do the sanity check. */ 102 + if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) { 103 + /* The mx23/mx28 only support the GF13. */ 104 + if (geo->gf_len == 14) 105 + return false; 106 + 107 + if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX) 108 + return false; 109 + } else if (GPMI_IS_MX6Q(this)) { 110 + if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX) 111 + return false; 112 + } 113 + return true; 114 + } 115 + 97 116 int common_nfc_set_geometry(struct gpmi_nand_data *this) 98 117 { 99 118 struct bch_geometry *geo = &this->bch_geometry; ··· 131 112 /* The default for the length of Galois Field. */ 132 113 geo->gf_len = 13; 133 114 134 - /* The default for chunk size. There is no oobsize greater then 512. */ 115 + /* The default for chunk size. */ 135 116 geo->ecc_chunk_size = 512; 136 - while (geo->ecc_chunk_size < mtd->oobsize) 117 + while (geo->ecc_chunk_size < mtd->oobsize) { 137 118 geo->ecc_chunk_size *= 2; /* keep C >= O */ 119 + geo->gf_len = 14; 120 + } 138 121 139 122 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; 140 123 141 124 /* We use the same ECC strength for all chunks. */ 142 125 geo->ecc_strength = get_ecc_strength(this); 143 - if (!geo->ecc_strength) { 144 - pr_err("wrong ECC strength.\n"); 126 + if (!gpmi_check_ecc(this)) { 127 + dev_err(this->dev, 128 + "We can not support this nand chip." 129 + " Its required ecc strength(%d) is beyond our" 130 + " capability(%d).\n", geo->ecc_strength, 131 + (GPMI_IS_MX6Q(this) ? 
MX6_ECC_STRENGTH_MAX 132 + : MXS_ECC_STRENGTH_MAX)); 145 133 return -EINVAL; 146 134 } 147 135 ··· 946 920 dma_addr_t auxiliary_phys; 947 921 unsigned int i; 948 922 unsigned char *status; 949 - unsigned int failed; 950 - unsigned int corrected; 923 + unsigned int max_bitflips = 0; 951 924 int ret; 952 925 953 926 pr_debug("page number is : %d\n", page); ··· 970 945 payload_virt, payload_phys); 971 946 if (ret) { 972 947 pr_err("Error in ECC-based read: %d\n", ret); 973 - goto exit_nfc; 948 + return ret; 974 949 } 975 950 976 951 /* handle the block mark swapping */ 977 952 block_mark_swapping(this, payload_virt, auxiliary_virt); 978 953 979 954 /* Loop over status bytes, accumulating ECC status. */ 980 - failed = 0; 981 - corrected = 0; 982 - status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 955 + status = auxiliary_virt + nfc_geo->auxiliary_status_offset; 983 956 984 957 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 985 958 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 986 959 continue; 987 960 988 961 if (*status == STATUS_UNCORRECTABLE) { 989 - failed++; 962 + mtd->ecc_stats.failed++; 990 963 continue; 991 964 } 992 - corrected += *status; 993 - } 994 - 995 - /* 996 - * Propagate ECC status to the owning MTD only when failed or 997 - * corrected times nearly reaches our ECC correction threshold. 
998 - */ 999 - if (failed || corrected >= (nfc_geo->ecc_strength - 1)) { 1000 - mtd->ecc_stats.failed += failed; 1001 - mtd->ecc_stats.corrected += corrected; 965 + mtd->ecc_stats.corrected += *status; 966 + max_bitflips = max_t(unsigned int, max_bitflips, *status); 1002 967 } 1003 968 1004 969 if (oob_required) { ··· 1010 995 this->payload_virt, this->payload_phys, 1011 996 nfc_geo->payload_size, 1012 997 payload_virt, payload_phys); 1013 - exit_nfc: 1014 - return ret; 998 + 999 + return max_bitflips; 1015 1000 } 1016 1001 1017 1002 static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, ··· 1683 1668 release_resources(this); 1684 1669 exit_acquire_resources: 1685 1670 platform_set_drvdata(pdev, NULL); 1686 - kfree(this); 1687 1671 dev_err(this->dev, "driver registration failed: %d\n", ret); 1672 + kfree(this); 1688 1673 1689 1674 return ret; 1690 1675 }
+4
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
··· 284 284 #define STATUS_ERASED 0xff 285 285 #define STATUS_UNCORRECTABLE 0xfe 286 286 287 + /* BCH's bit correction capability. */ 288 + #define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */ 289 + #define MX6_ECC_STRENGTH_MAX 40 290 + 287 291 /* Use the platform_id to distinguish different Archs. */ 288 292 #define IS_MX23 0x0 289 293 #define IS_MX28 0x1
+11
drivers/mtd/nand/mxc_nand.c
··· 530 530 531 531 static void send_read_id_v3(struct mxc_nand_host *host) 532 532 { 533 + struct nand_chip *this = &host->nand; 534 + 533 535 /* Read ID into main buffer */ 534 536 writel(NFC_ID, NFC_V3_LAUNCH); 535 537 536 538 wait_op_done(host, true); 537 539 538 540 memcpy32_fromio(host->data_buf, host->main_area0, 16); 541 + 542 + if (this->options & NAND_BUSWIDTH_16) { 543 + /* compress the ID info */ 544 + host->data_buf[1] = host->data_buf[2]; 545 + host->data_buf[2] = host->data_buf[4]; 546 + host->data_buf[3] = host->data_buf[6]; 547 + host->data_buf[4] = host->data_buf[8]; 548 + host->data_buf[5] = host->data_buf[10]; 549 + } 539 550 } 540 551 541 552 /* Request the NANDFC to perform a read of the NAND device ID. */
+2 -6
drivers/mtd/nand/nand_base.c
··· 825 825 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 826 826 { 827 827 828 - unsigned long timeo = jiffies; 829 828 int status, state = chip->state; 830 - 831 - if (state == FL_ERASING) 832 - timeo += (HZ * 400) / 1000; 833 - else 834 - timeo += (HZ * 20) / 1000; 829 + unsigned long timeo = (state == FL_ERASING ? 400 : 20); 835 830 836 831 led_trigger_event(nand_led_trigger, LED_FULL); 837 832 ··· 844 849 if (in_interrupt() || oops_in_progress) 845 850 panic_nand_wait(mtd, chip, timeo); 846 851 else { 852 + timeo = jiffies + msecs_to_jiffies(timeo); 847 853 while (time_before(jiffies, timeo)) { 848 854 if (chip->dev_ready) { 849 855 if (chip->dev_ready(mtd))
+2 -3
drivers/mtd/nand/nand_ecc.c
··· 55 55 #define MODULE_AUTHOR(x) /* x */ 56 56 #define MODULE_DESCRIPTION(x) /* x */ 57 57 58 - #define printk printf 59 - #define KERN_ERR "" 58 + #define pr_err printf 60 59 #endif 61 60 62 61 /* ··· 506 507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 507 508 return 1; /* error in ECC data; no action needed */ 508 509 509 - printk(KERN_ERR "uncorrectable error : "); 510 + pr_err("%s: uncorrectable ECC error", __func__); 510 511 return -1; 511 512 } 512 513 EXPORT_SYMBOL(__nand_correct_data);
+3 -3
drivers/mtd/nand/nandsim.c
··· 1468 1468 1469 1469 void do_bit_flips(struct nandsim *ns, int num) 1470 1470 { 1471 - if (bitflips && random32() < (1 << 22)) { 1471 + if (bitflips && prandom_u32() < (1 << 22)) { 1472 1472 int flips = 1; 1473 1473 if (bitflips > 1) 1474 - flips = (random32() % (int) bitflips) + 1; 1474 + flips = (prandom_u32() % (int) bitflips) + 1; 1475 1475 while (flips--) { 1476 - int pos = random32() % (num * 8); 1476 + int pos = prandom_u32() % (num * 8); 1477 1477 ns->buf.byte[pos / 8] ^= (1 << (pos % 8)); 1478 1478 NS_WARN("read_page: flipping bit %d in page %d " 1479 1479 "reading from %d ecc: corrected=%u failed=%u\n",
+542 -43
drivers/mtd/nand/omap2.c
··· 22 22 #include <linux/omap-dma.h> 23 23 #include <linux/io.h> 24 24 #include <linux/slab.h> 25 + #include <linux/of.h> 26 + #include <linux/of_device.h> 25 27 26 28 #ifdef CONFIG_MTD_NAND_OMAP_BCH 27 29 #include <linux/bch.h> 30 + #include <linux/platform_data/elm.h> 28 31 #endif 29 32 30 33 #include <linux/platform_data/mtd-nand-omap2.h> ··· 120 117 121 118 #define OMAP24XX_DMA_GPMC 4 122 119 120 + #define BCH8_MAX_ERROR 8 /* upto 8 bit correctable */ 121 + #define BCH4_MAX_ERROR 4 /* upto 4 bit correctable */ 122 + 123 + #define SECTOR_BYTES 512 124 + /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ 125 + #define BCH4_BIT_PAD 4 126 + #define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8) 127 + #define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8) 128 + 129 + /* GPMC ecc engine settings for read */ 130 + #define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */ 131 + #define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */ 132 + #define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */ 133 + #define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */ 134 + #define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */ 135 + 136 + /* GPMC ecc engine settings for write */ 137 + #define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */ 138 + #define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */ 139 + #define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */ 140 + 141 + #ifdef CONFIG_MTD_NAND_OMAP_BCH 142 + static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc, 143 + 0xac, 0x6b, 0xff, 0x99, 0x7b}; 144 + static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10}; 145 + #endif 146 + 123 147 /* oob info generated runtime depending on ecc algorithm and layout selected */ 124 148 static struct nand_ecclayout omap_oobinfo; 125 149 /* Define some generic bad / good block scan pattern which are used ··· 186 156 #ifdef CONFIG_MTD_NAND_OMAP_BCH 187 157 struct bch_control *bch; 188 158 struct nand_ecclayout ecclayout; 159 + bool is_elm_used; 160 + struct device *elm_dev; 161 + 
struct device_node *of_node; 189 162 #endif 190 163 }; 191 164 ··· 1064 1031 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction 1065 1032 * @mtd: MTD device structure 1066 1033 * @mode: Read/Write mode 1034 + * 1035 + * When using BCH, sector size is hardcoded to 512 bytes. 1036 + * Using wrapping mode 6 both for reading and writing if ELM module not uses 1037 + * for error correction. 1038 + * On writing, 1039 + * eccsize0 = 0 (no additional protected byte in spare area) 1040 + * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) 1067 1041 */ 1068 1042 static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) 1069 1043 { ··· 1079 1039 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1080 1040 mtd); 1081 1041 struct nand_chip *chip = mtd->priv; 1082 - u32 val; 1042 + u32 val, wr_mode; 1043 + unsigned int ecc_size1, ecc_size0; 1083 1044 1084 - nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4; 1085 - dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 1086 - nsectors = 1; 1045 + /* Using wrapping mode 6 for writing */ 1046 + wr_mode = BCH_WRAPMODE_6; 1047 + 1087 1048 /* 1088 - * Program GPMC to perform correction on one 512-byte sector at a time. 1089 - * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and 1090 - * gives a slight (5%) performance gain (but requires additional code). 1049 + * ECC engine enabled for valid ecc_size0 nibbles 1050 + * and disabled for ecc_size1 nibbles. 
1091 1051 */ 1052 + ecc_size0 = BCH_ECC_SIZE0; 1053 + ecc_size1 = BCH_ECC_SIZE1; 1054 + 1055 + /* Perform ecc calculation on 512-byte sector */ 1056 + nsectors = 1; 1057 + 1058 + /* Update number of error correction */ 1059 + nerrors = info->nand.ecc.strength; 1060 + 1061 + /* Multi sector reading/writing for NAND flash with page size < 4096 */ 1062 + if (info->is_elm_used && (mtd->writesize <= 4096)) { 1063 + if (mode == NAND_ECC_READ) { 1064 + /* Using wrapping mode 1 for reading */ 1065 + wr_mode = BCH_WRAPMODE_1; 1066 + 1067 + /* 1068 + * ECC engine enabled for ecc_size0 nibbles 1069 + * and disabled for ecc_size1 nibbles. 1070 + */ 1071 + ecc_size0 = (nerrors == 8) ? 1072 + BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0; 1073 + ecc_size1 = (nerrors == 8) ? 1074 + BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1; 1075 + } 1076 + 1077 + /* Perform ecc calculation for one page (< 4096) */ 1078 + nsectors = info->nand.ecc.steps; 1079 + } 1092 1080 1093 1081 writel(ECC1, info->reg.gpmc_ecc_control); 1094 1082 1095 - /* 1096 - * When using BCH, sector size is hardcoded to 512 bytes. 1097 - * Here we are using wrapping mode 6 both for reading and writing, with: 1098 - * size0 = 0 (no additional protected byte in spare area) 1099 - * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) 1100 - */ 1101 - val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT); 1083 + /* Configure ecc size for BCH */ 1084 + val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT); 1102 1085 writel(val, info->reg.gpmc_ecc_size_config); 1086 + 1087 + dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 1103 1088 1104 1089 /* BCH configuration */ 1105 1090 val = ((1 << 16) | /* enable BCH */ 1106 1091 (((nerrors == 8) ? 
1 : 0) << 12) | /* 8 or 4 bits */ 1107 - (0x06 << 8) | /* wrap mode = 6 */ 1092 + (wr_mode << 8) | /* wrap mode */ 1108 1093 (dev_width << 7) | /* bus width */ 1109 1094 (((nsectors-1) & 0x7) << 4) | /* number of sectors */ 1110 1095 (info->gpmc_cs << 1) | /* ECC CS */ ··· 1137 1072 1138 1073 writel(val, info->reg.gpmc_ecc_config); 1139 1074 1140 - /* clear ecc and enable bits */ 1075 + /* Clear ecc and enable bits */ 1141 1076 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control); 1142 1077 } 1143 1078 ··· 1227 1162 } 1228 1163 1229 1164 /** 1165 + * omap3_calculate_ecc_bch - Generate bytes of ECC bytes 1166 + * @mtd: MTD device structure 1167 + * @dat: The pointer to data on which ecc is computed 1168 + * @ecc_code: The ecc_code buffer 1169 + * 1170 + * Support calculating of BCH4/8 ecc vectors for the page 1171 + */ 1172 + static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat, 1173 + u_char *ecc_code) 1174 + { 1175 + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1176 + mtd); 1177 + unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; 1178 + int i, eccbchtsel; 1179 + 1180 + nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; 1181 + /* 1182 + * find BCH scheme used 1183 + * 0 -> BCH4 1184 + * 1 -> BCH8 1185 + */ 1186 + eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3); 1187 + 1188 + for (i = 0; i < nsectors; i++) { 1189 + 1190 + /* Read hw-computed remainder */ 1191 + bch_val1 = readl(info->reg.gpmc_bch_result0[i]); 1192 + bch_val2 = readl(info->reg.gpmc_bch_result1[i]); 1193 + if (eccbchtsel) { 1194 + bch_val3 = readl(info->reg.gpmc_bch_result2[i]); 1195 + bch_val4 = readl(info->reg.gpmc_bch_result3[i]); 1196 + } 1197 + 1198 + if (eccbchtsel) { 1199 + /* BCH8 ecc scheme */ 1200 + *ecc_code++ = (bch_val4 & 0xFF); 1201 + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); 1202 + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); 1203 + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); 1204 + *ecc_code++ = (bch_val3 & 
0xFF); 1205 + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); 1206 + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); 1207 + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); 1208 + *ecc_code++ = (bch_val2 & 0xFF); 1209 + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); 1210 + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); 1211 + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); 1212 + *ecc_code++ = (bch_val1 & 0xFF); 1213 + /* 1214 + * Setting 14th byte to zero to handle 1215 + * erased page & maintain compatibility 1216 + * with RBL 1217 + */ 1218 + *ecc_code++ = 0x0; 1219 + } else { 1220 + /* BCH4 ecc scheme */ 1221 + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); 1222 + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); 1223 + *ecc_code++ = ((bch_val2 & 0xF) << 4) | 1224 + ((bch_val1 >> 28) & 0xF); 1225 + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); 1226 + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); 1227 + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); 1228 + *ecc_code++ = ((bch_val1 & 0xF) << 4); 1229 + /* 1230 + * Setting 8th byte to zero to handle 1231 + * erased page 1232 + */ 1233 + *ecc_code++ = 0x0; 1234 + } 1235 + } 1236 + 1237 + return 0; 1238 + } 1239 + 1240 + /** 1241 + * erased_sector_bitflips - count bit flips 1242 + * @data: data sector buffer 1243 + * @oob: oob buffer 1244 + * @info: omap_nand_info 1245 + * 1246 + * Check the bit flips in erased page falls below correctable level. 1247 + * If falls below, report the page as erased with correctable bit 1248 + * flip, else report as uncorrectable page. 
1249 + */ 1250 + static int erased_sector_bitflips(u_char *data, u_char *oob, 1251 + struct omap_nand_info *info) 1252 + { 1253 + int flip_bits = 0, i; 1254 + 1255 + for (i = 0; i < info->nand.ecc.size; i++) { 1256 + flip_bits += hweight8(~data[i]); 1257 + if (flip_bits > info->nand.ecc.strength) 1258 + return 0; 1259 + } 1260 + 1261 + for (i = 0; i < info->nand.ecc.bytes - 1; i++) { 1262 + flip_bits += hweight8(~oob[i]); 1263 + if (flip_bits > info->nand.ecc.strength) 1264 + return 0; 1265 + } 1266 + 1267 + /* 1268 + * Bit flips falls in correctable level. 1269 + * Fill data area with 0xFF 1270 + */ 1271 + if (flip_bits) { 1272 + memset(data, 0xFF, info->nand.ecc.size); 1273 + memset(oob, 0xFF, info->nand.ecc.bytes); 1274 + } 1275 + 1276 + return flip_bits; 1277 + } 1278 + 1279 + /** 1280 + * omap_elm_correct_data - corrects page data area in case error reported 1281 + * @mtd: MTD device structure 1282 + * @data: page data 1283 + * @read_ecc: ecc read from nand flash 1284 + * @calc_ecc: ecc read from HW ECC registers 1285 + * 1286 + * Calculated ecc vector reported as zero in case of non-error pages. 1287 + * In case of error/erased pages non-zero error vector is reported. 1288 + * In case of non-zero ecc vector, check read_ecc at fixed offset 1289 + * (x = 13/7 in case of BCH8/4 == 0) to find page programmed or not. 1290 + * To handle bit flips in this data, count the number of 0's in 1291 + * read_ecc[x] and check if it greater than 4. If it is less, it is 1292 + * programmed page, else erased page. 1293 + * 1294 + * 1. If page is erased, check with standard ecc vector (ecc vector 1295 + * for erased page to find any bit flip). If check fails, bit flip 1296 + * is present in erased page. Count the bit flips in erased page and 1297 + * if it falls under correctable level, report page with 0xFF and 1298 + * update the correctable bit information. 1299 + * 2. 
If error is reported on programmed page, update elm error 1300 + * vector and correct the page with ELM error correction routine. 1301 + * 1302 + */ 1303 + static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data, 1304 + u_char *read_ecc, u_char *calc_ecc) 1305 + { 1306 + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1307 + mtd); 1308 + int eccsteps = info->nand.ecc.steps; 1309 + int i , j, stat = 0; 1310 + int eccsize, eccflag, ecc_vector_size; 1311 + struct elm_errorvec err_vec[ERROR_VECTOR_MAX]; 1312 + u_char *ecc_vec = calc_ecc; 1313 + u_char *spare_ecc = read_ecc; 1314 + u_char *erased_ecc_vec; 1315 + enum bch_ecc type; 1316 + bool is_error_reported = false; 1317 + 1318 + /* Initialize elm error vector to zero */ 1319 + memset(err_vec, 0, sizeof(err_vec)); 1320 + 1321 + if (info->nand.ecc.strength == BCH8_MAX_ERROR) { 1322 + type = BCH8_ECC; 1323 + erased_ecc_vec = bch8_vector; 1324 + } else { 1325 + type = BCH4_ECC; 1326 + erased_ecc_vec = bch4_vector; 1327 + } 1328 + 1329 + ecc_vector_size = info->nand.ecc.bytes; 1330 + 1331 + /* 1332 + * Remove extra byte padding for BCH8 RBL 1333 + * compatibility and erased page handling 1334 + */ 1335 + eccsize = ecc_vector_size - 1; 1336 + 1337 + for (i = 0; i < eccsteps ; i++) { 1338 + eccflag = 0; /* initialize eccflag */ 1339 + 1340 + /* 1341 + * Check any error reported, 1342 + * In case of error, non zero ecc reported. 1343 + */ 1344 + 1345 + for (j = 0; (j < eccsize); j++) { 1346 + if (calc_ecc[j] != 0) { 1347 + eccflag = 1; /* non zero ecc, error present */ 1348 + break; 1349 + } 1350 + } 1351 + 1352 + if (eccflag == 1) { 1353 + /* 1354 + * Set threshold to minimum of 4, half of ecc.strength/2 1355 + * to allow max bit flip in byte to 4 1356 + */ 1357 + unsigned int threshold = min_t(unsigned int, 4, 1358 + info->nand.ecc.strength / 2); 1359 + 1360 + /* 1361 + * Check data area is programmed by counting 1362 + * number of 0's at fixed offset in spare area. 
1363 + * Checking count of 0's against threshold. 1364 + * In case programmed page expects at least threshold 1365 + * zeros in byte. 1366 + * If zeros are less than threshold for programmed page/ 1367 + * zeros are more than threshold erased page, either 1368 + * case page reported as uncorrectable. 1369 + */ 1370 + if (hweight8(~read_ecc[eccsize]) >= threshold) { 1371 + /* 1372 + * Update elm error vector as 1373 + * data area is programmed 1374 + */ 1375 + err_vec[i].error_reported = true; 1376 + is_error_reported = true; 1377 + } else { 1378 + /* Error reported in erased page */ 1379 + int bitflip_count; 1380 + u_char *buf = &data[info->nand.ecc.size * i]; 1381 + 1382 + if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) { 1383 + bitflip_count = erased_sector_bitflips( 1384 + buf, read_ecc, info); 1385 + 1386 + if (bitflip_count) 1387 + stat += bitflip_count; 1388 + else 1389 + return -EINVAL; 1390 + } 1391 + } 1392 + } 1393 + 1394 + /* Update the ecc vector */ 1395 + calc_ecc += ecc_vector_size; 1396 + read_ecc += ecc_vector_size; 1397 + } 1398 + 1399 + /* Check if any error reported */ 1400 + if (!is_error_reported) 1401 + return 0; 1402 + 1403 + /* Decode BCH error using ELM module */ 1404 + elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec); 1405 + 1406 + for (i = 0; i < eccsteps; i++) { 1407 + if (err_vec[i].error_reported) { 1408 + for (j = 0; j < err_vec[i].error_count; j++) { 1409 + u32 bit_pos, byte_pos, error_max, pos; 1410 + 1411 + if (type == BCH8_ECC) 1412 + error_max = BCH8_ECC_MAX; 1413 + else 1414 + error_max = BCH4_ECC_MAX; 1415 + 1416 + if (info->nand.ecc.strength == BCH8_MAX_ERROR) 1417 + pos = err_vec[i].error_loc[j]; 1418 + else 1419 + /* Add 4 to take care 4 bit padding */ 1420 + pos = err_vec[i].error_loc[j] + 1421 + BCH4_BIT_PAD; 1422 + 1423 + /* Calculate bit position of error */ 1424 + bit_pos = pos % 8; 1425 + 1426 + /* Calculate byte position of error */ 1427 + byte_pos = (error_max - pos - 1) / 8; 1428 + 1429 + if (pos < 
error_max) { 1430 + if (byte_pos < 512) 1431 + data[byte_pos] ^= 1 << bit_pos; 1432 + else 1433 + spare_ecc[byte_pos - 512] ^= 1434 + 1 << bit_pos; 1435 + } 1436 + /* else, not interested to correct ecc */ 1437 + } 1438 + } 1439 + 1440 + /* Update number of correctable errors */ 1441 + stat += err_vec[i].error_count; 1442 + 1443 + /* Update page data with sector size */ 1444 + data += info->nand.ecc.size; 1445 + spare_ecc += ecc_vector_size; 1446 + } 1447 + 1448 + for (i = 0; i < eccsteps; i++) 1449 + /* Return error if uncorrectable error present */ 1450 + if (err_vec[i].error_uncorrectable) 1451 + return -EINVAL; 1452 + 1453 + return stat; 1454 + } 1455 + 1456 + /** 1230 1457 * omap3_correct_data_bch - Decode received data and correct errors 1231 1458 * @mtd: MTD device structure 1232 1459 * @data: page data ··· 1551 1194 } 1552 1195 1553 1196 /** 1197 + * omap_write_page_bch - BCH ecc based write page function for entire page 1198 + * @mtd: mtd info structure 1199 + * @chip: nand chip info structure 1200 + * @buf: data buffer 1201 + * @oob_required: must write chip->oob_poi to OOB 1202 + * 1203 + * Custom write page method evolved to support multi sector writing in one shot 1204 + */ 1205 + static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, 1206 + const uint8_t *buf, int oob_required) 1207 + { 1208 + int i; 1209 + uint8_t *ecc_calc = chip->buffers->ecccalc; 1210 + uint32_t *eccpos = chip->ecc.layout->eccpos; 1211 + 1212 + /* Enable GPMC ecc engine */ 1213 + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); 1214 + 1215 + /* Write data */ 1216 + chip->write_buf(mtd, buf, mtd->writesize); 1217 + 1218 + /* Update ecc vector from GPMC result registers */ 1219 + chip->ecc.calculate(mtd, buf, &ecc_calc[0]); 1220 + 1221 + for (i = 0; i < chip->ecc.total; i++) 1222 + chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1223 + 1224 + /* Write ecc vector to OOB area */ 1225 + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1226 + return 0; 1227 + } 1228 + 1229 + /** 
1230 + * omap_read_page_bch - BCH ecc based page read function for entire page 1231 + * @mtd: mtd info structure 1232 + * @chip: nand chip info structure 1233 + * @buf: buffer to store read data 1234 + * @oob_required: caller requires OOB data read to chip->oob_poi 1235 + * @page: page number to read 1236 + * 1237 + * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module 1238 + * used for error correction. 1239 + * Custom method evolved to support ELM error correction & multi sector 1240 + * reading. On reading page data area is read along with OOB data with 1241 + * ecc engine enabled. ecc vector updated after read of OOB data. 1242 + * For non error pages ecc vector reported as zero. 1243 + */ 1244 + static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, 1245 + uint8_t *buf, int oob_required, int page) 1246 + { 1247 + uint8_t *ecc_calc = chip->buffers->ecccalc; 1248 + uint8_t *ecc_code = chip->buffers->ecccode; 1249 + uint32_t *eccpos = chip->ecc.layout->eccpos; 1250 + uint8_t *oob = &chip->oob_poi[eccpos[0]]; 1251 + uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0]; 1252 + int stat; 1253 + unsigned int max_bitflips = 0; 1254 + 1255 + /* Enable GPMC ecc engine */ 1256 + chip->ecc.hwctl(mtd, NAND_ECC_READ); 1257 + 1258 + /* Read data */ 1259 + chip->read_buf(mtd, buf, mtd->writesize); 1260 + 1261 + /* Read oob bytes */ 1262 + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1); 1263 + chip->read_buf(mtd, oob, chip->ecc.total); 1264 + 1265 + /* Calculate ecc bytes */ 1266 + chip->ecc.calculate(mtd, buf, ecc_calc); 1267 + 1268 + memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total); 1269 + 1270 + stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc); 1271 + 1272 + if (stat < 0) { 1273 + mtd->ecc_stats.failed++; 1274 + } else { 1275 + mtd->ecc_stats.corrected += stat; 1276 + max_bitflips = max_t(unsigned int, max_bitflips, stat); 1277 + } 1278 + 1279 + return max_bitflips; 1280 + } 1281 + 1282 + /** 1554 1283 * 
omap3_free_bch - Release BCH ecc resources 1555 1284 * @mtd: MTD device structure 1556 1285 */ ··· 1661 1218 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1662 1219 mtd); 1663 1220 #ifdef CONFIG_MTD_NAND_OMAP_BCH8 1664 - const int hw_errors = 8; 1221 + const int hw_errors = BCH8_MAX_ERROR; 1665 1222 #else 1666 - const int hw_errors = 4; 1223 + const int hw_errors = BCH4_MAX_ERROR; 1667 1224 #endif 1225 + enum bch_ecc bch_type; 1226 + const __be32 *parp; 1227 + int lenp; 1228 + struct device_node *elm_node; 1229 + 1668 1230 info->bch = NULL; 1669 1231 1670 - max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4; 1232 + max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 1233 + BCH8_MAX_ERROR : BCH4_MAX_ERROR; 1671 1234 if (max_errors != hw_errors) { 1672 1235 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported", 1673 1236 max_errors, hw_errors); 1674 1237 goto fail; 1675 1238 } 1676 1239 1677 - /* software bch library is only used to detect and locate errors */ 1678 - info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */); 1679 - if (!info->bch) 1680 - goto fail; 1240 + info->nand.ecc.size = 512; 1241 + info->nand.ecc.hwctl = omap3_enable_hwecc_bch; 1242 + info->nand.ecc.mode = NAND_ECC_HW; 1243 + info->nand.ecc.strength = max_errors; 1681 1244 1682 - info->nand.ecc.size = 512; 1683 - info->nand.ecc.hwctl = omap3_enable_hwecc_bch; 1684 - info->nand.ecc.correct = omap3_correct_data_bch; 1685 - info->nand.ecc.mode = NAND_ECC_HW; 1245 + if (hw_errors == BCH8_MAX_ERROR) 1246 + bch_type = BCH8_ECC; 1247 + else 1248 + bch_type = BCH4_ECC; 1686 1249 1687 - /* 1688 - * The number of corrected errors in an ecc block that will trigger 1689 - * block scrubbing defaults to the ecc strength (4 or 8). 1690 - * Set mtd->bitflip_threshold here to define a custom threshold. 
1691 - */ 1692 - 1693 - if (max_errors == 8) { 1694 - info->nand.ecc.strength = 8; 1695 - info->nand.ecc.bytes = 13; 1696 - info->nand.ecc.calculate = omap3_calculate_ecc_bch8; 1250 + /* Detect availability of ELM module */ 1251 + parp = of_get_property(info->of_node, "elm_id", &lenp); 1252 + if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { 1253 + pr_err("Missing elm_id property, fall back to Software BCH\n"); 1254 + info->is_elm_used = false; 1697 1255 } else { 1698 - info->nand.ecc.strength = 4; 1699 - info->nand.ecc.bytes = 7; 1700 - info->nand.ecc.calculate = omap3_calculate_ecc_bch4; 1256 + struct platform_device *pdev; 1257 + 1258 + elm_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1259 + pdev = of_find_device_by_node(elm_node); 1260 + info->elm_dev = &pdev->dev; 1261 + elm_config(info->elm_dev, bch_type); 1262 + info->is_elm_used = true; 1263 + } 1264 + 1265 + if (info->is_elm_used && (mtd->writesize <= 4096)) { 1266 + 1267 + if (hw_errors == BCH8_MAX_ERROR) 1268 + info->nand.ecc.bytes = BCH8_SIZE; 1269 + else 1270 + info->nand.ecc.bytes = BCH4_SIZE; 1271 + 1272 + info->nand.ecc.correct = omap_elm_correct_data; 1273 + info->nand.ecc.calculate = omap3_calculate_ecc_bch; 1274 + info->nand.ecc.read_page = omap_read_page_bch; 1275 + info->nand.ecc.write_page = omap_write_page_bch; 1276 + } else { 1277 + /* 1278 + * software bch library is only used to detect and 1279 + * locate errors 1280 + */ 1281 + info->bch = init_bch(13, max_errors, 1282 + 0x201b /* hw polynomial */); 1283 + if (!info->bch) 1284 + goto fail; 1285 + 1286 + info->nand.ecc.correct = omap3_correct_data_bch; 1287 + 1288 + /* 1289 + * The number of corrected errors in an ecc block that will 1290 + * trigger block scrubbing defaults to the ecc strength (4 or 8) 1291 + * Set mtd->bitflip_threshold here to define a custom threshold. 
1292 + */ 1293 + 1294 + if (max_errors == 8) { 1295 + info->nand.ecc.bytes = 13; 1296 + info->nand.ecc.calculate = omap3_calculate_ecc_bch8; 1297 + } else { 1298 + info->nand.ecc.bytes = 7; 1299 + info->nand.ecc.calculate = omap3_calculate_ecc_bch4; 1300 + } 1701 1301 } 1702 1302 1703 1303 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors); ··· 1756 1270 */ 1757 1271 static int omap3_init_bch_tail(struct mtd_info *mtd) 1758 1272 { 1759 - int i, steps; 1273 + int i, steps, offset; 1760 1274 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1761 1275 mtd); 1762 1276 struct nand_ecclayout *layout = &info->ecclayout; ··· 1778 1292 goto fail; 1779 1293 } 1780 1294 1295 + /* ECC layout compatible with RBL for BCH8 */ 1296 + if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) 1297 + offset = 2; 1298 + else 1299 + offset = mtd->oobsize - layout->eccbytes; 1300 + 1781 1301 /* put ecc bytes at oob tail */ 1782 1302 for (i = 0; i < layout->eccbytes; i++) 1783 - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; 1303 + layout->eccpos[i] = offset + i; 1784 1304 1785 - layout->oobfree[0].offset = 2; 1305 + if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) 1306 + layout->oobfree[0].offset = 2 + layout->eccbytes * steps; 1307 + else 1308 + layout->oobfree[0].offset = 2; 1309 + 1786 1310 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; 1787 1311 info->nand.ecc.layout = layout; 1788 1312 ··· 1856 1360 1857 1361 info->nand.options = pdata->devsize; 1858 1362 info->nand.options |= NAND_SKIP_BBTSCAN; 1363 + #ifdef CONFIG_MTD_NAND_OMAP_BCH 1364 + info->of_node = pdata->of_node; 1365 + #endif 1859 1366 1860 1367 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1861 1368 if (res == NULL) {
+7
drivers/mtd/ofpart.c
··· 174 174 return rc; 175 175 } 176 176 177 + static void __exit ofpart_parser_exit(void) 178 + { 179 + deregister_mtd_parser(&ofpart_parser); 180 + deregister_mtd_parser(&ofoldpart_parser); 181 + } 182 + 177 183 module_init(ofpart_parser_init); 184 + module_exit(ofpart_parser_exit); 178 185 179 186 MODULE_LICENSE("GPL"); 180 187 MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+5 -5
drivers/mtd/tests/mtd_nandecctest.c
··· 44 44 static void single_bit_error_data(void *error_data, void *correct_data, 45 45 size_t size) 46 46 { 47 - unsigned int offset = random32() % (size * BITS_PER_BYTE); 47 + unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE); 48 48 49 49 memcpy(error_data, correct_data, size); 50 50 __change_bit_le(offset, error_data); ··· 55 55 { 56 56 unsigned int offset[2]; 57 57 58 - offset[0] = random32() % (size * BITS_PER_BYTE); 58 + offset[0] = prandom_u32() % (size * BITS_PER_BYTE); 59 59 do { 60 - offset[1] = random32() % (size * BITS_PER_BYTE); 60 + offset[1] = prandom_u32() % (size * BITS_PER_BYTE); 61 61 } while (offset[0] == offset[1]); 62 62 63 63 memcpy(error_data, correct_data, size); ··· 68 68 69 69 static unsigned int random_ecc_bit(size_t size) 70 70 { 71 - unsigned int offset = random32() % (3 * BITS_PER_BYTE); 71 + unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE); 72 72 73 73 if (size == 256) { 74 74 /* ··· 76 76 * and 17th bit) in ECC code for 256 byte data block 77 77 */ 78 78 while (offset == 16 || offset == 17) 79 - offset = random32() % (3 * BITS_PER_BYTE); 79 + offset = prandom_u32() % (3 * BITS_PER_BYTE); 80 80 } 81 81 82 82 return offset;
+4 -4
drivers/mtd/tests/mtd_stresstest.c
··· 55 55 unsigned int eb; 56 56 57 57 again: 58 - eb = random32(); 58 + eb = prandom_u32(); 59 59 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */ 60 60 eb %= (ebcnt - 1); 61 61 if (bbt[eb]) ··· 67 67 { 68 68 unsigned int offs; 69 69 70 - offs = random32(); 70 + offs = prandom_u32(); 71 71 offs %= bufsize; 72 72 return offs; 73 73 } ··· 76 76 { 77 77 unsigned int len; 78 78 79 - len = random32(); 79 + len = prandom_u32(); 80 80 len %= (bufsize - offs); 81 81 return len; 82 82 } ··· 191 191 192 192 static int do_operation(void) 193 193 { 194 - if (random32() & 1) 194 + if (prandom_u32() & 1) 195 195 return do_read(); 196 196 else 197 197 return do_write();
+11 -14
drivers/mtd/tests/mtd_torturetest.c
··· 208 208 static int __init tort_init(void) 209 209 { 210 210 int err = 0, i, infinite = !cycles_count; 211 - int bad_ebs[ebcnt]; 211 + int *bad_ebs; 212 212 213 213 printk(KERN_INFO "\n"); 214 214 printk(KERN_INFO "=================================================\n"); ··· 250 250 251 251 err = -ENOMEM; 252 252 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); 253 - if (!patt_5A5) { 254 - pr_err("error: cannot allocate memory\n"); 253 + if (!patt_5A5) 255 254 goto out_mtd; 256 - } 257 255 258 256 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); 259 - if (!patt_A5A) { 260 - pr_err("error: cannot allocate memory\n"); 257 + if (!patt_A5A) 261 258 goto out_patt_5A5; 262 - } 263 259 264 260 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); 265 - if (!patt_FF) { 266 - pr_err("error: cannot allocate memory\n"); 261 + if (!patt_FF) 267 262 goto out_patt_A5A; 268 - } 269 263 270 264 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); 271 - if (!check_buf) { 272 - pr_err("error: cannot allocate memory\n"); 265 + if (!check_buf) 273 266 goto out_patt_FF; 274 - } 267 + 268 + bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL); 269 + if (!bad_ebs) 270 + goto out_check_buf; 275 271 276 272 err = 0; 277 273 ··· 286 290 /* 287 291 * Check if there is a bad eraseblock among those we are going to test. 288 292 */ 289 - memset(&bad_ebs[0], 0, sizeof(int) * ebcnt); 290 293 if (mtd_can_have_bb(mtd)) { 291 294 for (i = eb; i < eb + ebcnt; i++) { 292 295 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); ··· 389 394 390 395 pr_info("finished after %u erase cycles\n", 391 396 erase_cycles); 397 + kfree(bad_ebs); 398 + out_check_buf: 392 399 kfree(check_buf); 393 400 out_patt_FF: 394 401 kfree(patt_FF);
+3 -3
drivers/mtd/ubi/debug.h
··· 86 86 static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) 87 87 { 88 88 if (ubi->dbg.emulate_bitflips) 89 - return !(random32() % 200); 89 + return !(prandom_u32() % 200); 90 90 return 0; 91 91 } 92 92 ··· 100 100 static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi) 101 101 { 102 102 if (ubi->dbg.emulate_io_failures) 103 - return !(random32() % 500); 103 + return !(prandom_u32() % 500); 104 104 return 0; 105 105 } 106 106 ··· 114 114 static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) 115 115 { 116 116 if (ubi->dbg.emulate_io_failures) 117 - return !(random32() % 400); 117 + return !(prandom_u32() % 400); 118 118 return 0; 119 119 } 120 120
+1
include/linux/bcma/bcma_driver_chipcommon.h
··· 528 528 u32 size; 529 529 530 530 struct mtd_info *mtd; 531 + void *priv; 531 532 }; 532 533 #endif 533 534
+7 -2
include/linux/mtd/map.h
··· 245 245 unsigned long pfow_base; 246 246 unsigned long map_priv_1; 247 247 unsigned long map_priv_2; 248 + struct device_node *device_node; 248 249 void *fldrv_priv; 249 250 struct mtd_chip_driver *fldrv; 250 251 }; ··· 329 328 330 329 static inline map_word map_word_load(struct map_info *map, const void *ptr) 331 330 { 332 - map_word r = {{0} }; 331 + map_word r; 333 332 334 333 if (map_bankwidth_is_1(map)) 335 334 r.x[0] = *(unsigned char *)ptr; ··· 343 342 #endif 344 343 else if (map_bankwidth_is_large(map)) 345 344 memcpy(r.x, ptr, map->bankwidth); 345 + else 346 + BUG(); 346 347 347 348 return r; 348 349 } ··· 394 391 395 392 static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) 396 393 { 397 - map_word uninitialized_var(r); 394 + map_word r; 398 395 399 396 if (map_bankwidth_is_1(map)) 400 397 r.x[0] = __raw_readb(map->virt + ofs); ··· 428 425 #endif 429 426 else if (map_bankwidth_is_large(map)) 430 427 memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); 428 + else 429 + BUG(); 431 430 mb(); 432 431 } 433 432
+54
include/linux/platform_data/elm.h
··· 1 + /* 2 + * BCH Error Location Module 3 + * 4 + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + */ 17 + 18 + #ifndef __ELM_H 19 + #define __ELM_H 20 + 21 + enum bch_ecc { 22 + BCH4_ECC = 0, 23 + BCH8_ECC, 24 + }; 25 + 26 + /* ELM support 8 error syndrome process */ 27 + #define ERROR_VECTOR_MAX 8 28 + 29 + #define BCH8_ECC_OOB_BYTES 13 30 + #define BCH4_ECC_OOB_BYTES 7 31 + /* RBL requires 14 byte even though BCH8 uses only 13 byte */ 32 + #define BCH8_SIZE (BCH8_ECC_OOB_BYTES + 1) 33 + /* Uses 1 extra byte to handle erased pages */ 34 + #define BCH4_SIZE (BCH4_ECC_OOB_BYTES + 1) 35 + 36 + /** 37 + * struct elm_errorvec - error vector for elm 38 + * @error_reported: set true for vectors error is reported 39 + * @error_uncorrectable: number of uncorrectable errors 40 + * @error_count: number of correctable errors in the sector 41 + * @error_loc: buffer for error location 42 + * 43 + */ 44 + struct elm_errorvec { 45 + bool error_reported; 46 + bool error_uncorrectable; 47 + int error_count; 48 + int error_loc[ERROR_VECTOR_MAX]; 49 + }; 50 + 51 + void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, 52 + struct elm_errorvec *err_vec); 53 + void elm_config(struct device *dev, enum bch_ecc bch_type); 54 + #endif /* __ELM_H */